diff --git a/Makefile b/Makefile
index 36dc1390c..f35843c5c 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,4 @@ gqlgen:
go run scripts/gqlgen.go
build:
- packr2 build
-
-build-win:
- CGO_ENABLED=1 GOOS=windows GOARCH=amd64 packr2 build -o stash.exe -v
\ No newline at end of file
+ CGO_ENABLED=1 packr2 build -mod=vendor -v
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 8f04f3e09..cf5a0ba38 100644
--- a/go.mod
+++ b/go.mod
@@ -16,5 +16,4 @@ require (
github.com/spf13/afero v1.2.0 // indirect
github.com/vektah/gqlparser v1.1.0
golang.org/x/image v0.0.0-20190118043309-183bebdce1b2 // indirect
- golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
)
diff --git a/go.sum b/go.sum
index f54e95c35..0c2d3ef83 100644
--- a/go.sum
+++ b/go.sum
@@ -7,19 +7,13 @@ git.apache.org/thrift.git v0.0.0-20180924222215-a9235805469b/go.mod h1:fPE2ZNJGy
github.com/99designs/gqlgen v0.4.5-0.20190127090136-055fb4bc9a6a h1:oTsAt8YXjEk1fo7uZR7gya1jrH48oPulx5oF6zWTHRw=
github.com/99designs/gqlgen v0.4.5-0.20190127090136-055fb4bc9a6a/go.mod h1:st7qHA6ssU3uRZkmv+wzrzgX4srvIqEIdE5iuRW8GhE=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Boostport/migration v0.15.0/go.mod h1:cT0NWVRLid2n9b2K8mPSeT5nWT2gt4rEtVmlhvo2QB8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/DATA-DOG/go-sqlmock v1.3.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
-github.com/Masterminds/squirrel v1.1.0 h1:baP1qLdoQCeTw3ifCdOq2dkYc6vGcmRdaociKLbEJXs=
-github.com/Masterminds/squirrel v1.1.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
-github.com/OpenPeeDeeP/depguard v0.0.0-20180806142446-a69c782687b2/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/agnivade/levenshtein v1.0.1 h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
@@ -27,12 +21,10 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
-github.com/apache/calcite-avatica-go/v3 v3.2.0/go.mod h1:Kipaz+iNp/AUBrUPxiLGjn1Km6PLkG0jZoL9VnwFMhg=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.15.54/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTSaiQ=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
@@ -41,7 +33,6 @@ github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMe
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
@@ -69,7 +60,6 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -78,67 +68,30 @@ github.com/fsouza/fake-gcs-server v1.3.0/go.mod h1:Lq+43m2znsXfDKHnQMfdA0HpYYAEJ
github.com/go-chi/chi v3.3.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-chi/chi v4.0.1+incompatible h1:RSRC5qmFPtO90t7pTL0DBMNpZFsb/sHF3RXVlDgFisA=
github.com/go-chi/chi v4.0.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-critic/checkers v0.0.0-20181204210945-97246d3b3c67/go.mod h1:Cg5JCP9M6m93z6fecpRcVgD2lZf2RvPtb85ldjiShZc=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.39.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-lintpack/lintpack v0.5.1/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-toolsmith/astcast v0.0.0-20181028201508-b7a89ed70af1/go.mod h1:TEo3Ghaj7PsZawQHxT/oBvo4HK/sl1RcuUHDKTTju+o=
-github.com/go-toolsmith/astcopy v0.0.0-20180903214859-79b422d080c4/go.mod h1:c9CPdq2AzM8oPomdlPniEfPAC6g1s7NqZzODt8y6ib8=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v0.0.0-20181120203407-5122569a890b/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/strparse v0.0.0-20180903215201-830b6daa1241/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v0.0.0-20181030061450-d63dc7650676/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY=
github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4=
-github.com/gobuffalo/buffalo v0.13.1/go.mod h1:K9c22KLfDz7obgxvHv1amvJtCQEZNiox9+q6FDJ1Zcs=
-github.com/gobuffalo/buffalo v0.13.2/go.mod h1:vA8I4Dwcfkx7RAzIRHVDZxfS3QJR7muiOjX4r8P2/GE=
-github.com/gobuffalo/buffalo v0.13.4/go.mod h1:y2jbKkO0k49OrNIOAkbWQiPBqxAFpHn5OKnkc7BDh+I=
-github.com/gobuffalo/buffalo v0.13.5/go.mod h1:hPcP12TkFSZmT3gUVHZ24KRhTX3deSgu6QSgn0nbWf4=
-github.com/gobuffalo/buffalo v0.13.6/go.mod h1:/Pm0MPLusPhWDayjRD+/vKYnelScIiv0sX9YYek0wpg=
-github.com/gobuffalo/buffalo v0.13.7/go.mod h1:3gQwZhI8DSbqmDqlFh7kfwuv/wd40rqdVxXtFWlCQHw=
-github.com/gobuffalo/buffalo v0.13.9/go.mod h1:vIItiQkTHq46D1p+bw8mFc5w3BwrtJhMvYjSIYK3yjE=
-github.com/gobuffalo/buffalo v0.13.12/go.mod h1:Y9e0p0cdo/eI+lHm7EFzlkc9YzjwGo5QeDj+FbsyqVA=
github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs=
github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4=
github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY=
github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w=
-github.com/gobuffalo/buffalo-plugins v1.6.1/go.mod h1:/XZt7UuuDnx5P4v3cStK0+XoYiNOA2f0wDIsm1oLJQA=
github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI=
github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI=
-github.com/gobuffalo/buffalo-plugins v1.6.6/go.mod h1:hSWAEkJyL9RENJlmanMivgnNkrQ9RC4xJARz8dQryi0=
github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk=
github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw=
-github.com/gobuffalo/buffalo-plugins v1.6.10/go.mod h1:HxzPZjAEzh9H0gnHelObxxrut9O+1dxydf7U93SYsc8=
github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q=
-github.com/gobuffalo/buffalo-plugins v1.7.2/go.mod h1:vEbx30cLFeeZ48gBA/rkhbqC2M/2JpsKs5CoESWhkPw=
-github.com/gobuffalo/buffalo-plugins v1.8.1/go.mod h1:vu71J3fD4b7KKywJQ1tyaJGtahG837Cj6kgbxX0e4UI=
github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960=
github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U=
-github.com/gobuffalo/buffalo-plugins v1.9.3/go.mod h1:BNRunDThMZKjqx6R+n14Rk3sRSOWgbMuzCKXLqbd7m0=
github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI=
github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI=
github.com/gobuffalo/buffalo-plugins v1.11.0 h1:yZ6USaSdAKpogRS8DZJgeG7/CTPGmyhplwifphmmegw=
github.com/gobuffalo/buffalo-plugins v1.11.0/go.mod h1:rtIvAYRjYibgmWhnjKmo7OadtnxuMG5ZQLr25ozAzjg=
-github.com/gobuffalo/buffalo-plugins v1.12.0 h1:5rvYQ7mwfPwUW9zqcMd9ahWtPVOOouMKZjv88q45Z7c=
-github.com/gobuffalo/buffalo-plugins v1.12.0/go.mod h1:kw4Mj2vQXqe4X5TI36PEQgswbL30heGQwJEeDKd1v+4=
github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8=
-github.com/gobuffalo/buffalo-pop v1.1.2/go.mod h1:czNLXcYbg5/fjr+uht0NyjZaQ0V2W23H1jzyORgCzQ4=
-github.com/gobuffalo/buffalo-pop v1.1.5/go.mod h1:H01JIg42XwOHS4gRMhSeDZqBovNVlfBUsVXckU617s4=
-github.com/gobuffalo/buffalo-pop v1.1.8/go.mod h1:1uaxOFzzVud/zR5f1OEBr21tMVLQS3OZpQ1A5cr0svE=
-github.com/gobuffalo/buffalo-pop v1.1.13/go.mod h1:47GQoBjCMcl5Pw40iCWHQYJvd0HsT9kdaOPWgnzHzk4=
-github.com/gobuffalo/buffalo-pop v1.1.14/go.mod h1:sAMh6+s7wytCn5cHqZIuItJbAqzvs6M7FemLexl+pwc=
-github.com/gobuffalo/buffalo-pop v1.1.15/go.mod h1:vnvvxhbEFAaEbac9E2ZPjsBeL7WHkma2UyKNVA4y9Wo=
-github.com/gobuffalo/buffalo-pop v1.2.1/go.mod h1:SHqojN0bVzaAzCbQDdWtsib202FDIxqwmCO8VDdweF4=
-github.com/gobuffalo/buffalo-pop v1.3.0/go.mod h1:P0PhA225dRGyv0WkgYjYKqgoxPdDPDFZDvHj60AGF5w=
-github.com/gobuffalo/buffalo-pop v1.6.0/go.mod h1:vrEVNOBKe042HjSNMj72J4FgER/VG6lt4xW6WMpTdlY=
github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc=
github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
@@ -152,24 +105,14 @@ github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2
github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw=
github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ=
github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs=
-github.com/gobuffalo/events v1.1.1/go.mod h1:Ia9OgHMco9pEhJaPrPQJ4u4+IZlkxYVco2VbJ2XgnAE=
github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk=
github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A=
github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0=
-github.com/gobuffalo/events v1.1.6/go.mod h1:H/3ZB9BA+WorMb/0F79UvU6u0Cyo2hU97WA51bG2ONY=
github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY=
github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8=
github.com/gobuffalo/events v1.1.9 h1:ukq5ys/h0TuiX7eLJyZBD1dJOy0r19JTEYmgXKG9j+Y=
github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM=
-github.com/gobuffalo/events v1.2.0 h1:YovlMNcwNTfIm/3OdB+KemDOm8yUz4XIH+4kbMhGXWw=
-github.com/gobuffalo/events v1.2.0/go.mod h1:pxvpvsKXKZNPtHuIxUV3K+g+KP5o4forzaeFj++bh68=
github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc=
-github.com/gobuffalo/fizz v1.0.15/go.mod h1:EI3mEpjImuji6Bwu++N2uXhljQwOhwtimZQJ89zwyF4=
-github.com/gobuffalo/fizz v1.0.16/go.mod h1:EI3mEpjImuji6Bwu++N2uXhljQwOhwtimZQJ89zwyF4=
-github.com/gobuffalo/fizz v1.1.2/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc=
-github.com/gobuffalo/fizz v1.1.3/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc=
-github.com/gobuffalo/fizz v1.3.0/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc=
-github.com/gobuffalo/fizz v1.5.0/go.mod h1:Uu3ch14M4S7LDU7LAP1GQ+KNCRmZYd05Gqasc96XLa0=
github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
@@ -177,8 +120,6 @@ github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE
github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
-github.com/gobuffalo/flect v0.0.0-20181108195648-8fe1b44cfe32/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA=
-github.com/gobuffalo/flect v0.0.0-20181109221320-179d36177b5b/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI=
github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI=
github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk=
github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk=
@@ -190,17 +131,12 @@ github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjM
github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA=
github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA=
github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA=
-github.com/gobuffalo/genny v0.0.0-20181019144442-df0a36fdd146/go.mod h1:IyRrGrQb/sbHu/0z9i5mbpZroIsdxjCYfj+zFiFiWZQ=
github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE=
github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI=
github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA=
github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w=
-github.com/gobuffalo/genny v0.0.0-20181030163439-ed103521b8ec/go.mod h1:3Xm9z7/2oRxlB7PSPLxvadZ60/0UIek1YWmcC7QSaVs=
github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU=
github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU=
-github.com/gobuffalo/genny v0.0.0-20181109163038-9539921b620f/go.mod h1:118bnhJR2oviiji++mZj0IH/IaFBCzwkWHaI4OQq5hQ=
-github.com/gobuffalo/genny v0.0.0-20181110202416-7b7d8756a9e2/go.mod h1:118bnhJR2oviiji++mZj0IH/IaFBCzwkWHaI4OQq5hQ=
-github.com/gobuffalo/genny v0.0.0-20181111200257-599b33630ab4/go.mod h1:w+iD/cdtIpPDFax6LlUFuCdXFD0DLRUXsfp3IeT/Doc=
github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng=
github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E=
github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ=
@@ -213,23 +149,15 @@ github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU
github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY=
github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5 h1:boQS3dA9PxhyufJEWIILrG6pJQbDnpwP2rFyvWacdoY=
github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo=
-github.com/gobuffalo/genny v0.0.0-20190124191459-3310289fa4b4 h1:0hgER6ADOc40ws1xYtrSjYq7OQqMz/LsgaGooEz9RqY=
-github.com/gobuffalo/genny v0.0.0-20190124191459-3310289fa4b4/go.mod h1:yIRqxhZV2sAzb+B3iPUMLauTRrYP8tJUlZ1zV9teKik=
github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I=
github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY=
github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI=
github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E=
-github.com/gobuffalo/httptest v1.0.3/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E=
-github.com/gobuffalo/httptest v1.0.4/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E=
-github.com/gobuffalo/httptest v1.0.5/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E=
-github.com/gobuffalo/httptest v1.0.6/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E=
github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w=
github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw=
github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0=
github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk=
-github.com/gobuffalo/licenser v0.0.0-20181116224424-1b7fd3f9cbb4/go.mod h1:icHYfF2FVDi6CpI8BK9Sy1ChkSijz/0GNN7Qzzdk6JE=
github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE=
-github.com/gobuffalo/licenser v0.0.0-20181128170751-82cc989582b9/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE=
github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU=
github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM=
github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo=
@@ -246,37 +174,21 @@ github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9h
github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM=
github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE=
github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg=
-github.com/gobuffalo/meta v0.0.0-20181109154556-f76929ccd5fa/go.mod h1:1rYI5QsanV6cLpT1BlTAkrFi9rtCZrGkvSK8PglwfS8=
github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE=
-github.com/gobuffalo/meta v0.0.0-20181116202903-8850e47774f5/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE=
github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8=
github.com/gobuffalo/meta v0.0.0-20190120163247-50bbb1fa260d h1:cP3lJDiGboBok8q6axF0rqWjlg/MOCfgjuwuNp5TlhE=
github.com/gobuffalo/meta v0.0.0-20190120163247-50bbb1fa260d/go.mod h1:KKsH44nIK2gA8p0PJmRT9GvWJUdphkDUA8AJEvFWiqM=
-github.com/gobuffalo/meta v0.0.0-20190121163014-ecaa953cbfb3/go.mod h1:KLfkGnS+Tucc+iTkUcAUBtxpwOJGfhw2pHRLddPxMQY=
-github.com/gobuffalo/meta v0.0.0-20190126124307-c8fb6f4eb5a9 h1:fCkQorKjSY55bEwmkK58ZY9ECD2ZioRoG6KCriSL9Zw=
-github.com/gobuffalo/meta v0.0.0-20190126124307-c8fb6f4eb5a9/go.mod h1:zoh6GLgkk9+iI/62dST4amAuVAczZrBXoAk/t64n7Ew=
github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0=
-github.com/gobuffalo/mw-basicauth v1.0.6/go.mod h1:RFyeGeDLZlVgp/eBflqu2eavFqyv0j0fVVP87WPYFwY=
-github.com/gobuffalo/mw-basicauth v1.0.7/go.mod h1:xJ9/OSiOWl+kZkjaSun62srODr3Cx8OB4AKr+G4FlS4=
github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No=
-github.com/gobuffalo/mw-contenttype v0.0.0-20190129203934-2554e742333b/go.mod h1:7x87+mDrr9Peh7AqhOtESyJLanMd2zQNz2Hts+vtBoE=
github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo=
-github.com/gobuffalo/mw-csrf v0.0.0-20190129204204-25460a055517/go.mod h1:o5u+nnN0Oa7LBeDYH9QP36qeMPnXV9qbVnbZ4D+Kb0Q=
github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ=
github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4=
-github.com/gobuffalo/mw-i18n v0.0.0-20181027200759-09e0c99be4d3/go.mod h1:1PpGPgqP8VsfUppgBA9FrTOXjI6X9gjqhh/8dmg48lg=
-github.com/gobuffalo/mw-i18n v0.0.0-20190129204410-552713a3ebb4/go.mod h1:rBg2eHxsyxVjtYra6fGy4GSF5C8NysOvz+Znnzk42EM=
github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME=
-github.com/gobuffalo/mw-paramlogger v0.0.0-20190129202837-395da1998525/go.mod h1:gEo/ABCsKqvpp/KCxN2AIzDEe0OJUXbJ9293FYrXw+w=
github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c=
-github.com/gobuffalo/mw-tokenauth v0.0.0-20190129201951-95847f29c5c8/go.mod h1:n2oa93LHGD94hGI+PoJO+6cf60DNrXrAIv9L/Ke3GXc=
github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc=
github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc=
github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc=
-github.com/gobuffalo/packd v0.0.0-20181028162033-6d52e0eabf41/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
-github.com/gobuffalo/packd v0.0.0-20181029140631-cf76bd87a5a6/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
-github.com/gobuffalo/packd v0.0.0-20181103221656-16c4ed88b296/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI=
@@ -287,16 +199,9 @@ github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKF
github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24=
github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI=
github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE=
-github.com/gobuffalo/packr v1.16.0/go.mod h1:Yx/lcR/7mDLXhuJSzsz2MauD/HUwSc+EK6oigMRGGsM=
github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU=
github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw=
github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0=
-github.com/gobuffalo/packr v1.21.5/go.mod h1:zCvDxrZzFmq5Xd7Jw4vaGe/OYwzuXnma31D2EbTHMWk=
-github.com/gobuffalo/packr v1.21.7/go.mod h1:73tmYjwi4Cvb1eNiAwpmrzZ0gxVA4KBqVSZ2FNeJodM=
-github.com/gobuffalo/packr v1.21.9 h1:zBaEhCmJpYy/UdHGAGIC3vO5Uh7RW091le41+Ydcg4E=
-github.com/gobuffalo/packr v1.21.9/go.mod h1:GC76q6nMzRtR+AEN/VV4w0z2/4q7SOaEmXh3Ooa8sOE=
-github.com/gobuffalo/packr/v2 v2.0.0-rc.5/go.mod h1:e6gmOfhf3KmT4zl2X/NDRSfBXk2oV4TXZ+NNOM0xwt8=
-github.com/gobuffalo/packr/v2 v2.0.0-rc.7/go.mod h1:BzhceHWfF3DMAkbPUONHYWs63uacCZxygFY1b4H9N2A=
github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes=
github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc=
github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w=
@@ -314,7 +219,6 @@ github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5s
github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI=
github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI=
github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI=
-github.com/gobuffalo/plush v3.7.33+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI=
github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4=
github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs=
github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0=
@@ -322,26 +226,14 @@ github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCo
github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.8.5+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.8.7+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.8.8+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.0+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.1+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.5+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
-github.com/gobuffalo/pop v4.9.6+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg=
github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4=
github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4=
github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug=
-github.com/gobuffalo/release v1.0.51/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug=
github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug=
github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA=
github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc=
github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24=
-github.com/gobuffalo/release v1.0.63/go.mod h1:/7hQAikt0l8Iu/tAX7slC1qiOhD6Nb+3KMmn/htiUfc=
github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU=
-github.com/gobuffalo/release v1.0.74/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU=
github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg=
github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E=
github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0=
@@ -358,23 +250,11 @@ github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw
github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM=
github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc=
github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY=
-github.com/gobuffalo/x v0.0.0-20181025165825-f204f550da9d/go.mod h1:Qh2Pb/Ak1Ko2mzHlGPigrnxkhO4WTTCI1jJM58sbgtE=
-github.com/gobuffalo/x v0.0.0-20181025192250-1ef645d63fe8/go.mod h1:AIlnMGlYXOCsoCntLPFLYtrJNS/pc2HD4IdSXH62TpU=
-github.com/gobuffalo/x v0.0.0-20181109195216-5b3131238124/go.mod h1:GpdLUY6/Ztf/3FfxfwsLkDqAGZ0brhlh7LzIibHyZp0=
-github.com/gobuffalo/x v0.0.0-20181110221217-14085ca3e1a9/go.mod h1:ig5vdn4+5IPtxgESlZWo1SSDyHKKef8EjVVKhY9kkIQ=
-github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gocql/gocql v0.0.0-20181124151448-70385f88b28b/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
-github.com/gocraft/dbr v0.0.0-20181029195440-042fe86dc2da h1:iBCx9/LR++diJWHizvo5tuFH7jeJ2+X5SSA0Fb/i8Kk=
-github.com/gocraft/dbr v0.0.0-20181029195440-042fe86dc2da/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM=
-github.com/gocraft/dbr v0.0.0-20190131145710-48a049970bd2 h1:zPA5FYTrmWSPMYWc3xJOTMGiqqm8lrdawrQqnamsw6w=
-github.com/gocraft/dbr v0.0.0-20190131145710-48a049970bd2/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM=
github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang-migrate/migrate v3.5.4+incompatible h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA=
-github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
github.com/golang-migrate/migrate/v4 v4.2.2 h1:m9WF3B3yge1mKm5+/q6C3qPETMWqphrod3+osb+sP8A=
github.com/golang-migrate/migrate/v4 v4.2.2/go.mod h1:JRwdki93/aFawDXMUM4GcRu/FAIfyw+1Kuyd9vkbaeA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -384,29 +264,6 @@ github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181003203344-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/go-tools v0.0.0-20180902103155-93eecd106a0b/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20181105071733-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.12.5/go.mod h1:iMfuFWFYJ1CZxlMQfNWvPj3c22PuyUkw9RQ1UfhDFDk=
-github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo=
-github.com/golangci/govet v0.0.0-20180818181408-44ddbe260190/go.mod h1:pPwb+AK755h3/r73avHz5bEN6sa51/2HEZlLaV53hCo=
-github.com/golangci/ineffassign v0.0.0-20180808204949-2ee8f2867dde/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/interfacer v0.0.0-20180902080945-01958817a6ec/go.mod h1:yBorupihJ5OYDFE7/EZwrslyNyZaaidqqVptYTcNxnk=
-github.com/golangci/lint v0.0.0-20170908181259-c2187e7932b5/go.mod h1:zs8jPuoOp76KrjiydDqO3CGeS4v9gq77HNNiYcxxTGw=
-github.com/golangci/lint v0.0.0-20180902080404-c2187e7932b5/go.mod h1:zs8jPuoOp76KrjiydDqO3CGeS4v9gq77HNNiYcxxTGw=
-github.com/golangci/lint-1 v0.0.0-20180610141402-4bf9709227d1/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/tools v0.0.0-20180902102414-2cefd77fef9b/go.mod h1:zgj6NOYXOC1cexsdtDceI4/mj3aXK4JOVg9AV3C5LWI=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/golangci/unparam v0.0.0-20180902112548-7ad9dbcccc16/go.mod h1:KW2L33j82vo0S0U6RP6uUQSuat+0Q457Yf+1mXC98/M=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -417,7 +274,6 @@ github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w=
@@ -427,18 +283,14 @@ github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/h2non/filetype v1.0.6 h1:g84/+gdkAT1hnYO+tHpCLoikm13Ju55OkN4KCb1uGEQ=
github.com/h2non/filetype v1.0.6/go.mod h1:isekKqOuhMj+s/7r3rIeTErIRy4Rub5uBWHfvMusLMU=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
-github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
-github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU=
@@ -464,21 +316,13 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kshvakov/clickhouse v1.3.4/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM=
github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM=
github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c=
-github.com/markbates/going v1.0.3/go.mod h1:fQiT6v6yQar9UD6bd/D4Z5Afbk9J6BBVBtLiyY4gp2o=
github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs=
-github.com/markbates/grift v1.0.5/go.mod h1:EHmVIjOQoj/OOBDzlZ8RW0ZkvOtQ4xRHjrPvmfoiFaU=
github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c=
github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88=
github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk=
@@ -490,33 +334,24 @@ github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsI
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc=
-github.com/markbates/refresh v1.4.11/go.mod h1:awpJuyo4zgexB/JaHfmBX0sRdvOjo2dXwIayWIz9i3g=
-github.com/markbates/refresh v1.5.0/go.mod h1:ZYMLkxV+x7wXQ2Xd7bXAPyF0EXiEWAMfiy/4URYb1+M=
github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc=
github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
-github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/mapstructure v0.0.0-20180203102830-a4e142e9c047/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mongodb/mongo-go-driver v0.1.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q=
-github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -528,7 +363,6 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
@@ -545,29 +379,19 @@ github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZY
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
-github.com/rubenv/sql-migrate v0.0.0-20181213081019-5a8808c14925 h1:Kd1g/YuXjhiyHrGlppC2X3UTOEt9oHRU/yeHDKnyPZA=
-github.com/rubenv/sql-migrate v0.0.0-20181213081019-5a8808c14925/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go v0.0.0-20190121191506-3fef8c783dec/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
-github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
-github.com/shurcooL/highlight_go v0.0.0-20181215221002-9d8641ddf2e1/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
-github.com/shurcooL/octicon v0.0.0-20181222203144-9ff1a4cf27f4/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20180121065927-ffb13db8def0/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
@@ -578,24 +402,17 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
-github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI=
-github.com/spf13/viper v1.3.0/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -604,13 +421,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA=
github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA=
-github.com/unrolled/secure v0.0.0-20181022170031-4b6b7cf51606/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA=
-github.com/unrolled/secure v0.0.0-20190103195806-76e6d4e9b90c/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vektah/dataloaden v0.2.0/go.mod h1:vxM6NuRlgiR0M6wbVTJeKp9vQIs81ZMfCYO+4yq/jbE=
@@ -618,10 +431,8 @@ github.com/vektah/gqlparser v1.1.0 h1:3668p2gUlO+PiS81x957Rpr3/FPRWG6cxgCXAvTS1h
github.com/vektah/gqlparser v1.1.0/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-github.com/xinsnake/go-http-digest-auth-client v0.4.0/go.mod h1:QK1t1v7ylyGb363vGWu+6Irh7gyFj+N7+UZzM0L6g8I=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
-golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -630,7 +441,6 @@ golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -638,9 +448,6 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190122013713-64072686203f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664 h1:YbZJ76lQ1BqNhVe7dKTSB67wDrc2VPRR75IyGyyPDX8=
-golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190118043309-183bebdce1b2 h1:FNSSV4jv1PrPsiM2iKGpqLPPgYACqh9Muav7Pollk1k=
golang.org/x/image v0.0.0-20190118043309-183bebdce1b2/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
@@ -659,19 +466,13 @@ golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181213202711-891ebc4b82d6/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190119204137-ed066c81e75e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -688,32 +489,22 @@ golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181019084534-8f1d3d21f81b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181213150753-586ba8c9bb14/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190108104531-7fbe1cd0fcc2/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190116161447-11f53e031339 h1:g/Jesu8+QLnA0CPzF3E1pURg0Byr7i6jLoX5sqjcAh0=
golang.org/x/sys v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU=
-golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180826000951-f6ba57429505/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180924175601-e93be7f42f9f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -722,49 +513,32 @@ golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181019005945-6adeb8aab2de/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030151751-bb28844c46df/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181102223251-96e9e165b75e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181109152631-138c20b93253/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181109202920-92d8274bd7b8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181111003725-6d71ab8aade0/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181120060634-fc4f04983f62/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181122213734-04b5d21e00f1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181201035826-d0ca3933b724/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181213190329-bbccd8cae4a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181220024903-92cdcd90bf52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190108222858-421f03a57a64/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190118193359-16909d206f00 h1:6OmoTtlNJlHuWNIjTEyUtMBHrryp8NRuf/XtnC7MmXM=
golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190124004107-78ee07aa9465/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6 h1:iZgcI2DDp6zW5v9Z/5+f0NuqoxNdmzg4hivjk2WLXpY=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190130190128-9bdeaddf5f7f h1:phwpKT9f+doEU8H+Khk7QtSFIIzNwRqtcj7hzVhTP1Y=
-golang.org/x/tools v0.0.0-20190130190128-9bdeaddf5f7f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20180921000521-920bb1beccf7/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181015145326-625cd1887957/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -784,17 +558,9 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
-gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=
-gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.39.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
-gopkg.in/jcmturner/gokrb5.v6 v6.0.1/go.mod h1:NFjHNLrHQiruory+EmqDXCGv6CrjkeYeA+bR9mIfNFk=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
-gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
@@ -804,7 +570,3 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
sourcegraph.com/sourcegraph/appdash v0.0.0-20180110180208-2cc67fd64755/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
-sourcegraph.com/sourcegraph/go-diff v0.0.0-20171119081133-3f415a150aec/go.mod h1:R09mWeb9JcPbO+A3cYDc11xjz0wp6r9+KnqdqROAoRU=
-sourcegraph.com/sqs/pbtypes v0.0.0-20160107090929-4d1b9dc7ffc3/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-upper.io/db.v3 v3.5.7+incompatible h1:3MJSnJQ+NMxBxuNwO+gOKFiugwv+f61LbyuZYSPzoi4=
-upper.io/db.v3 v3.5.7+incompatible/go.mod h1:FgTdD24eBjJAbPKsQSiHUNgXjOR4Lub3u1UMHSIh82Y=
diff --git a/internal/api/api-packr.go b/internal/api/api-packr.go
index c5fb4b9ce..b98ecf78e 100644
--- a/internal/api/api-packr.go
+++ b/internal/api/api-packr.go
@@ -1,3 +1,4 @@
+// +build !skippackr
// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT.
// You can use the "packr clean" command to clean up this,
diff --git a/internal/database/database-packr.go b/internal/database/database-packr.go
index d6f8ff90b..624f4de8f 100644
--- a/internal/database/database-packr.go
+++ b/internal/database/database-packr.go
@@ -1,3 +1,4 @@
+// +build !skippackr
// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT.
// You can use the "packr clean" command to clean up this,
diff --git a/packrd/packed-packr.go b/packrd/packed-packr.go
index 24848ddf5..b9124ef03 100644
--- a/packrd/packed-packr.go
+++ b/packrd/packed-packr.go
@@ -1,3 +1,4 @@
+// +build !skippackr
// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT.
// You can use the "packr2 clean" command to clean up this,
diff --git a/vendor/github.com/99designs/gqlgen/LICENSE b/vendor/github.com/99designs/gqlgen/LICENSE
new file mode 100644
index 000000000..18e1b2493
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2018 Adam Scarr
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/99designs/gqlgen/complexity/complexity.go b/vendor/github.com/99designs/gqlgen/complexity/complexity.go
new file mode 100644
index 000000000..d5b46bf45
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/complexity/complexity.go
@@ -0,0 +1,104 @@
+package complexity
+
+import (
+ "github.com/99designs/gqlgen/graphql"
+ "github.com/vektah/gqlparser/ast"
+)
+
+func Calculate(es graphql.ExecutableSchema, op *ast.OperationDefinition, vars map[string]interface{}) int {
+ walker := complexityWalker{
+ es: es,
+ schema: es.Schema(),
+ vars: vars,
+ }
+ return walker.selectionSetComplexity(op.SelectionSet)
+}
+
+type complexityWalker struct {
+ es graphql.ExecutableSchema
+ schema *ast.Schema
+ vars map[string]interface{}
+}
+
+func (cw complexityWalker) selectionSetComplexity(selectionSet ast.SelectionSet) int {
+ var complexity int
+ for _, selection := range selectionSet {
+ switch s := selection.(type) {
+ case *ast.Field:
+ fieldDefinition := cw.schema.Types[s.Definition.Type.Name()]
+ var childComplexity int
+ switch fieldDefinition.Kind {
+ case ast.Object, ast.Interface, ast.Union:
+ childComplexity = cw.selectionSetComplexity(s.SelectionSet)
+ }
+
+ args := s.ArgumentMap(cw.vars)
+ var fieldComplexity int
+ if s.ObjectDefinition.Kind == ast.Interface {
+ fieldComplexity = cw.interfaceFieldComplexity(s.ObjectDefinition, s.Name, childComplexity, args)
+ } else {
+ fieldComplexity = cw.fieldComplexity(s.ObjectDefinition.Name, s.Name, childComplexity, args)
+ }
+ complexity = safeAdd(complexity, fieldComplexity)
+
+ case *ast.FragmentSpread:
+ complexity = safeAdd(complexity, cw.selectionSetComplexity(s.Definition.SelectionSet))
+
+ case *ast.InlineFragment:
+ complexity = safeAdd(complexity, cw.selectionSetComplexity(s.SelectionSet))
+ }
+ }
+ return complexity
+}
+
+func (cw complexityWalker) interfaceFieldComplexity(def *ast.Definition, field string, childComplexity int, args map[string]interface{}) int {
+ // Interfaces don't have their own separate field costs, so they have to assume the worst case.
+ // We iterate over all implementors and choose the most expensive one.
+ maxComplexity := 0
+ implementors := cw.schema.GetPossibleTypes(def)
+ for _, t := range implementors {
+ fieldComplexity := cw.fieldComplexity(t.Name, field, childComplexity, args)
+ if fieldComplexity > maxComplexity {
+ maxComplexity = fieldComplexity
+ }
+ }
+ return maxComplexity
+}
+
+func (cw complexityWalker) fieldComplexity(object, field string, childComplexity int, args map[string]interface{}) int {
+ if customComplexity, ok := cw.es.Complexity(object, field, childComplexity, args); ok && customComplexity >= childComplexity {
+ return customComplexity
+ }
+ // default complexity calculation
+ return safeAdd(1, childComplexity)
+}
+
+const maxInt = int(^uint(0) >> 1)
+
+// safeAdd is a saturating add of a and b that ignores negative operands.
+// If a + b would overflow through normal Go addition,
+// it returns the maximum integer value instead.
+//
+// Adding complexities with this function prevents attackers from intentionally
+// overflowing the complexity calculation to allow overly-complex queries.
+//
+// It also helps mitigate the impact of custom complexities that accidentally
+// return negative values.
+func safeAdd(a, b int) int {
+ // Ignore negative operands.
+ if a < 0 {
+ if b < 0 {
+ return 1
+ }
+ return b
+ } else if b < 0 {
+ return a
+ }
+
+ c := a + b
+ if c < a {
+ // Set c to maximum integer instead of overflowing.
+ c = maxInt
+ }
+ return c
+}
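A minimal standalone sketch of the saturating addition described above; safeAdd and maxInt are unexported from the complexity package, so the logic is restated here under hypothetical names purely for illustration.

package main

import "fmt"

const maxIntSketch = int(^uint(0) >> 1)

// saturatingAdd restates the documented safeAdd behaviour: negative operands
// are ignored and overflow clamps to the maximum int instead of wrapping.
func saturatingAdd(a, b int) int {
	if a < 0 {
		if b < 0 {
			return 1
		}
		return b
	}
	if b < 0 {
		return a
	}
	if c := a + b; c >= a {
		return c
	}
	return maxIntSketch
}

func main() {
	fmt.Println(saturatingAdd(3, 4))            // 7
	fmt.Println(saturatingAdd(-10, 4))          // 4: the negative operand is dropped
	fmt.Println(saturatingAdd(maxIntSketch, 1)) // clamps to the maximum int instead of wrapping
}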
diff --git a/vendor/github.com/99designs/gqlgen/graphql/bool.go b/vendor/github.com/99designs/gqlgen/graphql/bool.go
new file mode 100644
index 000000000..b175ca986
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/bool.go
@@ -0,0 +1,30 @@
+package graphql
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+func MarshalBoolean(b bool) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ if b {
+ w.Write(trueLit)
+ } else {
+ w.Write(falseLit)
+ }
+ })
+}
+
+func UnmarshalBoolean(v interface{}) (bool, error) {
+ switch v := v.(type) {
+ case string:
+ return strings.ToLower(v) == "true", nil
+ case int:
+ return v != 0, nil
+ case bool:
+ return v, nil
+ default:
+ return false, fmt.Errorf("%T is not a bool", v)
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/context.go b/vendor/github.com/99designs/gqlgen/graphql/context.go
new file mode 100644
index 000000000..39393cb27
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/context.go
@@ -0,0 +1,253 @@
+package graphql
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/vektah/gqlparser/ast"
+ "github.com/vektah/gqlparser/gqlerror"
+)
+
+type Resolver func(ctx context.Context) (res interface{}, err error)
+type FieldMiddleware func(ctx context.Context, next Resolver) (res interface{}, err error)
+type RequestMiddleware func(ctx context.Context, next func(ctx context.Context) []byte) []byte
+
+type RequestContext struct {
+ RawQuery string
+ Variables map[string]interface{}
+ Doc *ast.QueryDocument
+
+ ComplexityLimit int
+ OperationComplexity int
+ DisableIntrospection bool
+
+ // ErrorPresenter will be used to generate the error
+ // message from errors given to Error().
+ ErrorPresenter ErrorPresenterFunc
+ Recover RecoverFunc
+ ResolverMiddleware FieldMiddleware
+ DirectiveMiddleware FieldMiddleware
+ RequestMiddleware RequestMiddleware
+ Tracer Tracer
+
+ errorsMu sync.Mutex
+ Errors gqlerror.List
+ extensionsMu sync.Mutex
+ Extensions map[string]interface{}
+}
+
+func DefaultResolverMiddleware(ctx context.Context, next Resolver) (res interface{}, err error) {
+ return next(ctx)
+}
+
+func DefaultDirectiveMiddleware(ctx context.Context, next Resolver) (res interface{}, err error) {
+ return next(ctx)
+}
+
+func DefaultRequestMiddleware(ctx context.Context, next func(ctx context.Context) []byte) []byte {
+ return next(ctx)
+}
+
+func NewRequestContext(doc *ast.QueryDocument, query string, variables map[string]interface{}) *RequestContext {
+ return &RequestContext{
+ Doc: doc,
+ RawQuery: query,
+ Variables: variables,
+ ResolverMiddleware: DefaultResolverMiddleware,
+ DirectiveMiddleware: DefaultDirectiveMiddleware,
+ RequestMiddleware: DefaultRequestMiddleware,
+ Recover: DefaultRecover,
+ ErrorPresenter: DefaultErrorPresenter,
+ Tracer: &NopTracer{},
+ }
+}
+
+type key string
+
+const (
+ request key = "request_context"
+ resolver key = "resolver_context"
+)
+
+func GetRequestContext(ctx context.Context) *RequestContext {
+ val := ctx.Value(request)
+ if val == nil {
+ return nil
+ }
+
+ return val.(*RequestContext)
+}
+
+func WithRequestContext(ctx context.Context, rc *RequestContext) context.Context {
+ return context.WithValue(ctx, request, rc)
+}
+
+type ResolverContext struct {
+ Parent *ResolverContext
+ // The name of the type this field belongs to
+ Object string
+ // These are the args after processing, they can be mutated in middleware to change what the resolver will get.
+ Args map[string]interface{}
+ // The raw field
+ Field CollectedField
+	// The index of the array element in the path.
+	Index *int
+	// The result object of the resolver
+ Result interface{}
+}
+
+func (r *ResolverContext) Path() []interface{} {
+ var path []interface{}
+ for it := r; it != nil; it = it.Parent {
+ if it.Index != nil {
+ path = append(path, *it.Index)
+ } else if it.Field.Field != nil {
+ path = append(path, it.Field.Alias)
+ }
+ }
+
+	// Because we are walking up the chain, the elements are in reverse order, so do an in-place flip.
+ for i := len(path)/2 - 1; i >= 0; i-- {
+ opp := len(path) - 1 - i
+ path[i], path[opp] = path[opp], path[i]
+ }
+
+ return path
+}
+
+func GetResolverContext(ctx context.Context) *ResolverContext {
+ val, _ := ctx.Value(resolver).(*ResolverContext)
+ return val
+}
+
+func WithResolverContext(ctx context.Context, rc *ResolverContext) context.Context {
+ rc.Parent = GetResolverContext(ctx)
+ return context.WithValue(ctx, resolver, rc)
+}
+
+// CollectFieldsCtx is a convenience wrapper around CollectFields that uses the selections of the current resolver context.
+func CollectFieldsCtx(ctx context.Context, satisfies []string) []CollectedField {
+ resctx := GetResolverContext(ctx)
+ return CollectFields(ctx, resctx.Field.Selections, satisfies)
+}
+
+// Errorf sends an error string to the client, passing it through the formatter.
+func (c *RequestContext) Errorf(ctx context.Context, format string, args ...interface{}) {
+ c.errorsMu.Lock()
+ defer c.errorsMu.Unlock()
+
+ c.Errors = append(c.Errors, c.ErrorPresenter(ctx, fmt.Errorf(format, args...)))
+}
+
+// Error sends an error to the client, passing it through the formatter.
+func (c *RequestContext) Error(ctx context.Context, err error) {
+ c.errorsMu.Lock()
+ defer c.errorsMu.Unlock()
+
+ c.Errors = append(c.Errors, c.ErrorPresenter(ctx, err))
+}
+
+// HasError returns true if the current field has already errored
+func (c *RequestContext) HasError(rctx *ResolverContext) bool {
+ c.errorsMu.Lock()
+ defer c.errorsMu.Unlock()
+ path := rctx.Path()
+
+ for _, err := range c.Errors {
+ if equalPath(err.Path, path) {
+ return true
+ }
+ }
+ return false
+}
+
+// GetErrors returns a list of errors that occurred in the current field
+func (c *RequestContext) GetErrors(rctx *ResolverContext) gqlerror.List {
+ c.errorsMu.Lock()
+ defer c.errorsMu.Unlock()
+ path := rctx.Path()
+
+ var errs gqlerror.List
+ for _, err := range c.Errors {
+ if equalPath(err.Path, path) {
+ errs = append(errs, err)
+ }
+ }
+ return errs
+}
+
+func equalPath(a []interface{}, b []interface{}) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// AddError is a convenience method for adding an error to the current response
+func AddError(ctx context.Context, err error) {
+ GetRequestContext(ctx).Error(ctx, err)
+}
+
+// AddErrorf is a convenience method for adding an error to the current response
+func AddErrorf(ctx context.Context, format string, args ...interface{}) {
+ GetRequestContext(ctx).Errorf(ctx, format, args...)
+}
+
+// RegisterExtension registers an extension, returns error if extension has already been registered
+func (c *RequestContext) RegisterExtension(key string, value interface{}) error {
+ c.extensionsMu.Lock()
+ defer c.extensionsMu.Unlock()
+
+ if c.Extensions == nil {
+ c.Extensions = make(map[string]interface{})
+ }
+
+ if _, ok := c.Extensions[key]; ok {
+ return fmt.Errorf("extension already registered for key %s", key)
+ }
+
+ c.Extensions[key] = value
+ return nil
+}
+
+// ChainFieldMiddleware chains the given FieldMiddleware functions into a single middleware; the first one passed is the outermost.
+func ChainFieldMiddleware(handleFunc ...FieldMiddleware) FieldMiddleware {
+ n := len(handleFunc)
+
+ if n > 1 {
+ lastI := n - 1
+ return func(ctx context.Context, next Resolver) (interface{}, error) {
+ var (
+ chainHandler Resolver
+ curI int
+ )
+ chainHandler = func(currentCtx context.Context) (interface{}, error) {
+ if curI == lastI {
+ return next(currentCtx)
+ }
+ curI++
+ res, err := handleFunc[curI](currentCtx, chainHandler)
+ curI--
+ return res, err
+
+ }
+ return handleFunc[0](ctx, chainHandler)
+ }
+ }
+
+ if n == 1 {
+ return handleFunc[0]
+ }
+
+ return func(ctx context.Context, next Resolver) (interface{}, error) {
+ return next(ctx)
+ }
+}
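A minimal sketch of composing resolver middleware with ChainFieldMiddleware; both logging middlewares here are hypothetical, and the composed value would typically be installed through the handler.ResolverMiddleware option that appears later in this change.

package main

import (
	"context"
	"log"

	"github.com/99designs/gqlgen/graphql"
)

// logPath is a hypothetical middleware that logs the path of each field
// before handing control to the next resolver.
func logPath(ctx context.Context, next graphql.Resolver) (interface{}, error) {
	log.Printf("resolving %v", graphql.GetResolverContext(ctx).Path())
	return next(ctx)
}

// logErrors is a hypothetical middleware that logs resolver errors on the way out.
func logErrors(ctx context.Context, next graphql.Resolver) (interface{}, error) {
	res, err := next(ctx)
	if err != nil {
		log.Printf("resolver error: %v", err)
	}
	return res, err
}

// The first middleware passed to ChainFieldMiddleware runs outermost.
var composed = graphql.ChainFieldMiddleware(logPath, logErrors)

func main() {
	_ = composed // e.g. handler.ResolverMiddleware(composed)
}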
diff --git a/vendor/github.com/99designs/gqlgen/graphql/error.go b/vendor/github.com/99designs/gqlgen/graphql/error.go
new file mode 100644
index 000000000..7f161a430
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/error.go
@@ -0,0 +1,31 @@
+package graphql
+
+import (
+ "context"
+
+ "github.com/vektah/gqlparser/gqlerror"
+)
+
+type ErrorPresenterFunc func(context.Context, error) *gqlerror.Error
+
+type ExtendedError interface {
+ Extensions() map[string]interface{}
+}
+
+func DefaultErrorPresenter(ctx context.Context, err error) *gqlerror.Error {
+ if gqlerr, ok := err.(*gqlerror.Error); ok {
+ gqlerr.Path = GetResolverContext(ctx).Path()
+ return gqlerr
+ }
+
+ var extensions map[string]interface{}
+ if ee, ok := err.(ExtendedError); ok {
+ extensions = ee.Extensions()
+ }
+
+ return &gqlerror.Error{
+ Message: err.Error(),
+ Path: GetResolverContext(ctx).Path(),
+ Extensions: extensions,
+ }
+}
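A minimal sketch of the ExtendedError hook: any application error that implements Extensions() gets its map copied into the error's "extensions" object by DefaultErrorPresenter. NotFoundError is a hypothetical type.

package main

import (
	"fmt"

	"github.com/99designs/gqlgen/graphql"
)

// NotFoundError is a hypothetical application error carrying extra context.
type NotFoundError struct {
	Resource string
}

func (e NotFoundError) Error() string {
	return fmt.Sprintf("%s not found", e.Resource)
}

// Extensions satisfies graphql.ExtendedError, so DefaultErrorPresenter copies
// this map into the extensions of the GraphQL error it returns.
func (e NotFoundError) Extensions() map[string]interface{} {
	return map[string]interface{}{"code": "NOT_FOUND", "resource": e.Resource}
}

var _ graphql.ExtendedError = NotFoundError{}

func main() {
	fmt.Println(NotFoundError{Resource: "scene"})
}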
diff --git a/vendor/github.com/99designs/gqlgen/graphql/exec.go b/vendor/github.com/99designs/gqlgen/graphql/exec.go
new file mode 100644
index 000000000..9beb31490
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/exec.go
@@ -0,0 +1,135 @@
+package graphql
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/vektah/gqlparser/ast"
+)
+
+type ExecutableSchema interface {
+ Schema() *ast.Schema
+
+ Complexity(typeName, fieldName string, childComplexity int, args map[string]interface{}) (int, bool)
+ Query(ctx context.Context, op *ast.OperationDefinition) *Response
+ Mutation(ctx context.Context, op *ast.OperationDefinition) *Response
+ Subscription(ctx context.Context, op *ast.OperationDefinition) func() *Response
+}
+
+func CollectFields(ctx context.Context, selSet ast.SelectionSet, satisfies []string) []CollectedField {
+ return collectFields(GetRequestContext(ctx), selSet, satisfies, map[string]bool{})
+}
+
+func collectFields(reqCtx *RequestContext, selSet ast.SelectionSet, satisfies []string, visited map[string]bool) []CollectedField {
+ var groupedFields []CollectedField
+
+ for _, sel := range selSet {
+ switch sel := sel.(type) {
+ case *ast.Field:
+ if !shouldIncludeNode(sel.Directives, reqCtx.Variables) {
+ continue
+ }
+ f := getOrCreateField(&groupedFields, sel.Alias, func() CollectedField {
+ return CollectedField{Field: sel}
+ })
+
+ f.Selections = append(f.Selections, sel.SelectionSet...)
+ case *ast.InlineFragment:
+ if !shouldIncludeNode(sel.Directives, reqCtx.Variables) || !instanceOf(sel.TypeCondition, satisfies) {
+ continue
+ }
+ for _, childField := range collectFields(reqCtx, sel.SelectionSet, satisfies, visited) {
+ f := getOrCreateField(&groupedFields, childField.Name, func() CollectedField { return childField })
+ f.Selections = append(f.Selections, childField.Selections...)
+ }
+
+ case *ast.FragmentSpread:
+ if !shouldIncludeNode(sel.Directives, reqCtx.Variables) {
+ continue
+ }
+ fragmentName := sel.Name
+ if _, seen := visited[fragmentName]; seen {
+ continue
+ }
+ visited[fragmentName] = true
+
+ fragment := reqCtx.Doc.Fragments.ForName(fragmentName)
+ if fragment == nil {
+ // should never happen, validator has already run
+ panic(fmt.Errorf("missing fragment %s", fragmentName))
+ }
+
+ if !instanceOf(fragment.TypeCondition, satisfies) {
+ continue
+ }
+
+ for _, childField := range collectFields(reqCtx, fragment.SelectionSet, satisfies, visited) {
+ f := getOrCreateField(&groupedFields, childField.Name, func() CollectedField { return childField })
+ f.Selections = append(f.Selections, childField.Selections...)
+ }
+
+ default:
+ panic(fmt.Errorf("unsupported %T", sel))
+ }
+ }
+
+ return groupedFields
+}
+
+type CollectedField struct {
+ *ast.Field
+
+ Selections ast.SelectionSet
+}
+
+func instanceOf(val string, satisfies []string) bool {
+ for _, s := range satisfies {
+ if val == s {
+ return true
+ }
+ }
+ return false
+}
+
+func getOrCreateField(c *[]CollectedField, name string, creator func() CollectedField) *CollectedField {
+ for i, cf := range *c {
+ if cf.Alias == name {
+ return &(*c)[i]
+ }
+ }
+
+ f := creator()
+
+ *c = append(*c, f)
+ return &(*c)[len(*c)-1]
+}
+
+func shouldIncludeNode(directives ast.DirectiveList, variables map[string]interface{}) bool {
+ skip, include := false, true
+
+ if d := directives.ForName("skip"); d != nil {
+ skip = resolveIfArgument(d, variables)
+ }
+
+ if d := directives.ForName("include"); d != nil {
+ include = resolveIfArgument(d, variables)
+ }
+
+ return !skip && include
+}
+
+func resolveIfArgument(d *ast.Directive, variables map[string]interface{}) bool {
+ arg := d.Arguments.ForName("if")
+ if arg == nil {
+ panic(fmt.Sprintf("%s: argument 'if' not defined", d.Name))
+ }
+ value, err := arg.Value.Value(variables)
+ if err != nil {
+ panic(err)
+ }
+ ret, ok := value.(bool)
+ if !ok {
+ panic(fmt.Sprintf("%s: argument 'if' is not a boolean", d.Name))
+ }
+ return ret
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/fieldset.go b/vendor/github.com/99designs/gqlgen/graphql/fieldset.go
new file mode 100644
index 000000000..351e266fd
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/fieldset.go
@@ -0,0 +1,63 @@
+package graphql
+
+import (
+ "io"
+ "sync"
+)
+
+type FieldSet struct {
+ fields []CollectedField
+ Values []Marshaler
+ delayed []delayedResult
+}
+
+type delayedResult struct {
+ i int
+ f func() Marshaler
+}
+
+func NewFieldSet(fields []CollectedField) *FieldSet {
+ return &FieldSet{
+ fields: fields,
+ Values: make([]Marshaler, len(fields)),
+ }
+}
+
+func (m *FieldSet) Concurrently(i int, f func() Marshaler) {
+ m.delayed = append(m.delayed, delayedResult{i: i, f: f})
+}
+
+func (m *FieldSet) Dispatch() {
+ if len(m.delayed) == 1 {
+		// only one concurrent task, no need to spawn a goroutine or create a waitgroup
+ d := m.delayed[0]
+ m.Values[d.i] = d.f()
+ } else if len(m.delayed) > 1 {
+ // more than one concurrent task, use the main goroutine to do one, only spawn goroutines for the others
+
+ var wg sync.WaitGroup
+ for _, d := range m.delayed[1:] {
+ wg.Add(1)
+ go func(d delayedResult) {
+ m.Values[d.i] = d.f()
+ wg.Done()
+ }(d)
+ }
+
+ m.Values[m.delayed[0].i] = m.delayed[0].f()
+ wg.Wait()
+ }
+}
+
+func (m *FieldSet) MarshalGQL(writer io.Writer) {
+ writer.Write(openBrace)
+ for i, field := range m.fields {
+ if i != 0 {
+ writer.Write(comma)
+ }
+ writeQuotedString(writer, field.Alias)
+ writer.Write(colon)
+ m.Values[i].MarshalGQL(writer)
+ }
+ writer.Write(closeBrace)
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/float.go b/vendor/github.com/99designs/gqlgen/graphql/float.go
new file mode 100644
index 000000000..fabbad046
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/float.go
@@ -0,0 +1,31 @@
+package graphql
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+func MarshalFloat(f float64) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ io.WriteString(w, fmt.Sprintf("%g", f))
+ })
+}
+
+func UnmarshalFloat(v interface{}) (float64, error) {
+ switch v := v.(type) {
+ case string:
+ return strconv.ParseFloat(v, 64)
+ case int:
+ return float64(v), nil
+ case int64:
+ return float64(v), nil
+ case float64:
+ return v, nil
+ case json.Number:
+ return strconv.ParseFloat(string(v), 64)
+ default:
+		return 0, fmt.Errorf("%T is not a float", v)
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/id.go b/vendor/github.com/99designs/gqlgen/graphql/id.go
new file mode 100644
index 000000000..a5a7960f3
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/id.go
@@ -0,0 +1,36 @@
+package graphql
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+func MarshalID(s string) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ io.WriteString(w, strconv.Quote(s))
+ })
+}
+func UnmarshalID(v interface{}) (string, error) {
+ switch v := v.(type) {
+ case string:
+ return v, nil
+ case json.Number:
+ return string(v), nil
+ case int:
+ return strconv.Itoa(v), nil
+ case float64:
+ return fmt.Sprintf("%f", v), nil
+ case bool:
+ if v {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ case nil:
+ return "null", nil
+ default:
+ return "", fmt.Errorf("%T is not a string", v)
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/int.go b/vendor/github.com/99designs/gqlgen/graphql/int.go
new file mode 100644
index 000000000..ff87574ca
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/int.go
@@ -0,0 +1,29 @@
+package graphql
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+func MarshalInt(i int) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ io.WriteString(w, strconv.Itoa(i))
+ })
+}
+
+func UnmarshalInt(v interface{}) (int, error) {
+ switch v := v.(type) {
+ case string:
+ return strconv.Atoi(v)
+ case int:
+ return v, nil
+ case int64:
+ return int(v), nil
+ case json.Number:
+ return strconv.Atoi(string(v))
+ default:
+ return 0, fmt.Errorf("%T is not an int", v)
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/introspection/introspection.go b/vendor/github.com/99designs/gqlgen/graphql/introspection/introspection.go
new file mode 100644
index 000000000..ca0b065f8
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/introspection/introspection.go
@@ -0,0 +1,72 @@
+// Package introspection implements the spec defined in https://github.com/facebook/graphql/blob/master/spec/Section%204%20--%20Introspection.md#schema-introspection
+package introspection
+
+import "github.com/vektah/gqlparser/ast"
+
+type (
+ Directive struct {
+ Name string
+ Description string
+ Locations []string
+ Args []InputValue
+ }
+
+ EnumValue struct {
+ Name string
+ Description string
+ deprecation *ast.Directive
+ }
+
+ Field struct {
+ Name string
+ Description string
+ Type *Type
+ Args []InputValue
+ deprecation *ast.Directive
+ }
+
+ InputValue struct {
+ Name string
+ Description string
+ DefaultValue *string
+ Type *Type
+ }
+)
+
+func WrapSchema(schema *ast.Schema) *Schema {
+ return &Schema{schema: schema}
+}
+
+func (f *EnumValue) IsDeprecated() bool {
+ return f.deprecation != nil
+}
+
+func (f *EnumValue) DeprecationReason() *string {
+ if f.deprecation == nil {
+ return nil
+ }
+
+ reason := f.deprecation.Arguments.ForName("reason")
+ if reason == nil {
+ return nil
+ }
+
+ return &reason.Value.Raw
+}
+
+func (f *Field) IsDeprecated() bool {
+ return f.deprecation != nil
+}
+
+func (f *Field) DeprecationReason() *string {
+ if f.deprecation == nil {
+ return nil
+ }
+
+ reason := f.deprecation.Arguments.ForName("reason")
+ if reason == nil {
+ return nil
+ }
+
+ return &reason.Value.Raw
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/introspection/query.go b/vendor/github.com/99designs/gqlgen/graphql/introspection/query.go
new file mode 100644
index 000000000..b1e4fbc6e
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/introspection/query.go
@@ -0,0 +1,104 @@
+package introspection
+
+// Query is the query generated by graphiql to determine type information
+const Query = `
+query IntrospectionQuery {
+ __schema {
+ queryType {
+ name
+ }
+ mutationType {
+ name
+ }
+ subscriptionType {
+ name
+ }
+ types {
+ ...FullType
+ }
+ directives {
+ name
+ description
+ locations
+ args {
+ ...InputValue
+ }
+ }
+ }
+}
+
+fragment FullType on __Type {
+ kind
+ name
+ description
+ fields(includeDeprecated: true) {
+ name
+ description
+ args {
+ ...InputValue
+ }
+ type {
+ ...TypeRef
+ }
+ isDeprecated
+ deprecationReason
+ }
+ inputFields {
+ ...InputValue
+ }
+ interfaces {
+ ...TypeRef
+ }
+ enumValues(includeDeprecated: true) {
+ name
+ description
+ isDeprecated
+ deprecationReason
+ }
+ possibleTypes {
+ ...TypeRef
+ }
+}
+
+fragment InputValue on __InputValue {
+ name
+ description
+ type {
+ ...TypeRef
+ }
+ defaultValue
+}
+
+fragment TypeRef on __Type {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+`
diff --git a/vendor/github.com/99designs/gqlgen/graphql/introspection/schema.go b/vendor/github.com/99designs/gqlgen/graphql/introspection/schema.go
new file mode 100644
index 000000000..b5d2c4822
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/introspection/schema.go
@@ -0,0 +1,68 @@
+package introspection
+
+import (
+ "strings"
+
+ "github.com/vektah/gqlparser/ast"
+)
+
+type Schema struct {
+ schema *ast.Schema
+}
+
+func (s *Schema) Types() []Type {
+ var types []Type
+ for _, typ := range s.schema.Types {
+ if strings.HasPrefix(typ.Name, "__") {
+ continue
+ }
+ types = append(types, *WrapTypeFromDef(s.schema, typ))
+ }
+ return types
+}
+
+func (s *Schema) QueryType() *Type {
+ return WrapTypeFromDef(s.schema, s.schema.Query)
+}
+
+func (s *Schema) MutationType() *Type {
+ return WrapTypeFromDef(s.schema, s.schema.Mutation)
+}
+
+func (s *Schema) SubscriptionType() *Type {
+ return WrapTypeFromDef(s.schema, s.schema.Subscription)
+}
+
+func (s *Schema) Directives() []Directive {
+ var res []Directive
+
+ for _, d := range s.schema.Directives {
+ res = append(res, s.directiveFromDef(d))
+ }
+
+ return res
+}
+
+func (s *Schema) directiveFromDef(d *ast.DirectiveDefinition) Directive {
+ var locs []string
+ for _, loc := range d.Locations {
+ locs = append(locs, string(loc))
+ }
+
+ var args []InputValue
+ for _, arg := range d.Arguments {
+ args = append(args, InputValue{
+ Name: arg.Name,
+ Description: arg.Description,
+ DefaultValue: defaultValue(arg.DefaultValue),
+ Type: WrapTypeFromType(s.schema, arg.Type),
+ })
+ }
+
+ return Directive{
+ Name: d.Name,
+ Description: d.Description,
+ Locations: locs,
+ Args: args,
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/introspection/type.go b/vendor/github.com/99designs/gqlgen/graphql/introspection/type.go
new file mode 100644
index 000000000..b963aa0e2
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/introspection/type.go
@@ -0,0 +1,172 @@
+package introspection
+
+import (
+ "strings"
+
+ "github.com/vektah/gqlparser/ast"
+)
+
+type Type struct {
+ schema *ast.Schema
+ def *ast.Definition
+ typ *ast.Type
+}
+
+func WrapTypeFromDef(s *ast.Schema, def *ast.Definition) *Type {
+ if def == nil {
+ return nil
+ }
+ return &Type{schema: s, def: def}
+}
+
+func WrapTypeFromType(s *ast.Schema, typ *ast.Type) *Type {
+ if typ == nil {
+ return nil
+ }
+
+ if !typ.NonNull && typ.NamedType != "" {
+ return &Type{schema: s, def: s.Types[typ.NamedType]}
+ }
+ return &Type{schema: s, typ: typ}
+}
+
+func (t *Type) Kind() string {
+ if t.typ != nil {
+ if t.typ.NonNull {
+ return "NON_NULL"
+ }
+
+ if t.typ.Elem != nil {
+ return "LIST"
+ }
+ } else {
+ return string(t.def.Kind)
+ }
+
+ panic("UNKNOWN")
+}
+
+func (t *Type) Name() *string {
+ if t.def == nil {
+ return nil
+ }
+ return &t.def.Name
+}
+
+func (t *Type) Description() string {
+ if t.def == nil {
+ return ""
+ }
+ return t.def.Description
+}
+
+func (t *Type) Fields(includeDeprecated bool) []Field {
+ if t.def == nil || (t.def.Kind != ast.Object && t.def.Kind != ast.Interface) {
+ return nil
+ }
+ var fields []Field
+ for _, f := range t.def.Fields {
+ if strings.HasPrefix(f.Name, "__") {
+ continue
+ }
+
+ var args []InputValue
+ for _, arg := range f.Arguments {
+ args = append(args, InputValue{
+ Type: WrapTypeFromType(t.schema, arg.Type),
+ Name: arg.Name,
+ Description: arg.Description,
+ DefaultValue: defaultValue(arg.DefaultValue),
+ })
+ }
+
+ fields = append(fields, Field{
+ Name: f.Name,
+ Description: f.Description,
+ Args: args,
+ Type: WrapTypeFromType(t.schema, f.Type),
+ deprecation: f.Directives.ForName("deprecated"),
+ })
+ }
+ return fields
+}
+
+func (t *Type) InputFields() []InputValue {
+ if t.def == nil || t.def.Kind != ast.InputObject {
+ return nil
+ }
+
+ var res []InputValue
+ for _, f := range t.def.Fields {
+ res = append(res, InputValue{
+ Name: f.Name,
+ Description: f.Description,
+ Type: WrapTypeFromType(t.schema, f.Type),
+ DefaultValue: defaultValue(f.DefaultValue),
+ })
+ }
+ return res
+}
+
+func defaultValue(value *ast.Value) *string {
+ if value == nil {
+ return nil
+ }
+ val := value.String()
+ return &val
+}
+
+func (t *Type) Interfaces() []Type {
+ if t.def == nil || t.def.Kind != ast.Object {
+ return nil
+ }
+
+ var res []Type
+ for _, intf := range t.def.Interfaces {
+ res = append(res, *WrapTypeFromDef(t.schema, t.schema.Types[intf]))
+ }
+
+ return res
+}
+
+func (t *Type) PossibleTypes() []Type {
+ if t.def == nil || (t.def.Kind != ast.Interface && t.def.Kind != ast.Union) {
+ return nil
+ }
+
+ var res []Type
+ for _, pt := range t.schema.GetPossibleTypes(t.def) {
+ res = append(res, *WrapTypeFromDef(t.schema, pt))
+ }
+ return res
+}
+
+func (t *Type) EnumValues(includeDeprecated bool) []EnumValue {
+ if t.def == nil || t.def.Kind != ast.Enum {
+ return nil
+ }
+
+ var res []EnumValue
+ for _, val := range t.def.EnumValues {
+ res = append(res, EnumValue{
+ Name: val.Name,
+ Description: val.Description,
+ deprecation: val.Directives.ForName("deprecated"),
+ })
+ }
+ return res
+}
+
+func (t *Type) OfType() *Type {
+ if t.typ == nil {
+ return nil
+ }
+ if t.typ.NonNull {
+ // fake non null nodes
+ cpy := *t.typ
+ cpy.NonNull = false
+
+ return WrapTypeFromType(t.schema, &cpy)
+ }
+ return WrapTypeFromType(t.schema, t.typ.Elem)
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/jsonw.go b/vendor/github.com/99designs/gqlgen/graphql/jsonw.go
new file mode 100644
index 000000000..db95d8e44
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/jsonw.go
@@ -0,0 +1,52 @@
+package graphql
+
+import (
+ "io"
+)
+
+var nullLit = []byte(`null`)
+var trueLit = []byte(`true`)
+var falseLit = []byte(`false`)
+var openBrace = []byte(`{`)
+var closeBrace = []byte(`}`)
+var openBracket = []byte(`[`)
+var closeBracket = []byte(`]`)
+var colon = []byte(`:`)
+var comma = []byte(`,`)
+
+var Null = &lit{nullLit}
+var True = &lit{trueLit}
+var False = &lit{falseLit}
+
+type Marshaler interface {
+ MarshalGQL(w io.Writer)
+}
+
+type Unmarshaler interface {
+ UnmarshalGQL(v interface{}) error
+}
+
+type WriterFunc func(writer io.Writer)
+
+func (f WriterFunc) MarshalGQL(w io.Writer) {
+ f(w)
+}
+
+type Array []Marshaler
+
+func (a Array) MarshalGQL(writer io.Writer) {
+ writer.Write(openBracket)
+ for i, val := range a {
+ if i != 0 {
+ writer.Write(comma)
+ }
+ val.MarshalGQL(writer)
+ }
+ writer.Write(closeBracket)
+}
+
+type lit struct{ b []byte }
+
+func (l lit) MarshalGQL(w io.Writer) {
+ w.Write(l.b)
+}
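A minimal sketch of the Marshaler/WriterFunc pattern that the scalar helpers in this package are built on; MarshalUpper is a hypothetical custom scalar writer.

package main

import (
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/99designs/gqlgen/graphql"
)

// MarshalUpper writes a string scalar in upper case, quoted as JSON.
func MarshalUpper(s string) graphql.Marshaler {
	return graphql.WriterFunc(func(w io.Writer) {
		io.WriteString(w, strconv.Quote(strings.ToUpper(s)))
	})
}

func main() {
	var b strings.Builder
	MarshalUpper("stash").MarshalGQL(&b)
	fmt.Println(b.String()) // "STASH"
}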
diff --git a/vendor/github.com/99designs/gqlgen/graphql/map.go b/vendor/github.com/99designs/gqlgen/graphql/map.go
new file mode 100644
index 000000000..1e91d1d98
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/map.go
@@ -0,0 +1,24 @@
+package graphql
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+func MarshalMap(val map[string]interface{}) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ err := json.NewEncoder(w).Encode(val)
+ if err != nil {
+ panic(err)
+ }
+ })
+}
+
+func UnmarshalMap(v interface{}) (map[string]interface{}, error) {
+ if m, ok := v.(map[string]interface{}); ok {
+ return m, nil
+ }
+
+ return nil, fmt.Errorf("%T is not a map", v)
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/oneshot.go b/vendor/github.com/99designs/gqlgen/graphql/oneshot.go
new file mode 100644
index 000000000..dd31f5baa
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/oneshot.go
@@ -0,0 +1,14 @@
+package graphql
+
+func OneShot(resp *Response) func() *Response {
+ var oneshot bool
+
+ return func() *Response {
+ if oneshot {
+ return nil
+ }
+ oneshot = true
+
+ return resp
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/recovery.go b/vendor/github.com/99designs/gqlgen/graphql/recovery.go
new file mode 100644
index 000000000..3aa032dc5
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/recovery.go
@@ -0,0 +1,19 @@
+package graphql
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "runtime/debug"
+)
+
+type RecoverFunc func(ctx context.Context, err interface{}) (userMessage error)
+
+func DefaultRecover(ctx context.Context, err interface{}) error {
+ fmt.Fprintln(os.Stderr, err)
+ fmt.Fprintln(os.Stderr)
+ debug.PrintStack()
+
+ return errors.New("internal system error")
+}
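A minimal sketch of a custom RecoverFunc that, like DefaultRecover, hides panic details from clients but reports them through the application's own logging; it would be installed with the handler.RecoverFunc option shown later in this change.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/99designs/gqlgen/graphql"
)

func notifyAndRecover(ctx context.Context, err interface{}) error {
	// Report the panic value somewhere useful, then return a generic message
	// so internals never leak into the GraphQL response.
	log.Printf("resolver panic: %v", err)
	return errors.New("internal system error")
}

var _ graphql.RecoverFunc = notifyAndRecover

func main() {
	fmt.Println(notifyAndRecover(context.Background(), "boom"))
}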
diff --git a/vendor/github.com/99designs/gqlgen/graphql/response.go b/vendor/github.com/99designs/gqlgen/graphql/response.go
new file mode 100644
index 000000000..6fe55d56d
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/response.go
@@ -0,0 +1,24 @@
+package graphql
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/vektah/gqlparser/gqlerror"
+)
+
+// Errors are intentionally serialized first based on the advice in
+// https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107
+// and https://github.com/facebook/graphql/pull/384
+type Response struct {
+ Errors gqlerror.List `json:"errors,omitempty"`
+ Data json.RawMessage `json:"data"`
+ Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+func ErrorResponse(ctx context.Context, messagef string, args ...interface{}) *Response {
+ return &Response{
+ Errors: gqlerror.List{{Message: fmt.Sprintf(messagef, args...)}},
+ }
+}
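A small sketch of the field ordering guaranteed by the struct above: encoding/json emits struct fields in declaration order, so errors are serialized before data, matching the linked spec discussion.

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/99designs/gqlgen/graphql"
)

func main() {
	resp := graphql.ErrorResponse(context.Background(), "boom: %d", 42)
	out, err := json.Marshal(resp)
	if err != nil {
		panic(err)
	}
	// Roughly: {"errors":[{"message":"boom: 42"}],"data":null}
	fmt.Println(string(out))
}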
diff --git a/vendor/github.com/99designs/gqlgen/graphql/string.go b/vendor/github.com/99designs/gqlgen/graphql/string.go
new file mode 100644
index 000000000..7c1b7d957
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/string.go
@@ -0,0 +1,68 @@
+package graphql
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+)
+
+const encodeHex = "0123456789ABCDEF"
+
+func MarshalString(s string) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ writeQuotedString(w, s)
+ })
+}
+
+func writeQuotedString(w io.Writer, s string) {
+ start := 0
+ io.WriteString(w, `"`)
+
+ for i, c := range s {
+ if c < 0x20 || c == '\\' || c == '"' {
+ io.WriteString(w, s[start:i])
+
+ switch c {
+ case '\t':
+ io.WriteString(w, `\t`)
+ case '\r':
+ io.WriteString(w, `\r`)
+ case '\n':
+ io.WriteString(w, `\n`)
+ case '\\':
+ io.WriteString(w, `\\`)
+ case '"':
+ io.WriteString(w, `\"`)
+ default:
+ io.WriteString(w, `\u00`)
+ w.Write([]byte{encodeHex[c>>4], encodeHex[c&0xf]})
+ }
+
+ start = i + 1
+ }
+ }
+
+ io.WriteString(w, s[start:])
+ io.WriteString(w, `"`)
+}
+
+func UnmarshalString(v interface{}) (string, error) {
+ switch v := v.(type) {
+ case string:
+ return v, nil
+ case int:
+ return strconv.Itoa(v), nil
+ case float64:
+ return fmt.Sprintf("%f", v), nil
+ case bool:
+ if v {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ case nil:
+ return "null", nil
+ default:
+ return "", fmt.Errorf("%T is not a string", v)
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/time.go b/vendor/github.com/99designs/gqlgen/graphql/time.go
new file mode 100644
index 000000000..4f4485602
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/time.go
@@ -0,0 +1,21 @@
+package graphql
+
+import (
+ "errors"
+ "io"
+ "strconv"
+ "time"
+)
+
+func MarshalTime(t time.Time) Marshaler {
+ return WriterFunc(func(w io.Writer) {
+ io.WriteString(w, strconv.Quote(t.Format(time.RFC3339)))
+ })
+}
+
+func UnmarshalTime(v interface{}) (time.Time, error) {
+ if tmpStr, ok := v.(string); ok {
+ return time.Parse(time.RFC3339, tmpStr)
+ }
+ return time.Time{}, errors.New("time should be RFC3339 formatted string")
+}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/tracer.go b/vendor/github.com/99designs/gqlgen/graphql/tracer.go
new file mode 100644
index 000000000..0597ce8cc
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/tracer.go
@@ -0,0 +1,58 @@
+package graphql
+
+import (
+ "context"
+)
+
+var _ Tracer = (*NopTracer)(nil)
+
+type Tracer interface {
+ StartOperationParsing(ctx context.Context) context.Context
+ EndOperationParsing(ctx context.Context)
+ StartOperationValidation(ctx context.Context) context.Context
+ EndOperationValidation(ctx context.Context)
+ StartOperationExecution(ctx context.Context) context.Context
+ StartFieldExecution(ctx context.Context, field CollectedField) context.Context
+ StartFieldResolverExecution(ctx context.Context, rc *ResolverContext) context.Context
+ StartFieldChildExecution(ctx context.Context) context.Context
+ EndFieldExecution(ctx context.Context)
+ EndOperationExecution(ctx context.Context)
+}
+
+type NopTracer struct{}
+
+func (NopTracer) StartOperationParsing(ctx context.Context) context.Context {
+ return ctx
+}
+
+func (NopTracer) EndOperationParsing(ctx context.Context) {
+}
+
+func (NopTracer) StartOperationValidation(ctx context.Context) context.Context {
+ return ctx
+}
+
+func (NopTracer) EndOperationValidation(ctx context.Context) {
+}
+
+func (NopTracer) StartOperationExecution(ctx context.Context) context.Context {
+ return ctx
+}
+
+func (NopTracer) StartFieldExecution(ctx context.Context, field CollectedField) context.Context {
+ return ctx
+}
+
+func (NopTracer) StartFieldResolverExecution(ctx context.Context, rc *ResolverContext) context.Context {
+ return ctx
+}
+
+func (NopTracer) StartFieldChildExecution(ctx context.Context) context.Context {
+ return ctx
+}
+
+func (NopTracer) EndFieldExecution(ctx context.Context) {
+}
+
+func (NopTracer) EndOperationExecution(ctx context.Context) {
+}
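A minimal sketch of a custom Tracer: embedding NopTracer supplies no-op implementations for every hook, so only the interesting hooks need overriding. The timing logic and context key are hypothetical; such a tracer would be installed with the handler.Tracer option later in this change.

package main

import (
	"context"
	"log"
	"time"

	"github.com/99designs/gqlgen/graphql"
)

type opStartKey struct{}

// opTimer measures how long each operation takes to execute.
type opTimer struct {
	graphql.NopTracer // no-op defaults for every other hook
}

func (opTimer) StartOperationExecution(ctx context.Context) context.Context {
	return context.WithValue(ctx, opStartKey{}, time.Now())
}

func (opTimer) EndOperationExecution(ctx context.Context) {
	if start, ok := ctx.Value(opStartKey{}).(time.Time); ok {
		log.Printf("operation took %s", time.Since(start))
	}
}

var _ graphql.Tracer = opTimer{}

func main() {
	t := opTimer{}
	ctx := t.StartOperationExecution(context.Background())
	time.Sleep(10 * time.Millisecond)
	t.EndOperationExecution(ctx)
}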
diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go
new file mode 100644
index 000000000..8cf3c9ba9
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/graphql/version.go
@@ -0,0 +1,3 @@
+package graphql
+
+const Version = "dev"
diff --git a/vendor/github.com/99designs/gqlgen/handler/context.go b/vendor/github.com/99designs/gqlgen/handler/context.go
new file mode 100644
index 000000000..2992aa3d4
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/handler/context.go
@@ -0,0 +1,57 @@
+package handler
+
+import "context"
+
+type key string
+
+const (
+ initpayload key = "ws_initpayload_context"
+)
+
+// InitPayload is a structure that is parsed from the websocket init message payload. To use
+// request headers for non-websocket requests, wrap the graphql handler in a middleware instead.
+type InitPayload map[string]interface{}
+
+// GetString safely gets a string value from the payload. It returns an empty string if the
+// payload is nil or the value isn't set.
+func (payload InitPayload) GetString(key string) string {
+ if payload == nil {
+ return ""
+ }
+
+ if value, ok := payload[key]; ok {
+ res, _ := value.(string)
+ return res
+ }
+
+ return ""
+}
+
+// Authorization is a shorthand for getting the Authorization header from the
+// payload.
+func (payload InitPayload) Authorization() string {
+ if value := payload.GetString("Authorization"); value != "" {
+ return value
+ }
+
+ if value := payload.GetString("authorization"); value != "" {
+ return value
+ }
+
+ return ""
+}
+
+func withInitPayload(ctx context.Context, payload InitPayload) context.Context {
+ return context.WithValue(ctx, initpayload, payload)
+}
+
+// GetInitPayload gets a map of the data sent with the connection_init message, which is used by
+// graphql clients as a stand-in for HTTP headers.
+func GetInitPayload(ctx context.Context) InitPayload {
+ payload, ok := ctx.Value(initpayload).(InitPayload)
+ if !ok {
+ return nil
+ }
+
+ return payload
+}
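A minimal sketch of reading the websocket connection_init payload from inside request handling code; the function name is hypothetical.

package main

import (
	"context"
	"fmt"

	"github.com/99designs/gqlgen/handler"
)

// tokenFromContext returns whatever the client sent as Authorization in its
// connection_init payload; outside a websocket connection it returns "".
func tokenFromContext(ctx context.Context) string {
	// GetInitPayload returns nil for non-websocket requests and the
	// InitPayload accessors are nil-safe, so no guard is needed here.
	return handler.GetInitPayload(ctx).Authorization()
}

func main() {
	fmt.Printf("token: %q\n", tokenFromContext(context.Background()))
}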
diff --git a/vendor/github.com/99designs/gqlgen/handler/graphql.go b/vendor/github.com/99designs/gqlgen/handler/graphql.go
new file mode 100644
index 000000000..918671a9a
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/handler/graphql.go
@@ -0,0 +1,467 @@
+package handler
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/99designs/gqlgen/complexity"
+ "github.com/99designs/gqlgen/graphql"
+ "github.com/gorilla/websocket"
+ "github.com/hashicorp/golang-lru"
+ "github.com/vektah/gqlparser/ast"
+ "github.com/vektah/gqlparser/gqlerror"
+ "github.com/vektah/gqlparser/parser"
+ "github.com/vektah/gqlparser/validator"
+)
+
+type params struct {
+ Query string `json:"query"`
+ OperationName string `json:"operationName"`
+ Variables map[string]interface{} `json:"variables"`
+}
+
+type Config struct {
+ cacheSize int
+ upgrader websocket.Upgrader
+ recover graphql.RecoverFunc
+ errorPresenter graphql.ErrorPresenterFunc
+ resolverHook graphql.FieldMiddleware
+ requestHook graphql.RequestMiddleware
+ tracer graphql.Tracer
+ complexityLimit int
+ disableIntrospection bool
+}
+
+func (c *Config) newRequestContext(es graphql.ExecutableSchema, doc *ast.QueryDocument, op *ast.OperationDefinition, query string, variables map[string]interface{}) *graphql.RequestContext {
+ reqCtx := graphql.NewRequestContext(doc, query, variables)
+ reqCtx.DisableIntrospection = c.disableIntrospection
+
+ if hook := c.recover; hook != nil {
+ reqCtx.Recover = hook
+ }
+
+ if hook := c.errorPresenter; hook != nil {
+ reqCtx.ErrorPresenter = hook
+ }
+
+ if hook := c.resolverHook; hook != nil {
+ reqCtx.ResolverMiddleware = hook
+ }
+
+ if hook := c.requestHook; hook != nil {
+ reqCtx.RequestMiddleware = hook
+ }
+
+ if hook := c.tracer; hook != nil {
+ reqCtx.Tracer = hook
+ }
+
+ if c.complexityLimit > 0 {
+ reqCtx.ComplexityLimit = c.complexityLimit
+ operationComplexity := complexity.Calculate(es, op, variables)
+ reqCtx.OperationComplexity = operationComplexity
+ }
+
+ return reqCtx
+}
+
+type Option func(cfg *Config)
+
+func WebsocketUpgrader(upgrader websocket.Upgrader) Option {
+ return func(cfg *Config) {
+ cfg.upgrader = upgrader
+ }
+}
+
+func RecoverFunc(recover graphql.RecoverFunc) Option {
+ return func(cfg *Config) {
+ cfg.recover = recover
+ }
+}
+
+// ErrorPresenter transforms errors found while resolving into errors that will be returned to the user. It provides
+// a good place to add any extra fields, like error.type, that might be desired by your frontend. Check the default
+// implementation in graphql.DefaultErrorPresenter for an example.
+func ErrorPresenter(f graphql.ErrorPresenterFunc) Option {
+ return func(cfg *Config) {
+ cfg.errorPresenter = f
+ }
+}
+
+// IntrospectionEnabled(false) forbids clients from calling introspection endpoints. This can be useful in production
+// when you don't want clients introspecting the full schema.
+func IntrospectionEnabled(enabled bool) Option {
+ return func(cfg *Config) {
+ cfg.disableIntrospection = !enabled
+ }
+}
+
+// ComplexityLimit sets a maximum query complexity that is allowed to be executed.
+// If a query is submitted that exceeds the limit, a 422 status code will be returned.
+func ComplexityLimit(limit int) Option {
+ return func(cfg *Config) {
+ cfg.complexityLimit = limit
+ }
+}
+
+// ResolverMiddleware allows you to define a function that will be called around every resolver,
+// useful for logging.
+func ResolverMiddleware(middleware graphql.FieldMiddleware) Option {
+ return func(cfg *Config) {
+ if cfg.resolverHook == nil {
+ cfg.resolverHook = middleware
+ return
+ }
+
+ lastResolve := cfg.resolverHook
+ cfg.resolverHook = func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {
+ return lastResolve(ctx, func(ctx context.Context) (res interface{}, err error) {
+ return middleware(ctx, next)
+ })
+ }
+ }
+}
+
+// RequestMiddleware allows you to define a function that will be called around the root request,
+// after the query has been parsed. This is useful for logging.
+func RequestMiddleware(middleware graphql.RequestMiddleware) Option {
+ return func(cfg *Config) {
+ if cfg.requestHook == nil {
+ cfg.requestHook = middleware
+ return
+ }
+
+ lastResolve := cfg.requestHook
+ cfg.requestHook = func(ctx context.Context, next func(ctx context.Context) []byte) []byte {
+ return lastResolve(ctx, func(ctx context.Context) []byte {
+ return middleware(ctx, next)
+ })
+ }
+ }
+}
+
+// Tracer allows you to add a request/resolver tracer that will be called around the root request
+// and around each resolver call. This is useful for tracing.
+func Tracer(tracer graphql.Tracer) Option {
+ return func(cfg *Config) {
+ if cfg.tracer == nil {
+ cfg.tracer = tracer
+
+ } else {
+ lastResolve := cfg.tracer
+ cfg.tracer = &tracerWrapper{
+ tracer1: lastResolve,
+ tracer2: tracer,
+ }
+ }
+
+ opt := RequestMiddleware(func(ctx context.Context, next func(ctx context.Context) []byte) []byte {
+ ctx = tracer.StartOperationExecution(ctx)
+ resp := next(ctx)
+ tracer.EndOperationExecution(ctx)
+
+ return resp
+ })
+ opt(cfg)
+ }
+}
+
+type tracerWrapper struct {
+ tracer1 graphql.Tracer
+ tracer2 graphql.Tracer
+}
+
+func (tw *tracerWrapper) StartOperationParsing(ctx context.Context) context.Context {
+ ctx = tw.tracer1.StartOperationParsing(ctx)
+ ctx = tw.tracer2.StartOperationParsing(ctx)
+ return ctx
+}
+
+func (tw *tracerWrapper) EndOperationParsing(ctx context.Context) {
+ tw.tracer2.EndOperationParsing(ctx)
+ tw.tracer1.EndOperationParsing(ctx)
+}
+
+func (tw *tracerWrapper) StartOperationValidation(ctx context.Context) context.Context {
+ ctx = tw.tracer1.StartOperationValidation(ctx)
+ ctx = tw.tracer2.StartOperationValidation(ctx)
+ return ctx
+}
+
+func (tw *tracerWrapper) EndOperationValidation(ctx context.Context) {
+ tw.tracer2.EndOperationValidation(ctx)
+ tw.tracer1.EndOperationValidation(ctx)
+}
+
+func (tw *tracerWrapper) StartOperationExecution(ctx context.Context) context.Context {
+ ctx = tw.tracer1.StartOperationExecution(ctx)
+ ctx = tw.tracer2.StartOperationExecution(ctx)
+ return ctx
+}
+
+func (tw *tracerWrapper) StartFieldExecution(ctx context.Context, field graphql.CollectedField) context.Context {
+ ctx = tw.tracer1.StartFieldExecution(ctx, field)
+ ctx = tw.tracer2.StartFieldExecution(ctx, field)
+ return ctx
+}
+
+func (tw *tracerWrapper) StartFieldResolverExecution(ctx context.Context, rc *graphql.ResolverContext) context.Context {
+ ctx = tw.tracer1.StartFieldResolverExecution(ctx, rc)
+ ctx = tw.tracer2.StartFieldResolverExecution(ctx, rc)
+ return ctx
+}
+
+func (tw *tracerWrapper) StartFieldChildExecution(ctx context.Context) context.Context {
+ ctx = tw.tracer1.StartFieldChildExecution(ctx)
+ ctx = tw.tracer2.StartFieldChildExecution(ctx)
+ return ctx
+}
+
+func (tw *tracerWrapper) EndFieldExecution(ctx context.Context) {
+ tw.tracer2.EndFieldExecution(ctx)
+ tw.tracer1.EndFieldExecution(ctx)
+}
+
+func (tw *tracerWrapper) EndOperationExecution(ctx context.Context) {
+ tw.tracer2.EndOperationExecution(ctx)
+ tw.tracer1.EndOperationExecution(ctx)
+}
+
+// CacheSize sets the maximum size of the query cache.
+// If size is less than or equal to 0, the cache is disabled.
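+//
+// For example, handler.GraphQL(es, handler.CacheSize(5000)) keeps up to 5000
+// parsed query documents in the LRU cache, while CacheSize(0) disables caching.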
+func CacheSize(size int) Option {
+ return func(cfg *Config) {
+ cfg.cacheSize = size
+ }
+}
+
+const DefaultCacheSize = 1000
+
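+// GraphQL turns an ExecutableSchema into an http.HandlerFunc, applying any options
+// over the defaults (a 1000-entry query cache and 1KB websocket buffers).
+//
+// A minimal wiring sketch; NewExecutableSchema and Resolver come from gqlgen's
+// generated code and are illustrative here:
+//
+//	es := NewExecutableSchema(Config{Resolvers: &Resolver{}})
+//	http.Handle("/query", handler.GraphQL(es, handler.ComplexityLimit(300)))
+//	log.Fatal(http.ListenAndServe(":8080", nil))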
+func GraphQL(exec graphql.ExecutableSchema, options ...Option) http.HandlerFunc {
+ cfg := &Config{
+ cacheSize: DefaultCacheSize,
+ upgrader: websocket.Upgrader{
+ ReadBufferSize: 1024,
+ WriteBufferSize: 1024,
+ },
+ }
+
+ for _, option := range options {
+ option(cfg)
+ }
+
+ var cache *lru.Cache
+ if cfg.cacheSize > 0 {
+ var err error
+ cache, err = lru.New(cfg.cacheSize)
+ if err != nil {
+ // An error is only returned for non-positive cache size
+ // and we already checked for that.
+ panic("unexpected error creating cache: " + err.Error())
+ }
+ }
+ if cfg.tracer == nil {
+ cfg.tracer = &graphql.NopTracer{}
+ }
+
+ handler := &graphqlHandler{
+ cfg: cfg,
+ cache: cache,
+ exec: exec,
+ }
+
+ return handler.ServeHTTP
+}
+
+var _ http.Handler = (*graphqlHandler)(nil)
+
+type graphqlHandler struct {
+ cfg *Config
+ cache *lru.Cache
+ exec graphql.ExecutableSchema
+}
+
+func (gh *graphqlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodOptions {
+ w.Header().Set("Allow", "OPTIONS, GET, POST")
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+
+ if strings.Contains(r.Header.Get("Upgrade"), "websocket") {
+ connectWs(gh.exec, w, r, gh.cfg, gh.cache)
+ return
+ }
+
+ var reqParams params
+ switch r.Method {
+ case http.MethodGet:
+ reqParams.Query = r.URL.Query().Get("query")
+ reqParams.OperationName = r.URL.Query().Get("operationName")
+
+ if variables := r.URL.Query().Get("variables"); variables != "" {
+ if err := jsonDecode(strings.NewReader(variables), &reqParams.Variables); err != nil {
+ sendErrorf(w, http.StatusBadRequest, "variables could not be decoded")
+ return
+ }
+ }
+ case http.MethodPost:
+ if err := jsonDecode(r.Body, &reqParams); err != nil {
+ sendErrorf(w, http.StatusBadRequest, "json body could not be decoded: "+err.Error())
+ return
+ }
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+
+ ctx := r.Context()
+
+ var doc *ast.QueryDocument
+ var cacheHit bool
+ if gh.cache != nil {
+ val, ok := gh.cache.Get(reqParams.Query)
+ if ok {
+ doc = val.(*ast.QueryDocument)
+ cacheHit = true
+ }
+ }
+
+ ctx, doc, gqlErr := gh.parseOperation(ctx, &parseOperationArgs{
+ Query: reqParams.Query,
+ CachedDoc: doc,
+ })
+ if gqlErr != nil {
+ sendError(w, http.StatusUnprocessableEntity, gqlErr)
+ return
+ }
+
+ ctx, op, vars, listErr := gh.validateOperation(ctx, &validateOperationArgs{
+ Doc: doc,
+ OperationName: reqParams.OperationName,
+ CacheHit: cacheHit,
+ R: r,
+ Variables: reqParams.Variables,
+ })
+ if len(listErr) != 0 {
+ sendError(w, http.StatusUnprocessableEntity, listErr...)
+ return
+ }
+
+ if gh.cache != nil && !cacheHit {
+ gh.cache.Add(reqParams.Query, doc)
+ }
+
+ reqCtx := gh.cfg.newRequestContext(gh.exec, doc, op, reqParams.Query, vars)
+ ctx = graphql.WithRequestContext(ctx, reqCtx)
+
+ defer func() {
+ if err := recover(); err != nil {
+ userErr := reqCtx.Recover(ctx, err)
+ sendErrorf(w, http.StatusUnprocessableEntity, userErr.Error())
+ }
+ }()
+
+ if reqCtx.ComplexityLimit > 0 && reqCtx.OperationComplexity > reqCtx.ComplexityLimit {
+ sendErrorf(w, http.StatusUnprocessableEntity, "operation has complexity %d, which exceeds the limit of %d", reqCtx.OperationComplexity, reqCtx.ComplexityLimit)
+ return
+ }
+
+ switch op.Operation {
+ case ast.Query:
+ b, err := json.Marshal(gh.exec.Query(ctx, op))
+ if err != nil {
+ panic(err)
+ }
+ w.Write(b)
+ case ast.Mutation:
+ b, err := json.Marshal(gh.exec.Mutation(ctx, op))
+ if err != nil {
+ panic(err)
+ }
+ w.Write(b)
+ default:
+ sendErrorf(w, http.StatusBadRequest, "unsupported operation type")
+ }
+}
+
+type parseOperationArgs struct {
+ Query string
+ CachedDoc *ast.QueryDocument
+}
+
+func (gh *graphqlHandler) parseOperation(ctx context.Context, args *parseOperationArgs) (context.Context, *ast.QueryDocument, *gqlerror.Error) {
+ ctx = gh.cfg.tracer.StartOperationParsing(ctx)
+ defer func() { gh.cfg.tracer.EndOperationParsing(ctx) }()
+
+ if args.CachedDoc != nil {
+ return ctx, args.CachedDoc, nil
+ }
+
+ doc, gqlErr := parser.ParseQuery(&ast.Source{Input: args.Query})
+ if gqlErr != nil {
+ return ctx, nil, gqlErr
+ }
+
+ return ctx, doc, nil
+}
+
+type validateOperationArgs struct {
+ Doc *ast.QueryDocument
+ OperationName string
+ CacheHit bool
+ R *http.Request
+ Variables map[string]interface{}
+}
+
+func (gh *graphqlHandler) validateOperation(ctx context.Context, args *validateOperationArgs) (context.Context, *ast.OperationDefinition, map[string]interface{}, gqlerror.List) {
+ ctx = gh.cfg.tracer.StartOperationValidation(ctx)
+ defer func() { gh.cfg.tracer.EndOperationValidation(ctx) }()
+
+ if !args.CacheHit {
+ listErr := validator.Validate(gh.exec.Schema(), args.Doc)
+ if len(listErr) != 0 {
+ return ctx, nil, nil, listErr
+ }
+ }
+
+ op := args.Doc.Operations.ForName(args.OperationName)
+ if op == nil {
+ return ctx, nil, nil, gqlerror.List{gqlerror.Errorf("operation %s not found", args.OperationName)}
+ }
+
+ if op.Operation != ast.Query && args.R.Method == http.MethodGet {
+ return ctx, nil, nil, gqlerror.List{gqlerror.Errorf("GET requests only allow query operations")}
+ }
+
+ vars, err := validator.VariableValues(gh.exec.Schema(), op, args.Variables)
+ if err != nil {
+ return ctx, nil, nil, gqlerror.List{err}
+ }
+
+ return ctx, op, vars, nil
+}
+
+func jsonDecode(r io.Reader, val interface{}) error {
+ dec := json.NewDecoder(r)
+ dec.UseNumber()
+ return dec.Decode(val)
+}
+
+func sendError(w http.ResponseWriter, code int, errors ...*gqlerror.Error) {
+ w.WriteHeader(code)
+ b, err := json.Marshal(&graphql.Response{Errors: errors})
+ if err != nil {
+ panic(err)
+ }
+ w.Write(b)
+}
+
+func sendErrorf(w http.ResponseWriter, code int, format string, args ...interface{}) {
+ sendError(w, code, &gqlerror.Error{Message: fmt.Sprintf(format, args...)})
+}
diff --git a/vendor/github.com/99designs/gqlgen/handler/playground.go b/vendor/github.com/99designs/gqlgen/handler/playground.go
new file mode 100644
index 000000000..f1687defb
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/handler/playground.go
@@ -0,0 +1,54 @@
+package handler
+
+import (
+ "html/template"
+ "net/http"
+)
+
+var page = template.Must(template.New("graphiql").Parse(`
+
+
+
+
+
+
+
+
+ {{.title}}
+
+
+
+
+
+
+
+`))
+
+func Playground(title string, endpoint string) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ err := page.Execute(w, map[string]string{
+ "title": title,
+ "endpoint": endpoint,
+ "version": "1.7.8",
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/handler/stub.go b/vendor/github.com/99designs/gqlgen/handler/stub.go
new file mode 100644
index 000000000..d237e1889
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/handler/stub.go
@@ -0,0 +1,51 @@
+package handler
+
+import (
+ "context"
+
+ "github.com/99designs/gqlgen/graphql"
+ "github.com/vektah/gqlparser"
+ "github.com/vektah/gqlparser/ast"
+)
+
+type executableSchemaStub struct {
+ NextResp chan struct{}
+}
+
+var _ graphql.ExecutableSchema = &executableSchemaStub{}
+
+func (e *executableSchemaStub) Schema() *ast.Schema {
+ return gqlparser.MustLoadSchema(&ast.Source{Input: `
+ schema { query: Query }
+ type Query {
+ me: User!
+ user(id: Int): User!
+ }
+ type User { name: String! }
+ `})
+}
+
+func (e *executableSchemaStub) Complexity(typeName, field string, childComplexity int, args map[string]interface{}) (int, bool) {
+ return 0, false
+}
+
+func (e *executableSchemaStub) Query(ctx context.Context, op *ast.OperationDefinition) *graphql.Response {
+ return &graphql.Response{Data: []byte(`{"name":"test"}`)}
+}
+
+func (e *executableSchemaStub) Mutation(ctx context.Context, op *ast.OperationDefinition) *graphql.Response {
+ return graphql.ErrorResponse(ctx, "mutations are not supported")
+}
+
+func (e *executableSchemaStub) Subscription(ctx context.Context, op *ast.OperationDefinition) func() *graphql.Response {
+ return func() *graphql.Response {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-e.NextResp:
+ return &graphql.Response{
+ Data: []byte(`{"name":"test"}`),
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/handler/websocket.go b/vendor/github.com/99designs/gqlgen/handler/websocket.go
new file mode 100644
index 000000000..c3dc38e0e
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/handler/websocket.go
@@ -0,0 +1,286 @@
+package handler
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "sync"
+
+ "github.com/99designs/gqlgen/graphql"
+ "github.com/gorilla/websocket"
+ "github.com/hashicorp/golang-lru"
+ "github.com/vektah/gqlparser"
+ "github.com/vektah/gqlparser/ast"
+ "github.com/vektah/gqlparser/gqlerror"
+ "github.com/vektah/gqlparser/validator"
+)
+
+const (
+ connectionInitMsg = "connection_init" // Client -> Server
+ connectionTerminateMsg = "connection_terminate" // Client -> Server
+ startMsg = "start" // Client -> Server
+ stopMsg = "stop" // Client -> Server
+ connectionAckMsg = "connection_ack" // Server -> Client
+ connectionErrorMsg = "connection_error" // Server -> Client
+ dataMsg = "data" // Server -> Client
+ errorMsg = "error" // Server -> Client
+ completeMsg = "complete" // Server -> Client
+ //connectionKeepAliveMsg = "ka" // Server -> Client TODO: keepalives
+)
+
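+// operationMessage is the graphql-ws envelope exchanged over the socket. An
+// illustrative "start" frame sent by a client looks like:
+//
+//	{"id":"1","type":"start","payload":{"query":"subscription { messageAdded { text } }"}}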
+type operationMessage struct {
+ Payload json.RawMessage `json:"payload,omitempty"`
+ ID string `json:"id,omitempty"`
+ Type string `json:"type"`
+}
+
+type wsConnection struct {
+ ctx context.Context
+ conn *websocket.Conn
+ exec graphql.ExecutableSchema
+ active map[string]context.CancelFunc
+ mu sync.Mutex
+ cfg *Config
+ cache *lru.Cache
+
+ initPayload InitPayload
+}
+
+func connectWs(exec graphql.ExecutableSchema, w http.ResponseWriter, r *http.Request, cfg *Config, cache *lru.Cache) {
+ ws, err := cfg.upgrader.Upgrade(w, r, http.Header{
+ "Sec-Websocket-Protocol": []string{"graphql-ws"},
+ })
+ if err != nil {
+ log.Printf("unable to upgrade %T to websocket %s: ", w, err.Error())
+ sendErrorf(w, http.StatusBadRequest, "unable to upgrade")
+ return
+ }
+
+ conn := wsConnection{
+ active: map[string]context.CancelFunc{},
+ exec: exec,
+ conn: ws,
+ ctx: r.Context(),
+ cfg: cfg,
+ cache: cache,
+ }
+
+ if !conn.init() {
+ return
+ }
+
+ conn.run()
+}
+
+func (c *wsConnection) init() bool {
+ message := c.readOp()
+ if message == nil {
+ c.close(websocket.CloseProtocolError, "decoding error")
+ return false
+ }
+
+ switch message.Type {
+ case connectionInitMsg:
+ if len(message.Payload) > 0 {
+ c.initPayload = make(InitPayload)
+ err := json.Unmarshal(message.Payload, &c.initPayload)
+ if err != nil {
+ return false
+ }
+ }
+
+ c.write(&operationMessage{Type: connectionAckMsg})
+ case connectionTerminateMsg:
+ c.close(websocket.CloseNormalClosure, "terminated")
+ return false
+ default:
+ c.sendConnectionError("unexpected message %s", message.Type)
+ c.close(websocket.CloseProtocolError, "unexpected message")
+ return false
+ }
+
+ return true
+}
+
+func (c *wsConnection) write(msg *operationMessage) {
+ c.mu.Lock()
+ c.conn.WriteJSON(msg)
+ c.mu.Unlock()
+}
+
+func (c *wsConnection) run() {
+ for {
+ message := c.readOp()
+ if message == nil {
+ return
+ }
+
+ switch message.Type {
+ case startMsg:
+ if !c.subscribe(message) {
+ return
+ }
+ case stopMsg:
+ c.mu.Lock()
+ closer := c.active[message.ID]
+ c.mu.Unlock()
+ if closer == nil {
+ c.sendError(message.ID, gqlerror.Errorf("%s is not running, cannot stop", message.ID))
+ continue
+ }
+
+ closer()
+ case connectionTerminateMsg:
+ c.close(websocket.CloseNormalClosure, "terminated")
+ return
+ default:
+ c.sendConnectionError("unexpected message %s", message.Type)
+ c.close(websocket.CloseProtocolError, "unexpected message")
+ return
+ }
+ }
+}
+
+func (c *wsConnection) subscribe(message *operationMessage) bool {
+ var reqParams params
+ if err := jsonDecode(bytes.NewReader(message.Payload), &reqParams); err != nil {
+ c.sendConnectionError("invalid json")
+ return false
+ }
+
+ var (
+ doc *ast.QueryDocument
+ cacheHit bool
+ )
+ if c.cache != nil {
+ val, ok := c.cache.Get(reqParams.Query)
+ if ok {
+ doc = val.(*ast.QueryDocument)
+ cacheHit = true
+ }
+ }
+ if !cacheHit {
+ var qErr gqlerror.List
+ doc, qErr = gqlparser.LoadQuery(c.exec.Schema(), reqParams.Query)
+ if qErr != nil {
+ c.sendError(message.ID, qErr...)
+ return true
+ }
+ if c.cache != nil {
+ c.cache.Add(reqParams.Query, doc)
+ }
+ }
+
+ op := doc.Operations.ForName(reqParams.OperationName)
+ if op == nil {
+ c.sendError(message.ID, gqlerror.Errorf("operation %s not found", reqParams.OperationName))
+ return true
+ }
+
+ vars, err := validator.VariableValues(c.exec.Schema(), op, reqParams.Variables)
+ if err != nil {
+ c.sendError(message.ID, err)
+ return true
+ }
+ reqCtx := c.cfg.newRequestContext(c.exec, doc, op, reqParams.Query, vars)
+ ctx := graphql.WithRequestContext(c.ctx, reqCtx)
+
+ if c.initPayload != nil {
+ ctx = withInitPayload(ctx, c.initPayload)
+ }
+
+ if op.Operation != ast.Subscription {
+ var result *graphql.Response
+ if op.Operation == ast.Query {
+ result = c.exec.Query(ctx, op)
+ } else {
+ result = c.exec.Mutation(ctx, op)
+ }
+
+ c.sendData(message.ID, result)
+ c.write(&operationMessage{ID: message.ID, Type: completeMsg})
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ c.mu.Lock()
+ c.active[message.ID] = cancel
+ c.mu.Unlock()
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ userErr := reqCtx.Recover(ctx, r)
+ c.sendError(message.ID, &gqlerror.Error{Message: userErr.Error()})
+ }
+ }()
+ next := c.exec.Subscription(ctx, op)
+ for result := next(); result != nil; result = next() {
+ c.sendData(message.ID, result)
+ }
+
+ c.write(&operationMessage{ID: message.ID, Type: completeMsg})
+
+ c.mu.Lock()
+ delete(c.active, message.ID)
+ c.mu.Unlock()
+ cancel()
+ }()
+
+ return true
+}
+
+func (c *wsConnection) sendData(id string, response *graphql.Response) {
+ b, err := json.Marshal(response)
+ if err != nil {
+ c.sendError(id, gqlerror.Errorf("unable to encode json response: %s", err.Error()))
+ return
+ }
+
+ c.write(&operationMessage{Type: dataMsg, ID: id, Payload: b})
+}
+
+func (c *wsConnection) sendError(id string, errors ...*gqlerror.Error) {
+ var errs []error
+ for _, err := range errors {
+ errs = append(errs, err)
+ }
+ b, err := json.Marshal(errs)
+ if err != nil {
+ panic(err)
+ }
+ c.write(&operationMessage{Type: errorMsg, ID: id, Payload: b})
+}
+
+func (c *wsConnection) sendConnectionError(format string, args ...interface{}) {
+ b, err := json.Marshal(&gqlerror.Error{Message: fmt.Sprintf(format, args...)})
+ if err != nil {
+ panic(err)
+ }
+
+ c.write(&operationMessage{Type: connectionErrorMsg, Payload: b})
+}
+
+func (c *wsConnection) readOp() *operationMessage {
+ _, r, err := c.conn.NextReader()
+ if err != nil {
+ c.sendConnectionError("invalid json")
+ return nil
+ }
+ message := operationMessage{}
+ if err := jsonDecode(r, &message); err != nil {
+ c.sendConnectionError("invalid json")
+ return nil
+ }
+
+ return &message
+}
+
+func (c *wsConnection) close(closeCode int, message string) {
+ c.mu.Lock()
+ _ = c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(closeCode, message))
+ c.mu.Unlock()
+ _ = c.conn.Close()
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/.gitattributes b/vendor/github.com/PuerkitoBio/goquery/.gitattributes
new file mode 100644
index 000000000..0cc26ec01
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.gitattributes
@@ -0,0 +1 @@
+testdata/* linguist-vendored
diff --git a/vendor/github.com/PuerkitoBio/goquery/.gitignore b/vendor/github.com/PuerkitoBio/goquery/.gitignore
new file mode 100644
index 000000000..970381cd2
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.gitignore
@@ -0,0 +1,16 @@
+# editor temporary files
+*.sublime-*
+.DS_Store
+*.swp
+#*.*#
+tags
+
+# direnv config
+.env*
+
+# test binaries
+*.test
+
+# coverage and profile outputs
+*.out
+
diff --git a/vendor/github.com/PuerkitoBio/goquery/.travis.yml b/vendor/github.com/PuerkitoBio/goquery/.travis.yml
new file mode 100644
index 000000000..cc1402d5c
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2.x
+ - 1.3.x
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - "1.10.x"
+ - 1.11.x
+ - tip
+
diff --git a/vendor/github.com/PuerkitoBio/goquery/LICENSE b/vendor/github.com/PuerkitoBio/goquery/LICENSE
new file mode 100644
index 000000000..f743d3728
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012-2016, Martin Angers & Contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/goquery/README.md b/vendor/github.com/PuerkitoBio/goquery/README.md
new file mode 100644
index 000000000..84f9af39e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/README.md
@@ -0,0 +1,179 @@
+# goquery - a little like that j-thing, only in Go
+[Build Status](http://travis-ci.org/PuerkitoBio/goquery) [GoDoc](http://godoc.org/github.com/PuerkitoBio/goquery) [Sourcegraph](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
+
+goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
+
+Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
+
+Syntax-wise, it is as close as possible to jQuery, with the same function names when possible, and that warm and fuzzy chainable interface. jQuery being the ultra-popular library that it is, I felt that writing a similar HTML-manipulating library was better to follow its API than to start anew (in the same spirit as Go's `fmt` package), even though some of its methods are less than intuitive (looking at you, [index()][index]...).
+
+## Table of Contents
+
+* [Installation](#installation)
+* [Changelog](#changelog)
+* [API](#api)
+* [Examples](#examples)
+* [Related Projects](#related-projects)
+* [Support](#support)
+* [License](#license)
+
+## Installation
+
+Please note that because of the net/html dependency, goquery requires Go1.1+.
+
+ $ go get github.com/PuerkitoBio/goquery
+
+(optional) To run unit tests:
+
+ $ cd $GOPATH/src/github.com/PuerkitoBio/goquery
+ $ go test
+
+(optional) To run benchmarks (warning: it runs for a few minutes):
+
+ $ cd $GOPATH/src/github.com/PuerkitoBio/goquery
+ $ go test -bench=".*"
+
+## Changelog
+
+**Note that goquery's API is now stable, and will not break.**
+
+* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
+* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
+* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
+* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
+* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
+* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
+* **2016-12-29 (v1.0.2)** : Optimize allocations for `Selection.Text` (thanks to @radovskyb).
+* **2016-08-28 (v1.0.1)** : Optimize performance for large documents.
+* **2016-07-27 (v1.0.0)** : Tag version 1.0.0.
+* **2016-06-15** : Invalid selector strings internally compile to a `Matcher` implementation that never matches any node (instead of a panic). So for example, `doc.Find("~")` returns an empty `*Selection` object.
+* **2016-02-02** : Add `NodeName` utility function similar to the DOM's `nodeName` property. It returns the tag name of the first element in a selection, and other relevant values of non-element nodes (see godoc for details). Add `OuterHtml` utility function similar to the DOM's `outerHTML` property (named `OuterHtml` in small caps for consistency with the existing `Html` method on the `Selection`).
+* **2015-04-20** : Add `AttrOr` helper method to return the attribute's value or a default value if absent. Thanks to [piotrkowalczuk][piotr].
+* **2015-02-04** : Add more manipulation functions - Prepend* - thanks again to [Andrew Stone][thatguystone].
+* **2014-11-28** : Add more manipulation functions - ReplaceWith*, Wrap* and Unwrap - thanks again to [Andrew Stone][thatguystone].
+* **2014-11-07** : Add manipulation functions (thanks to [Andrew Stone][thatguystone]) and `*Matcher` functions, that receive compiled cascadia selectors instead of selector strings, thus avoiding potential panics thrown by goquery via `cascadia.MustCompile` calls. This results in better performance (selectors can be compiled once and reused) and more idiomatic error handling (you can handle cascadia's compilation errors, instead of recovering from panics, which had been bugging me for a long time). Note that the actual type expected is a `Matcher` interface, that `cascadia.Selector` implements. Other matcher implementations could be used.
+* **2014-11-06** : Change import paths of net/html to golang.org/x/net/html (see https://groups.google.com/forum/#!topic/golang-nuts/eD8dh3T9yyA). Make sure to update your code to use the new import path too when you call goquery with `html.Node`s.
+* **v0.3.2** : Add `NewDocumentFromReader()` (thanks jweir) which allows creating a goquery document from an io.Reader.
+* **v0.3.1** : Add `NewDocumentFromResponse()` (thanks assassingj) which allows creating a goquery document from an http response.
+* **v0.3.0** : Add `EachWithBreak()` which allows to break out of an `Each()` loop by returning false. This function was added instead of changing the existing `Each()` to avoid breaking compatibility.
+* **v0.2.1** : Make go-getable, now that [go.net/html is Go1.0-compatible][gonet] (thanks to @matrixik for pointing this out).
+* **v0.2.0** : Add support for negative indices in Slice(). **BREAKING CHANGE** `Document.Root` is removed, `Document` is now a `Selection` itself (a selection of one, the root element, just like `Document.Root` was before). Add jQuery's Closest() method.
+* **v0.1.1** : Add benchmarks to use as baseline for refactorings, refactor Next...() and Prev...() methods to use the new html package's linked list features (Next/PrevSibling, FirstChild). Good performance boost (40+% in some cases).
+* **v0.1.0** : Initial release.
+
+## API
+
+goquery exposes two structs, `Document` and `Selection`, and the `Matcher` interface. Unlike jQuery, which is loaded as part of a DOM document, and thus acts on its containing document, goquery doesn't know which HTML document to act upon. So it needs to be told, and that's what the `Document` type is for. It holds the root document node as the initial Selection value to manipulate.
+
+jQuery often has many variants for the same function (no argument, a selector string argument, a jQuery object argument, a DOM element argument, ...). Instead of exposing the same features in goquery as a single method with variadic empty interface arguments, statically-typed signatures are used following this naming convention:
+
+* When the jQuery equivalent can be called with no argument, it has the same name as jQuery for the no argument signature (e.g.: `Prev()`), and the version with a selector string argument is called `XxxFiltered()` (e.g.: `PrevFiltered()`)
+* When the jQuery equivalent **requires** one argument, the same name as jQuery is used for the selector string version (e.g.: `Is()`)
+* The signatures accepting a jQuery object as argument are defined in goquery as `XxxSelection()` and take a `*Selection` object as argument (e.g.: `FilterSelection()`)
+* The signatures accepting a DOM element as argument in jQuery are defined in goquery as `XxxNodes()` and take a variadic argument of type `*html.Node` (e.g.: `FilterNodes()`)
+* The signatures accepting a function as argument in jQuery are defined in goquery as `XxxFunction()` and take a function as argument (e.g.: `FilterFunction()`)
+* The goquery methods that can be called with a selector string have a corresponding version that take a `Matcher` interface and are defined as `XxxMatcher()` (e.g.: `IsMatcher()`)
+
+Utility functions that are not in jQuery but are useful in Go are implemented as functions (that take a `*Selection` as parameter), to avoid a potential naming clash on the `*Selection`'s methods (reserved for jQuery-equivalent behaviour).
+
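+As an example of this naming convention in action, here is a small, self-contained sketch (the HTML fragment and variable names are made up for illustration):
+
+```Go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/PuerkitoBio/goquery"
+)
+
+func main() {
+	doc, err := goquery.NewDocumentFromReader(strings.NewReader(
+		`<ul><li class="a">one</li><li>two</li><li class="a">three</li></ul>`))
+	if err != nil {
+		panic(err)
+	}
+
+	items := doc.Find("li")                 // selector-string form keeps the jQuery name
+	fmt.Println(items.Length())             // 3
+	fmt.Println(items.Last().Prev().Text()) // no-argument forms keep the jQuery names; prints "two"
+
+	// the function variant gets the Function suffix
+	withClass := items.FilterFunction(func(i int, s *goquery.Selection) bool {
+		return s.HasClass("a")
+	})
+	fmt.Println(withClass.Length()) // 2
+}
+```
+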
+The complete [godoc reference documentation can be found here][doc].
+
+Please note that Cascadia's selectors do not necessarily match all supported selectors of jQuery (Sizzle). See the [cascadia project][cascadia] for details. Invalid selector strings compile to a `Matcher` that fails to match any node. Behaviour of the various functions that take a selector string as argument follows from that fact, e.g. (where `~` is an invalid selector string):
+
+* `Find("~")` returns an empty selection because the selector string doesn't match anything.
+* `Add("~")` returns a new selection that holds the same nodes as the original selection, because it didn't add any node (selector string didn't match anything).
+* `ParentsFiltered("~")` returns an empty selection because the selector string doesn't match anything.
+* `ParentsUntil("~")` returns all parents of the selection because the selector string didn't match any element to stop before the top element.
+
+## Examples
+
+See some tips and tricks in the [wiki][].
+
+Adapted from example_test.go:
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/PuerkitoBio/goquery"
+)
+
+func ExampleScrape() {
+ // Request the HTML page.
+ res, err := http.Get("http://metalsucks.net")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != 200 {
+ log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
+ }
+
+ // Load the HTML document
+ doc, err := goquery.NewDocumentFromReader(res.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Find the review items
+ doc.Find(".sidebar-reviews article .content-block").Each(func(i int, s *goquery.Selection) {
+ // For each item found, get the band and title
+ band := s.Find("a").Text()
+ title := s.Find("i").Text()
+ fmt.Printf("Review %d: %s - %s\n", i, band, title)
+ })
+}
+
+func main() {
+ ExampleScrape()
+}
+```
+
+## Related Projects
+
+- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
+- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
+- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
+- [asciimoo/colly](https://github.com/asciimoo/colly), a lightning fast and elegant Scraping Framework
+- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
+- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
+
+## Support
+
+There are a number of ways you can support the project:
+
+* Use it, star it, build something with it, spread the word!
+ - If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
+* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
+ - Please search existing issues before opening a new one - it may have already been addressed.
+* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
+ - Make sure new code is tested.
+ - Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
+
+If you desperately want to send money my way, I have a BuyMeACoffee.com page:
+
+
+
+## License
+
+The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia's license is [here][caslic].
+
+[jquery]: http://jquery.com/
+[go]: http://golang.org/
+[cascadia]: https://github.com/andybalholm/cascadia
+[cascadiacli]: https://github.com/suntong/cascadia
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[golic]: http://golang.org/LICENSE
+[caslic]: https://github.com/andybalholm/cascadia/blob/master/LICENSE
+[doc]: http://godoc.org/github.com/PuerkitoBio/goquery
+[index]: http://api.jquery.com/index/
+[gonet]: https://github.com/golang/net/
+[html]: http://godoc.org/golang.org/x/net/html
+[wiki]: https://github.com/PuerkitoBio/goquery/wiki/Tips-and-tricks
+[thatguystone]: https://github.com/thatguystone
+[piotr]: https://github.com/piotrkowalczuk
+[goq]: https://github.com/andrewstuart/goq
diff --git a/vendor/github.com/PuerkitoBio/goquery/array.go b/vendor/github.com/PuerkitoBio/goquery/array.go
new file mode 100644
index 000000000..1b1f6cbe6
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/array.go
@@ -0,0 +1,124 @@
+package goquery
+
+import (
+ "golang.org/x/net/html"
+)
+
+const (
+ maxUint = ^uint(0)
+ maxInt = int(maxUint >> 1)
+
+ // ToEnd is a special index value that can be used as end index in a call
+ // to Slice so that all elements are selected until the end of the Selection.
+ // It is equivalent to passing (*Selection).Length().
+ ToEnd = maxInt
+)
+
+// First reduces the set of matched elements to the first in the set.
+// It returns a new Selection object, and an empty Selection object if the
+// the selection is empty.
+func (s *Selection) First() *Selection {
+ return s.Eq(0)
+}
+
+// Last reduces the set of matched elements to the last in the set.
+// It returns a new Selection object, and an empty Selection object if
+// the selection is empty.
+func (s *Selection) Last() *Selection {
+ return s.Eq(-1)
+}
+
+// Eq reduces the set of matched elements to the one at the specified index.
+// If a negative index is given, it counts backwards starting at the end of the
+// set. It returns a new Selection object, and an empty Selection object if the
+// index is invalid.
+func (s *Selection) Eq(index int) *Selection {
+ if index < 0 {
+ index += len(s.Nodes)
+ }
+
+ if index >= len(s.Nodes) || index < 0 {
+ return newEmptySelection(s.document)
+ }
+
+ return s.Slice(index, index+1)
+}
+
+// Slice reduces the set of matched elements to a subset specified by a range
+// of indices. The start index is 0-based and indicates the index of the first
+// element to select. The end index is 0-based and indicates the index at which
+// the elements stop being selected (the end index is not selected).
+//
+// The indices may be negative, in which case they represent an offset from the
+// end of the selection.
+//
+// The special value ToEnd may be specified as end index, in which case all elements
+// until the end are selected. This works both for a positive and negative start
+// index.
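+//
+// For example, s.Slice(0, 3) selects the first three elements, and
+// s.Slice(2, goquery.ToEnd) selects every element from the third one to the end.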
+func (s *Selection) Slice(start, end int) *Selection {
+ if start < 0 {
+ start += len(s.Nodes)
+ }
+ if end == ToEnd {
+ end = len(s.Nodes)
+ } else if end < 0 {
+ end += len(s.Nodes)
+ }
+ return pushStack(s, s.Nodes[start:end])
+}
+
+// Get retrieves the underlying node at the specified index.
+// Get without parameter is not implemented, since the node array is available
+// on the Selection object.
+func (s *Selection) Get(index int) *html.Node {
+ if index < 0 {
+ index += len(s.Nodes) // Negative index gets from the end
+ }
+ return s.Nodes[index]
+}
+
+// Index returns the position of the first element within the Selection object
+// relative to its sibling elements.
+func (s *Selection) Index() int {
+ if len(s.Nodes) > 0 {
+ return newSingleSelection(s.Nodes[0], s.document).PrevAll().Length()
+ }
+ return -1
+}
+
+// IndexSelector returns the position of the first element within the
+// Selection object relative to the elements matched by the selector, or -1 if
+// not found.
+func (s *Selection) IndexSelector(selector string) int {
+ if len(s.Nodes) > 0 {
+ sel := s.document.Find(selector)
+ return indexInSlice(sel.Nodes, s.Nodes[0])
+ }
+ return -1
+}
+
+// IndexMatcher returns the position of the first element within the
+// Selection object relative to the elements matched by the matcher, or -1 if
+// not found.
+func (s *Selection) IndexMatcher(m Matcher) int {
+ if len(s.Nodes) > 0 {
+ sel := s.document.FindMatcher(m)
+ return indexInSlice(sel.Nodes, s.Nodes[0])
+ }
+ return -1
+}
+
+// IndexOfNode returns the position of the specified node within the Selection
+// object, or -1 if not found.
+func (s *Selection) IndexOfNode(node *html.Node) int {
+ return indexInSlice(s.Nodes, node)
+}
+
+// IndexOfSelection returns the position of the first node in the specified
+// Selection object within this Selection object, or -1 if not found.
+func (s *Selection) IndexOfSelection(sel *Selection) int {
+ if sel != nil && len(sel.Nodes) > 0 {
+ return indexInSlice(s.Nodes, sel.Nodes[0])
+ }
+ return -1
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/doc.go b/vendor/github.com/PuerkitoBio/goquery/doc.go
new file mode 100644
index 000000000..71146a780
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/doc.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2012-2016, Martin Angers & Contributors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation and/or
+// other materials provided with the distribution.
+// * Neither the name of the author nor the names of its contributors may be used to
+// endorse or promote products derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
+// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package goquery implements features similar to jQuery, including the chainable
+syntax, to manipulate and query an HTML document.
+
+It brings a syntax and a set of features similar to jQuery to the Go language.
+It is based on Go's net/html package and the CSS Selector library cascadia.
+Since the net/html parser returns nodes, and not a full-featured DOM
+tree, jQuery's stateful manipulation functions (like height(), css(), detach())
+have been left off.
+
+Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is
+the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML.
+See the repository's wiki for various options on how to do this.
+
+Syntax-wise, it is as close as possible to jQuery, with the same method names when
+possible, and that warm and fuzzy chainable interface. jQuery being the
+ultra-popular library that it is, writing a similar HTML-manipulating
+library was better to follow its API than to start anew (in the same spirit as
+Go's fmt package), even though some of its methods are less than intuitive (looking
+at you, index()...).
+
+It is hosted on GitHub, along with additional documentation in the README.md
+file: https://github.com/puerkitobio/goquery
+
+Please note that because of the net/html dependency, goquery requires Go1.1+.
+
+The various methods are split into files based on the category of behavior.
+The three dots (...) indicate that various "overloads" are available.
+
+* array.go : array-like positional manipulation of the selection.
+ - Eq()
+ - First()
+ - Get()
+ - Index...()
+ - Last()
+ - Slice()
+
+* expand.go : methods that expand or augment the selection's set.
+ - Add...()
+ - AndSelf()
+ - Union(), which is an alias for AddSelection()
+
+* filter.go : filtering methods, that reduce the selection's set.
+ - End()
+ - Filter...()
+ - Has...()
+ - Intersection(), which is an alias of FilterSelection()
+ - Not...()
+
+* iteration.go : methods to loop over the selection's nodes.
+ - Each()
+ - EachWithBreak()
+ - Map()
+
+* manipulation.go : methods for modifying the document
+ - After...()
+ - Append...()
+ - Before...()
+ - Clone()
+ - Empty()
+ - Prepend...()
+ - Remove...()
+ - ReplaceWith...()
+ - Unwrap()
+ - Wrap...()
+ - WrapAll...()
+ - WrapInner...()
+
+* property.go : methods that inspect and get the node's properties values.
+ - Attr*(), RemoveAttr(), SetAttr()
+ - AddClass(), HasClass(), RemoveClass(), ToggleClass()
+ - Html()
+ - Length()
+ - Size(), which is an alias for Length()
+ - Text()
+
+* query.go : methods that query, or reflect, a node's identity.
+ - Contains()
+ - Is...()
+
+* traversal.go : methods to traverse the HTML document tree.
+ - Children...()
+ - Contents()
+ - Find...()
+ - Next...()
+ - Parent[s]...()
+ - Prev...()
+ - Siblings...()
+
+* type.go : definition of the types exposed by goquery.
+ - Document
+ - Selection
+ - Matcher
+
+* utilities.go : definition of helper functions (and not methods on a *Selection)
+that are not part of jQuery, but are useful to goquery.
+ - NodeName
+ - OuterHtml
+*/
+package goquery
diff --git a/vendor/github.com/PuerkitoBio/goquery/expand.go b/vendor/github.com/PuerkitoBio/goquery/expand.go
new file mode 100644
index 000000000..7caade531
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/expand.go
@@ -0,0 +1,70 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Add adds the selector string's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The selector string is run in the context of the document of the current
+// Selection object.
+func (s *Selection) Add(selector string) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
+}
+
+// AddMatcher adds the matcher's matching nodes to those in the current
+// selection and returns a new Selection object.
+// The matcher is run in the context of the document of the current
+// Selection object.
+func (s *Selection) AddMatcher(m Matcher) *Selection {
+ return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
+}
+
+// AddSelection adds the specified Selection object's nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.AddNodes()
+ }
+ return s.AddNodes(sel.Nodes...)
+}
+
+// Union is an alias for AddSelection.
+func (s *Selection) Union(sel *Selection) *Selection {
+ return s.AddSelection(sel)
+}
+
+// AddNodes adds the specified nodes to those in the
+// current selection and returns a new Selection object.
+func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
+}
+
+// AndSelf adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+// Deprecated: This function has been deprecated and is now an alias for AddBack().
+func (s *Selection) AndSelf() *Selection {
+ return s.AddBack()
+}
+
+// AddBack adds the previous set of elements on the stack to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the previous one.
+func (s *Selection) AddBack() *Selection {
+ return s.AddSelection(s.prevSel)
+}
+
+// AddBackFiltered reduces the previous set of elements on the stack to those that
+// match the selector string, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackFiltered(selector string) *Selection {
+ return s.AddSelection(s.prevSel.Filter(selector))
+}
+
+// AddBackMatcher reduces the previous set of elements on the stack to those that match
+// the matcher, and adds them to the current set.
+// It returns a new Selection object containing the current Selection combined
+// with the filtered previous one.
+func (s *Selection) AddBackMatcher(m Matcher) *Selection {
+ return s.AddSelection(s.prevSel.FilterMatcher(m))
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/filter.go b/vendor/github.com/PuerkitoBio/goquery/filter.go
new file mode 100644
index 000000000..9138ffb33
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/filter.go
@@ -0,0 +1,163 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Filter reduces the set of matched elements to those that match the selector string.
+// It returns a new Selection object for this subset of matching elements.
+func (s *Selection) Filter(selector string) *Selection {
+ return s.FilterMatcher(compileMatcher(selector))
+}
+
+// FilterMatcher reduces the set of matched elements to those that match
+// the given matcher. It returns a new Selection object for this subset
+// of matching elements.
+func (s *Selection) FilterMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, true))
+}
+
+// Not removes elements from the Selection that match the selector string.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) Not(selector string) *Selection {
+ return s.NotMatcher(compileMatcher(selector))
+}
+
+// NotMatcher removes elements from the Selection that match the given matcher.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotMatcher(m Matcher) *Selection {
+ return pushStack(s, winnow(s, m, false))
+}
+
+// FilterFunction reduces the set of matched elements to those that pass the function's test.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, true))
+}
+
+// NotFunction removes elements from the Selection that pass the function's test.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
+ return pushStack(s, winnowFunction(s, f, false))
+}
+
+// FilterNodes reduces the set of matched elements to those that match the specified nodes.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, true))
+}
+
+// NotNodes removes elements from the Selection that match the specified nodes.
+// It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, winnowNodes(s, nodes, false))
+}
+
+// FilterSelection reduces the set of matched elements to those that match a
+// node in the specified Selection object.
+// It returns a new Selection object for this subset of elements.
+func (s *Selection) FilterSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, true))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, true))
+}
+
+// NotSelection removes elements from the Selection that match a node in the specified
+// Selection object. It returns a new Selection object with the matching elements removed.
+func (s *Selection) NotSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, winnowNodes(s, nil, false))
+ }
+ return pushStack(s, winnowNodes(s, sel.Nodes, false))
+}
+
+// Intersection is an alias for FilterSelection.
+func (s *Selection) Intersection(sel *Selection) *Selection {
+ return s.FilterSelection(sel)
+}
+
+// Has reduces the set of matched elements to those that have a descendant
+// that matches the selector.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) Has(selector string) *Selection {
+ return s.HasSelection(s.document.Find(selector))
+}
+
+// HasMatcher reduces the set of matched elements to those that have a descendant
+// that matches the matcher.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasMatcher(m Matcher) *Selection {
+ return s.HasSelection(s.document.FindMatcher(m))
+}
+
+// HasNodes reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
+ return s.FilterFunction(func(_ int, sel *Selection) bool {
+ // Add all nodes that contain one of the specified nodes
+ for _, n := range nodes {
+ if sel.Contains(n) {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// HasSelection reduces the set of matched elements to those that have a
+// descendant that matches one of the nodes of the specified Selection object.
+// It returns a new Selection object with the matching elements.
+func (s *Selection) HasSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.HasNodes()
+ }
+ return s.HasNodes(sel.Nodes...)
+}
+
+// End ends the most recent filtering operation in the current chain and
+// returns the set of matched elements to its previous state.
+func (s *Selection) End() *Selection {
+ if s.prevSel != nil {
+ return s.prevSel
+ }
+ return newEmptySelection(s.document)
+}
+
+// Filter based on the matcher, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
+ // Optimize if keep is requested
+ if keep {
+ return m.Filter(sel.Nodes)
+ }
+ // Use grep
+ return grep(sel, func(i int, s *Selection) bool {
+ return !m.Match(s.Get(0))
+ })
+}
+
+// Filter based on an array of nodes, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
+ if len(nodes)+len(sel.Nodes) < minNodesForSet {
+ return grep(sel, func(i int, s *Selection) bool {
+ return isInSlice(nodes, s.Get(0)) == keep
+ })
+ }
+
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return grep(sel, func(i int, s *Selection) bool {
+ return set[s.Get(0)] == keep
+ })
+}
+
+// Filter based on a function test, and the indicator to keep (Filter) or
+// to get rid of (Not) the matching elements.
+func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
+ return grep(sel, func(i int, s *Selection) bool {
+ return f(i, s) == keep
+ })
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.mod b/vendor/github.com/PuerkitoBio/goquery/go.mod
new file mode 100644
index 000000000..2fa1332a5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.mod
@@ -0,0 +1,6 @@
+module github.com/PuerkitoBio/goquery
+
+require (
+ github.com/andybalholm/cascadia v1.0.0
+ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
+)
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.sum b/vendor/github.com/PuerkitoBio/goquery/go.sum
new file mode 100644
index 000000000..11c575754
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.sum
@@ -0,0 +1,5 @@
+github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/vendor/github.com/PuerkitoBio/goquery/iteration.go b/vendor/github.com/PuerkitoBio/goquery/iteration.go
new file mode 100644
index 000000000..e246f2e0e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/iteration.go
@@ -0,0 +1,39 @@
+package goquery
+
+// Each iterates over a Selection object, executing a function for each
+// matched element. It returns the current Selection object. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Each(f func(int, *Selection)) *Selection {
+ for i, n := range s.Nodes {
+ f(i, newSingleSelection(n, s.document))
+ }
+ return s
+}
+
+// EachWithBreak iterates over a Selection object, executing a function for each
+// matched element. It is identical to Each except that it is possible to break
+// out of the loop by returning false in the callback function. It returns the
+// current Selection object.
+func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
+ for i, n := range s.Nodes {
+ if !f(i, newSingleSelection(n, s.document)) {
+ return s
+ }
+ }
+ return s
+}
+
+// Map passes each element in the current matched set through a function,
+// producing a slice of string holding the returned values. The function
+// f is called for each element in the selection with the index of the
+// element in that selection starting at 0, and a *Selection that contains
+// only that element.
+func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
+ for i, n := range s.Nodes {
+ result = append(result, f(i, newSingleSelection(n, s.document)))
+ }
+
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation.go b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
new file mode 100644
index 000000000..34eb7570f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
@@ -0,0 +1,574 @@
+package goquery
+
+import (
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// After applies the selector from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) After(selector string) *Selection {
+ return s.AfterMatcher(compileMatcher(selector))
+}
+
+// AfterMatcher applies the matcher from the root document and inserts the matched elements
+// after the elements in the set of matched elements.
+//
+// If one of the matched elements in the selection is not currently in the
+// document, it's impossible to insert nodes after it, so it will be ignored.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterMatcher(m Matcher) *Selection {
+ return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AfterSelection inserts the elements in the selection after each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterSelection(sel *Selection) *Selection {
+ return s.AfterNodes(sel.Nodes...)
+}
+
+// AfterHtml parses the html and inserts it after the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterHtml(html string) *Selection {
+ return s.AfterNodes(parseHtml(html)...)
+}
+
+// AfterNodes inserts the nodes after each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn.NextSibling)
+ }
+ })
+}
+
+// Append appends the elements specified by the selector to the end of each element
+// in the set of matched elements, following those rules:
+//
+// 1) The selector is applied to the root document.
+//
+// 2) Elements that are part of the document will be moved to the new location.
+//
+// 3) If there are multiple locations to append to, cloned nodes will be
+// appended to all target locations except the last one, which will be moved
+// as noted in (2).
+func (s *Selection) Append(selector string) *Selection {
+ return s.AppendMatcher(compileMatcher(selector))
+}
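+
+// appendSpanToDivs is an illustrative sketch, not upstream code. Assuming a
+// document containing <div id="a"></div><div id="b"></div><span>x</span>,
+// after the call #a holds a clone of the span while #b holds the original
+// span, moved from its previous location (rules 2 and 3 above). The
+// selectors are assumptions made for the example.
+func appendSpanToDivs(doc *Document) *Selection {
+ return doc.Find("div").Append("span")
+}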
+
+// AppendMatcher appends the elements specified by the matcher to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendMatcher(m Matcher) *Selection {
+ return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// AppendSelection appends the elements in the selection to the end of each element
+// in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendSelection(sel *Selection) *Selection {
+ return s.AppendNodes(sel.Nodes...)
+}
+
+// AppendHtml parses the html and appends it to the set of matched elements.
+func (s *Selection) AppendHtml(html string) *Selection {
+ return s.AppendNodes(parseHtml(html)...)
+}
+
+// AppendNodes appends the specified nodes to each node in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ sn.AppendChild(n)
+ })
+}
+
+// Before inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) Before(selector string) *Selection {
+ return s.BeforeMatcher(compileMatcher(selector))
+}
+
+// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeMatcher(m Matcher) *Selection {
+ return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// BeforeSelection inserts the elements in the selection before each element in the set of matched
+// elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeSelection(sel *Selection) *Selection {
+ return s.BeforeNodes(sel.Nodes...)
+}
+
+// BeforeHtml parses the html and inserts it before the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeHtml(html string) *Selection {
+ return s.BeforeNodes(parseHtml(html)...)
+}
+
+// BeforeNodes inserts the nodes before each element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
+ if sn.Parent != nil {
+ sn.Parent.InsertBefore(n, sn)
+ }
+ })
+}
+
+// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
+// attached to the document.
+func (s *Selection) Clone() *Selection {
+ ns := newEmptySelection(s.document)
+ ns.Nodes = cloneNodes(s.Nodes)
+ return ns
+}
+
+// Empty removes all child nodes from the set of matched elements.
+// It returns the removed child nodes in a new Selection.
+func (s *Selection) Empty() *Selection {
+ var nodes []*html.Node
+
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ nodes = append(nodes, c)
+ }
+ }
+
+ return pushStack(s, nodes)
+}
+
+// Prepend prepends the elements specified by the selector to each element in
+// the set of matched elements, following the same rules as Append.
+func (s *Selection) Prepend(selector string) *Selection {
+ return s.PrependMatcher(compileMatcher(selector))
+}
+
+// PrependMatcher prepends the elements specified by the matcher to each
+// element in the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependMatcher(m Matcher) *Selection {
+ return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// PrependSelection prepends the elements in the selection to each element in
+// the set of matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependSelection(sel *Selection) *Selection {
+ return s.PrependNodes(sel.Nodes...)
+}
+
+// PrependHtml parses the html and prepends it to the set of matched elements.
+func (s *Selection) PrependHtml(html string) *Selection {
+ return s.PrependNodes(parseHtml(html)...)
+}
+
+// PrependNodes prepends the specified nodes to each node in the set of
+// matched elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
+ return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
+ // sn.FirstChild may be nil, in which case this functions like
+ // sn.AppendChild()
+ sn.InsertBefore(n, sn.FirstChild)
+ })
+}
+
+// Remove removes the set of matched elements from the document.
+// It returns the same selection, now consisting of nodes not in the document.
+func (s *Selection) Remove() *Selection {
+ for _, n := range s.Nodes {
+ if n.Parent != nil {
+ n.Parent.RemoveChild(n)
+ }
+ }
+
+ return s
+}
+
+// RemoveFiltered removes the set of matched elements by selector.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveFiltered(selector string) *Selection {
+ return s.RemoveMatcher(compileMatcher(selector))
+}
+
+// RemoveMatcher removes the set of matched elements.
+// It returns the Selection of removed nodes.
+func (s *Selection) RemoveMatcher(m Matcher) *Selection {
+ return s.FilterMatcher(m).Remove()
+}
+
+// ReplaceWith replaces each element in the set of matched elements with the
+// nodes matched by the given selector.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWith(selector string) *Selection {
+ return s.ReplaceWithMatcher(compileMatcher(selector))
+}
+
+// ReplaceWithMatcher replaces each element in the set of matched elements with
+// the nodes matched by the given Matcher.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
+ return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// ReplaceWithSelection replaces each element in the set of matched elements with
+// the nodes from the given Selection.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
+ return s.ReplaceWithNodes(sel.Nodes...)
+}
+
+// ReplaceWithHtml replaces each element in the set of matched elements with
+// the parsed HTML.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithHtml(html string) *Selection {
+ return s.ReplaceWithNodes(parseHtml(html)...)
+}
+
+// ReplaceWithNodes replaces each element in the set of matched elements with
+// the given nodes.
+// It returns the removed elements.
+//
+// This follows the same rules as Selection.Append.
+func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
+ s.AfterNodes(ns...)
+ return s.Remove()
+}
+
+// SetHtml sets the html content of each element in the selection to the
+// specified html string.
+func (s *Selection) SetHtml(html string) *Selection {
+ return setHtmlNodes(s, parseHtml(html)...)
+}
+
+// SetText sets the content of each element in the selection to the specified text.
+// The provided text string is escaped.
+func (s *Selection) SetText(text string) *Selection {
+ return s.SetHtml(html.EscapeString(text))
+}
+
+// Unwrap removes the parents of the set of matched elements, leaving the matched
+// elements (and their siblings, if any) in their place.
+// It returns the original selection.
+func (s *Selection) Unwrap() *Selection {
+ s.Parent().Each(func(i int, ss *Selection) {
+ // For some reason, jquery allows unwrap to remove the <head> element, so
+ // allowing it here too. Same for <html>. Why it allows those elements to
+ // be unwrapped while not allowing body is a mystery to me.
+ if ss.Nodes[0].Data != "body" {
+ ss.ReplaceWithSelection(ss.Contents())
+ }
+ })
+
+ return s
+}
+
+// Wrap wraps each element in the set of matched elements inside the first
+// element matched by the given selector. The matched child is cloned before
+// being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) Wrap(selector string) *Selection {
+ return s.WrapMatcher(compileMatcher(selector))
+}
+
+// WrapMatcher wraps each element in the set of matched elements inside the
+// first element matched by the given matcher. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapMatcher(m Matcher) *Selection {
+ return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapSelection wraps each element in the set of matched elements inside the
+// first element in the given Selection. The element is cloned before being
+// inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapSelection(sel *Selection) *Selection {
+ return s.wrapNodes(sel.Nodes...)
+}
+
+// WrapHtml wraps each element in the set of matched elements inside the inner-
+// most child of the given HTML.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapHtml(html string) *Selection {
+ return s.wrapNodes(parseHtml(html)...)
+}
+
+// WrapNode wraps each element in the set of matched elements inside the inner-
+// most child of the given node. The given node is copied before being inserted
+// into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapNode(n *html.Node) *Selection {
+ return s.wrapNodes(n)
+}
+
+func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
+ s.Each(func(i int, ss *Selection) {
+ ss.wrapAllNodes(ns...)
+ })
+
+ return s
+}
+
+// WrapAll wraps a single HTML structure, matched by the given selector, around
+// all elements in the set of matched elements. The matched child is cloned
+// before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAll(selector string) *Selection {
+ return s.WrapAllMatcher(compileMatcher(selector))
+}
+
+// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
+// around all elements in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
+ return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapAllSelection wraps a single HTML structure, the first node of the given
+// Selection, around all elements in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
+ return s.wrapAllNodes(sel.Nodes...)
+}
+
+// WrapAllHtml wraps the given HTML structure around all elements in the set of
+// matched elements. The matched child is cloned before being inserted into the
+// document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllHtml(html string) *Selection {
+ return s.wrapAllNodes(parseHtml(html)...)
+}
+
+func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
+ if len(ns) > 0 {
+ return s.WrapAllNode(ns[0])
+ }
+ return s
+}
+
+// WrapAllNode wraps the given node around the first element in the Selection,
+// making all other nodes in the Selection children of the given node. The node
+// is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapAllNode(n *html.Node) *Selection {
+ if s.Size() == 0 {
+ return s
+ }
+
+ wrap := cloneNode(n)
+
+ first := s.Nodes[0]
+ if first.Parent != nil {
+ first.Parent.InsertBefore(wrap, first)
+ first.Parent.RemoveChild(first)
+ }
+
+ for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
+ wrap = c
+ }
+
+ newSingleSelection(wrap, s.document).AppendSelection(s)
+
+ return s
+}
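+
+// wrapAllInSection is an illustrative sketch, not upstream code: it clones
+// the first element matched by "section.box", inserts the clone at the
+// position of the first <p>, and moves every matched <p> inside the
+// innermost element of that clone. The selectors are assumptions made for
+// the example.
+func wrapAllInSection(doc *Document) *Selection {
+ return doc.Find("p").WrapAll("section.box")
+}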
+
+// WrapInner wraps an HTML structure, matched by the given selector, around the
+// content of each element in the set of matched elements. The matched child is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInner(selector string) *Selection {
+ return s.WrapInnerMatcher(compileMatcher(selector))
+}
+
+// WrapInnerMatcher wraps an HTML structure, matched by the given matcher,
+// around the content of each element in the set of matched elements. The matched
+// child is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
+ return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
+}
+
+// WrapInnerSelection wraps the first element of the given Selection around
+// the content of each element in the set of matched elements. The element is
+// cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
+ return s.wrapInnerNodes(sel.Nodes...)
+}
+
+// WrapInnerHtml wraps an HTML structure, parsed from the given HTML string,
+// around the content of each element in the set of matched elements. The
+// parsed structure is cloned before being inserted into the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerHtml(html string) *Selection {
+ return s.wrapInnerNodes(parseHtml(html)...)
+}
+
+// WrapInnerNode wraps the given node around the content of each element in
+// the set of matched elements. The node is cloned before being inserted into
+// the document.
+//
+// It returns the original set of elements.
+func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
+ return s.wrapInnerNodes(n)
+}
+
+func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
+ if len(ns) == 0 {
+ return s
+ }
+
+ s.Each(func(i int, s *Selection) {
+ contents := s.Contents()
+
+ if contents.Size() > 0 {
+ contents.wrapAllNodes(ns...)
+ } else {
+ s.AppendNodes(cloneNode(ns[0]))
+ }
+ })
+
+ return s
+}
+
+func parseHtml(h string) []*html.Node {
+ // Errors are only returned when the io.Reader returns any error besides
+ // EOF, but strings.Reader never will
+ nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
+ if err != nil {
+ panic("goquery: failed to parse HTML: " + err.Error())
+ }
+ return nodes
+}
+
+func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
+ for _, n := range s.Nodes {
+ for c := n.FirstChild; c != nil; c = n.FirstChild {
+ n.RemoveChild(c)
+ }
+ for _, c := range ns {
+ n.AppendChild(cloneNode(c))
+ }
+ }
+ return s
+}
+
+// Get the first child that is an ElementNode
+func getFirstChildEl(n *html.Node) *html.Node {
+ c := n.FirstChild
+ for c != nil && c.Type != html.ElementNode {
+ c = c.NextSibling
+ }
+ return c
+}
+
+// Deep copy a slice of nodes.
+func cloneNodes(ns []*html.Node) []*html.Node {
+ cns := make([]*html.Node, 0, len(ns))
+
+ for _, n := range ns {
+ cns = append(cns, cloneNode(n))
+ }
+
+ return cns
+}
+
+// Deep copy a node. The new node has clones of all the original node's
+// children but none of its parents or siblings.
+func cloneNode(n *html.Node) *html.Node {
+ nn := &html.Node{
+ Type: n.Type,
+ DataAtom: n.DataAtom,
+ Data: n.Data,
+ Attr: make([]html.Attribute, len(n.Attr)),
+ }
+
+ copy(nn.Attr, n.Attr)
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ nn.AppendChild(cloneNode(c))
+ }
+
+ return nn
+}
+
+func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
+ f func(sn *html.Node, n *html.Node)) *Selection {
+
+ lasti := s.Size() - 1
+
+ // net.Html doesn't provide document fragments for insertion, so to get
+ // things in the correct order with After() and Prepend(), the callback
+ // needs to be called on the reverse of the nodes.
+ if reverse {
+ for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
+ ns[i], ns[j] = ns[j], ns[i]
+ }
+ }
+
+ for i, sn := range s.Nodes {
+ for _, n := range ns {
+ if i != lasti {
+ f(sn, cloneNode(n))
+ } else {
+ if n.Parent != nil {
+ n.Parent.RemoveChild(n)
+ }
+ f(sn, n)
+ }
+ }
+ }
+
+ return s
+}
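+
+// prependBadges is an illustrative sketch, not upstream code: because
+// manipulateNodes reverses the nodes for Prepend (see the comment above),
+// the prepended elements keep their document order, i.e. the first matched
+// "span.badge" ends up as the first child of each "li". The selectors are
+// assumptions made for the example.
+func prependBadges(doc *Document) *Selection {
+ return doc.Find("li").Prepend("span.badge")
+}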
diff --git a/vendor/github.com/PuerkitoBio/goquery/property.go b/vendor/github.com/PuerkitoBio/goquery/property.go
new file mode 100644
index 000000000..411126db2
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/property.go
@@ -0,0 +1,275 @@
+package goquery
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+var rxClassTrim = regexp.MustCompile("[\t\r\n]")
+
+// Attr gets the specified attribute's value for the first element in the
+// Selection. To get the value for each element individually, use a looping
+// construct such as the Each or Map methods.
+func (s *Selection) Attr(attrName string) (val string, exists bool) {
+ if len(s.Nodes) == 0 {
+ return
+ }
+ return getAttributeValue(attrName, s.Nodes[0])
+}
+
+// AttrOr works like Attr but returns the default value if the attribute is
+// not present.
+func (s *Selection) AttrOr(attrName, defaultValue string) string {
+ if len(s.Nodes) == 0 {
+ return defaultValue
+ }
+
+ val, exists := getAttributeValue(attrName, s.Nodes[0])
+ if !exists {
+ return defaultValue
+ }
+
+ return val
+}
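+
+// firstLinkHref is an illustrative sketch, not upstream code: it shows the
+// difference between Attr, which reports whether the attribute exists, and
+// AttrOr, which substitutes a fallback value. The selector and fallback are
+// assumptions made for the example.
+func firstLinkHref(doc *Document) string {
+ if href, ok := doc.Find("a").Attr("href"); ok {
+ return href
+ }
+ // Equivalent shortcut when a fallback value is acceptable:
+ return doc.Find("a").AttrOr("href", "#")
+}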
+
+// RemoveAttr removes the named attribute from each element in the set of matched elements.
+func (s *Selection) RemoveAttr(attrName string) *Selection {
+ for _, n := range s.Nodes {
+ removeAttr(n, attrName)
+ }
+
+ return s
+}
+
+// SetAttr sets the given attribute on each element in the set of matched elements.
+func (s *Selection) SetAttr(attrName, val string) *Selection {
+ for _, n := range s.Nodes {
+ attr := getAttributePtr(attrName, n)
+ if attr == nil {
+ n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
+ } else {
+ attr.Val = val
+ }
+ }
+
+ return s
+}
+
+// Text gets the combined text contents of each element in the set of matched
+// elements, including their descendants.
+func (s *Selection) Text() string {
+ var buf bytes.Buffer
+
+ // Slightly optimized vs calling Each: no single selection object created
+ var f func(*html.Node)
+ f = func(n *html.Node) {
+ if n.Type == html.TextNode {
+ // Keep newlines and spaces, like jQuery
+ buf.WriteString(n.Data)
+ }
+ if n.FirstChild != nil {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ f(c)
+ }
+ }
+ }
+ for _, n := range s.Nodes {
+ f(n)
+ }
+
+ return buf.String()
+}
+
+// Size is an alias for Length.
+func (s *Selection) Size() int {
+ return s.Length()
+}
+
+// Length returns the number of elements in the Selection object.
+func (s *Selection) Length() int {
+ return len(s.Nodes)
+}
+
+// Html gets the HTML contents of the first element in the set of matched
+// elements. It includes text and comment nodes.
+func (s *Selection) Html() (ret string, e error) {
+ // Since there is no .innerHtml, the HTML content must be re-created from
+ // the nodes using html.Render.
+ var buf bytes.Buffer
+
+ if len(s.Nodes) > 0 {
+ for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
+ e = html.Render(&buf, c)
+ if e != nil {
+ return
+ }
+ }
+ ret = buf.String()
+ }
+
+ return
+}
+
+// AddClass adds the given class(es) to each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+func (s *Selection) AddClass(class ...string) *Selection {
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+
+ if classStr == "" {
+ return s
+ }
+
+ tcls := getClassesSlice(classStr)
+ for _, n := range s.Nodes {
+ curClasses, attr := getClassesAndAttr(n, true)
+ for _, newClass := range tcls {
+ if !strings.Contains(curClasses, " "+newClass+" ") {
+ curClasses += newClass + " "
+ }
+ }
+
+ setClasses(n, attr, curClasses)
+ }
+
+ return s
+}
+
+// HasClass determines whether any of the matched elements are assigned the
+// given class.
+func (s *Selection) HasClass(class string) bool {
+ class = " " + class + " "
+ for _, n := range s.Nodes {
+ classes, _ := getClassesAndAttr(n, false)
+ if strings.Contains(classes, class) {
+ return true
+ }
+ }
+ return false
+}
+
+// RemoveClass removes the given class(es) from each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+// If no class name is provided, all classes are removed.
+func (s *Selection) RemoveClass(class ...string) *Selection {
+ var rclasses []string
+
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+ remove := classStr == ""
+
+ if !remove {
+ rclasses = getClassesSlice(classStr)
+ }
+
+ for _, n := range s.Nodes {
+ if remove {
+ removeAttr(n, "class")
+ } else {
+ classes, attr := getClassesAndAttr(n, true)
+ for _, rcl := range rclasses {
+ classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
+ }
+
+ setClasses(n, attr, classes)
+ }
+ }
+
+ return s
+}
+
+// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
+// Multiple class names can be specified, separated by a space or via multiple arguments.
+func (s *Selection) ToggleClass(class ...string) *Selection {
+ classStr := strings.TrimSpace(strings.Join(class, " "))
+
+ if classStr == "" {
+ return s
+ }
+
+ tcls := getClassesSlice(classStr)
+
+ for _, n := range s.Nodes {
+ classes, attr := getClassesAndAttr(n, true)
+ for _, tcl := range tcls {
+ if strings.Contains(classes, " "+tcl+" ") {
+ classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
+ } else {
+ classes += tcl + " "
+ }
+ }
+
+ setClasses(n, attr, classes)
+ }
+
+ return s
+}
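+
+// markActiveItems is an illustrative sketch, not upstream code: it combines
+// HasClass, AddClass and RemoveClass on each matched element. The class and
+// selector names are assumptions made for the example.
+func markActiveItems(doc *Document) {
+ doc.Find("li").Each(func(i int, s *Selection) {
+ if s.HasClass("selected") {
+ s.AddClass("active").RemoveClass("selected")
+ }
+ })
+}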
+
+func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
+ if n == nil {
+ return nil
+ }
+
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ return &n.Attr[i]
+ }
+ }
+ return nil
+}
+
+// Private function to get the specified attribute's value from a node.
+func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
+ if a := getAttributePtr(attrName, n); a != nil {
+ val = a.Val
+ exists = true
+ }
+ return
+}
+
+// Get and normalize the "class" attribute from the node.
+func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
+ // Applies only to element nodes
+ if n.Type == html.ElementNode {
+ attr = getAttributePtr("class", n)
+ if attr == nil && create {
+ n.Attr = append(n.Attr, html.Attribute{
+ Key: "class",
+ Val: "",
+ })
+ attr = &n.Attr[len(n.Attr)-1]
+ }
+ }
+
+ if attr == nil {
+ classes = " "
+ } else {
+ classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
+ }
+
+ return
+}
+
+func getClassesSlice(classes string) []string {
+ return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
+}
+
+func removeAttr(n *html.Node, attrName string) {
+ for i, a := range n.Attr {
+ if a.Key == attrName {
+ n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
+ n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
+ return
+ }
+ }
+}
+
+func setClasses(n *html.Node, attr *html.Attribute, classes string) {
+ classes = strings.TrimSpace(classes)
+ if classes == "" {
+ removeAttr(n, "class")
+ return
+ }
+
+ attr.Val = classes
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/query.go b/vendor/github.com/PuerkitoBio/goquery/query.go
new file mode 100644
index 000000000..fe86bf0bf
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/query.go
@@ -0,0 +1,49 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+// Is checks the current matched set of elements against a selector and
+// returns true if at least one of these elements matches.
+func (s *Selection) Is(selector string) bool {
+ return s.IsMatcher(compileMatcher(selector))
+}
+
+// IsMatcher checks the current matched set of elements against a matcher and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsMatcher(m Matcher) bool {
+ if len(s.Nodes) > 0 {
+ if len(s.Nodes) == 1 {
+ return m.Match(s.Nodes[0])
+ }
+ return len(m.Filter(s.Nodes)) > 0
+ }
+
+ return false
+}
+
+// IsFunction checks the current matched set of elements against a predicate and
+// returns true if at least one of these elements matches.
+func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
+ return s.FilterFunction(f).Length() > 0
+}
+
+// IsSelection checks the current matched set of elements against a Selection object
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsSelection(sel *Selection) bool {
+ return s.FilterSelection(sel).Length() > 0
+}
+
+// IsNodes checks the current matched set of elements against the specified nodes
+// and returns true if at least one of these elements matches.
+func (s *Selection) IsNodes(nodes ...*html.Node) bool {
+ return s.FilterNodes(nodes...).Length() > 0
+}
+
+// Contains returns true if the specified Node is within,
+// at any depth, one of the nodes in the Selection object.
+// Like jQuery's implementation, and unlike JavaScript's Node.contains, it is
+// NOT inclusive: if the node is itself one of the nodes in the Selection, it
+// returns false.
+func (s *Selection) Contains(n *html.Node) bool {
+ return sliceContains(s.Nodes, n)
+}
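+
+// containsIsExclusive is an illustrative sketch, not upstream code: it
+// demonstrates that Contains is not inclusive. For a selection holding the
+// body node, Contains reports false for the body node itself but true for
+// any of its descendants. The "body" selector is an assumption made for the
+// example.
+func containsIsExclusive(doc *Document) bool {
+ body := doc.Find("body")
+ if len(body.Nodes) == 0 {
+ return false
+ }
+ // body.Contains(body.Nodes[0]) would be false; a child node reports true.
+ child := body.Nodes[0].FirstChild
+ return child != nil && body.Contains(child)
+}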
diff --git a/vendor/github.com/PuerkitoBio/goquery/traversal.go b/vendor/github.com/PuerkitoBio/goquery/traversal.go
new file mode 100644
index 000000000..5fa5315ac
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/traversal.go
@@ -0,0 +1,698 @@
+package goquery
+
+import "golang.org/x/net/html"
+
+type siblingType int
+
+// Sibling type, used internally when iterating over children at the same
+// level (siblings) to specify which nodes are requested.
+const (
+ siblingPrevUntil siblingType = iota - 3
+ siblingPrevAll
+ siblingPrev
+ siblingAll
+ siblingNext
+ siblingNextAll
+ siblingNextUntil
+ siblingAllIncludingNonElements
+)
+
+// Find gets the descendants of each element in the current set of matched
+// elements, filtered by a selector. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) Find(selector string) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
+}
+
+// FindMatcher gets the descendants of each element in the current set of matched
+// elements, filtered by the matcher. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindMatcher(m Matcher) *Selection {
+ return pushStack(s, findWithMatcher(s.Nodes, m))
+}
+
+// FindSelection gets the descendants of each element in the current
+// Selection, filtered by a Selection. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.FindNodes(sel.Nodes...)
+}
+
+// FindNodes gets the descendants of each element in the current
+// Selection, filtered by some nodes. It returns a new Selection object
+// containing these matched elements.
+func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ if sliceContains(s.Nodes, n) {
+ return []*html.Node{n}
+ }
+ return nil
+ }))
+}
+
+// Contents gets the children of each element in the Selection,
+// including text and comment nodes. It returns a new Selection object
+// containing these elements.
+func (s *Selection) Contents() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
+}
+
+// ContentsFiltered gets the children of each element in the Selection,
+// filtered by the specified selector. It returns a new Selection
+// object containing these elements. Since selectors only act on Element nodes,
+// this function is an alias to ChildrenFiltered unless the selector is empty,
+// in which case it is an alias to Contents.
+func (s *Selection) ContentsFiltered(selector string) *Selection {
+ if selector != "" {
+ return s.ChildrenFiltered(selector)
+ }
+ return s.Contents()
+}
+
+// ContentsMatcher gets the children of each element in the Selection,
+// filtered by the specified matcher. It returns a new Selection
+// object containing these elements. Since matchers only act on Element nodes,
+// this function is an alias to ChildrenMatcher.
+func (s *Selection) ContentsMatcher(m Matcher) *Selection {
+ return s.ChildrenMatcher(m)
+}
+
+// Children gets the child elements of each element in the Selection.
+// It returns a new Selection object containing these elements.
+func (s *Selection) Children() *Selection {
+ return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
+}
+
+// ChildrenFiltered gets the child elements of each element in the Selection,
+// filtered by the specified selector. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenFiltered(selector string) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
+}
+
+// ChildrenMatcher gets the child elements of each element in the Selection,
+// filtered by the specified matcher. It returns a new
+// Selection object containing these elements.
+func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
+}
+
+// Parent gets the parent of each element in the Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) Parent() *Selection {
+ return pushStack(s, getParentNodes(s.Nodes))
+}
+
+// ParentFiltered gets the parent of each element in the Selection filtered by a
+// selector. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
+}
+
+// ParentMatcher gets the parent of each element in the Selection filtered by a
+// matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentNodes(s.Nodes), m)
+}
+
+// Closest gets the first element that matches the selector by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) Closest(selector string) *Selection {
+ cs := compileMatcher(selector)
+ return s.ClosestMatcher(cs)
+}
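+
+// closestSection is an illustrative sketch, not upstream code: Closest tests
+// each element itself before walking up its ancestors, so an element that
+// already matches the selector is returned as its own closest match. The
+// selectors are assumptions made for the example.
+func closestSection(doc *Document) *Selection {
+ return doc.Find("a.active").Closest("section")
+}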
+
+// ClosestMatcher gets the first element that matches the matcher by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestMatcher(m Matcher) *Selection {
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if m.Match(n) {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestNodes gets the first element that matches one of the nodes by testing the
+// element itself and traversing up through its ancestors in the DOM tree.
+func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
+ set := make(map[*html.Node]bool)
+ for _, n := range nodes {
+ set[n] = true
+ }
+ return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
+ // For each node in the selection, test the node itself, then each parent
+ // until a match is found.
+ for ; n != nil; n = n.Parent {
+ if set[n] {
+ return []*html.Node{n}
+ }
+ }
+ return nil
+ }))
+}
+
+// ClosestSelection gets the first element that matches one of the nodes in the
+// Selection by testing the element itself and traversing up through its ancestors
+// in the DOM tree.
+func (s *Selection) ClosestSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return pushStack(s, nil)
+ }
+ return s.ClosestNodes(sel.Nodes...)
+}
+
+// Parents gets the ancestors of each element in the current Selection. It
+// returns a new Selection object with the matched elements.
+func (s *Selection) Parents() *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
+}
+
+// ParentsFiltered gets the ancestors of each element in the current
+// Selection. It returns a new Selection object with the matched elements.
+func (s *Selection) ParentsFiltered(selector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
+}
+
+// ParentsMatcher gets the ancestors of each element in the current
+// Selection. It returns a new Selection object with the matched elements.
+func (s *Selection) ParentsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
+}
+
+// ParentsUntil gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntil(selector string) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
+}
+
+// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
+// not including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, m, nil))
+}
+
+// ParentsUntilSelection gets the ancestors of each element in the Selection,
+// up to but not including the elements in the specified Selection. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.Parents()
+ }
+ return s.ParentsUntilNodes(sel.Nodes...)
+}
+
+// ParentsUntilNodes gets the ancestors of each element in the Selection,
+// up to but not including the specified nodes. It returns a
+// new Selection object containing the matched elements.
+func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
+}
+
+// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
+// results based on a selector string. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
+// results based on a matcher. It returns a new Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
+}
+
+// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.ParentsMatcher(filter)
+ }
+ return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
+}
+
+// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
+}
+
+// Siblings gets the siblings of each element in the Selection. It returns
+// a new Selection object containing the matched elements.
+func (s *Selection) Siblings() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
+}
+
+// SiblingsFiltered gets the siblings of each element in the Selection
+// filtered by a selector. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
+}
+
+// SiblingsMatcher gets the siblings of each element in the Selection
+// filtered by a matcher. It returns a new Selection object containing the
+// matched elements.
+func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
+}
+
+// Next gets the immediately following sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) Next() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
+}
+
+// NextFiltered gets the immediately following sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
+}
+
+// NextMatcher gets the immediately following sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
+}
+
+// NextAll gets all the following siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) NextAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
+}
+
+// NextAllFiltered gets all the following siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
+}
+
+// NextAllMatcher gets all the following siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) NextAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
+}
+
+// Prev gets the immediately preceding sibling of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) Prev() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
+}
+
+// PrevFiltered gets the immediately preceding sibling of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
+}
+
+// PrevMatcher gets the immediately preceding sibling of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
+}
+
+// PrevAll gets all the preceding siblings of each element in the
+// Selection. It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevAll() *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
+}
+
+// PrevAllFiltered gets all the preceding siblings of each element in the
+// Selection filtered by a selector. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllFiltered(selector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
+}
+
+// PrevAllMatcher gets all the preceding siblings of each element in the
+// Selection filtered by a matcher. It returns a new Selection object
+// containing the matched elements.
+func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
+}
+
+// NextUntil gets all following siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(selector), nil))
+}
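+
+// rowsAfterHeader is an illustrative sketch, not upstream code: starting from
+// each header row, it collects the following sibling rows and stops before
+// the next element matching "tr.header". The selectors are assumptions made
+// for the example.
+func rowsAfterHeader(doc *Document) *Selection {
+ return doc.Find("tr.header").NextUntil("tr.header")
+}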
+
+// NextUntilMatcher gets all following siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ m, nil))
+}
+
+// NextUntilSelection gets all following siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.NextAll()
+ }
+ return s.NextUntilNodes(sel.Nodes...)
+}
+
+// NextUntilNodes gets all following siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes))
+}
+
+// PrevUntil gets all preceding siblings of each element up to but not
+// including the element matched by the selector. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntil(selector string) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(selector), nil))
+}
+
+// PrevUntilMatcher gets all preceding siblings of each element up to but not
+// including the element matched by the matcher. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ m, nil))
+}
+
+// PrevUntilSelection gets all preceding siblings of each element up to but not
+// including the element matched by the Selection. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
+ if sel == nil {
+ return s.PrevAll()
+ }
+ return s.PrevUntilNodes(sel.Nodes...)
+}
+
+// PrevUntilNodes gets all preceding siblings of each element up to but not
+// including the element matched by the nodes. It returns a new Selection
+// object containing the matched elements.
+func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
+ return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes))
+}
+
+// NextFilteredUntil is like NextUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ until, nil), filter)
+}
+
+// NextFilteredUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// NextMatcherUntilSelection is like NextUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.NextMatcher(filter)
+ }
+ return s.NextMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// NextFilteredUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// NextMatcherUntilNodes is like NextUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
+ nil, nodes), filter)
+}
+
+// PrevFilteredUntil is like PrevUntil, with the option to filter
+// the results based on a selector string.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
+}
+
+// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
+// the results based on a matcher.
+// It returns a new Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ until, nil), filter)
+}
+
+// PrevFilteredUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
+ return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
+}
+
+// PrevMatcherUntilSelection is like PrevUntilSelection, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
+ if sel == nil {
+ return s.PrevMatcher(filter)
+ }
+ return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
+}
+
+// PrevFilteredUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a selector string. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), compileMatcher(filterSelector))
+}
+
+// PrevMatcherUntilNodes is like PrevUntilNodes, with the
+// option to filter the results based on a matcher. It returns a new
+// Selection object containing the matched elements.
+func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
+ return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
+ nil, nodes), filter)
+}
+
+// Filter and push filters the nodes based on a matcher, and pushes the results
+// on the stack, with the srcSel as previous selection.
+func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
+ // Create a temporary Selection with the specified nodes to filter using winnow
+ sel := &Selection{nodes, srcSel.document, nil}
+ // Filter based on matcher and push on stack
+ return pushStack(srcSel, winnow(sel, m, true))
+}
+
+// Internal implementation of Find that returns raw nodes.
+func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
+ // Map nodes to find the matches within the children of each node
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ // Go down one level, because jQuery's Find selects only within descendants
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == html.ElementNode {
+ result = append(result, m.MatchAll(c)...)
+ }
+ }
+ return
+ })
+}
+
+// Internal implementation to get all parent nodes, stopping at the specified
+// node (or nil if no stop).
+func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
+ for p := n.Parent; p != nil; p = p.Parent {
+ sel := newSingleSelection(p, nil)
+ if stopm != nil {
+ if sel.IsMatcher(stopm) {
+ break
+ }
+ } else if len(stopNodes) > 0 {
+ if sel.IsNodes(stopNodes...) {
+ break
+ }
+ }
+ if p.Type == html.ElementNode {
+ result = append(result, p)
+ }
+ }
+ return
+ })
+}
+
+// Internal implementation of sibling nodes that returns a raw slice of matches.
+func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
+ var f func(*html.Node) bool
+
+ // If the requested siblings are ...Until, create the test function to
+ // determine if the until condition is reached (returns true if it is)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ f = func(n *html.Node) bool {
+ if untilm != nil {
+ // Matcher-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsMatcher(untilm)
+ } else if len(untilNodes) > 0 {
+ // Nodes-based condition
+ sel := newSingleSelection(n, nil)
+ return sel.IsNodes(untilNodes...)
+ }
+ return false
+ }
+ }
+
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n.Parent, st, n, f)
+ })
+}
+
+// Gets the children nodes of each node in the specified slice of nodes,
+// based on the sibling type request.
+func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ return getChildrenWithSiblingType(n, st, nil, nil)
+ })
+}
+
+// Gets the children of the specified parent, based on the requested sibling
+// type, skipping a specified node if required.
+func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
+ untilFunc func(*html.Node) bool) (result []*html.Node) {
+
+ // Create the iterator function
+ var iter = func(cur *html.Node) (ret *html.Node) {
+ // Based on the sibling type requested, iterate the right way
+ for {
+ switch st {
+ case siblingAll, siblingAllIncludingNonElements:
+ if cur == nil {
+ // First iteration, start with first child of parent
+ // Skip node if required
+ if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ } else {
+ // Skip node if required
+ if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
+ ret = skipNode.NextSibling
+ }
+ }
+ case siblingPrev, siblingPrevAll, siblingPrevUntil:
+ if cur == nil {
+ // Start with previous sibling of the skip node
+ ret = skipNode.PrevSibling
+ } else {
+ ret = cur.PrevSibling
+ }
+ case siblingNext, siblingNextAll, siblingNextUntil:
+ if cur == nil {
+ // Start with next sibling of the skip node
+ ret = skipNode.NextSibling
+ } else {
+ ret = cur.NextSibling
+ }
+ default:
+ panic("Invalid sibling type.")
+ }
+ if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
+ return
+ }
+ // Not a valid node, try again from this one
+ cur = ret
+ }
+ }
+
+ for c := iter(nil); c != nil; c = iter(c) {
+ // If this is an ...Until case, test before append (returns true
+ // if the until condition is reached)
+ if st == siblingNextUntil || st == siblingPrevUntil {
+ if untilFunc(c) {
+ return
+ }
+ }
+ result = append(result, c)
+ if st == siblingNext || st == siblingPrev {
+ // Only one node was requested (immediate next or previous), so exit
+ return
+ }
+ }
+ return
+}
+
+// Internal implementation of parent nodes that returns a raw slice of nodes.
+func getParentNodes(nodes []*html.Node) []*html.Node {
+ return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
+ if n.Parent != nil && n.Parent.Type == html.ElementNode {
+ return []*html.Node{n.Parent}
+ }
+ return nil
+ })
+}
+
+// Internal map function used by many traversing methods. Takes the source nodes
+// to iterate on and the mapping function that returns an array of nodes.
+// Returns an array of nodes mapped by calling the callback function once for
+// each node in the source nodes.
+func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
+ set := make(map[*html.Node]bool)
+ for i, n := range nodes {
+ if vals := f(i, n); len(vals) > 0 {
+ result = appendWithoutDuplicates(result, vals, set)
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/type.go b/vendor/github.com/PuerkitoBio/goquery/type.go
new file mode 100644
index 000000000..6ad51dbc5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/type.go
@@ -0,0 +1,141 @@
+package goquery
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/andybalholm/cascadia"
+
+ "golang.org/x/net/html"
+)
+
+// Document represents an HTML document to be manipulated. Unlike jQuery, which
+// is loaded as part of a DOM document, and thus acts upon its containing
+// document, GoQuery doesn't know which HTML document to act upon. So it needs
+// to be told, and that's what the Document class is for. It holds the root
+// document node to manipulate, and can make selections on this document.
+type Document struct {
+ *Selection
+ Url *url.URL
+ rootNode *html.Node
+}
+
+// NewDocumentFromNode is a Document constructor that takes a root html Node
+// as argument.
+func NewDocumentFromNode(root *html.Node) *Document {
+ return newDocument(root, nil)
+}
+
+// NewDocument is a Document constructor that takes a string URL as argument.
+// It loads the specified document, parses it, and stores the root Document
+// node, ready to be manipulated.
+//
+// Deprecated: Use the net/http standard library package to make the request
+// and validate the response before calling goquery.NewDocumentFromReader
+// with the response's body.
+func NewDocument(url string) (*Document, error) {
+ // Load the URL
+ res, e := http.Get(url)
+ if e != nil {
+ return nil, e
+ }
+ return NewDocumentFromResponse(res)
+}
+
+// NewDocumentFromReader returns a Document from an io.Reader.
+// It returns an error as second value if the reader's data cannot be parsed
+// as html. It does not check if the reader is also an io.Closer, the
+// provided reader is never closed by this call. It is the responsibility
+// of the caller to close it if required.
+func NewDocumentFromReader(r io.Reader) (*Document, error) {
+ root, e := html.Parse(r)
+ if e != nil {
+ return nil, e
+ }
+ return newDocument(root, nil), nil
+}
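+
+// fetchDocument is an illustrative sketch, not upstream code, of the pattern
+// recommended by the deprecation note on NewDocument above: perform the
+// request with net/http, validate the response, then parse the body with
+// NewDocumentFromReader. The function name and status check are assumptions
+// made for the example.
+func fetchDocument(url string) (*Document, error) {
+ res, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, errors.New("goquery: unexpected status " + res.Status)
+ }
+ return NewDocumentFromReader(res.Body)
+}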
+
+// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
+// It loads the specified response's document, parses it, and stores the root Document
+// node, ready to be manipulated. The response's body is closed on return.
+//
+// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
+func NewDocumentFromResponse(res *http.Response) (*Document, error) {
+ if res == nil {
+ return nil, errors.New("Response is nil")
+ }
+ defer res.Body.Close()
+ if res.Request == nil {
+ return nil, errors.New("Response.Request is nil")
+ }
+
+ // Parse the HTML into nodes
+ root, e := html.Parse(res.Body)
+ if e != nil {
+ return nil, e
+ }
+
+ // Create and fill the document
+ return newDocument(root, res.Request.URL), nil
+}
+
+// CloneDocument creates a deep-clone of a document.
+func CloneDocument(doc *Document) *Document {
+ return newDocument(cloneNode(doc.rootNode), doc.Url)
+}
+
+// Private constructor, make sure all fields are correctly filled.
+func newDocument(root *html.Node, url *url.URL) *Document {
+ // Create and fill the document
+ d := &Document{nil, url, root}
+ d.Selection = newSingleSelection(root, d)
+ return d
+}
+
+// Selection represents a collection of nodes matching some criteria. The
+// initial Selection can be created by using Document.Find, and then
+// manipulated using the jQuery-like chainable syntax and methods.
+type Selection struct {
+ Nodes []*html.Node
+ document *Document
+ prevSel *Selection
+}
+
+// Helper constructor to create an empty selection
+func newEmptySelection(doc *Document) *Selection {
+ return &Selection{nil, doc, nil}
+}
+
+// Helper constructor to create a selection of only one node
+func newSingleSelection(node *html.Node, doc *Document) *Selection {
+ return &Selection{[]*html.Node{node}, doc, nil}
+}
+
+// Matcher is an interface that defines the methods to match
+// HTML nodes against a compiled selector string. Cascadia's
+// Selector implements this interface.
+type Matcher interface {
+ Match(*html.Node) bool
+ MatchAll(*html.Node) []*html.Node
+ Filter([]*html.Node) []*html.Node
+}
+
+// compileMatcher compiles the selector string s and returns
+// the corresponding Matcher. If s is an invalid selector string,
+// it returns a Matcher that fails all matches.
+func compileMatcher(s string) Matcher {
+ cs, err := cascadia.Compile(s)
+ if err != nil {
+ return invalidMatcher{}
+ }
+ return cs
+}
+
+// invalidMatcher is a Matcher that always fails to match.
+type invalidMatcher struct{}
+
+func (invalidMatcher) Match(n *html.Node) bool { return false }
+func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
+func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities.go b/vendor/github.com/PuerkitoBio/goquery/utilities.go
new file mode 100644
index 000000000..b4c061a4d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/utilities.go
@@ -0,0 +1,161 @@
+package goquery
+
+import (
+ "bytes"
+
+ "golang.org/x/net/html"
+)
+
+// used to determine if a set (map[*html.Node]bool) should be used
+// instead of iterating over a slice. The set uses more memory and
+// is slower than slice iteration for small N.
+const minNodesForSet = 1000
+
+var nodeNames = []string{
+ html.ErrorNode: "#error",
+ html.TextNode: "#text",
+ html.DocumentNode: "#document",
+ html.CommentNode: "#comment",
+}
+
+// NodeName returns the node name of the first element in the selection.
+// It tries to behave in a similar way as the DOM's nodeName property
+// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
+//
+// Go's net/html package defines the following node types, listed with
+// the corresponding returned value from this function:
+//
+// ErrorNode : #error
+// TextNode : #text
+// DocumentNode : #document
+// ElementNode : the element's tag name
+// CommentNode : #comment
+// DoctypeNode : the name of the document type
+//
+func NodeName(s *Selection) string {
+ if s.Length() == 0 {
+ return ""
+ }
+ switch n := s.Get(0); n.Type {
+ case html.ElementNode, html.DoctypeNode:
+ return n.Data
+ default:
+ if n.Type >= 0 && int(n.Type) < len(nodeNames) {
+ return nodeNames[n.Type]
+ }
+ return ""
+ }
+}
+
+// OuterHtml returns the outer HTML rendering of the first item in
+// the selection - that is, the HTML including the first element's
+// tag and attributes.
+//
+// Unlike InnerHtml, this is a function and not a method on the Selection,
+// because this is not a jQuery method (in javascript-land, this is
+// a property provided by the DOM).
+func OuterHtml(s *Selection) (string, error) {
+ var buf bytes.Buffer
+
+ if s.Length() == 0 {
+ return "", nil
+ }
+ n := s.Get(0)
+ if err := html.Render(&buf, n); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// Loop through all container nodes to search for the target node.
+func sliceContains(container []*html.Node, contained *html.Node) bool {
+ for _, n := range container {
+ if nodeContains(n, contained) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Checks if the contained node is within the container node.
+func nodeContains(container *html.Node, contained *html.Node) bool {
+ // Check if the parent of the contained node is the container node, traversing
+ // upward until the top is reached, or the container is found.
+ for contained = contained.Parent; contained != nil; contained = contained.Parent {
+ if container == contained {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if the target node is in the slice of nodes.
+func isInSlice(slice []*html.Node, node *html.Node) bool {
+ return indexInSlice(slice, node) > -1
+}
+
+// Returns the index of the target node in the slice, or -1.
+func indexInSlice(slice []*html.Node, node *html.Node) int {
+ if node != nil {
+ for i, n := range slice {
+ if n == node {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// Appends the new nodes to the target slice, making sure no duplicate is added.
+// There is no check on the original state of the target slice, so it may still
+// contain duplicates. The target slice is returned because append() may create
+// a new underlying array. If targetSet is nil, a local set is created with the
+// target if len(target) + len(nodes) is greater than minNodesForSet.
+func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
+ // if there are not that many nodes, don't use the map, faster to just use nested loops
+ // (unless a non-nil targetSet is passed, in which case the caller knows better).
+ if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
+ for _, n := range nodes {
+ if !isInSlice(target, n) {
+ target = append(target, n)
+ }
+ }
+ return target
+ }
+
+ // if a targetSet is passed, then assume it is reliable, otherwise create one
+ // and initialize it with the current target contents.
+ if targetSet == nil {
+ targetSet = make(map[*html.Node]bool, len(target))
+ for _, n := range target {
+ targetSet[n] = true
+ }
+ }
+ for _, n := range nodes {
+ if !targetSet[n] {
+ target = append(target, n)
+ targetSet[n] = true
+ }
+ }
+
+ return target
+}
+
+// Loop through a selection, returning only those nodes that pass the predicate
+// function.
+func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
+ for i, n := range sel.Nodes {
+ if predicate(i, newSingleSelection(n, sel.document)) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+// Creates a new Selection object based on the specified nodes, and keeps the
+// source Selection object on the stack (linked list).
+func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
+ result := &Selection{nodes, fromSel.document, fromSel}
+ return result
+}
diff --git a/vendor/github.com/agnivade/levenshtein/.gitignore b/vendor/github.com/agnivade/levenshtein/.gitignore
new file mode 100644
index 000000000..345780a44
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/.gitignore
@@ -0,0 +1,5 @@
+coverage.txt
+fuzz/fuzz-fuzz.zip
+fuzz/corpus/corpus/*
+fuzz/corpus/suppressions/*
+fuzz/corpus/crashes/*
diff --git a/vendor/github.com/agnivade/levenshtein/.travis.yml b/vendor/github.com/agnivade/levenshtein/.travis.yml
new file mode 100644
index 000000000..f830ec4ec
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+- 1.9.x
+- 1.10.x
+- 1.11.x
+- tip
diff --git a/vendor/github.com/agnivade/levenshtein/License.txt b/vendor/github.com/agnivade/levenshtein/License.txt
new file mode 100644
index 000000000..54b51f499
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/License.txt
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Agniva De Sarker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/agnivade/levenshtein/Makefile b/vendor/github.com/agnivade/levenshtein/Makefile
new file mode 100644
index 000000000..4bef27dd1
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/Makefile
@@ -0,0 +1,13 @@
+all: test install
+
+install:
+ go install
+
+lint:
+ gofmt -l -s -w . && go tool vet -all . && golint
+
+test:
+ go test -race -v -coverprofile=coverage.txt -covermode=atomic
+
+bench:
+ go test -run=XXX -bench=. -benchmem
diff --git a/vendor/github.com/agnivade/levenshtein/README.md b/vendor/github.com/agnivade/levenshtein/README.md
new file mode 100644
index 000000000..b0fd81df7
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/README.md
@@ -0,0 +1,57 @@
+levenshtein [Build Status](https://travis-ci.org/agnivade/levenshtein) [Go Report Card](https://goreportcard.com/report/github.com/agnivade/levenshtein) [GoDoc](https://godoc.org/github.com/agnivade/levenshtein)
+===========
+
+[Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance)
+
+The library is fully capable of working with non-ASCII strings, but the strings are not normalized; that is left to the caller. Please normalize the strings before passing them to the library if you have such a requirement.
+- https://blog.golang.org/normalization
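+
+For example, a minimal sketch of normalizing both inputs before computing the
+distance (assuming the golang.org/x/text/unicode/norm package) could look like this:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/agnivade/levenshtein"
+	"golang.org/x/text/unicode/norm"
+)
+
+func main() {
+	// "é" can be written precomposed (a single code point) or decomposed
+	// ("e" plus a combining accent); without normalization the two spellings
+	// compare as different runes even though they look identical.
+	a := "r\u00e9sum\u00e9"   // precomposed
+	b := "re\u0301sume\u0301" // decomposed
+	d := levenshtein.ComputeDistance(norm.NFC.String(a), norm.NFC.String(b))
+	fmt.Println(d) // 0
+}
+```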
+
+Install
+-------
+
+ go get github.com/agnivade/levenshtein
+
+Example
+-------
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/agnivade/levenshtein"
+)
+
+func main() {
+ s1 := "kitten"
+ s2 := "sitting"
+ distance := levenshtein.ComputeDistance(s1, s2)
+ fmt.Printf("The distance between %s and %s is %d.\n", s1, s2, distance)
+ // Output:
+ // The distance between kitten and sitting is 3.
+}
+
+```
+
+Benchmarks
+----------
+
+```
+name time/op
+Simple/ASCII-4 537ns ± 2%
+Simple/French-4 956ns ± 0%
+Simple/Nordic-4 1.95µs ± 1%
+Simple/Tibetan-4 1.53µs ± 2%
+
+name alloc/op
+Simple/ASCII-4 96.0B ± 0%
+Simple/French-4 128B ± 0%
+Simple/Nordic-4 192B ± 0%
+Simple/Tibetan-4 144B ± 0%
+
+name allocs/op
+Simple/ASCII-4 1.00 ± 0%
+Simple/French-4 1.00 ± 0%
+Simple/Nordic-4 1.00 ± 0%
+Simple/Tibetan-4 1.00 ± 0%
+```
diff --git a/vendor/github.com/agnivade/levenshtein/go.mod b/vendor/github.com/agnivade/levenshtein/go.mod
new file mode 100644
index 000000000..b2921fb35
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/go.mod
@@ -0,0 +1 @@
+module github.com/agnivade/levenshtein
diff --git a/vendor/github.com/agnivade/levenshtein/levenshtein.go b/vendor/github.com/agnivade/levenshtein/levenshtein.go
new file mode 100644
index 000000000..6b08acade
--- /dev/null
+++ b/vendor/github.com/agnivade/levenshtein/levenshtein.go
@@ -0,0 +1,75 @@
+// Package levenshtein is a Go implementation to calculate Levenshtein Distance.
+//
+// Implementation taken from
+// https://gist.github.com/andrei-m/982927#gistcomment-1931258
+package levenshtein
+
+import "unicode/utf8"
+
+// ComputeDistance computes the Levenshtein distance between the two
+// strings passed as arguments. The return value is the Levenshtein distance.
+//
+// It works on runes (Unicode code points) but does not normalize
+// the input strings. See https://blog.golang.org/normalization
+// and the golang.org/x/text/unicode/norm package.
+func ComputeDistance(a, b string) int {
+ if len(a) == 0 {
+ return utf8.RuneCountInString(b)
+ }
+
+ if len(b) == 0 {
+ return utf8.RuneCountInString(a)
+ }
+
+ if a == b {
+ return 0
+ }
+
+	// We need to convert to []rune if the strings are non-ASCII.
+	// This could be avoided by using utf8.RuneCountInString
+	// and then doing some juggling with rune indices.
+	// The primary challenge is keeping track of the previous rune.
+	// With a range loop, it's not that easy, and with a for loop
+	// we would need to track the inter-rune width using utf8.DecodeRuneInString.
+ s1 := []rune(a)
+ s2 := []rune(b)
+
+ // swap to save some memory O(min(a,b)) instead of O(a)
+ if len(s1) > len(s2) {
+ s1, s2 = s2, s1
+ }
+ lenS1 := len(s1)
+ lenS2 := len(s2)
+
+ // init the row
+ x := make([]int, lenS1+1)
+ for i := 0; i <= lenS1; i++ {
+ x[i] = i
+ }
+
+ // fill in the rest
+ for i := 1; i <= lenS2; i++ {
+ prev := i
+ var current int
+
+ for j := 1; j <= lenS1; j++ {
+
+ if s2[i-1] == s1[j-1] {
+ current = x[j-1] // match
+ } else {
+ current = min(min(x[j-1]+1, prev+1), x[j]+1)
+ }
+ x[j-1] = prev
+ prev = current
+ }
+ x[lenS1] = prev
+ }
+ return x[lenS1]
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/andybalholm/cascadia/.travis.yml b/vendor/github.com/andybalholm/cascadia/.travis.yml
new file mode 100644
index 000000000..6f227517d
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+
+install:
+ - go get github.com/andybalholm/cascadia
+
+script:
+ - go test -v
+
+notifications:
+ email: false
diff --git a/vendor/github.com/andybalholm/cascadia/LICENSE b/vendor/github.com/andybalholm/cascadia/LICENSE
new file mode 100644
index 000000000..ee5ad35ac
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2011 Andy Balholm. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/andybalholm/cascadia/README.md b/vendor/github.com/andybalholm/cascadia/README.md
new file mode 100644
index 000000000..9021cb92a
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/README.md
@@ -0,0 +1,7 @@
+# cascadia
+
+[Build Status](https://travis-ci.org/andybalholm/cascadia)
+
+The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
+
+To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia), the command line tool, a thin wrapper around this package.
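+
+A minimal usage sketch (the HTML snippet and selector below are only illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/andybalholm/cascadia"
+	"golang.org/x/net/html"
+)
+
+func main() {
+	doc, err := html.Parse(strings.NewReader(`<ul><li class="a">one</li><li>two</li></ul>`))
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Compile the selector once, then reuse it against the parse tree.
+	sel := cascadia.MustCompile("li.a")
+	for _, n := range sel.MatchAll(doc) {
+		fmt.Println(n.FirstChild.Data) // one
+	}
+}
+```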
diff --git a/vendor/github.com/andybalholm/cascadia/go.mod b/vendor/github.com/andybalholm/cascadia/go.mod
new file mode 100644
index 000000000..e6febbbfe
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/go.mod
@@ -0,0 +1,3 @@
+module "github.com/andybalholm/cascadia"
+
+require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01
diff --git a/vendor/github.com/andybalholm/cascadia/parser.go b/vendor/github.com/andybalholm/cascadia/parser.go
new file mode 100644
index 000000000..495db9ccf
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/parser.go
@@ -0,0 +1,835 @@
+// Package cascadia is an implementation of CSS selectors.
+package cascadia
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// a parser for CSS selectors
+type parser struct {
+ s string // the source text
+ i int // the current position
+}
+
+// parseEscape parses a backslash escape.
+func (p *parser) parseEscape() (result string, err error) {
+ if len(p.s) < p.i+2 || p.s[p.i] != '\\' {
+ return "", errors.New("invalid escape sequence")
+ }
+
+ start := p.i + 1
+ c := p.s[start]
+ switch {
+ case c == '\r' || c == '\n' || c == '\f':
+ return "", errors.New("escaped line ending outside string")
+ case hexDigit(c):
+ // unicode escape (hex)
+ var i int
+ for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
+ // empty
+ }
+ v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
+ if len(p.s) > i {
+ switch p.s[i] {
+ case '\r':
+ i++
+ if len(p.s) > i && p.s[i] == '\n' {
+ i++
+ }
+ case ' ', '\t', '\n', '\f':
+ i++
+ }
+ }
+ p.i = i
+ return string(rune(v)), nil
+ }
+
+ // Return the literal character after the backslash.
+ result = p.s[start : start+1]
+ p.i += 2
+ return result, nil
+}
+
+func hexDigit(c byte) bool {
+ return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
+}
+
+// nameStart returns whether c can be the first character of an identifier
+// (not counting an initial hyphen, or an escape sequence).
+func nameStart(c byte) bool {
+ return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127
+}
+
+// nameChar returns whether c can be a character within an identifier
+// (not counting an escape sequence).
+func nameChar(c byte) bool {
+ return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 ||
+ c == '-' || '0' <= c && c <= '9'
+}
+
+// parseIdentifier parses an identifier.
+func (p *parser) parseIdentifier() (result string, err error) {
+ startingDash := false
+ if len(p.s) > p.i && p.s[p.i] == '-' {
+ startingDash = true
+ p.i++
+ }
+
+ if len(p.s) <= p.i {
+ return "", errors.New("expected identifier, found EOF instead")
+ }
+
+ if c := p.s[p.i]; !(nameStart(c) || c == '\\') {
+ return "", fmt.Errorf("expected identifier, found %c instead", c)
+ }
+
+ result, err = p.parseName()
+ if startingDash && err == nil {
+ result = "-" + result
+ }
+ return
+}
+
+// parseName parses a name (which is like an identifier, but doesn't have
+// extra restrictions on the first character).
+func (p *parser) parseName() (result string, err error) {
+ i := p.i
+loop:
+ for i < len(p.s) {
+ c := p.s[i]
+ switch {
+ case nameChar(c):
+ start := i
+ for i < len(p.s) && nameChar(p.s[i]) {
+ i++
+ }
+ result += p.s[start:i]
+ case c == '\\':
+ p.i = i
+ val, err := p.parseEscape()
+ if err != nil {
+ return "", err
+ }
+ i = p.i
+ result += val
+ default:
+ break loop
+ }
+ }
+
+ if result == "" {
+ return "", errors.New("expected name, found EOF instead")
+ }
+
+ p.i = i
+ return result, nil
+}
+
+// parseString parses a single- or double-quoted string.
+func (p *parser) parseString() (result string, err error) {
+ i := p.i
+ if len(p.s) < i+2 {
+ return "", errors.New("expected string, found EOF instead")
+ }
+
+ quote := p.s[i]
+ i++
+
+loop:
+ for i < len(p.s) {
+ switch p.s[i] {
+ case '\\':
+ if len(p.s) > i+1 {
+ switch c := p.s[i+1]; c {
+ case '\r':
+ if len(p.s) > i+2 && p.s[i+2] == '\n' {
+ i += 3
+ continue loop
+ }
+ fallthrough
+ case '\n', '\f':
+ i += 2
+ continue loop
+ }
+ }
+ p.i = i
+ val, err := p.parseEscape()
+ if err != nil {
+ return "", err
+ }
+ i = p.i
+ result += val
+ case quote:
+ break loop
+ case '\r', '\n', '\f':
+ return "", errors.New("unexpected end of line in string")
+ default:
+ start := i
+ for i < len(p.s) {
+ if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' {
+ break
+ }
+ i++
+ }
+ result += p.s[start:i]
+ }
+ }
+
+ if i >= len(p.s) {
+ return "", errors.New("EOF in string")
+ }
+
+ // Consume the final quote.
+ i++
+
+ p.i = i
+ return result, nil
+}
+
+// parseRegex parses a regular expression; the end is defined by encountering an
+// unmatched closing ')' or ']', which is not consumed.
+func (p *parser) parseRegex() (rx *regexp.Regexp, err error) {
+ i := p.i
+ if len(p.s) < i+2 {
+ return nil, errors.New("expected regular expression, found EOF instead")
+ }
+
+ // number of open parens or brackets;
+ // when it becomes negative, finished parsing regex
+ open := 0
+
+loop:
+ for i < len(p.s) {
+ switch p.s[i] {
+ case '(', '[':
+ open++
+ case ')', ']':
+ open--
+ if open < 0 {
+ break loop
+ }
+ }
+ i++
+ }
+
+ if i >= len(p.s) {
+ return nil, errors.New("EOF in regular expression")
+ }
+ rx, err = regexp.Compile(p.s[p.i:i])
+ p.i = i
+ return rx, err
+}
+
+// skipWhitespace consumes whitespace characters and comments.
+// It returns true if there was actually anything to skip.
+func (p *parser) skipWhitespace() bool {
+ i := p.i
+ for i < len(p.s) {
+ switch p.s[i] {
+ case ' ', '\t', '\r', '\n', '\f':
+ i++
+ continue
+ case '/':
+ if strings.HasPrefix(p.s[i:], "/*") {
+ end := strings.Index(p.s[i+len("/*"):], "*/")
+ if end != -1 {
+ i += end + len("/**/")
+ continue
+ }
+ }
+ }
+ break
+ }
+
+ if i > p.i {
+ p.i = i
+ return true
+ }
+
+ return false
+}
+
+// consumeParenthesis consumes an opening parenthesis and any following
+// whitespace. It returns true if there was actually a parenthesis to skip.
+func (p *parser) consumeParenthesis() bool {
+ if p.i < len(p.s) && p.s[p.i] == '(' {
+ p.i++
+ p.skipWhitespace()
+ return true
+ }
+ return false
+}
+
+// consumeClosingParenthesis consumes a closing parenthesis and any preceding
+// whitespace. It returns true if there was actually a parenthesis to skip.
+func (p *parser) consumeClosingParenthesis() bool {
+ i := p.i
+ p.skipWhitespace()
+ if p.i < len(p.s) && p.s[p.i] == ')' {
+ p.i++
+ return true
+ }
+ p.i = i
+ return false
+}
+
+// parseTypeSelector parses a type selector (one that matches by tag name).
+func (p *parser) parseTypeSelector() (result Selector, err error) {
+ tag, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ return typeSelector(tag), nil
+}
+
+// parseIDSelector parses a selector that matches by id attribute.
+func (p *parser) parseIDSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected id selector (#id), found EOF instead")
+ }
+ if p.s[p.i] != '#' {
+ return nil, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ id, err := p.parseName()
+ if err != nil {
+ return nil, err
+ }
+
+ return attributeEqualsSelector("id", id), nil
+}
+
+// parseClassSelector parses a selector that matches by class attribute.
+func (p *parser) parseClassSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected class selector (.class), found EOF instead")
+ }
+ if p.s[p.i] != '.' {
+ return nil, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ class, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ return attributeIncludesSelector("class", class), nil
+}
+
+// parseAttributeSelector parses a selector that matches by attribute value.
+func (p *parser) parseAttributeSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
+ }
+ if p.s[p.i] != '[' {
+ return nil, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ p.skipWhitespace()
+ key, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+
+ if p.s[p.i] == ']' {
+ p.i++
+ return attributeExistsSelector(key), nil
+ }
+
+ if p.i+2 >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+
+ op := p.s[p.i : p.i+2]
+ if op[0] == '=' {
+ op = "="
+ } else if op[1] != '=' {
+ return nil, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
+ }
+ p.i += len(op)
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+ var val string
+ var rx *regexp.Regexp
+ if op == "#=" {
+ rx, err = p.parseRegex()
+ } else {
+ switch p.s[p.i] {
+ case '\'', '"':
+ val, err = p.parseString()
+ default:
+ val, err = p.parseIdentifier()
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in attribute selector")
+ }
+ if p.s[p.i] != ']' {
+ return nil, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
+ }
+ p.i++
+
+ switch op {
+ case "=":
+ return attributeEqualsSelector(key, val), nil
+ case "!=":
+ return attributeNotEqualSelector(key, val), nil
+ case "~=":
+ return attributeIncludesSelector(key, val), nil
+ case "|=":
+ return attributeDashmatchSelector(key, val), nil
+ case "^=":
+ return attributePrefixSelector(key, val), nil
+ case "$=":
+ return attributeSuffixSelector(key, val), nil
+ case "*=":
+ return attributeSubstringSelector(key, val), nil
+ case "#=":
+ return attributeRegexSelector(key, rx), nil
+ }
+
+ return nil, fmt.Errorf("attribute operator %q is not supported", op)
+}
+
+var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
+var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
+var errUnmatchedParenthesis = errors.New("unmatched '('")
+
+// parsePseudoclassSelector parses a pseudoclass selector like :not(p).
+func (p *parser) parsePseudoclassSelector() (Selector, error) {
+ if p.i >= len(p.s) {
+ return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
+ }
+ if p.s[p.i] != ':' {
+ return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
+ }
+
+ p.i++
+ name, err := p.parseIdentifier()
+ if err != nil {
+ return nil, err
+ }
+ name = toLowerASCII(name)
+
+ switch name {
+ case "not", "has", "haschild":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ sel, parseErr := p.parseSelectorGroup()
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "not":
+ return negatedSelector(sel), nil
+ case "has":
+ return hasDescendantSelector(sel), nil
+ case "haschild":
+ return hasChildSelector(sel), nil
+ }
+
+ case "contains", "containsown":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ if p.i == len(p.s) {
+ return nil, errUnmatchedParenthesis
+ }
+ var val string
+ switch p.s[p.i] {
+ case '\'', '"':
+ val, err = p.parseString()
+ default:
+ val, err = p.parseIdentifier()
+ }
+ if err != nil {
+ return nil, err
+ }
+ val = strings.ToLower(val)
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in pseudo selector")
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "contains":
+ return textSubstrSelector(val), nil
+ case "containsown":
+ return ownTextSubstrSelector(val), nil
+ }
+
+ case "matches", "matchesown":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ rx, err := p.parseRegex()
+ if err != nil {
+ return nil, err
+ }
+ if p.i >= len(p.s) {
+ return nil, errors.New("unexpected EOF in pseudo selector")
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+
+ switch name {
+ case "matches":
+ return textRegexSelector(rx), nil
+ case "matchesown":
+ return ownTextRegexSelector(rx), nil
+ }
+
+ case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
+ if !p.consumeParenthesis() {
+ return nil, errExpectedParenthesis
+ }
+ a, b, err := p.parseNth()
+ if err != nil {
+ return nil, err
+ }
+ if !p.consumeClosingParenthesis() {
+ return nil, errExpectedClosingParenthesis
+ }
+ if a == 0 {
+ switch name {
+ case "nth-child":
+ return simpleNthChildSelector(b, false), nil
+ case "nth-of-type":
+ return simpleNthChildSelector(b, true), nil
+ case "nth-last-child":
+ return simpleNthLastChildSelector(b, false), nil
+ case "nth-last-of-type":
+ return simpleNthLastChildSelector(b, true), nil
+ }
+ }
+ return nthChildSelector(a, b,
+ name == "nth-last-child" || name == "nth-last-of-type",
+ name == "nth-of-type" || name == "nth-last-of-type"),
+ nil
+
+ case "first-child":
+ return simpleNthChildSelector(1, false), nil
+ case "last-child":
+ return simpleNthLastChildSelector(1, false), nil
+ case "first-of-type":
+ return simpleNthChildSelector(1, true), nil
+ case "last-of-type":
+ return simpleNthLastChildSelector(1, true), nil
+ case "only-child":
+ return onlyChildSelector(false), nil
+ case "only-of-type":
+ return onlyChildSelector(true), nil
+ case "input":
+ return inputSelector, nil
+ case "empty":
+ return emptyElementSelector, nil
+ case "root":
+ return rootSelector, nil
+ }
+
+ return nil, fmt.Errorf("unknown pseudoclass :%s", name)
+}
+
+// parseInteger parses a decimal integer.
+func (p *parser) parseInteger() (int, error) {
+ i := p.i
+ start := i
+ for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' {
+ i++
+ }
+ if i == start {
+ return 0, errors.New("expected integer, but didn't find it")
+ }
+ p.i = i
+
+ val, err := strconv.Atoi(p.s[start:i])
+ if err != nil {
+ return 0, err
+ }
+
+ return val, nil
+}
+
+// parseNth parses the argument for :nth-child (normally of the form an+b).
+func (p *parser) parseNth() (a, b int, err error) {
+ // initial state
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '-':
+ p.i++
+ goto negativeA
+ case '+':
+ p.i++
+ goto positiveA
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ goto positiveA
+ case 'n', 'N':
+ a = 1
+ p.i++
+ goto readN
+ case 'o', 'O', 'e', 'E':
+ id, nameErr := p.parseName()
+ if nameErr != nil {
+ return 0, 0, nameErr
+ }
+ id = toLowerASCII(id)
+ if id == "odd" {
+ return 2, 1, nil
+ }
+ if id == "even" {
+ return 2, 0, nil
+ }
+ return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id)
+ default:
+ goto invalid
+ }
+
+positiveA:
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ a, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ goto readA
+ case 'n', 'N':
+ a = 1
+ p.i++
+ goto readN
+ default:
+ goto invalid
+ }
+
+negativeA:
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ a, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ a = -a
+ goto readA
+ case 'n', 'N':
+ a = -1
+ p.i++
+ goto readN
+ default:
+ goto invalid
+ }
+
+readA:
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case 'n', 'N':
+ p.i++
+ goto readN
+ default:
+ // The number we read as a is actually b.
+ return 0, a, nil
+ }
+
+readN:
+ p.skipWhitespace()
+ if p.i >= len(p.s) {
+ goto eof
+ }
+ switch p.s[p.i] {
+ case '+':
+ p.i++
+ p.skipWhitespace()
+ b, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ return a, b, nil
+ case '-':
+ p.i++
+ p.skipWhitespace()
+ b, err = p.parseInteger()
+ if err != nil {
+ return 0, 0, err
+ }
+ return a, -b, nil
+ default:
+ return a, 0, nil
+ }
+
+eof:
+ return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b")
+
+invalid:
+ return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b")
+}
+
+// parseSimpleSelectorSequence parses a selector sequence that applies to
+// a single element.
+func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
+ var result Selector
+
+ if p.i >= len(p.s) {
+ return nil, errors.New("expected selector, found EOF instead")
+ }
+
+ switch p.s[p.i] {
+ case '*':
+ // It's the universal selector. Just skip over it, since it doesn't affect the meaning.
+ p.i++
+ case '#', '.', '[', ':':
+		// There's no type selector. Wait to process the others in the main loop below.
+ default:
+ r, err := p.parseTypeSelector()
+ if err != nil {
+ return nil, err
+ }
+ result = r
+ }
+
+loop:
+ for p.i < len(p.s) {
+ var ns Selector
+ var err error
+ switch p.s[p.i] {
+ case '#':
+ ns, err = p.parseIDSelector()
+ case '.':
+ ns, err = p.parseClassSelector()
+ case '[':
+ ns, err = p.parseAttributeSelector()
+ case ':':
+ ns, err = p.parsePseudoclassSelector()
+ default:
+ break loop
+ }
+ if err != nil {
+ return nil, err
+ }
+ if result == nil {
+ result = ns
+ } else {
+ result = intersectionSelector(result, ns)
+ }
+ }
+
+ if result == nil {
+ result = func(n *html.Node) bool {
+ return n.Type == html.ElementNode
+ }
+ }
+
+ return result, nil
+}
+
+// parseSelector parses a selector that may include combinators.
+func (p *parser) parseSelector() (result Selector, err error) {
+ p.skipWhitespace()
+ result, err = p.parseSimpleSelectorSequence()
+ if err != nil {
+ return
+ }
+
+ for {
+ var combinator byte
+ if p.skipWhitespace() {
+ combinator = ' '
+ }
+ if p.i >= len(p.s) {
+ return
+ }
+
+ switch p.s[p.i] {
+ case '+', '>', '~':
+ combinator = p.s[p.i]
+ p.i++
+ p.skipWhitespace()
+ case ',', ')':
+ // These characters can't begin a selector, but they can legally occur after one.
+ return
+ }
+
+ if combinator == 0 {
+ return
+ }
+
+ c, err := p.parseSimpleSelectorSequence()
+ if err != nil {
+ return nil, err
+ }
+
+ switch combinator {
+ case ' ':
+ result = descendantSelector(result, c)
+ case '>':
+ result = childSelector(result, c)
+ case '+':
+ result = siblingSelector(result, c, true)
+ case '~':
+ result = siblingSelector(result, c, false)
+ }
+ }
+
+ panic("unreachable")
+}
+
+// parseSelectorGroup parses a group of selectors, separated by commas.
+func (p *parser) parseSelectorGroup() (result Selector, err error) {
+ result, err = p.parseSelector()
+ if err != nil {
+ return
+ }
+
+ for p.i < len(p.s) {
+ if p.s[p.i] != ',' {
+ return result, nil
+ }
+ p.i++
+ c, err := p.parseSelector()
+ if err != nil {
+ return nil, err
+ }
+ result = unionSelector(result, c)
+ }
+
+ return
+}
diff --git a/vendor/github.com/andybalholm/cascadia/selector.go b/vendor/github.com/andybalholm/cascadia/selector.go
new file mode 100644
index 000000000..9fb05ccb7
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/selector.go
@@ -0,0 +1,622 @@
+package cascadia
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "golang.org/x/net/html"
+)
+
+// the Selector type, and functions for creating them
+
+// A Selector is a function which tells whether a node matches or not.
+type Selector func(*html.Node) bool
+
+// hasChildMatch returns whether n has any child that matches a.
+func hasChildMatch(n *html.Node, a Selector) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// hasDescendantMatch performs a depth-first search of n's descendants,
+// testing whether any of them match a. It returns true as soon as a match is
+// found, or false if no match is found.
+func hasDescendantMatch(n *html.Node, a Selector) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Compile parses a selector and returns, if successful, a Selector object
+// that can be used to match against html.Node objects.
+func Compile(sel string) (Selector, error) {
+ p := &parser{s: sel}
+ compiled, err := p.parseSelectorGroup()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
+ }
+
+ return compiled, nil
+}
+
+// MustCompile is like Compile, but panics instead of returning an error.
+func MustCompile(sel string) Selector {
+ compiled, err := Compile(sel)
+ if err != nil {
+ panic(err)
+ }
+ return compiled
+}
+
+// MatchAll returns a slice of the nodes that match the selector,
+// from n and its children.
+func (s Selector) MatchAll(n *html.Node) []*html.Node {
+ return s.matchAllInto(n, nil)
+}
+
+func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node {
+ if s(n) {
+ storage = append(storage, n)
+ }
+
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ storage = s.matchAllInto(child, storage)
+ }
+
+ return storage
+}
+
+// Match returns true if the node matches the selector.
+func (s Selector) Match(n *html.Node) bool {
+ return s(n)
+}
+
+// MatchFirst returns the first node that matches s, from n and its children.
+func (s Selector) MatchFirst(n *html.Node) *html.Node {
+ if s.Match(n) {
+ return n
+ }
+
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ m := s.MatchFirst(c)
+ if m != nil {
+ return m
+ }
+ }
+ return nil
+}
+
+// Filter returns the nodes in nodes that match the selector.
+func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
+ for _, n := range nodes {
+ if s(n) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+// typeSelector returns a Selector that matches elements with a given tag name.
+func typeSelector(tag string) Selector {
+ tag = toLowerASCII(tag)
+ return func(n *html.Node) bool {
+ return n.Type == html.ElementNode && n.Data == tag
+ }
+}
+
+// toLowerASCII returns s with all ASCII capital letters lowercased.
+func toLowerASCII(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; 'A' <= c && c <= 'Z' {
+ if b == nil {
+ b = make([]byte, len(s))
+ copy(b, s)
+ }
+ b[i] = s[i] + ('a' - 'A')
+ }
+ }
+
+ if b == nil {
+ return s
+ }
+
+ return string(b)
+}
+
+// attributeSelector returns a Selector that matches elements
+// where the attribute named key satisfies the function f.
+func attributeSelector(key string, f func(string) bool) Selector {
+ key = toLowerASCII(key)
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && f(a.Val) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// attributeExistsSelector returns a Selector that matches elements that have
+// an attribute named key.
+func attributeExistsSelector(key string) Selector {
+ return attributeSelector(key, func(string) bool { return true })
+}
+
+// attributeEqualsSelector returns a Selector that matches elements where
+// the attribute named key has the value val.
+func attributeEqualsSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ return s == val
+ })
+}
+
+// attributeNotEqualSelector returns a Selector that matches elements where
+// the attribute named key does not have the value val.
+func attributeNotEqualSelector(key, val string) Selector {
+ key = toLowerASCII(key)
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && a.Val == val {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+// attributeIncludesSelector returns a Selector that matches elements where
+// the attribute named key is a whitespace-separated list that includes val.
+func attributeIncludesSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ for s != "" {
+ i := strings.IndexAny(s, " \t\r\n\f")
+ if i == -1 {
+ return s == val
+ }
+ if s[:i] == val {
+ return true
+ }
+ s = s[i+1:]
+ }
+ return false
+ })
+}
+
+// attributeDashmatchSelector returns a Selector that matches elements where
+// the attribute named key equals val or starts with val plus a hyphen.
+func attributeDashmatchSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if s == val {
+ return true
+ }
+ if len(s) <= len(val) {
+ return false
+ }
+ if s[:len(val)] == val && s[len(val)] == '-' {
+ return true
+ }
+ return false
+ })
+}
+
+// attributePrefixSelector returns a Selector that matches elements where
+// the attribute named key starts with val.
+func attributePrefixSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.HasPrefix(s, val)
+ })
+}
+
+// attributeSuffixSelector returns a Selector that matches elements where
+// the attribute named key ends with val.
+func attributeSuffixSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.HasSuffix(s, val)
+ })
+}
+
+// attributeSubstringSelector returns a Selector that matches nodes where
+// the attribute named key contains val.
+func attributeSubstringSelector(key, val string) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ if strings.TrimSpace(s) == "" {
+ return false
+ }
+ return strings.Contains(s, val)
+ })
+}
+
+// attributeRegexSelector returns a Selector that matches nodes where
+// the attribute named key matches the regular expression rx
+func attributeRegexSelector(key string, rx *regexp.Regexp) Selector {
+ return attributeSelector(key,
+ func(s string) bool {
+ return rx.MatchString(s)
+ })
+}
+
+// intersectionSelector returns a selector that matches nodes that match
+// both a and b.
+func intersectionSelector(a, b Selector) Selector {
+ return func(n *html.Node) bool {
+ return a(n) && b(n)
+ }
+}
+
+// unionSelector returns a selector that matches elements that match
+// either a or b.
+func unionSelector(a, b Selector) Selector {
+ return func(n *html.Node) bool {
+ return a(n) || b(n)
+ }
+}
+
+// negatedSelector returns a selector that matches elements that do not match a.
+func negatedSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return !a(n)
+ }
+}
+
+// writeNodeText writes the text contained in n and its descendants to b.
+func writeNodeText(n *html.Node, b *bytes.Buffer) {
+ switch n.Type {
+ case html.TextNode:
+ b.WriteString(n.Data)
+ case html.ElementNode:
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ writeNodeText(c, b)
+ }
+ }
+}
+
+// nodeText returns the text contained in n and its descendants.
+func nodeText(n *html.Node) string {
+ var b bytes.Buffer
+ writeNodeText(n, &b)
+ return b.String()
+}
+
+// nodeOwnText returns the contents of the text nodes that are direct
+// children of n.
+func nodeOwnText(n *html.Node) string {
+ var b bytes.Buffer
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type == html.TextNode {
+ b.WriteString(c.Data)
+ }
+ }
+ return b.String()
+}
+
+// textSubstrSelector returns a selector that matches nodes that
+// contain the given text.
+func textSubstrSelector(val string) Selector {
+ return func(n *html.Node) bool {
+ text := strings.ToLower(nodeText(n))
+ return strings.Contains(text, val)
+ }
+}
+
+// ownTextSubstrSelector returns a selector that matches nodes that
+// directly contain the given text
+func ownTextSubstrSelector(val string) Selector {
+ return func(n *html.Node) bool {
+ text := strings.ToLower(nodeOwnText(n))
+ return strings.Contains(text, val)
+ }
+}
+
+// textRegexSelector returns a selector that matches nodes whose text matches
+// the specified regular expression
+func textRegexSelector(rx *regexp.Regexp) Selector {
+ return func(n *html.Node) bool {
+ return rx.MatchString(nodeText(n))
+ }
+}
+
+// ownTextRegexSelector returns a selector that matches nodes whose text
+// directly matches the specified regular expression
+func ownTextRegexSelector(rx *regexp.Regexp) Selector {
+ return func(n *html.Node) bool {
+ return rx.MatchString(nodeOwnText(n))
+ }
+}
+
+// hasChildSelector returns a selector that matches elements
+// with a child that matches a.
+func hasChildSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return hasChildMatch(n, a)
+ }
+}
+
+// hasDescendantSelector returns a selector that matches elements
+// with any descendant that matches a.
+func hasDescendantSelector(a Selector) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ return hasDescendantMatch(n, a)
+ }
+}
+
+// nthChildSelector returns a selector that implements :nth-child(an+b).
+// If last is true, implements :nth-last-child instead.
+// If ofType is true, implements :nth-of-type instead.
+func nthChildSelector(a, b int, last, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ i := -1
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ i = count
+ if !last {
+ break
+ }
+ }
+ }
+
+ if i == -1 {
+ // This shouldn't happen, since n should always be one of its parent's children.
+ return false
+ }
+
+ if last {
+ i = count - i + 1
+ }
+
+ i -= b
+ if a == 0 {
+ return i == 0
+ }
+
+ return i%a == 0 && i/a >= 0
+ }
+}
+
+// simpleNthChildSelector returns a selector that implements :nth-child(b).
+// If ofType is true, implements :nth-of-type instead.
+func simpleNthChildSelector(b int, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
+ }
+}
+
+// simpleNthLastChildSelector returns a selector that implements
+// :nth-last-child(b). If ofType is true, implements :nth-last-of-type
+// instead.
+func simpleNthLastChildSelector(b int, ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.LastChild; c != nil; c = c.PrevSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
+ }
+}
+
+// onlyChildSelector returns a selector that implements :only-child.
+// If ofType is true, it implements :only-of-type instead.
+func onlyChildSelector(ofType bool) Selector {
+ return func(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+
+ return count == 1
+ }
+}
+
+// inputSelector is a Selector that matches input, select, textarea and button elements.
+func inputSelector(n *html.Node) bool {
+ return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
+}
+
+// emptyElementSelector is a Selector that matches empty elements.
+func emptyElementSelector(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ switch c.Type {
+ case html.ElementNode, html.TextNode:
+ return false
+ }
+ }
+
+ return true
+}
+
+// descendantSelector returns a Selector that matches an element if
+// it matches d and has an ancestor that matches a.
+func descendantSelector(a, d Selector) Selector {
+ return func(n *html.Node) bool {
+ if !d(n) {
+ return false
+ }
+
+ for p := n.Parent; p != nil; p = p.Parent {
+ if a(p) {
+ return true
+ }
+ }
+
+ return false
+ }
+}
+
+// childSelector returns a Selector that matches an element if
+// it matches d and its parent matches a.
+func childSelector(a, d Selector) Selector {
+ return func(n *html.Node) bool {
+ return d(n) && n.Parent != nil && a(n.Parent)
+ }
+}
+
+// siblingSelector returns a Selector that matches an element
+// if it matches s2 and is preceded by an element that matches s1.
+// If adjacent is true, the sibling must be immediately before the element.
+func siblingSelector(s1, s2 Selector, adjacent bool) Selector {
+ return func(n *html.Node) bool {
+ if !s2(n) {
+ return false
+ }
+
+ if adjacent {
+ for n = n.PrevSibling; n != nil; n = n.PrevSibling {
+ if n.Type == html.TextNode || n.Type == html.CommentNode {
+ continue
+ }
+ return s1(n)
+ }
+ return false
+ }
+
+ // Walk backwards looking for element that matches s1
+ for c := n.PrevSibling; c != nil; c = c.PrevSibling {
+ if s1(c) {
+ return true
+ }
+ }
+
+ return false
+ }
+}
+
+// rootSelector implements :root
+func rootSelector(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ if n.Parent == nil {
+ return false
+ }
+ return n.Parent.Type == html.DocumentNode
+}
diff --git a/vendor/github.com/bmatcuk/doublestar/.gitignore b/vendor/github.com/bmatcuk/doublestar/.gitignore
new file mode 100644
index 000000000..76d92ba4b
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/.gitignore
@@ -0,0 +1,29 @@
+# vi
+*~
+*.swp
+*.swo
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/bmatcuk/doublestar/.travis.yml b/vendor/github.com/bmatcuk/doublestar/.travis.yml
new file mode 100644
index 000000000..cf3c884ad
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - go test -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
+
diff --git a/vendor/github.com/bmatcuk/doublestar/LICENSE b/vendor/github.com/bmatcuk/doublestar/LICENSE
new file mode 100644
index 000000000..309c9d1d1
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Bob Matcuk
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/bmatcuk/doublestar/README.md b/vendor/github.com/bmatcuk/doublestar/README.md
new file mode 100644
index 000000000..8e365c5e3
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/README.md
@@ -0,0 +1,109 @@
+
+[Build Status](https://travis-ci.org/bmatcuk/doublestar)
+[Code Coverage](https://codecov.io/github/bmatcuk/doublestar?branch=master)
+
+# doublestar
+
+**doublestar** is a [golang](http://golang.org/) implementation of path pattern
+matching and globbing with support for "doublestar" (aka globstar: `**`)
+patterns.
+
+doublestar patterns match files and directories recursively. For example, if
+you had the following directory structure:
+
+```
+grandparent
+`-- parent
+ |-- child1
+ `-- child2
+```
+
+You could find the children with patterns such as: `**/child*`,
+`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will
+return all files and directories recursively).
+
+Bash's globstar is doublestar's inspiration and, as such, works similarly.
+Note that the doublestar must appear as a path component by itself. A pattern
+such as `/path**` is invalid and will be treated the same as `/path*`, but
+`/path*/**` should achieve the desired result. Additionally, `/path/**` will
+match all directories and files under the path directory, but `/path/**/` will
+only match directories.
+
+## Installation
+
+**doublestar** can be installed via `go get`:
+
+```bash
+go get github.com/bmatcuk/doublestar
+```
+
+To use it in your code, you must import it:
+
+```go
+import "github.com/bmatcuk/doublestar"
+```
+
+## Functions
+
+### Match
+```go
+func Match(pattern, name string) (bool, error)
+```
+
+Match returns true if `name` matches the file name `pattern`
+([see below](#patterns)). `name` and `pattern` are split on forward slash (`/`)
+characters and may be relative or absolute.
+
+Note: `Match()` is meant to be a drop-in replacement for `path.Match()`. As
+such, it always uses `/` as the path separator. If you are writing code that
+will run on systems where `/` is not the path separator (such as Windows), you
+want to use `PathMatch()` (below) instead.
+
+
+### PathMatch
+```go
+func PathMatch(pattern, name string) (bool, error)
+```
+
+PathMatch returns true if `name` matches the file name `pattern`
+([see below](#patterns)). The difference between Match and PathMatch is that
+PathMatch will automatically use your system's path separator to split `name`
+and `pattern`.
+
+`PathMatch()` is meant to be a drop-in replacement for `filepath.Match()`.
+
+### Glob
+```go
+func Glob(pattern string) ([]string, error)
+```
+
+Glob finds all files and directories in the filesystem that match `pattern`
+([see below](#patterns)). `pattern` may be relative (to the current working
+directory), or absolute.
+
+`Glob()` is meant to be a drop-in replacement for `filepath.Glob()`.
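+
+For example, a minimal sketch exercising these functions (the patterns and file
+names are only illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/bmatcuk/doublestar"
+)
+
+func main() {
+	// Match always splits on '/', regardless of the operating system.
+	ok, err := doublestar.Match("grandparent/**/child?", "grandparent/parent/child1")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(ok) // true
+
+	// PathMatch uses the OS path separator instead.
+	ok, err = doublestar.PathMatch("grandparent/**/child?", "grandparent/parent/child1")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(ok) // true on systems where '/' is the separator
+
+	// Glob walks the filesystem, so its result depends on what actually
+	// exists beneath the current working directory.
+	matches, err := doublestar.Glob("**/*.go")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(len(matches))
+}
+```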
+
+## Patterns
+
+**doublestar** supports the following special terms in the patterns:
+
+Special Terms | Meaning
+------------- | -------
+`*` | matches any sequence of non-path-separators
+`**` | matches any sequence of characters, including path separators
+`?` | matches any single non-path-separator character
+`[class]` | matches any single non-path-separator character against a class of characters ([see below](#character-classes))
+`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches
+
+Any character with a special meaning can be escaped with a backslash (`\`).
+
+### Character Classes
+
+Character classes support the following:
+
+Class | Meaning
+---------- | -------
+`[abc]` | matches any single character within the set
+`[a-z]` | matches any single character in the range
+`[^class]` | matches any single character which does *not* match the class
+
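+A few illustrative matches combining these terms (the paths are hypothetical;
+expected results are shown in the comments):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/bmatcuk/doublestar"
+)
+
+func main() {
+	cases := [][2]string{
+		{"**/child[1-2]", "grandparent/parent/child1"}, // true: character range
+		{"parent/{child1,child2}", "parent/child2"},    // true: comma-separated alternatives
+		{"/path**", "/path/to/file"},                   // false: treated like /path*, a single component
+	}
+	for _, c := range cases {
+		ok, err := doublestar.Match(c[0], c[1])
+		fmt.Println(c[0], c[1], ok, err)
+	}
+}
+```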
diff --git a/vendor/github.com/bmatcuk/doublestar/doublestar.go b/vendor/github.com/bmatcuk/doublestar/doublestar.go
new file mode 100644
index 000000000..ceab4e35b
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/doublestar.go
@@ -0,0 +1,455 @@
+package doublestar
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "unicode/utf8"
+)
+
+var ErrBadPattern = path.ErrBadPattern
+
+// Split a path on the given separator, respecting escaping.
+func splitPathOnSeparator(path string, separator rune) []string {
+ // if the separator is '\\', then we can just split...
+ if separator == '\\' {
+ return strings.Split(path, string(separator))
+ }
+
+ // otherwise, we need to be careful of situations where the separator was escaped
+ cnt := strings.Count(path, string(separator))
+ if cnt == 0 {
+ return []string{path}
+ }
+ ret := make([]string, cnt+1)
+ pathlen := len(path)
+ separatorLen := utf8.RuneLen(separator)
+ idx := 0
+ for start := 0; start < pathlen; {
+ end := indexRuneWithEscaping(path[start:], separator)
+ if end == -1 {
+ end = pathlen
+ } else {
+ end += start
+ }
+ ret[idx] = path[start:end]
+ start = end + separatorLen
+ idx++
+ }
+ return ret[:idx]
+}
+
+// Find the first index of a rune in a string,
+// ignoring any times the rune is escaped using "\".
+func indexRuneWithEscaping(s string, r rune) int {
+ end := strings.IndexRune(s, r)
+ if end == -1 {
+ return -1
+ }
+ if end > 0 && s[end-1] == '\\' {
+ start := end + utf8.RuneLen(r)
+ end = indexRuneWithEscaping(s[start:], r)
+ if end != -1 {
+ end += start
+ }
+ }
+ return end
+}
+
+// Match returns true if name matches the shell file name pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of non-path-separators
+// '**' matches any sequence of characters, including
+// path separators.
+// '?' matches any single non-path-separator character
+// '[' [ '^' ] { character-range } ']'
+// character class (must be non-empty)
+// '{' { term } [ ',' { term } ... ] '}'
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The path-separator defaults to the '/' character. The only possible
+// returned error is ErrBadPattern, when pattern is malformed.
+//
+// Note: this is meant as a drop-in replacement for path.Match() which
+// always uses '/' as the path separator. If you want to support systems
+// which use a different path separator (such as Windows), what you want
+// is the PathMatch() function below.
+//
+func Match(pattern, name string) (bool, error) {
+ return matchWithSeparator(pattern, name, '/')
+}
+
+// PathMatch is like Match except that it uses your system's path separator.
+// For most systems, this will be '/'. However, for Windows, it would be '\\'.
+// Note that for systems where the path separator is '\\', escaping is
+// disabled.
+//
+// Note: this is meant as a drop-in replacement for filepath.Match().
+//
+func PathMatch(pattern, name string) (bool, error) {
+ return matchWithSeparator(pattern, name, os.PathSeparator)
+}
+
+// matchWithSeparator returns true if name matches the shell file name pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of non-path-separators
+// '**' matches any sequence of characters, including
+// path separators.
+// '?' matches any single non-path-separator character
+// '[' [ '^' ] { character-range } ']'
+// character class (must be non-empty)
+// '{' { term } [ ',' { term } ... ] '}'
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c, unless separator is '\\'
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+func matchWithSeparator(pattern, name string, separator rune) (bool, error) {
+ patternComponents := splitPathOnSeparator(pattern, separator)
+ nameComponents := splitPathOnSeparator(name, separator)
+ return doMatching(patternComponents, nameComponents)
+}
+
+func doMatching(patternComponents, nameComponents []string) (matched bool, err error) {
+ // check for some base-cases
+ patternLen, nameLen := len(patternComponents), len(nameComponents)
+ if patternLen == 0 && nameLen == 0 {
+ return true, nil
+ }
+ if patternLen == 0 || nameLen == 0 {
+ return false, nil
+ }
+
+ patIdx, nameIdx := 0, 0
+ for patIdx < patternLen && nameIdx < nameLen {
+ if patternComponents[patIdx] == "**" {
+ // if our last pattern component is a doublestar, we're done -
+ // doublestar will match any remaining name components, if any.
+ if patIdx++; patIdx >= patternLen {
+ return true, nil
+ }
+
+ // otherwise, try matching remaining components
+ for ; nameIdx < nameLen; nameIdx++ {
+ if m, _ := doMatching(patternComponents[patIdx:], nameComponents[nameIdx:]); m {
+ return true, nil
+ }
+ }
+ return false, nil
+ } else {
+ // try matching components
+ matched, err = matchComponent(patternComponents[patIdx], nameComponents[nameIdx])
+ if !matched || err != nil {
+ return
+ }
+ }
+ patIdx++
+ nameIdx++
+ }
+ return patIdx >= patternLen && nameIdx >= nameLen, nil
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of pattern is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// Your system path separator is automatically used. This means on
+// systems where the separator is '\\' (Windows), escaping will be
+// disabled.
+//
+// Note: this is meant as a drop-in replacement for filepath.Glob().
+//
+func Glob(pattern string) (matches []string, err error) {
+ patternComponents := splitPathOnSeparator(filepath.ToSlash(pattern), '/')
+ if len(patternComponents) == 0 {
+ return nil, nil
+ }
+
+ // On Windows systems, this will return the drive name ('C:'), on others,
+ // it will return an empty string.
+ volumeName := filepath.VolumeName(pattern)
+
+ // If the first pattern component is equal to the volume name, then the
+ // pattern is an absolute path.
+ if patternComponents[0] == volumeName {
+ return doGlob(fmt.Sprintf("%s%s", volumeName, string(os.PathSeparator)), patternComponents[1:], matches)
+ }
+
+ // otherwise, it's a relative pattern
+ return doGlob(".", patternComponents, matches)
+}
+
+// Perform a glob
+func doGlob(basedir string, components, matches []string) (m []string, e error) {
+ m = matches
+ e = nil
+
+ // figure out how many components we don't need to glob because they're
+ // just names without patterns - we'll use os.Lstat below to check if that
+ // path actually exists
+ patLen := len(components)
+ patIdx := 0
+ for ; patIdx < patLen; patIdx++ {
+ if strings.IndexAny(components[patIdx], "*?[{\\") >= 0 {
+ break
+ }
+ }
+ if patIdx > 0 {
+ basedir = filepath.Join(basedir, filepath.Join(components[0:patIdx]...))
+ }
+
+ // Lstat will return an error if the file/directory doesn't exist
+ fi, err := os.Lstat(basedir)
+ if err != nil {
+ return
+ }
+
+ // if there are no more components, we've found a match
+ if patIdx >= patLen {
+ m = append(m, basedir)
+ return
+ }
+
+ // otherwise, we need to check each item in the directory...
+ // first, if basedir is a symlink, follow it...
+ if (fi.Mode() & os.ModeSymlink) != 0 {
+ fi, err = os.Stat(basedir)
+ if err != nil {
+ return
+ }
+ }
+
+ // confirm it's a directory...
+ if !fi.IsDir() {
+ return
+ }
+
+ // read directory
+ dir, err := os.Open(basedir)
+ if err != nil {
+ return
+ }
+ defer dir.Close()
+
+ files, _ := dir.Readdir(-1)
+ lastComponent := (patIdx + 1) >= patLen
+ if components[patIdx] == "**" {
+ // if the current component is a doublestar, we'll try depth-first
+ for _, file := range files {
+ // if symlink, we may want to follow
+ if (file.Mode() & os.ModeSymlink) != 0 {
+ file, err = os.Stat(filepath.Join(basedir, file.Name()))
+ if err != nil {
+ continue
+ }
+ }
+
+ if file.IsDir() {
+ // recurse into directories
+ if lastComponent {
+ m = append(m, filepath.Join(basedir, file.Name()))
+ }
+ m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx:], m)
+ } else if lastComponent {
+ // if the pattern's last component is a doublestar, we match filenames, too
+ m = append(m, filepath.Join(basedir, file.Name()))
+ }
+ }
+ if lastComponent {
+ return // we're done
+ }
+ patIdx++
+ lastComponent = (patIdx + 1) >= patLen
+ }
+
+ // check items in current directory and recurse
+ var match bool
+ for _, file := range files {
+ match, e = matchComponent(components[patIdx], file.Name())
+ if e != nil {
+ return
+ }
+ if match {
+ if lastComponent {
+ m = append(m, filepath.Join(basedir, file.Name()))
+ } else {
+ m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx+1:], m)
+ }
+ }
+ }
+ return
+}
+
+// Attempt to match a single pattern component with a path component
+func matchComponent(pattern, name string) (bool, error) {
+ // check some base cases
+ patternLen, nameLen := len(pattern), len(name)
+ if patternLen == 0 && nameLen == 0 {
+ return true, nil
+ }
+ if patternLen == 0 {
+ return false, nil
+ }
+ if nameLen == 0 && pattern != "*" {
+ return false, nil
+ }
+
+ // check for matches one rune at a time
+ patIdx, nameIdx := 0, 0
+ for patIdx < patternLen && nameIdx < nameLen {
+ patRune, patAdj := utf8.DecodeRuneInString(pattern[patIdx:])
+ nameRune, nameAdj := utf8.DecodeRuneInString(name[nameIdx:])
+ if patRune == '\\' {
+ // handle escaped runes
+ patIdx += patAdj
+ patRune, patAdj = utf8.DecodeRuneInString(pattern[patIdx:])
+ if patRune == utf8.RuneError {
+ return false, ErrBadPattern
+ } else if patRune == nameRune {
+ patIdx += patAdj
+ nameIdx += nameAdj
+ } else {
+ return false, nil
+ }
+ } else if patRune == '*' {
+ // handle stars
+ if patIdx += patAdj; patIdx >= patternLen {
+ // a star at the end of a pattern will always
+ // match the rest of the path
+ return true, nil
+ }
+
+ // check if we can make any matches
+ for ; nameIdx < nameLen; nameIdx += nameAdj {
+ if m, _ := matchComponent(pattern[patIdx:], name[nameIdx:]); m {
+ return true, nil
+ }
+ }
+ return false, nil
+ } else if patRune == '[' {
+ // handle character sets
+ patIdx += patAdj
+ endClass := indexRuneWithEscaping(pattern[patIdx:], ']')
+ if endClass == -1 {
+ return false, ErrBadPattern
+ }
+ endClass += patIdx
+ classRunes := []rune(pattern[patIdx:endClass])
+ classRunesLen := len(classRunes)
+ if classRunesLen > 0 {
+ classIdx := 0
+ matchClass := false
+ if classRunes[0] == '^' {
+ classIdx++
+ }
+ for classIdx < classRunesLen {
+ low := classRunes[classIdx]
+ if low == '-' {
+ return false, ErrBadPattern
+ }
+ classIdx++
+ if low == '\\' {
+ if classIdx < classRunesLen {
+ low = classRunes[classIdx]
+ classIdx++
+ } else {
+ return false, ErrBadPattern
+ }
+ }
+ high := low
+ if classIdx < classRunesLen && classRunes[classIdx] == '-' {
+ // we have a range of runes
+ if classIdx++; classIdx >= classRunesLen {
+ return false, ErrBadPattern
+ }
+ high = classRunes[classIdx]
+ if high == '-' {
+ return false, ErrBadPattern
+ }
+ classIdx++
+ if high == '\\' {
+ if classIdx < classRunesLen {
+ high = classRunes[classIdx]
+ classIdx++
+ } else {
+ return false, ErrBadPattern
+ }
+ }
+ }
+ if low <= nameRune && nameRune <= high {
+ matchClass = true
+ }
+ }
+ if matchClass == (classRunes[0] == '^') {
+ return false, nil
+ }
+ } else {
+ return false, ErrBadPattern
+ }
+ patIdx = endClass + 1
+ nameIdx += nameAdj
+ } else if patRune == '{' {
+ // handle alternatives such as {alt1,alt2,...}
+ patIdx += patAdj
+ endOptions := indexRuneWithEscaping(pattern[patIdx:], '}')
+ if endOptions == -1 {
+ return false, ErrBadPattern
+ }
+ endOptions += patIdx
+ options := splitPathOnSeparator(pattern[patIdx:endOptions], ',')
+ patIdx = endOptions + 1
+ for _, o := range options {
+ m, e := matchComponent(o+pattern[patIdx:], name[nameIdx:])
+ if e != nil {
+ return false, e
+ }
+ if m {
+ return true, nil
+ }
+ }
+ return false, nil
+ } else if patRune == '?' || patRune == nameRune {
+ // handle single-rune wildcard
+ patIdx += patAdj
+ nameIdx += nameAdj
+ } else {
+ return false, nil
+ }
+ }
+ if patIdx >= patternLen && nameIdx >= nameLen {
+ return true, nil
+ }
+ if nameIdx >= nameLen && pattern[patIdx:] == "*" || pattern[patIdx:] == "**" {
+ return true, nil
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/bmatcuk/doublestar/go.mod b/vendor/github.com/bmatcuk/doublestar/go.mod
new file mode 100644
index 000000000..1d0378b15
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/go.mod
@@ -0,0 +1 @@
+module github.com/bmatcuk/doublestar
diff --git a/vendor/github.com/disintegration/imaging/.travis.yml b/vendor/github.com/disintegration/imaging/.travis.yml
new file mode 100644
index 000000000..1d4bdd5c0
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+go:
+ - "1.7.x"
+ - "1.8.x"
+ - "1.9.x"
+ - "1.10.x"
+ - "1.11.x"
+
+before_install:
+ - go get github.com/mattn/goveralls
+
+script:
+ - go test -v -race -cover
+ - $GOPATH/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/disintegration/imaging/LICENSE b/vendor/github.com/disintegration/imaging/LICENSE
new file mode 100644
index 000000000..a4144a9d2
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2012 Grigory Dryapak
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/disintegration/imaging/README.md b/vendor/github.com/disintegration/imaging/README.md
new file mode 100644
index 000000000..f7d6fb4d6
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/README.md
@@ -0,0 +1,197 @@
+# Imaging
+
+[GoDoc](https://godoc.org/github.com/disintegration/imaging) ·
+[Build Status](https://travis-ci.org/disintegration/imaging) ·
+[Coverage Status](https://coveralls.io/github/disintegration/imaging?branch=master) ·
+[Go Report Card](https://goreportcard.com/report/github.com/disintegration/imaging)
+
+Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
+
+All the image processing functions provided by the package accept any image type that implements the `image.Image`
+interface as an input and return a new image of type `*image.NRGBA` (32-bit RGBA colors, non-premultiplied alpha).
+
+## Installation
+
+ go get -u github.com/disintegration/imaging
+
+## Documentation
+
+http://godoc.org/github.com/disintegration/imaging
+
+## Usage examples
+
+A few usage examples can be found below. See the documentation for the full list of supported functions.
+
+### Image resizing
+
+```go
+// Resize srcImage to size = 128x128px using the Lanczos filter.
+dstImage128 := imaging.Resize(srcImage, 128, 128, imaging.Lanczos)
+
+// Resize srcImage to width = 800px preserving the aspect ratio.
+dstImage800 := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)
+
+// Scale down srcImage to fit the 800x600px bounding box.
+dstImageFit := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
+
+// Resize and crop the srcImage to fill the 100x100px area.
+dstImageFill := imaging.Fill(srcImage, 100, 100, imaging.Center, imaging.Lanczos)
+```
+
+Imaging supports image resizing using various resampling filters. The most notable ones:
+- `Lanczos` - A high-quality resampling filter for photographic images yielding sharp results.
+- `CatmullRom` - A sharp cubic filter that is faster than the Lanczos filter while providing similar results.
+- `MitchellNetravali` - A cubic filter that produces smoother results with fewer ringing artifacts than CatmullRom.
+- `Linear` - Bilinear resampling filter, produces smooth output. Faster than cubic filters.
+- `Box` - Simple and fast averaging filter appropriate for downscaling. When upscaling it's similar to NearestNeighbor.
+- `NearestNeighbor` - Fastest resampling filter, no antialiasing.
+
+The full list of supported filters: NearestNeighbor, Box, Linear, Hermite, MitchellNetravali, CatmullRom, BSpline, Gaussian, Lanczos, Hann, Hamming, Blackman, Bartlett, Welch, Cosine. Custom filters can be created using the `ResampleFilter` struct.
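+
+For example, a custom filter can be sketched as follows; `src` stands for any
+`image.Image` and the standard `math` package is assumed to be imported (a
+triangle kernel with support 1.0 is roughly what the built-in `Linear` filter
+does):
+
+```go
+triangle := imaging.ResampleFilter{
+    Support: 1.0,
+    Kernel: func(x float64) float64 {
+        x = math.Abs(x)
+        if x < 1.0 {
+            return 1.0 - x
+        }
+        return 0
+    },
+}
+dst := imaging.Resize(src, 640, 0, triangle)
+```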
+
+**Resampling filters comparison**
+
+Original image:
+
+
+
+The same image resized from 600x400px to 150x100px using different resampling filters.
+From faster (lower quality) to slower (higher quality):
+
+Filter | Resize result
+--------------------------|---------------------------------------------
+`imaging.NearestNeighbor` | 
+`imaging.Linear` | 
+`imaging.CatmullRom` | 
+`imaging.Lanczos` | 
+
+
+### Gaussian Blur
+
+```go
+dstImage := imaging.Blur(srcImage, 0.5)
+```
+
+The sigma parameter controls the strength of the blurring effect.
+
+Original image | Sigma = 0.5 | Sigma = 1.5
+-----------------------------------|----------------------------------------|---------------------------------------
+ |  | 
+
+### Sharpening
+
+```go
+dstImage := imaging.Sharpen(srcImage, 0.5)
+```
+
+`Sharpen` uses a Gaussian function internally. The sigma parameter controls the strength of the sharpening effect.
+
+Original image | Sigma = 0.5 | Sigma = 1.5
+-----------------------------------|-------------------------------------------|------------------------------------------
+ |  | 
+
+### Gamma correction
+
+```go
+dstImage := imaging.AdjustGamma(srcImage, 0.75)
+```
+
+Original image | Gamma = 0.75 | Gamma = 1.25
+-----------------------------------|------------------------------------------|-----------------------------------------
+ |  | 
+
+### Contrast adjustment
+
+```go
+dstImage := imaging.AdjustContrast(srcImage, 20)
+```
+
+Original image | Contrast = 15 | Contrast = -15
+-----------------------------------|--------------------------------------------|-------------------------------------------
+ |  | 
+
+### Brightness adjustment
+
+```go
+dstImage := imaging.AdjustBrightness(srcImage, 20)
+```
+
+Original image | Brightness = 10 | Brightness = -10
+-----------------------------------|----------------------------------------------|---------------------------------------------
+ |  | 
+
+### Saturation adjustment
+
+```go
+dstImage := imaging.AdjustSaturation(srcImage, 20)
+```
+
+Original image | Saturation = 30 | Saturation = -30
+-----------------------------------|----------------------------------------------|---------------------------------------------
+ |  | 
+
+## Example code
+
+```go
+package main
+
+import (
+ "image"
+ "image/color"
+ "log"
+
+ "github.com/disintegration/imaging"
+)
+
+func main() {
+ // Open a test image.
+ src, err := imaging.Open("testdata/flowers.png")
+ if err != nil {
+ log.Fatalf("failed to open image: %v", err)
+ }
+
+ // Crop the original image to 300x300px size using the center anchor.
+ src = imaging.CropAnchor(src, 300, 300, imaging.Center)
+
+ // Resize the cropped image to width = 200px preserving the aspect ratio.
+ src = imaging.Resize(src, 200, 0, imaging.Lanczos)
+
+ // Create a blurred version of the image.
+ img1 := imaging.Blur(src, 5)
+
+ // Create a grayscale version of the image with higher contrast and sharpness.
+ img2 := imaging.Grayscale(src)
+ img2 = imaging.AdjustContrast(img2, 20)
+ img2 = imaging.Sharpen(img2, 2)
+
+ // Create an inverted version of the image.
+ img3 := imaging.Invert(src)
+
+ // Create an embossed version of the image using a convolution filter.
+ img4 := imaging.Convolve3x3(
+ src,
+ [9]float64{
+ -1, -1, 0,
+ -1, 1, 1,
+ 0, 1, 1,
+ },
+ nil,
+ )
+
+ // Create a new image and paste the four produced images into it.
+ dst := imaging.New(400, 400, color.NRGBA{0, 0, 0, 0})
+ dst = imaging.Paste(dst, img1, image.Pt(0, 0))
+ dst = imaging.Paste(dst, img2, image.Pt(0, 200))
+ dst = imaging.Paste(dst, img3, image.Pt(200, 0))
+ dst = imaging.Paste(dst, img4, image.Pt(200, 200))
+
+ // Save the resulting image as JPEG.
+ err = imaging.Save(dst, "testdata/out_example.jpg")
+ if err != nil {
+ log.Fatalf("failed to save image: %v", err)
+ }
+}
+```
+
+Output:
+
+
diff --git a/vendor/github.com/disintegration/imaging/adjust.go b/vendor/github.com/disintegration/imaging/adjust.go
new file mode 100644
index 000000000..9e628ffce
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/adjust.go
@@ -0,0 +1,252 @@
+package imaging
+
+import (
+ "image"
+ "image/color"
+ "math"
+)
+
+// Grayscale produces a grayscale version of the image.
+func Grayscale(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+ for x := 0; x < src.w; x++ {
+ d := dst.Pix[i : i+3 : i+3]
+ r := d[0]
+ g := d[1]
+ b := d[2]
+ f := 0.299*float64(r) + 0.587*float64(g) + 0.114*float64(b)
+ y := uint8(f + 0.5)
+ d[0] = y
+ d[1] = y
+ d[2] = y
+ i += 4
+ }
+ }
+ })
+ return dst
+}
+
+// Invert produces an inverted (negated) version of the image.
+func Invert(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+ for x := 0; x < src.w; x++ {
+ d := dst.Pix[i : i+3 : i+3]
+ d[0] = 255 - d[0]
+ d[1] = 255 - d[1]
+ d[2] = 255 - d[2]
+ i += 4
+ }
+ }
+ })
+ return dst
+}
+
+// AdjustSaturation changes the saturation of the image using the percentage parameter and returns the adjusted image.
+// The percentage must be in the range (-100, 100).
+// The percentage = 0 gives the original image.
+// The percentage = 100 gives the image with the saturation value doubled for each pixel.
+// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
+//
+// Examples:
+// dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
+// dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
+//
+func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
+ percentage = math.Min(math.Max(percentage, -100), 100)
+ multiplier := 1 + percentage/100
+
+ return AdjustFunc(img, func(c color.NRGBA) color.NRGBA {
+ h, s, l := rgbToHSL(c.R, c.G, c.B)
+ s *= multiplier
+ if s > 1 {
+ s = 1
+ }
+ r, g, b := hslToRGB(h, s, l)
+ return color.NRGBA{r, g, b, c.A}
+ })
+}
+
+// AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image.
+// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
+// The percentage = -100 gives solid gray image.
+//
+// Examples:
+//
+// dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
+// dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%.
+//
+func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
+ percentage = math.Min(math.Max(percentage, -100.0), 100.0)
+ lut := make([]uint8, 256)
+
+ v := (100.0 + percentage) / 100.0
+ for i := 0; i < 256; i++ {
+ if 0 <= v && v <= 1 {
+ lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*v) * 255.0)
+ } else if 1 < v && v < 2 {
+ lut[i] = clamp((0.5 + (float64(i)/255.0-0.5)*(1/(2.0-v))) * 255.0)
+ } else {
+ lut[i] = uint8(float64(i)/255.0+0.5) * 255
+ }
+ }
+
+ return adjustLUT(img, lut)
+}
+
+// AdjustBrightness changes the brightness of the image using the percentage parameter and returns the adjusted image.
+// The percentage must be in range (-100, 100). The percentage = 0 gives the original image.
+// The percentage = -100 gives solid black image. The percentage = 100 gives solid white image.
+//
+// Examples:
+//
+// dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
+// dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%.
+//
+func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
+ percentage = math.Min(math.Max(percentage, -100.0), 100.0)
+ lut := make([]uint8, 256)
+
+ shift := 255.0 * percentage / 100.0
+ for i := 0; i < 256; i++ {
+ lut[i] = clamp(float64(i) + shift)
+ }
+
+ return adjustLUT(img, lut)
+}
+
+// AdjustGamma performs a gamma correction on the image and returns the adjusted image.
+// Gamma parameter must be positive. Gamma = 1.0 gives the original image.
+// Gamma less than 1.0 darkens the image and gamma greater than 1.0 lightens it.
+//
+// Example:
+//
+// dstImage = imaging.AdjustGamma(srcImage, 0.7)
+//
+func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
+ e := 1.0 / math.Max(gamma, 0.0001)
+ lut := make([]uint8, 256)
+
+ for i := 0; i < 256; i++ {
+ lut[i] = clamp(math.Pow(float64(i)/255.0, e) * 255.0)
+ }
+
+ return adjustLUT(img, lut)
+}
+
+// AdjustSigmoid changes the contrast of the image using a sigmoidal function and returns the adjusted image.
+// It's a non-linear contrast change useful for photo adjustments as it preserves highlight and shadow detail.
+// The midpoint parameter is the midpoint of contrast that must be between 0 and 1, typically 0.5.
+// The factor parameter indicates how much to increase or decrease the contrast, typically in range (-10, 10).
+// If the factor parameter is positive the image contrast is increased otherwise the contrast is decreased.
+//
+// Examples:
+//
+// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast.
+// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
+//
+func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
+ if factor == 0 {
+ return Clone(img)
+ }
+
+ lut := make([]uint8, 256)
+ a := math.Min(math.Max(midpoint, 0.0), 1.0)
+ b := math.Abs(factor)
+ sig0 := sigmoid(a, b, 0)
+ sig1 := sigmoid(a, b, 1)
+ e := 1.0e-6
+
+ if factor > 0 {
+ for i := 0; i < 256; i++ {
+ x := float64(i) / 255.0
+ sigX := sigmoid(a, b, x)
+ f := (sigX - sig0) / (sig1 - sig0)
+ lut[i] = clamp(f * 255.0)
+ }
+ } else {
+ for i := 0; i < 256; i++ {
+ x := float64(i) / 255.0
+ arg := math.Min(math.Max((sig1-sig0)*x+sig0, e), 1.0-e)
+ f := a - math.Log(1.0/arg-1.0)/b
+ lut[i] = clamp(f * 255.0)
+ }
+ }
+
+ return adjustLUT(img, lut)
+}
+
+func sigmoid(a, b, x float64) float64 {
+ return 1 / (1 + math.Exp(b*(a-x)))
+}
+
+// adjustLUT applies the given lookup table to the colors of the image.
+func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ lut = lut[0:256]
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+ for x := 0; x < src.w; x++ {
+ d := dst.Pix[i : i+3 : i+3]
+ d[0] = lut[d[0]]
+ d[1] = lut[d[1]]
+ d[2] = lut[d[2]]
+ i += 4
+ }
+ }
+ })
+ return dst
+}
+
+// AdjustFunc applies the fn function to each pixel of the img image and returns the adjusted image.
+//
+// Example:
+//
+// dstImage = imaging.AdjustFunc(
+// srcImage,
+// func(c color.NRGBA) color.NRGBA {
+// // Shift the red channel by 16.
+// r := int(c.R) + 16
+// if r > 255 {
+// r = 255
+// }
+// return color.NRGBA{uint8(r), c.G, c.B, c.A}
+// }
+// )
+//
+func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+src.w*4])
+ for x := 0; x < src.w; x++ {
+ d := dst.Pix[i : i+4 : i+4]
+ r := d[0]
+ g := d[1]
+ b := d[2]
+ a := d[3]
+ c := fn(color.NRGBA{r, g, b, a})
+ d[0] = c.R
+ d[1] = c.G
+ d[2] = c.B
+ d[3] = c.A
+ i += 4
+ }
+ }
+ })
+ return dst
+}
diff --git a/vendor/github.com/disintegration/imaging/convolution.go b/vendor/github.com/disintegration/imaging/convolution.go
new file mode 100644
index 000000000..11eddc162
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/convolution.go
@@ -0,0 +1,148 @@
+package imaging
+
+import (
+ "image"
+)
+
+// ConvolveOptions are convolution parameters.
+type ConvolveOptions struct {
+ // If Normalize is true the kernel is normalized before convolution.
+ Normalize bool
+
+ // If Abs is true the absolute value of each color channel is taken after convolution.
+ Abs bool
+
+ // Bias is added to each color channel value after convolution.
+ Bias int
+}
+
+// Convolve3x3 convolves the image with the specified 3x3 convolution kernel.
+// Default parameters are used if a nil *ConvolveOptions is passed.
+func Convolve3x3(img image.Image, kernel [9]float64, options *ConvolveOptions) *image.NRGBA {
+ return convolve(img, kernel[:], options)
+}
+
+// Convolve5x5 convolves the image with the specified 5x5 convolution kernel.
+// Default parameters are used if a nil *ConvolveOptions is passed.
+func Convolve5x5(img image.Image, kernel [25]float64, options *ConvolveOptions) *image.NRGBA {
+ return convolve(img, kernel[:], options)
+}
+
+func convolve(img image.Image, kernel []float64, options *ConvolveOptions) *image.NRGBA {
+ src := toNRGBA(img)
+ w := src.Bounds().Max.X
+ h := src.Bounds().Max.Y
+ dst := image.NewNRGBA(image.Rect(0, 0, w, h))
+
+ if w < 1 || h < 1 {
+ return dst
+ }
+
+ if options == nil {
+ options = &ConvolveOptions{}
+ }
+
+ if options.Normalize {
+ normalizeKernel(kernel)
+ }
+
+ type coef struct {
+ x, y int
+ k float64
+ }
+ var coefs []coef
+ var m int
+
+ switch len(kernel) {
+ case 9:
+ m = 1
+ case 25:
+ m = 2
+ }
+
+ i := 0
+ for y := -m; y <= m; y++ {
+ for x := -m; x <= m; x++ {
+ if kernel[i] != 0 {
+ coefs = append(coefs, coef{x: x, y: y, k: kernel[i]})
+ }
+ i++
+ }
+ }
+
+ parallel(0, h, func(ys <-chan int) {
+ for y := range ys {
+ for x := 0; x < w; x++ {
+ var r, g, b float64
+ for _, c := range coefs {
+ ix := x + c.x
+ if ix < 0 {
+ ix = 0
+ } else if ix >= w {
+ ix = w - 1
+ }
+
+ iy := y + c.y
+ if iy < 0 {
+ iy = 0
+ } else if iy >= h {
+ iy = h - 1
+ }
+
+ off := iy*src.Stride + ix*4
+ s := src.Pix[off : off+3 : off+3]
+ r += float64(s[0]) * c.k
+ g += float64(s[1]) * c.k
+ b += float64(s[2]) * c.k
+ }
+
+ if options.Abs {
+ if r < 0 {
+ r = -r
+ }
+ if g < 0 {
+ g = -g
+ }
+ if b < 0 {
+ b = -b
+ }
+ }
+
+ if options.Bias != 0 {
+ r += float64(options.Bias)
+ g += float64(options.Bias)
+ b += float64(options.Bias)
+ }
+
+ srcOff := y*src.Stride + x*4
+ dstOff := y*dst.Stride + x*4
+ d := dst.Pix[dstOff : dstOff+4 : dstOff+4]
+ d[0] = clamp(r)
+ d[1] = clamp(g)
+ d[2] = clamp(b)
+ d[3] = src.Pix[srcOff+3]
+ }
+ }
+ })
+
+ return dst
+}
+
+func normalizeKernel(kernel []float64) {
+ var sum, sumpos float64
+ for i := range kernel {
+ sum += kernel[i]
+ if kernel[i] > 0 {
+ sumpos += kernel[i]
+ }
+ }
+ if sum != 0 {
+ for i := range kernel {
+ kernel[i] /= sum
+ }
+ } else if sumpos != 0 {
+ for i := range kernel {
+ kernel[i] /= sumpos
+ }
+ }
+}
diff --git a/vendor/github.com/disintegration/imaging/doc.go b/vendor/github.com/disintegration/imaging/doc.go
new file mode 100644
index 000000000..c98c91250
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/doc.go
@@ -0,0 +1,7 @@
+/*
+Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.).
+
+All the image processing functions provided by the package accept any image type that implements image.Image interface
+as an input, and return a new image of *image.NRGBA type (32bit RGBA colors, non-premultiplied alpha).
+*/
+package imaging
diff --git a/vendor/github.com/disintegration/imaging/effects.go b/vendor/github.com/disintegration/imaging/effects.go
new file mode 100644
index 000000000..47316b701
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/effects.go
@@ -0,0 +1,169 @@
+package imaging
+
+import (
+ "image"
+ "math"
+)
+
+func gaussianBlurKernel(x, sigma float64) float64 {
+ return math.Exp(-(x*x)/(2*sigma*sigma)) / (sigma * math.Sqrt(2*math.Pi))
+}
+
+// Blur produces a blurred version of the image using a Gaussian function.
+// Sigma parameter must be positive and indicates how much the image will be blurred.
+//
+// Example:
+//
+// dstImage := imaging.Blur(srcImage, 3.5)
+//
+func Blur(img image.Image, sigma float64) *image.NRGBA {
+ if sigma <= 0 {
+ return Clone(img)
+ }
+
+ radius := int(math.Ceil(sigma * 3.0))
+ kernel := make([]float64, radius+1)
+
+ for i := 0; i <= radius; i++ {
+ kernel[i] = gaussianBlurKernel(float64(i), sigma)
+ }
+
+ return blurVertical(blurHorizontal(img, kernel), kernel)
+}
+
+func blurHorizontal(img image.Image, kernel []float64) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ radius := len(kernel) - 1
+
+ parallel(0, src.h, func(ys <-chan int) {
+ scanLine := make([]uint8, src.w*4)
+ scanLineF := make([]float64, len(scanLine))
+ for y := range ys {
+ src.scan(0, y, src.w, y+1, scanLine)
+ for i, v := range scanLine {
+ scanLineF[i] = float64(v)
+ }
+ for x := 0; x < src.w; x++ {
+ min := x - radius
+ if min < 0 {
+ min = 0
+ }
+ max := x + radius
+ if max > src.w-1 {
+ max = src.w - 1
+ }
+ var r, g, b, a, wsum float64
+ for ix := min; ix <= max; ix++ {
+ i := ix * 4
+ weight := kernel[absint(x-ix)]
+ wsum += weight
+ s := scanLineF[i : i+4 : i+4]
+ wa := s[3] * weight
+ r += s[0] * wa
+ g += s[1] * wa
+ b += s[2] * wa
+ a += wa
+ }
+ if a != 0 {
+ aInv := 1 / a
+ j := y*dst.Stride + x*4
+ d := dst.Pix[j : j+4 : j+4]
+ d[0] = clamp(r * aInv)
+ d[1] = clamp(g * aInv)
+ d[2] = clamp(b * aInv)
+ d[3] = clamp(a / wsum)
+ }
+ }
+ }
+ })
+
+ return dst
+}
+
+func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ radius := len(kernel) - 1
+
+ parallel(0, src.w, func(xs <-chan int) {
+ scanLine := make([]uint8, src.h*4)
+ scanLineF := make([]float64, len(scanLine))
+ for x := range xs {
+ src.scan(x, 0, x+1, src.h, scanLine)
+ for i, v := range scanLine {
+ scanLineF[i] = float64(v)
+ }
+ for y := 0; y < src.h; y++ {
+ min := y - radius
+ if min < 0 {
+ min = 0
+ }
+ max := y + radius
+ if max > src.h-1 {
+ max = src.h - 1
+ }
+ var r, g, b, a, wsum float64
+ for iy := min; iy <= max; iy++ {
+ i := iy * 4
+ weight := kernel[absint(y-iy)]
+ wsum += weight
+ s := scanLineF[i : i+4 : i+4]
+ wa := s[3] * weight
+ r += s[0] * wa
+ g += s[1] * wa
+ b += s[2] * wa
+ a += wa
+ }
+ if a != 0 {
+ aInv := 1 / a
+ j := y*dst.Stride + x*4
+ d := dst.Pix[j : j+4 : j+4]
+ d[0] = clamp(r * aInv)
+ d[1] = clamp(g * aInv)
+ d[2] = clamp(b * aInv)
+ d[3] = clamp(a / wsum)
+ }
+ }
+ }
+ })
+
+ return dst
+}
+
+// Sharpen produces a sharpened version of the image.
+// Sigma parameter must be positive and indicates how much the image will be sharpened.
+//
+// Example:
+//
+// dstImage := imaging.Sharpen(srcImage, 3.5)
+//
+func Sharpen(img image.Image, sigma float64) *image.NRGBA {
+ if sigma <= 0 {
+ return Clone(img)
+ }
+
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ blurred := Blur(img, sigma)
+
+ parallel(0, src.h, func(ys <-chan int) {
+ scanLine := make([]uint8, src.w*4)
+ for y := range ys {
+ src.scan(0, y, src.w, y+1, scanLine)
+ j := y * dst.Stride
+ for i := 0; i < src.w*4; i++ {
+ val := int(scanLine[i])<<1 - int(blurred.Pix[j])
+ if val < 0 {
+ val = 0
+ } else if val > 0xff {
+ val = 0xff
+ }
+ dst.Pix[j] = uint8(val)
+ j++
+ }
+ }
+ })
+
+ return dst
+}
diff --git a/vendor/github.com/disintegration/imaging/go.mod b/vendor/github.com/disintegration/imaging/go.mod
new file mode 100644
index 000000000..126e8cc66
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/go.mod
@@ -0,0 +1,3 @@
+module github.com/disintegration/imaging
+
+require golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81
diff --git a/vendor/github.com/disintegration/imaging/go.sum b/vendor/github.com/disintegration/imaging/go.sum
new file mode 100644
index 000000000..20c92e460
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81 h1:00VmoueYNlNz/aHIilyyQz/MHSqGoWJzpFv/HW8xpzI=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
diff --git a/vendor/github.com/disintegration/imaging/histogram.go b/vendor/github.com/disintegration/imaging/histogram.go
new file mode 100644
index 000000000..c547fe822
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/histogram.go
@@ -0,0 +1,52 @@
+package imaging
+
+import (
+ "image"
+ "sync"
+)
+
+// Histogram returns a normalized histogram of an image.
+//
+// Resulting histogram is represented as an array of 256 floats, where
+// histogram[i] is a probability of a pixel being of a particular luminance i.
+func Histogram(img image.Image) [256]float64 {
+ var mu sync.Mutex
+ var histogram [256]float64
+ var total float64
+
+ src := newScanner(img)
+ if src.w == 0 || src.h == 0 {
+ return histogram
+ }
+
+ parallel(0, src.h, func(ys <-chan int) {
+ var tmpHistogram [256]float64
+ var tmpTotal float64
+ scanLine := make([]uint8, src.w*4)
+ for y := range ys {
+ src.scan(0, y, src.w, y+1, scanLine)
+ i := 0
+ for x := 0; x < src.w; x++ {
+ s := scanLine[i : i+3 : i+3]
+ r := s[0]
+ g := s[1]
+ b := s[2]
+ y := 0.299*float32(r) + 0.587*float32(g) + 0.114*float32(b)
+ tmpHistogram[int(y+0.5)]++
+ tmpTotal++
+ i += 4
+ }
+ }
+ mu.Lock()
+ for i := 0; i < 256; i++ {
+ histogram[i] += tmpHistogram[i]
+ }
+ total += tmpTotal
+ mu.Unlock()
+ })
+
+ for i := 0; i < 256; i++ {
+ histogram[i] = histogram[i] / total
+ }
+ return histogram
+}
diff --git a/vendor/github.com/disintegration/imaging/io.go b/vendor/github.com/disintegration/imaging/io.go
new file mode 100644
index 000000000..f6c6da86b
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/io.go
@@ -0,0 +1,444 @@
+package imaging
+
+import (
+ "encoding/binary"
+ "errors"
+ "image"
+ "image/draw"
+ "image/gif"
+ "image/jpeg"
+ "image/png"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/image/bmp"
+ "golang.org/x/image/tiff"
+)
+
+type fileSystem interface {
+ Create(string) (io.WriteCloser, error)
+ Open(string) (io.ReadCloser, error)
+}
+
+type localFS struct{}
+
+func (localFS) Create(name string) (io.WriteCloser, error) { return os.Create(name) }
+func (localFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }
+
+var fs fileSystem = localFS{}
+
+type decodeConfig struct {
+ autoOrientation bool
+}
+
+var defaultDecodeConfig = decodeConfig{
+ autoOrientation: false,
+}
+
+// DecodeOption sets an optional parameter for the Decode and Open functions.
+type DecodeOption func(*decodeConfig)
+
+// AutoOrientation returns a DecodeOption that sets the auto-orientation mode.
+// If auto-orientation is enabled, the image will be transformed after decoding
+// according to the EXIF orientation tag (if present). By default it's disabled.
+func AutoOrientation(enabled bool) DecodeOption {
+ return func(c *decodeConfig) {
+ c.autoOrientation = enabled
+ }
+}
+
+// Decode reads an image from r.
+func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
+ cfg := defaultDecodeConfig
+ for _, option := range opts {
+ option(&cfg)
+ }
+
+ if !cfg.autoOrientation {
+ img, _, err := image.Decode(r)
+ return img, err
+ }
+
+ var orient orientation
+ pr, pw := io.Pipe()
+ r = io.TeeReader(r, pw)
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ orient = readOrientation(pr)
+ io.Copy(ioutil.Discard, pr)
+ }()
+
+ img, _, err := image.Decode(r)
+ pw.Close()
+ <-done
+ if err != nil {
+ return nil, err
+ }
+
+ return fixOrientation(img, orient), nil
+}
+
+// Open loads an image from file.
+//
+// Examples:
+//
+// // Load an image from file.
+// img, err := imaging.Open("test.jpg")
+//
+// // Load an image and transform it depending on the EXIF orientation tag (if present).
+// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
+//
+func Open(filename string, opts ...DecodeOption) (image.Image, error) {
+ file, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return Decode(file, opts...)
+}
+
+// Format is an image file format.
+type Format int
+
+// Image file formats.
+const (
+ JPEG Format = iota
+ PNG
+ GIF
+ TIFF
+ BMP
+)
+
+var formatExts = map[string]Format{
+ "jpg": JPEG,
+ "jpeg": JPEG,
+ "png": PNG,
+ "gif": GIF,
+ "tif": TIFF,
+ "tiff": TIFF,
+ "bmp": BMP,
+}
+
+var formatNames = map[Format]string{
+ JPEG: "JPEG",
+ PNG: "PNG",
+ GIF: "GIF",
+ TIFF: "TIFF",
+ BMP: "BMP",
+}
+
+func (f Format) String() string {
+ return formatNames[f]
+}
+
+// ErrUnsupportedFormat means the given image format is not supported.
+var ErrUnsupportedFormat = errors.New("imaging: unsupported image format")
+
+// FormatFromExtension parses image format from filename extension:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+func FormatFromExtension(ext string) (Format, error) {
+ if f, ok := formatExts[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok {
+ return f, nil
+ }
+ return -1, ErrUnsupportedFormat
+}
+
+// FormatFromFilename parses image format from filename:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+func FormatFromFilename(filename string) (Format, error) {
+ ext := filepath.Ext(filename)
+ return FormatFromExtension(ext)
+}
+
+type encodeConfig struct {
+ jpegQuality int
+ gifNumColors int
+ gifQuantizer draw.Quantizer
+ gifDrawer draw.Drawer
+ pngCompressionLevel png.CompressionLevel
+}
+
+var defaultEncodeConfig = encodeConfig{
+ jpegQuality: 95,
+ gifNumColors: 256,
+ gifQuantizer: nil,
+ gifDrawer: nil,
+ pngCompressionLevel: png.DefaultCompression,
+}
+
+// EncodeOption sets an optional parameter for the Encode and Save functions.
+type EncodeOption func(*encodeConfig)
+
+// JPEGQuality returns an EncodeOption that sets the output JPEG quality.
+// Quality ranges from 1 to 100 inclusive, higher is better. Default is 95.
+func JPEGQuality(quality int) EncodeOption {
+ return func(c *encodeConfig) {
+ c.jpegQuality = quality
+ }
+}
+
+// GIFNumColors returns an EncodeOption that sets the maximum number of colors
+// used in the GIF-encoded image. It ranges from 1 to 256. Default is 256.
+func GIFNumColors(numColors int) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifNumColors = numColors
+ }
+}
+
+// GIFQuantizer returns an EncodeOption that sets the quantizer that is used to produce
+// a palette of the GIF-encoded image.
+func GIFQuantizer(quantizer draw.Quantizer) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifQuantizer = quantizer
+ }
+}
+
+// GIFDrawer returns an EncodeOption that sets the drawer that is used to convert
+// the source image to the desired palette of the GIF-encoded image.
+func GIFDrawer(drawer draw.Drawer) EncodeOption {
+ return func(c *encodeConfig) {
+ c.gifDrawer = drawer
+ }
+}
+
+// PNGCompressionLevel returns an EncodeOption that sets the compression level
+// of the PNG-encoded image. Default is png.DefaultCompression.
+func PNGCompressionLevel(level png.CompressionLevel) EncodeOption {
+ return func(c *encodeConfig) {
+ c.pngCompressionLevel = level
+ }
+}
+
+// Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP).
+func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error {
+ cfg := defaultEncodeConfig
+ for _, option := range opts {
+ option(&cfg)
+ }
+
+ switch format {
+ case JPEG:
+ if nrgba, ok := img.(*image.NRGBA); ok && nrgba.Opaque() {
+ rgba := &image.RGBA{
+ Pix: nrgba.Pix,
+ Stride: nrgba.Stride,
+ Rect: nrgba.Rect,
+ }
+ return jpeg.Encode(w, rgba, &jpeg.Options{Quality: cfg.jpegQuality})
+ }
+ return jpeg.Encode(w, img, &jpeg.Options{Quality: cfg.jpegQuality})
+
+ case PNG:
+ encoder := png.Encoder{CompressionLevel: cfg.pngCompressionLevel}
+ return encoder.Encode(w, img)
+
+ case GIF:
+ return gif.Encode(w, img, &gif.Options{
+ NumColors: cfg.gifNumColors,
+ Quantizer: cfg.gifQuantizer,
+ Drawer: cfg.gifDrawer,
+ })
+
+ case TIFF:
+ return tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true})
+
+ case BMP:
+ return bmp.Encode(w, img)
+ }
+
+ return ErrUnsupportedFormat
+}
+
+// Save saves the image to file with the specified filename.
+// The format is determined from the filename extension:
+// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported.
+//
+// Examples:
+//
+// // Save the image as PNG.
+// err := imaging.Save(img, "out.png")
+//
+// // Save the image as JPEG with optional quality parameter set to 80.
+// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
+//
+func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
+ f, err := FormatFromFilename(filename)
+ if err != nil {
+ return err
+ }
+ file, err := fs.Create(filename)
+ if err != nil {
+ return err
+ }
+ err = Encode(file, img, f, opts...)
+ errc := file.Close()
+ if err == nil {
+ err = errc
+ }
+ return err
+}
+
+// orientation is an EXIF flag that specifies the transformation
+// that should be applied to image to display it correctly.
+type orientation int
+
+const (
+ orientationUnspecified = 0
+ orientationNormal = 1
+ orientationFlipH = 2
+ orientationRotate180 = 3
+ orientationFlipV = 4
+ orientationTranspose = 5
+ orientationRotate270 = 6
+ orientationTransverse = 7
+ orientationRotate90 = 8
+)
+
+// readOrientation tries to read the orientation EXIF flag from image data in r.
+// If the EXIF data block is not found or the orientation flag is not found
+// or any other error occurs while reading the data, it returns the
+// orientationUnspecified (0) value.
+func readOrientation(r io.Reader) orientation {
+ const (
+ markerSOI = 0xffd8
+ markerAPP1 = 0xffe1
+ exifHeader = 0x45786966
+ byteOrderBE = 0x4d4d
+ byteOrderLE = 0x4949
+ orientationTag = 0x0112
+ )
+
+ // Check if JPEG SOI marker is present.
+ var soi uint16
+ if err := binary.Read(r, binary.BigEndian, &soi); err != nil {
+ return orientationUnspecified
+ }
+ if soi != markerSOI {
+ return orientationUnspecified // Missing JPEG SOI marker.
+ }
+
+ // Find JPEG APP1 marker.
+ for {
+ var marker, size uint16
+ if err := binary.Read(r, binary.BigEndian, &marker); err != nil {
+ return orientationUnspecified
+ }
+ if err := binary.Read(r, binary.BigEndian, &size); err != nil {
+ return orientationUnspecified
+ }
+ if marker>>8 != 0xff {
+ return orientationUnspecified // Invalid JPEG marker.
+ }
+ if marker == markerAPP1 {
+ break
+ }
+ if size < 2 {
+ return orientationUnspecified // Invalid block size.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, int64(size-2)); err != nil {
+ return orientationUnspecified
+ }
+ }
+
+ // Check if EXIF header is present.
+ var header uint32
+ if err := binary.Read(r, binary.BigEndian, &header); err != nil {
+ return orientationUnspecified
+ }
+ if header != exifHeader {
+ return orientationUnspecified
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
+ return orientationUnspecified
+ }
+
+ // Read byte order information.
+ var (
+ byteOrderTag uint16
+ byteOrder binary.ByteOrder
+ )
+ if err := binary.Read(r, binary.BigEndian, &byteOrderTag); err != nil {
+ return orientationUnspecified
+ }
+ switch byteOrderTag {
+ case byteOrderBE:
+ byteOrder = binary.BigEndian
+ case byteOrderLE:
+ byteOrder = binary.LittleEndian
+ default:
+ return orientationUnspecified // Invalid byte order flag.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 2); err != nil {
+ return orientationUnspecified
+ }
+
+ // Skip the EXIF offset.
+ var offset uint32
+ if err := binary.Read(r, byteOrder, &offset); err != nil {
+ return orientationUnspecified
+ }
+ if offset < 8 {
+ return orientationUnspecified // Invalid offset value.
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, int64(offset-8)); err != nil {
+ return orientationUnspecified
+ }
+
+ // Read the number of tags.
+ var numTags uint16
+ if err := binary.Read(r, byteOrder, &numTags); err != nil {
+ return orientationUnspecified
+ }
+
+ // Find the orientation tag.
+ for i := 0; i < int(numTags); i++ {
+ var tag uint16
+ if err := binary.Read(r, byteOrder, &tag); err != nil {
+ return orientationUnspecified
+ }
+ if tag != orientationTag {
+ if _, err := io.CopyN(ioutil.Discard, r, 10); err != nil {
+ return orientationUnspecified
+ }
+ continue
+ }
+ if _, err := io.CopyN(ioutil.Discard, r, 6); err != nil {
+ return orientationUnspecified
+ }
+ var val uint16
+ if err := binary.Read(r, byteOrder, &val); err != nil {
+ return orientationUnspecified
+ }
+ if val < 1 || val > 8 {
+ return orientationUnspecified // Invalid tag value.
+ }
+ return orientation(val)
+ }
+ return orientationUnspecified // Missing orientation tag.
+}
+
+// fixOrientation applies a transform to img corresponding to the given orientation flag.
+func fixOrientation(img image.Image, o orientation) image.Image {
+ switch o {
+ case orientationNormal:
+ case orientationFlipH:
+ img = FlipH(img)
+ case orientationFlipV:
+ img = FlipV(img)
+ case orientationRotate90:
+ img = Rotate90(img)
+ case orientationRotate180:
+ img = Rotate180(img)
+ case orientationRotate270:
+ img = Rotate270(img)
+ case orientationTranspose:
+ img = Transpose(img)
+ case orientationTransverse:
+ img = Transverse(img)
+ }
+ return img
+}
diff --git a/vendor/github.com/disintegration/imaging/resize.go b/vendor/github.com/disintegration/imaging/resize.go
new file mode 100644
index 000000000..706435e3d
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/resize.go
@@ -0,0 +1,595 @@
+package imaging
+
+import (
+ "image"
+ "math"
+)
+
+type indexWeight struct {
+ index int
+ weight float64
+}
+
+func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWeight {
+ du := float64(srcSize) / float64(dstSize)
+ scale := du
+ if scale < 1.0 {
+ scale = 1.0
+ }
+ ru := math.Ceil(scale * filter.Support)
+
+ out := make([][]indexWeight, dstSize)
+ tmp := make([]indexWeight, 0, dstSize*int(ru+2)*2)
+
+ for v := 0; v < dstSize; v++ {
+ fu := (float64(v)+0.5)*du - 0.5
+
+ begin := int(math.Ceil(fu - ru))
+ if begin < 0 {
+ begin = 0
+ }
+ end := int(math.Floor(fu + ru))
+ if end > srcSize-1 {
+ end = srcSize - 1
+ }
+
+ var sum float64
+ for u := begin; u <= end; u++ {
+ w := filter.Kernel((float64(u) - fu) / scale)
+ if w != 0 {
+ sum += w
+ tmp = append(tmp, indexWeight{index: u, weight: w})
+ }
+ }
+ if sum != 0 {
+ for i := range tmp {
+ tmp[i].weight /= sum
+ }
+ }
+
+ out[v] = tmp
+ tmp = tmp[len(tmp):]
+ }
+
+ return out
+}
+
+// Resize resizes the image to the specified width and height using the specified resampling
+// filter and returns the transformed image. If one of width or height is 0, the image aspect
+// ratio is preserved.
+//
+// Example:
+//
+// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
+//
+func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
+ dstW, dstH := width, height
+ if dstW < 0 || dstH < 0 {
+ return &image.NRGBA{}
+ }
+ if dstW == 0 && dstH == 0 {
+ return &image.NRGBA{}
+ }
+
+ srcW := img.Bounds().Dx()
+ srcH := img.Bounds().Dy()
+ if srcW <= 0 || srcH <= 0 {
+ return &image.NRGBA{}
+ }
+
+ // If new width or height is 0 then preserve aspect ratio, minimum 1px.
+ if dstW == 0 {
+ tmpW := float64(dstH) * float64(srcW) / float64(srcH)
+ dstW = int(math.Max(1.0, math.Floor(tmpW+0.5)))
+ }
+ if dstH == 0 {
+ tmpH := float64(dstW) * float64(srcH) / float64(srcW)
+ dstH = int(math.Max(1.0, math.Floor(tmpH+0.5)))
+ }
+
+ if filter.Support <= 0 {
+ // Nearest-neighbor special case.
+ return resizeNearest(img, dstW, dstH)
+ }
+
+ if srcW != dstW && srcH != dstH {
+ return resizeVertical(resizeHorizontal(img, dstW, filter), dstH, filter)
+ }
+ if srcW != dstW {
+ return resizeHorizontal(img, dstW, filter)
+ }
+ if srcH != dstH {
+ return resizeVertical(img, dstH, filter)
+ }
+ return Clone(img)
+}
+
+func resizeHorizontal(img image.Image, width int, filter ResampleFilter) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, width, src.h))
+ weights := precomputeWeights(width, src.w, filter)
+ parallel(0, src.h, func(ys <-chan int) {
+ scanLine := make([]uint8, src.w*4)
+ for y := range ys {
+ src.scan(0, y, src.w, y+1, scanLine)
+ j0 := y * dst.Stride
+ for x := range weights {
+ var r, g, b, a float64
+ for _, w := range weights[x] {
+ i := w.index * 4
+ s := scanLine[i : i+4 : i+4]
+ aw := float64(s[3]) * w.weight
+ r += float64(s[0]) * aw
+ g += float64(s[1]) * aw
+ b += float64(s[2]) * aw
+ a += aw
+ }
+ if a != 0 {
+ aInv := 1 / a
+ j := j0 + x*4
+ d := dst.Pix[j : j+4 : j+4]
+ d[0] = clamp(r * aInv)
+ d[1] = clamp(g * aInv)
+ d[2] = clamp(b * aInv)
+ d[3] = clamp(a)
+ }
+ }
+ }
+ })
+ return dst
+}
+
+func resizeVertical(img image.Image, height int, filter ResampleFilter) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, height))
+ weights := precomputeWeights(height, src.h, filter)
+ parallel(0, src.w, func(xs <-chan int) {
+ scanLine := make([]uint8, src.h*4)
+ for x := range xs {
+ src.scan(x, 0, x+1, src.h, scanLine)
+ for y := range weights {
+ var r, g, b, a float64
+ for _, w := range weights[y] {
+ i := w.index * 4
+ s := scanLine[i : i+4 : i+4]
+ aw := float64(s[3]) * w.weight
+ r += float64(s[0]) * aw
+ g += float64(s[1]) * aw
+ b += float64(s[2]) * aw
+ a += aw
+ }
+ if a != 0 {
+ aInv := 1 / a
+ j := y*dst.Stride + x*4
+ d := dst.Pix[j : j+4 : j+4]
+ d[0] = clamp(r * aInv)
+ d[1] = clamp(g * aInv)
+ d[2] = clamp(b * aInv)
+ d[3] = clamp(a)
+ }
+ }
+ }
+ })
+ return dst
+}
+
+// resizeNearest is a fast nearest-neighbor resize, no filtering.
+func resizeNearest(img image.Image, width, height int) *image.NRGBA {
+ dst := image.NewNRGBA(image.Rect(0, 0, width, height))
+ dx := float64(img.Bounds().Dx()) / float64(width)
+ dy := float64(img.Bounds().Dy()) / float64(height)
+
+ if dx > 1 && dy > 1 {
+ src := newScanner(img)
+ parallel(0, height, func(ys <-chan int) {
+ for y := range ys {
+ srcY := int((float64(y) + 0.5) * dy)
+ dstOff := y * dst.Stride
+ for x := 0; x < width; x++ {
+ srcX := int((float64(x) + 0.5) * dx)
+ src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4])
+ dstOff += 4
+ }
+ }
+ })
+ } else {
+ src := toNRGBA(img)
+ parallel(0, height, func(ys <-chan int) {
+ for y := range ys {
+ srcY := int((float64(y) + 0.5) * dy)
+ srcOff0 := srcY * src.Stride
+ dstOff := y * dst.Stride
+ for x := 0; x < width; x++ {
+ srcX := int((float64(x) + 0.5) * dx)
+ srcOff := srcOff0 + srcX*4
+ copy(dst.Pix[dstOff:dstOff+4], src.Pix[srcOff:srcOff+4])
+ dstOff += 4
+ }
+ }
+ })
+ }
+
+ return dst
+}
+
+// Fit scales down the image using the specified resample filter to fit the specified
+// maximum width and height and returns the transformed image.
+//
+// Example:
+//
+// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
+//
+func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
+ maxW, maxH := width, height
+
+ if maxW <= 0 || maxH <= 0 {
+ return &image.NRGBA{}
+ }
+
+ srcBounds := img.Bounds()
+ srcW := srcBounds.Dx()
+ srcH := srcBounds.Dy()
+
+ if srcW <= 0 || srcH <= 0 {
+ return &image.NRGBA{}
+ }
+
+ if srcW <= maxW && srcH <= maxH {
+ return Clone(img)
+ }
+
+ srcAspectRatio := float64(srcW) / float64(srcH)
+ maxAspectRatio := float64(maxW) / float64(maxH)
+
+ var newW, newH int
+ if srcAspectRatio > maxAspectRatio {
+ newW = maxW
+ newH = int(float64(newW) / srcAspectRatio)
+ } else {
+ newH = maxH
+ newW = int(float64(newH) * srcAspectRatio)
+ }
+
+ return Resize(img, newW, newH, filter)
+}
+
+// Fill creates an image with the specified dimensions and fills it with the scaled source image.
+// To achieve the correct aspect ratio without stretching, the source image will be cropped.
+//
+// Example:
+//
+// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
+//
+func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
+ dstW, dstH := width, height
+
+ if dstW <= 0 || dstH <= 0 {
+ return &image.NRGBA{}
+ }
+
+ srcBounds := img.Bounds()
+ srcW := srcBounds.Dx()
+ srcH := srcBounds.Dy()
+
+ if srcW <= 0 || srcH <= 0 {
+ return &image.NRGBA{}
+ }
+
+ if srcW == dstW && srcH == dstH {
+ return Clone(img)
+ }
+
+ if srcW >= 100 && srcH >= 100 {
+ return cropAndResize(img, dstW, dstH, anchor, filter)
+ }
+ return resizeAndCrop(img, dstW, dstH, anchor, filter)
+}
+
+// cropAndResize crops the image to the smallest possible size that has the required aspect ratio using
+// the given anchor point, then scales it to the specified dimensions and returns the transformed image.
+//
+// This is generally faster than resizing first, but may result in inaccuracies when used on small source images.
+func cropAndResize(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
+ dstW, dstH := width, height
+
+ srcBounds := img.Bounds()
+ srcW := srcBounds.Dx()
+ srcH := srcBounds.Dy()
+ srcAspectRatio := float64(srcW) / float64(srcH)
+ dstAspectRatio := float64(dstW) / float64(dstH)
+
+ var tmp *image.NRGBA
+ if srcAspectRatio < dstAspectRatio {
+ cropH := float64(srcW) * float64(dstH) / float64(dstW)
+ tmp = CropAnchor(img, srcW, int(math.Max(1, cropH)+0.5), anchor)
+ } else {
+ cropW := float64(srcH) * float64(dstW) / float64(dstH)
+ tmp = CropAnchor(img, int(math.Max(1, cropW)+0.5), srcH, anchor)
+ }
+
+ return Resize(tmp, dstW, dstH, filter)
+}
+
+// resizeAndCrop resizes the image to the smallest possible size that will cover the specified dimensions,
+// crops the resized image to the specified dimensions using the given anchor point and returns
+// the transformed image.
+func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
+ dstW, dstH := width, height
+
+ srcBounds := img.Bounds()
+ srcW := srcBounds.Dx()
+ srcH := srcBounds.Dy()
+ srcAspectRatio := float64(srcW) / float64(srcH)
+ dstAspectRatio := float64(dstW) / float64(dstH)
+
+ var tmp *image.NRGBA
+ if srcAspectRatio < dstAspectRatio {
+ tmp = Resize(img, dstW, 0, filter)
+ } else {
+ tmp = Resize(img, 0, dstH, filter)
+ }
+
+ return CropAnchor(tmp, dstW, dstH, anchor)
+}
+
+// Thumbnail scales the image up or down using the specified resample filter, crops it
+// to the specified width and height, and returns the transformed image.
+//
+// Example:
+//
+// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
+//
+func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
+ return Fill(img, width, height, Center, filter)
+}
+
+// ResampleFilter specifies a resampling filter to be used for image resizing.
+//
+// General filter recommendations:
+//
+// - Lanczos
+// A high-quality resampling filter for photographic images yielding sharp results.
+//
+// - CatmullRom
+// A sharp cubic filter that is faster than the Lanczos filter while providing similar results.
+//
+// - MitchellNetravali
+// A cubic filter that produces smoother results with fewer ringing artifacts than CatmullRom.
+//
+// - Linear
+// Bilinear resampling filter, produces a smooth output. Faster than cubic filters.
+//
+// - Box
+// Simple and fast averaging filter appropriate for downscaling.
+// When upscaling it's similar to NearestNeighbor.
+//
+// - NearestNeighbor
+// Fastest resampling filter, no antialiasing.
+//
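+// Example (an illustrative sketch; srcImage stands for any image.Image):
+//
+//	// Downscale to 800px width, preserving the aspect ratio, using Lanczos.
+//	dstImage := imaging.Resize(srcImage, 800, 0, imaging.Lanczos)
+//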
+type ResampleFilter struct {
+ Support float64
+ Kernel func(float64) float64
+}
+
+// NearestNeighbor is a nearest-neighbor filter (no anti-aliasing).
+var NearestNeighbor ResampleFilter
+
+// Box filter (averaging pixels).
+var Box ResampleFilter
+
+// Linear filter.
+var Linear ResampleFilter
+
+// Hermite cubic spline filter (BC-spline; B=0; C=0).
+var Hermite ResampleFilter
+
+// MitchellNetravali is the Mitchell-Netravali cubic filter (BC-spline; B=1/3; C=1/3).
+var MitchellNetravali ResampleFilter
+
+// CatmullRom is the Catmull-Rom sharp cubic filter (BC-spline; B=0; C=0.5).
+var CatmullRom ResampleFilter
+
+// BSpline is a smooth cubic filter (BC-spline; B=1; C=0).
+var BSpline ResampleFilter
+
+// Gaussian is a Gaussian blurring filter.
+var Gaussian ResampleFilter
+
+// Bartlett is a Bartlett-windowed sinc filter (3 lobes).
+var Bartlett ResampleFilter
+
+// Lanczos filter (3 lobes).
+var Lanczos ResampleFilter
+
+// Hann is a Hann-windowed sinc filter (3 lobes).
+var Hann ResampleFilter
+
+// Hamming is a Hamming-windowed sinc filter (3 lobes).
+var Hamming ResampleFilter
+
+// Blackman is a Blackman-windowed sinc filter (3 lobes).
+var Blackman ResampleFilter
+
+// Welch is a Welch-windowed sinc filter (parabolic window, 3 lobes).
+var Welch ResampleFilter
+
+// Cosine is a Cosine-windowed sinc filter (3 lobes).
+var Cosine ResampleFilter
+
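+// bcspline evaluates a cubic BC-spline (the Mitchell-Netravali family of
+// filters) at x for the given B and C parameters. The kernel is non-zero
+// only for |x| < 2.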
+func bcspline(x, b, c float64) float64 {
+ var y float64
+ x = math.Abs(x)
+ if x < 1.0 {
+ y = ((12-9*b-6*c)*x*x*x + (-18+12*b+6*c)*x*x + (6 - 2*b)) / 6
+ } else if x < 2.0 {
+ y = ((-b-6*c)*x*x*x + (6*b+30*c)*x*x + (-12*b-48*c)*x + (8*b + 24*c)) / 6
+ }
+ return y
+}
+
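+// sinc is the normalized sinc function sin(pi*x)/(pi*x), defined as 1 at x = 0.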
+func sinc(x float64) float64 {
+ if x == 0 {
+ return 1
+ }
+ return math.Sin(math.Pi*x) / (math.Pi * x)
+}
+
+func init() {
+ NearestNeighbor = ResampleFilter{
+ Support: 0.0, // special case - not applying the filter
+ }
+
+ Box = ResampleFilter{
+ Support: 0.5,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x <= 0.5 {
+ return 1.0
+ }
+ return 0
+ },
+ }
+
+ Linear = ResampleFilter{
+ Support: 1.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 1.0 {
+ return 1.0 - x
+ }
+ return 0
+ },
+ }
+
+ Hermite = ResampleFilter{
+ Support: 1.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 1.0 {
+ return bcspline(x, 0.0, 0.0)
+ }
+ return 0
+ },
+ }
+
+ MitchellNetravali = ResampleFilter{
+ Support: 2.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 2.0 {
+ return bcspline(x, 1.0/3.0, 1.0/3.0)
+ }
+ return 0
+ },
+ }
+
+ CatmullRom = ResampleFilter{
+ Support: 2.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 2.0 {
+ return bcspline(x, 0.0, 0.5)
+ }
+ return 0
+ },
+ }
+
+ BSpline = ResampleFilter{
+ Support: 2.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 2.0 {
+ return bcspline(x, 1.0, 0.0)
+ }
+ return 0
+ },
+ }
+
+ Gaussian = ResampleFilter{
+ Support: 2.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 2.0 {
+ return math.Exp(-2 * x * x)
+ }
+ return 0
+ },
+ }
+
+ Bartlett = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * (3.0 - x) / 3.0
+ }
+ return 0
+ },
+ }
+
+ Lanczos = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * sinc(x/3.0)
+ }
+ return 0
+ },
+ }
+
+ Hann = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * (0.5 + 0.5*math.Cos(math.Pi*x/3.0))
+ }
+ return 0
+ },
+ }
+
+ Hamming = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * (0.54 + 0.46*math.Cos(math.Pi*x/3.0))
+ }
+ return 0
+ },
+ }
+
+ Blackman = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * (0.42 - 0.5*math.Cos(math.Pi*x/3.0+math.Pi) + 0.08*math.Cos(2.0*math.Pi*x/3.0))
+ }
+ return 0
+ },
+ }
+
+ Welch = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * (1.0 - (x * x / 9.0))
+ }
+ return 0
+ },
+ }
+
+ Cosine = ResampleFilter{
+ Support: 3.0,
+ Kernel: func(x float64) float64 {
+ x = math.Abs(x)
+ if x < 3.0 {
+ return sinc(x) * math.Cos((math.Pi/2.0)*(x/3.0))
+ }
+ return 0
+ },
+ }
+}
diff --git a/vendor/github.com/disintegration/imaging/scanner.go b/vendor/github.com/disintegration/imaging/scanner.go
new file mode 100644
index 000000000..37d92cef8
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/scanner.go
@@ -0,0 +1,285 @@
+package imaging
+
+import (
+ "image"
+ "image/color"
+)
+
+type scanner struct {
+ image image.Image
+ w, h int
+ palette []color.NRGBA
+}
+
+func newScanner(img image.Image) *scanner {
+ s := &scanner{
+ image: img,
+ w: img.Bounds().Dx(),
+ h: img.Bounds().Dy(),
+ }
+ if img, ok := img.(*image.Paletted); ok {
+ s.palette = make([]color.NRGBA, len(img.Palette))
+ for i := 0; i < len(img.Palette); i++ {
+ s.palette[i] = color.NRGBAModel.Convert(img.Palette[i]).(color.NRGBA)
+ }
+ }
+ return s
+}
+
+// scan scans the given rectangular region of the image into dst.
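+// The coordinates are relative to the image bounds, and dst receives four
+// bytes per pixel (non-alpha-premultiplied RGBA), row by row.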
+func (s *scanner) scan(x1, y1, x2, y2 int, dst []uint8) {
+ switch img := s.image.(type) {
+ case *image.NRGBA:
+ size := (x2 - x1) * 4
+ j := 0
+ i := y1*img.Stride + x1*4
+ if size == 4 {
+ for y := y1; y < y2; y++ {
+ d := dst[j : j+4 : j+4]
+ s := img.Pix[i : i+4 : i+4]
+ d[0] = s[0]
+ d[1] = s[1]
+ d[2] = s[2]
+ d[3] = s[3]
+ j += size
+ i += img.Stride
+ }
+ } else {
+ for y := y1; y < y2; y++ {
+ copy(dst[j:j+size], img.Pix[i:i+size])
+ j += size
+ i += img.Stride
+ }
+ }
+
+ case *image.NRGBA64:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1*8
+ for x := x1; x < x2; x++ {
+ s := img.Pix[i : i+8 : i+8]
+ d := dst[j : j+4 : j+4]
+ d[0] = s[0]
+ d[1] = s[2]
+ d[2] = s[4]
+ d[3] = s[6]
+ j += 4
+ i += 8
+ }
+ }
+
+ case *image.RGBA:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1*4
+ for x := x1; x < x2; x++ {
+ d := dst[j : j+4 : j+4]
+ a := img.Pix[i+3]
+ switch a {
+ case 0:
+ d[0] = 0
+ d[1] = 0
+ d[2] = 0
+ d[3] = a
+ case 0xff:
+ s := img.Pix[i : i+4 : i+4]
+ d[0] = s[0]
+ d[1] = s[1]
+ d[2] = s[2]
+ d[3] = a
+ default:
+ s := img.Pix[i : i+4 : i+4]
+ r16 := uint16(s[0])
+ g16 := uint16(s[1])
+ b16 := uint16(s[2])
+ a16 := uint16(a)
+ d[0] = uint8(r16 * 0xff / a16)
+ d[1] = uint8(g16 * 0xff / a16)
+ d[2] = uint8(b16 * 0xff / a16)
+ d[3] = a
+ }
+ j += 4
+ i += 4
+ }
+ }
+
+ case *image.RGBA64:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1*8
+ for x := x1; x < x2; x++ {
+ s := img.Pix[i : i+8 : i+8]
+ d := dst[j : j+4 : j+4]
+ a := s[6]
+ switch a {
+ case 0:
+ d[0] = 0
+ d[1] = 0
+ d[2] = 0
+ case 0xff:
+ d[0] = s[0]
+ d[1] = s[2]
+ d[2] = s[4]
+ default:
+ r32 := uint32(s[0])<<8 | uint32(s[1])
+ g32 := uint32(s[2])<<8 | uint32(s[3])
+ b32 := uint32(s[4])<<8 | uint32(s[5])
+ a32 := uint32(s[6])<<8 | uint32(s[7])
+ d[0] = uint8((r32 * 0xffff / a32) >> 8)
+ d[1] = uint8((g32 * 0xffff / a32) >> 8)
+ d[2] = uint8((b32 * 0xffff / a32) >> 8)
+ }
+ d[3] = a
+ j += 4
+ i += 8
+ }
+ }
+
+ case *image.Gray:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1
+ for x := x1; x < x2; x++ {
+ c := img.Pix[i]
+ d := dst[j : j+4 : j+4]
+ d[0] = c
+ d[1] = c
+ d[2] = c
+ d[3] = 0xff
+ j += 4
+ i++
+ }
+ }
+
+ case *image.Gray16:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1*2
+ for x := x1; x < x2; x++ {
+ c := img.Pix[i]
+ d := dst[j : j+4 : j+4]
+ d[0] = c
+ d[1] = c
+ d[2] = c
+ d[3] = 0xff
+ j += 4
+ i += 2
+ }
+ }
+
+ case *image.YCbCr:
+ j := 0
+ x1 += img.Rect.Min.X
+ x2 += img.Rect.Min.X
+ y1 += img.Rect.Min.Y
+ y2 += img.Rect.Min.Y
+
+ hy := img.Rect.Min.Y / 2
+ hx := img.Rect.Min.X / 2
+ for y := y1; y < y2; y++ {
+ iy := (y-img.Rect.Min.Y)*img.YStride + (x1 - img.Rect.Min.X)
+
+ var yBase int
+ switch img.SubsampleRatio {
+ case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio422:
+ yBase = (y - img.Rect.Min.Y) * img.CStride
+ case image.YCbCrSubsampleRatio420, image.YCbCrSubsampleRatio440:
+ yBase = (y/2 - hy) * img.CStride
+ }
+
+ for x := x1; x < x2; x++ {
+ var ic int
+ switch img.SubsampleRatio {
+ case image.YCbCrSubsampleRatio444, image.YCbCrSubsampleRatio440:
+ ic = yBase + (x - img.Rect.Min.X)
+ case image.YCbCrSubsampleRatio422, image.YCbCrSubsampleRatio420:
+ ic = yBase + (x/2 - hx)
+ default:
+ ic = img.COffset(x, y)
+ }
+
+ yy1 := int32(img.Y[iy]) * 0x10101
+ cb1 := int32(img.Cb[ic]) - 128
+ cr1 := int32(img.Cr[ic]) - 128
+
+ r := yy1 + 91881*cr1
+ if uint32(r)&0xff000000 == 0 {
+ r >>= 16
+ } else {
+ r = ^(r >> 31)
+ }
+
+ g := yy1 - 22554*cb1 - 46802*cr1
+ if uint32(g)&0xff000000 == 0 {
+ g >>= 16
+ } else {
+ g = ^(g >> 31)
+ }
+
+ b := yy1 + 116130*cb1
+ if uint32(b)&0xff000000 == 0 {
+ b >>= 16
+ } else {
+ b = ^(b >> 31)
+ }
+
+ d := dst[j : j+4 : j+4]
+ d[0] = uint8(r)
+ d[1] = uint8(g)
+ d[2] = uint8(b)
+ d[3] = 0xff
+
+ iy++
+ j += 4
+ }
+ }
+
+ case *image.Paletted:
+ j := 0
+ for y := y1; y < y2; y++ {
+ i := y*img.Stride + x1
+ for x := x1; x < x2; x++ {
+ c := s.palette[img.Pix[i]]
+ d := dst[j : j+4 : j+4]
+ d[0] = c.R
+ d[1] = c.G
+ d[2] = c.B
+ d[3] = c.A
+ j += 4
+ i++
+ }
+ }
+
+ default:
+ j := 0
+ b := s.image.Bounds()
+ x1 += b.Min.X
+ x2 += b.Min.X
+ y1 += b.Min.Y
+ y2 += b.Min.Y
+ for y := y1; y < y2; y++ {
+ for x := x1; x < x2; x++ {
+ r16, g16, b16, a16 := s.image.At(x, y).RGBA()
+ d := dst[j : j+4 : j+4]
+ switch a16 {
+ case 0xffff:
+ d[0] = uint8(r16 >> 8)
+ d[1] = uint8(g16 >> 8)
+ d[2] = uint8(b16 >> 8)
+ d[3] = 0xff
+ case 0:
+ d[0] = 0
+ d[1] = 0
+ d[2] = 0
+ d[3] = 0
+ default:
+ d[0] = uint8(((r16 * 0xffff) / a16) >> 8)
+ d[1] = uint8(((g16 * 0xffff) / a16) >> 8)
+ d[2] = uint8(((b16 * 0xffff) / a16) >> 8)
+ d[3] = uint8(a16 >> 8)
+ }
+ j += 4
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/disintegration/imaging/tools.go b/vendor/github.com/disintegration/imaging/tools.go
new file mode 100644
index 000000000..0ec19a039
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/tools.go
@@ -0,0 +1,249 @@
+package imaging
+
+import (
+ "bytes"
+ "image"
+ "image/color"
+ "math"
+)
+
+// New creates a new image with the specified width and height, and fills it with the specified color.
+func New(width, height int, fillColor color.Color) *image.NRGBA {
+ if width <= 0 || height <= 0 {
+ return &image.NRGBA{}
+ }
+
+ c := color.NRGBAModel.Convert(fillColor).(color.NRGBA)
+ if (c == color.NRGBA{0, 0, 0, 0}) {
+ return image.NewNRGBA(image.Rect(0, 0, width, height))
+ }
+
+ return &image.NRGBA{
+ Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height),
+ Stride: 4 * width,
+ Rect: image.Rect(0, 0, width, height),
+ }
+}
+
+// Clone returns a copy of the given image.
+func Clone(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))
+ size := src.w * 4
+ parallel(0, src.h, func(ys <-chan int) {
+ for y := range ys {
+ i := y * dst.Stride
+ src.scan(0, y, src.w, y+1, dst.Pix[i:i+size])
+ }
+ })
+ return dst
+}
+
+// Anchor is the anchor point for image alignment.
+type Anchor int
+
+// Anchor point positions.
+const (
+ Center Anchor = iota
+ TopLeft
+ Top
+ TopRight
+ Left
+ Right
+ BottomLeft
+ Bottom
+ BottomRight
+)
+
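+// anchorPt returns the top-left point of a w x h rectangle positioned
+// inside the rectangle b according to the given anchor.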
+func anchorPt(b image.Rectangle, w, h int, anchor Anchor) image.Point {
+ var x, y int
+ switch anchor {
+ case TopLeft:
+ x = b.Min.X
+ y = b.Min.Y
+ case Top:
+ x = b.Min.X + (b.Dx()-w)/2
+ y = b.Min.Y
+ case TopRight:
+ x = b.Max.X - w
+ y = b.Min.Y
+ case Left:
+ x = b.Min.X
+ y = b.Min.Y + (b.Dy()-h)/2
+ case Right:
+ x = b.Max.X - w
+ y = b.Min.Y + (b.Dy()-h)/2
+ case BottomLeft:
+ x = b.Min.X
+ y = b.Max.Y - h
+ case Bottom:
+ x = b.Min.X + (b.Dx()-w)/2
+ y = b.Max.Y - h
+ case BottomRight:
+ x = b.Max.X - w
+ y = b.Max.Y - h
+ default:
+ x = b.Min.X + (b.Dx()-w)/2
+ y = b.Min.Y + (b.Dy()-h)/2
+ }
+ return image.Pt(x, y)
+}
+
+// Crop cuts out a rectangular region with the specified bounds
+// from the image and returns the cropped image.
+func Crop(img image.Image, rect image.Rectangle) *image.NRGBA {
+ r := rect.Intersect(img.Bounds()).Sub(img.Bounds().Min)
+ if r.Empty() {
+ return &image.NRGBA{}
+ }
+ src := newScanner(img)
+ dst := image.NewNRGBA(image.Rect(0, 0, r.Dx(), r.Dy()))
+ rowSize := r.Dx() * 4
+ parallel(r.Min.Y, r.Max.Y, func(ys <-chan int) {
+ for y := range ys {
+ i := (y - r.Min.Y) * dst.Stride
+ src.scan(r.Min.X, y, r.Max.X, y+1, dst.Pix[i:i+rowSize])
+ }
+ })
+ return dst
+}
+
+// CropAnchor cuts out a rectangular region with the specified size
+// from the image using the specified anchor point and returns the cropped image.
+func CropAnchor(img image.Image, width, height int, anchor Anchor) *image.NRGBA {
+ srcBounds := img.Bounds()
+ pt := anchorPt(srcBounds, width, height, anchor)
+ r := image.Rect(0, 0, width, height).Add(pt)
+ b := srcBounds.Intersect(r)
+ return Crop(img, b)
+}
+
+// CropCenter cuts out a rectangular region with the specified size
+// from the center of the image and returns the cropped image.
+func CropCenter(img image.Image, width, height int) *image.NRGBA {
+ return CropAnchor(img, width, height, Center)
+}
+
+// Paste pastes the img image to the background image at the specified position and returns the combined image.
+func Paste(background, img image.Image, pos image.Point) *image.NRGBA {
+ dst := Clone(background)
+ pos = pos.Sub(background.Bounds().Min)
+ pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
+ interRect := pasteRect.Intersect(dst.Bounds())
+ if interRect.Empty() {
+ return dst
+ }
+ src := newScanner(img)
+ parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
+ for y := range ys {
+ x1 := interRect.Min.X - pasteRect.Min.X
+ x2 := interRect.Max.X - pasteRect.Min.X
+ y1 := y - pasteRect.Min.Y
+ y2 := y1 + 1
+ i1 := y*dst.Stride + interRect.Min.X*4
+ i2 := i1 + interRect.Dx()*4
+ src.scan(x1, y1, x2, y2, dst.Pix[i1:i2])
+ }
+ })
+ return dst
+}
+
+// PasteCenter pastes the img image to the center of the background image and returns the combined image.
+func PasteCenter(background, img image.Image) *image.NRGBA {
+ bgBounds := background.Bounds()
+ bgW := bgBounds.Dx()
+ bgH := bgBounds.Dy()
+ bgMinX := bgBounds.Min.X
+ bgMinY := bgBounds.Min.Y
+
+ centerX := bgMinX + bgW/2
+ centerY := bgMinY + bgH/2
+
+ x0 := centerX - img.Bounds().Dx()/2
+ y0 := centerY - img.Bounds().Dy()/2
+
+ return Paste(background, img, image.Pt(x0, y0))
+}
+
+// Overlay draws the img image over the background image at the given position
+// and returns the combined image. The opacity parameter is the opacity of the
+// img image layer used to compose the images; it must be between 0.0 and 1.0.
+//
+// Examples:
+//
+// // Draw spriteImage over backgroundImage at the given position (x=50, y=50).
+// dstImage := imaging.Overlay(backgroundImage, spriteImage, image.Pt(50, 50), 1.0)
+//
+// // Blend two opaque images of the same size.
+// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
+//
+func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
+ opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
+ dst := Clone(background)
+ pos = pos.Sub(background.Bounds().Min)
+ pasteRect := image.Rectangle{Min: pos, Max: pos.Add(img.Bounds().Size())}
+ interRect := pasteRect.Intersect(dst.Bounds())
+ if interRect.Empty() {
+ return dst
+ }
+ src := newScanner(img)
+ parallel(interRect.Min.Y, interRect.Max.Y, func(ys <-chan int) {
+ scanLine := make([]uint8, interRect.Dx()*4)
+ for y := range ys {
+ x1 := interRect.Min.X - pasteRect.Min.X
+ x2 := interRect.Max.X - pasteRect.Min.X
+ y1 := y - pasteRect.Min.Y
+ y2 := y1 + 1
+ src.scan(x1, y1, x2, y2, scanLine)
+ i := y*dst.Stride + interRect.Min.X*4
+ j := 0
+ for x := interRect.Min.X; x < interRect.Max.X; x++ {
+ d := dst.Pix[i : i+4 : i+4]
+ r1 := float64(d[0])
+ g1 := float64(d[1])
+ b1 := float64(d[2])
+ a1 := float64(d[3])
+
+ s := scanLine[j : j+4 : j+4]
+ r2 := float64(s[0])
+ g2 := float64(s[1])
+ b2 := float64(s[2])
+ a2 := float64(s[3])
+
+ coef2 := opacity * a2 / 255
+ coef1 := (1 - coef2) * a1 / 255
+ coefSum := coef1 + coef2
+ coef1 /= coefSum
+ coef2 /= coefSum
+
+ d[0] = uint8(r1*coef1 + r2*coef2)
+ d[1] = uint8(g1*coef1 + g2*coef2)
+ d[2] = uint8(b1*coef1 + b2*coef2)
+ d[3] = uint8(math.Min(a1+a2*opacity*(255-a1)/255, 255))
+
+ i += 4
+ j += 4
+ }
+ }
+ })
+ return dst
+}
+
+// OverlayCenter overlays the img image onto the center of the background image
+// and returns the combined image. The opacity parameter is the opacity of the
+// img image layer used to compose the images; it must be between 0.0 and 1.0.
+func OverlayCenter(background, img image.Image, opacity float64) *image.NRGBA {
+ bgBounds := background.Bounds()
+ bgW := bgBounds.Dx()
+ bgH := bgBounds.Dy()
+ bgMinX := bgBounds.Min.X
+ bgMinY := bgBounds.Min.Y
+
+ centerX := bgMinX + bgW/2
+ centerY := bgMinY + bgH/2
+
+ x0 := centerX - img.Bounds().Dx()/2
+ y0 := centerY - img.Bounds().Dy()/2
+
+ return Overlay(background, img, image.Point{x0, y0}, opacity)
+}
diff --git a/vendor/github.com/disintegration/imaging/transform.go b/vendor/github.com/disintegration/imaging/transform.go
new file mode 100644
index 000000000..fe4a92f9d
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/transform.go
@@ -0,0 +1,268 @@
+package imaging
+
+import (
+ "image"
+ "image/color"
+ "math"
+)
+
+// FlipH flips the image horizontally (from left to right) and returns the transformed image.
+func FlipH(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.w
+ dstH := src.h
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcY := dstY
+ src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
+ reverse(dst.Pix[i : i+rowSize])
+ }
+ })
+ return dst
+}
+
+// FlipV flips the image vertically (from top to bottom) and returns the transformed image.
+func FlipV(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.w
+ dstH := src.h
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcY := dstH - dstY - 1
+ src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Transpose flips the image horizontally and rotates 90 degrees counter-clockwise.
+func Transpose(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.h
+ dstH := src.w
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcX := dstY
+ src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Transverse flips the image vertically and rotates 90 degrees counter-clockwise.
+func Transverse(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.h
+ dstH := src.w
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcX := dstH - dstY - 1
+ src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
+ reverse(dst.Pix[i : i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Rotate90 rotates the image 90 degrees counter-clockwise and returns the transformed image.
+func Rotate90(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.h
+ dstH := src.w
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcX := dstH - dstY - 1
+ src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Rotate180 rotates the image 180 degrees counter-clockwise and returns the transformed image.
+func Rotate180(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.w
+ dstH := src.h
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcY := dstH - dstY - 1
+ src.scan(0, srcY, src.w, srcY+1, dst.Pix[i:i+rowSize])
+ reverse(dst.Pix[i : i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Rotate270 rotates the image 270 degrees counter-clockwise and returns the transformed image.
+func Rotate270(img image.Image) *image.NRGBA {
+ src := newScanner(img)
+ dstW := src.h
+ dstH := src.w
+ rowSize := dstW * 4
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ i := dstY * dst.Stride
+ srcX := dstY
+ src.scan(srcX, 0, srcX+1, src.h, dst.Pix[i:i+rowSize])
+ reverse(dst.Pix[i : i+rowSize])
+ }
+ })
+ return dst
+}
+
+// Rotate rotates an image by the given angle counter-clockwise.
+// The angle parameter is the rotation angle in degrees.
+// The bgColor parameter specifies the color of the uncovered zone after the rotation.
+func Rotate(img image.Image, angle float64, bgColor color.Color) *image.NRGBA {
+ angle = angle - math.Floor(angle/360)*360
+
+ switch angle {
+ case 0:
+ return Clone(img)
+ case 90:
+ return Rotate90(img)
+ case 180:
+ return Rotate180(img)
+ case 270:
+ return Rotate270(img)
+ }
+
+ src := toNRGBA(img)
+ srcW := src.Bounds().Max.X
+ srcH := src.Bounds().Max.Y
+ dstW, dstH := rotatedSize(srcW, srcH, angle)
+ dst := image.NewNRGBA(image.Rect(0, 0, dstW, dstH))
+
+ if dstW <= 0 || dstH <= 0 {
+ return dst
+ }
+
+ srcXOff := float64(srcW)/2 - 0.5
+ srcYOff := float64(srcH)/2 - 0.5
+ dstXOff := float64(dstW)/2 - 0.5
+ dstYOff := float64(dstH)/2 - 0.5
+
+ bgColorNRGBA := color.NRGBAModel.Convert(bgColor).(color.NRGBA)
+ sin, cos := math.Sincos(math.Pi * angle / 180)
+
+ parallel(0, dstH, func(ys <-chan int) {
+ for dstY := range ys {
+ for dstX := 0; dstX < dstW; dstX++ {
+ xf, yf := rotatePoint(float64(dstX)-dstXOff, float64(dstY)-dstYOff, sin, cos)
+ xf, yf = xf+srcXOff, yf+srcYOff
+ interpolatePoint(dst, dstX, dstY, src, xf, yf, bgColorNRGBA)
+ }
+ }
+ })
+
+ return dst
+}
+
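+// rotatePoint rotates the point (x, y) around the origin using the given
+// precomputed sine and cosine of the rotation angle.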
+func rotatePoint(x, y, sin, cos float64) (float64, float64) {
+ return x*cos - y*sin, x*sin + y*cos
+}
+
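+// rotatedSize returns the width and height of the bounding box of a w x h
+// image rotated by the given angle (in degrees).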
+func rotatedSize(w, h int, angle float64) (int, int) {
+ if w <= 0 || h <= 0 {
+ return 0, 0
+ }
+
+ sin, cos := math.Sincos(math.Pi * angle / 180)
+ x1, y1 := rotatePoint(float64(w-1), 0, sin, cos)
+ x2, y2 := rotatePoint(float64(w-1), float64(h-1), sin, cos)
+ x3, y3 := rotatePoint(0, float64(h-1), sin, cos)
+
+ minx := math.Min(x1, math.Min(x2, math.Min(x3, 0)))
+ maxx := math.Max(x1, math.Max(x2, math.Max(x3, 0)))
+ miny := math.Min(y1, math.Min(y2, math.Min(y3, 0)))
+ maxy := math.Max(y1, math.Max(y2, math.Max(y3, 0)))
+
+ neww := maxx - minx + 1
+ if neww-math.Floor(neww) > 0.1 {
+ neww++
+ }
+ newh := maxy - miny + 1
+ if newh-math.Floor(newh) > 0.1 {
+ newh++
+ }
+
+ return int(neww), int(newh)
+}
+
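+// interpolatePoint bilinearly interpolates the source image at the
+// floating-point position (xf, yf), substituting bgColor for samples that
+// fall outside the source bounds, and writes the result to the destination
+// pixel (dstX, dstY). Color channels are weighted by alpha to avoid color
+// bleeding from transparent pixels.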
+func interpolatePoint(dst *image.NRGBA, dstX, dstY int, src *image.NRGBA, xf, yf float64, bgColor color.NRGBA) {
+ j := dstY*dst.Stride + dstX*4
+ d := dst.Pix[j : j+4 : j+4]
+
+ x0 := int(math.Floor(xf))
+ y0 := int(math.Floor(yf))
+ bounds := src.Bounds()
+ if !image.Pt(x0, y0).In(image.Rect(bounds.Min.X-1, bounds.Min.Y-1, bounds.Max.X, bounds.Max.Y)) {
+ d[0] = bgColor.R
+ d[1] = bgColor.G
+ d[2] = bgColor.B
+ d[3] = bgColor.A
+ return
+ }
+
+ xq := xf - float64(x0)
+ yq := yf - float64(y0)
+ points := [4]image.Point{
+ {x0, y0},
+ {x0 + 1, y0},
+ {x0, y0 + 1},
+ {x0 + 1, y0 + 1},
+ }
+ weights := [4]float64{
+ (1 - xq) * (1 - yq),
+ xq * (1 - yq),
+ (1 - xq) * yq,
+ xq * yq,
+ }
+
+ var r, g, b, a float64
+ for i := 0; i < 4; i++ {
+ p := points[i]
+ w := weights[i]
+ if p.In(bounds) {
+ i := p.Y*src.Stride + p.X*4
+ s := src.Pix[i : i+4 : i+4]
+ wa := float64(s[3]) * w
+ r += float64(s[0]) * wa
+ g += float64(s[1]) * wa
+ b += float64(s[2]) * wa
+ a += wa
+ } else {
+ wa := float64(bgColor.A) * w
+ r += float64(bgColor.R) * wa
+ g += float64(bgColor.G) * wa
+ b += float64(bgColor.B) * wa
+ a += wa
+ }
+ }
+ if a != 0 {
+ aInv := 1 / a
+ d[0] = clamp(r * aInv)
+ d[1] = clamp(g * aInv)
+ d[2] = clamp(b * aInv)
+ d[3] = clamp(a)
+ }
+}
diff --git a/vendor/github.com/disintegration/imaging/utils.go b/vendor/github.com/disintegration/imaging/utils.go
new file mode 100644
index 000000000..6c7af1a51
--- /dev/null
+++ b/vendor/github.com/disintegration/imaging/utils.go
@@ -0,0 +1,167 @@
+package imaging
+
+import (
+ "image"
+ "math"
+ "runtime"
+ "sync"
+)
+
+// parallel processes the data in separate goroutines.
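+// It distributes the indices [start, stop) over a channel consumed by up to
+// GOMAXPROCS worker goroutines and blocks until all of them finish.
+//
+// A usage sketch (the closure typically processes one row per index):
+//
+//	parallel(0, dst.Bounds().Dy(), func(ys <-chan int) {
+//		for y := range ys {
+//			// process row y of dst
+//		}
+//	})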
+func parallel(start, stop int, fn func(<-chan int)) {
+ count := stop - start
+ if count < 1 {
+ return
+ }
+
+ procs := runtime.GOMAXPROCS(0)
+ if procs > count {
+ procs = count
+ }
+
+ c := make(chan int, count)
+ for i := start; i < stop; i++ {
+ c <- i
+ }
+ close(c)
+
+ var wg sync.WaitGroup
+ for i := 0; i < procs; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ fn(c)
+ }()
+ }
+ wg.Wait()
+}
+
+// absint returns the absolute value of i.
+func absint(i int) int {
+ if i < 0 {
+ return -i
+ }
+ return i
+}
+
+// clamp rounds and clamps a float64 value to fit into a uint8.
+func clamp(x float64) uint8 {
+ v := int64(x + 0.5)
+ if v > 255 {
+ return 255
+ }
+ if v > 0 {
+ return uint8(v)
+ }
+ return 0
+}
+
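+// reverse reverses the order of the 4-byte pixels in pix in place.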
+func reverse(pix []uint8) {
+ if len(pix) <= 4 {
+ return
+ }
+ i := 0
+ j := len(pix) - 4
+ for i < j {
+ pi := pix[i : i+4 : i+4]
+ pj := pix[j : j+4 : j+4]
+ pi[0], pj[0] = pj[0], pi[0]
+ pi[1], pj[1] = pj[1], pi[1]
+ pi[2], pj[2] = pj[2], pi[2]
+ pi[3], pj[3] = pj[3], pi[3]
+ i += 4
+ j -= 4
+ }
+}
+
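+// toNRGBA returns the image as an *image.NRGBA with its bounds translated to
+// start at (0, 0). It reuses the pixel data when the input is already an
+// *image.NRGBA and clones the image otherwise.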
+func toNRGBA(img image.Image) *image.NRGBA {
+ if img, ok := img.(*image.NRGBA); ok {
+ return &image.NRGBA{
+ Pix: img.Pix,
+ Stride: img.Stride,
+ Rect: img.Rect.Sub(img.Rect.Min),
+ }
+ }
+ return Clone(img)
+}
+
+// rgbToHSL converts a color from RGB to HSL.
+func rgbToHSL(r, g, b uint8) (float64, float64, float64) {
+ rr := float64(r) / 255
+ gg := float64(g) / 255
+ bb := float64(b) / 255
+
+ max := math.Max(rr, math.Max(gg, bb))
+ min := math.Min(rr, math.Min(gg, bb))
+
+ l := (max + min) / 2
+
+ if max == min {
+ return 0, 0, l
+ }
+
+ var h, s float64
+ d := max - min
+ if l > 0.5 {
+ s = d / (2 - max - min)
+ } else {
+ s = d / (max + min)
+ }
+
+ switch max {
+ case rr:
+ h = (gg - bb) / d
+ if g < b {
+ h += 6
+ }
+ case gg:
+ h = (bb-rr)/d + 2
+ case bb:
+ h = (rr-gg)/d + 4
+ }
+ h /= 6
+
+ return h, s, l
+}
+
+// hslToRGB converts a color from HSL to RGB.
+func hslToRGB(h, s, l float64) (uint8, uint8, uint8) {
+ var r, g, b float64
+ if s == 0 {
+ v := clamp(l * 255)
+ return v, v, v
+ }
+
+ var q float64
+ if l < 0.5 {
+ q = l * (1 + s)
+ } else {
+ q = l + s - l*s
+ }
+ p := 2*l - q
+
+ r = hueToRGB(p, q, h+1/3.0)
+ g = hueToRGB(p, q, h)
+ b = hueToRGB(p, q, h-1/3.0)
+
+ return clamp(r * 255), clamp(g * 255), clamp(b * 255)
+}
+
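+// hueToRGB is the helper used by hslToRGB to convert a single hue component
+// into an RGB channel value in the range [0, 1].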
+func hueToRGB(p, q, t float64) float64 {
+ if t < 0 {
+ t++
+ }
+ if t > 1 {
+ t--
+ }
+ if t < 1/6.0 {
+ return p + (q-p)*6*t
+ }
+ if t < 1/2.0 {
+ return q
+ }
+ if t < 2/3.0 {
+ return p + (q-p)*(2/3.0-t)*6
+ }
+ return p
+}
diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore
new file mode 100644
index 000000000..ba22c99a9
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.gitignore
@@ -0,0 +1,3 @@
+.idea
+*.sw?
+.vscode
diff --git a/vendor/github.com/go-chi/chi/.travis.yml b/vendor/github.com/go-chi/chi/.travis.yml
new file mode 100644
index 000000000..44ecf0d78
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+ - 1.10.x
+ - 1.11.x
+
+script:
+ - go get -d -t ./...
+ - go vet ./...
+ - go test ./...
+ - >
+ go_version=$(go version);
+ if [ ${go_version:13:4} = "1.11" ]; then
+ go get -u golang.org/x/tools/cmd/goimports;
+ goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :;
+ fi
+
diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md
new file mode 100644
index 000000000..d03e40c64
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CHANGELOG.md
@@ -0,0 +1,139 @@
+# Changelog
+
+## v4.0.0 (2019-01-10)
+
+- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
+- router: respond with 404 on router with no routes (#362)
+- router: additional check to ensure wildcard is at the end of a url pattern (#333)
+- middleware: deprecate use of http.CloseNotifier (#347)
+- middleware: fix RedirectSlashes to include query params on redirect (#334)
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
+
+
+## v3.3.4 (2019-01-07)
+
+- Minor middleware improvements. No changes to core library/router. Moving v3 into its
+  own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
+
+
+## v3.3.3 (2018-08-27)
+
+- Minor release
+- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
+
+
+## v3.3.2 (2017-12-22)
+
+- Support to route trailing slashes on mounted sub-routers (#281)
+- middleware: new `ContentCharset` to check matching charsets. Thank you
+ @csucu for your community contribution!
+
+
+## v3.3.1 (2017-11-20)
+
+- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
+- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
+- Minor bug fixes
+
+
+## v3.3.0 (2017-10-10)
+
+- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
+- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
+
+
+## v3.2.1 (2017-08-31)
+
+- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
+ and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
+- Add new `RouteMethod` to `*Context`
+- Add new `Routes` pointer to `*Context`
+- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
+- Updated benchmarks (see README)
+
+
+## v3.1.5 (2017-08-02)
+
+- Setup golint and go vet for the project
+- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
+ to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
+
+
+## v3.1.0 (2017-07-10)
+
+- Fix a few minor issues after v3 release
+- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
+- Move `render` sub-pkg to https://github.com/go-chi/render
+- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
+ suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
+ https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
+
+
+## v3.0.0 (2017-06-21)
+
+- Major update to chi library with many exciting updates, but also some *breaking changes*
+- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
+ `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
+ same router
+- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
+ `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
+- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
+ `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
+ in `_examples/custom-handler`
+- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
+ own using file handler with the stdlib, see `_examples/fileserver` for an example
+- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
+- Moved the chi project to its own organization, to allow chi-related community packages to
+ be easily discovered and supported, at: https://github.com/go-chi
+- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
+- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
+
+
+## v2.1.0 (2017-03-30)
+
+- Minor improvements and update to the chi core library
+- Introduced a brand new `chi/render` sub-package to complete the story of building
+ APIs to offer a pattern for managing well-defined request / response payloads. Please
+ check out the updated `_examples/rest` example for how it works.
+- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state with many companies and users running it in
+ production, the inclusion of some improvements to the middlewares, we are very pleased to
+ announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+ community `"net/context"` package has been included in the standard library as `"context"` and
+  utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+ request-scoped values. We're very excited about the new context addition and are proud to
+ introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+ with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+ which provides direct access to URL routing parameters, the routing path and the matching
+ routing patterns.
+- Users upgrading from chi v1 to v2, need to:
+ 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+ the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+ 2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+ or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
+ has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
new file mode 100644
index 000000000..c0ac2dfe8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2. Download the sources and switch the working directory:
+
+ ```bash
+ go get -u -d github.com/go-chi/chi
+ cd $GOPATH/src/github.com/go-chi/chi
+ ```
+
+## Submitting a Pull Request
+
+A typical workflow is:
+
+1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip]
+2. [Create a topic branch.][branch]
+3. Add tests for your change.
+4. Run `go test`. If your tests pass, return to step 3.
+5. Implement the change and ensure the tests from the previous step pass.
+6. Run `goimports -w .` to ensure the new code conforms to the Go formatting guidelines.
+7. [Add, commit and push your changes.][git-help]
+8. [Submit a pull request.][pull-req]
+
+[go-install]: https://golang.org/doc/install
+[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
+[fork]: https://help.github.com/articles/fork-a-repo
+[branch]: http://learn.github.com/p/branching.html
+[git-help]: https://guides.github.com
+[pull-req]: https://help.github.com/articles/using-pull-requests
diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE
new file mode 100644
index 000000000..d99f02ffa
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md
new file mode 100644
index 000000000..d36d4db53
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/README.md
@@ -0,0 +1,438 @@
+# chi
+
+
+[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
+
+`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
+especially good at helping you write large REST API services that are kept maintainable as your
+project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
+handle signaling, cancelation and request-scoped values across a handler chain.
+
+The focus of the project has been to seek out an elegant and comfortable design for writing
+REST API servers, written during the development of the Pressly API service that powers our
+public API service, which in turn powers all of our client-side applications.
+
+The key considerations of chi's design are: project structure, maintainability, standard http
+handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
+parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
+included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
+
+## Install
+
+`go get -u github.com/go-chi/chi`
+
+
+## Features
+
+* **Lightweight** - cloc'd in ~1000 LOC for the chi router
+* **Fast** - yes, see [benchmarks](#benchmarks)
+* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
+* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting
+* **Context control** - built on new `context` package, providing value chaining, cancelations and timeouts
+* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
+* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
+* **No external dependencies** - plain ol' Go stdlib + net/http
+
+
+## Examples
+
+See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
+
+
+**As easy as:**
+
+```go
+package main
+
+import (
+ "net/http"
+ "github.com/go-chi/chi"
+)
+
+func main() {
+ r := chi.NewRouter()
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("welcome"))
+ })
+ http.ListenAndServe(":3000", r)
+}
+```
+
+**REST Preview:**
+
+Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
+in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
+Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
+
+I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
+above; they will show you all the features of chi and serve as a good form of documentation.
+
+```go
+import (
+ //...
+ "context"
+ "github.com/go-chi/chi"
+ "github.com/go-chi/chi/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+
+ // A good base middleware stack
+ r.Use(middleware.RequestID)
+ r.Use(middleware.RealIP)
+ r.Use(middleware.Logger)
+ r.Use(middleware.Recoverer)
+
+ // Set a timeout value on the request context (ctx), that will signal
+ // through ctx.Done() that the request has timed out and further
+ // processing should be stopped.
+ r.Use(middleware.Timeout(60 * time.Second))
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hi"))
+ })
+
+ // RESTy routes for "articles" resource
+ r.Route("/articles", func(r chi.Router) {
+ r.With(paginate).Get("/", listArticles) // GET /articles
+ r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
+
+ r.Post("/", createArticle) // POST /articles
+ r.Get("/search", searchArticles) // GET /articles/search
+
+ // Regexp url parameters:
+ r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
+
+ // Subrouters:
+ r.Route("/{articleID}", func(r chi.Router) {
+ r.Use(ArticleCtx)
+ r.Get("/", getArticle) // GET /articles/123
+ r.Put("/", updateArticle) // PUT /articles/123
+ r.Delete("/", deleteArticle) // DELETE /articles/123
+ })
+ })
+
+ // Mount the admin sub-router
+ r.Mount("/admin", adminRouter())
+
+ http.ListenAndServe(":3333", r)
+}
+
+func ArticleCtx(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ articleID := chi.URLParam(r, "articleID")
+ article, err := dbGetArticle(articleID)
+ if err != nil {
+ http.Error(w, http.StatusText(404), 404)
+ return
+ }
+ ctx := context.WithValue(r.Context(), "article", article)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func getArticle(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ article, ok := ctx.Value("article").(*Article)
+ if !ok {
+ http.Error(w, http.StatusText(422), 422)
+ return
+ }
+ w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
+}
+
+// A completely separate router for administrator routes
+func adminRouter() http.Handler {
+ r := chi.NewRouter()
+ r.Use(AdminOnly)
+ r.Get("/", adminIndex)
+ r.Get("/accounts", adminListAccounts)
+ return r
+}
+
+func AdminOnly(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ perm, ok := ctx.Value("acl.permission").(YourPermissionType)
+ if !ok || !perm.IsAdmin() {
+ http.Error(w, http.StatusText(403), 403)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+
+## Router design
+
+chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+	// Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc add routes for `pattern` that matches
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that matches
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface defines the router traversal methods, which are also
+// used by the github.com/go-chi/docgen package to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+```
+
+Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern
+supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
+can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
+and `chi.URLParam(r, "*")` for a wildcard parameter.
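+
+A short sketch of wildcard routing (the route pattern and handler body here are illustrative):
+
+```go
+r := chi.NewRouter()
+r.Get("/files/*", func(w http.ResponseWriter, r *http.Request) {
+	// For GET /files/images/logo.png the wildcard value is "images/logo.png".
+	w.Write([]byte(chi.URLParam(r, "*")))
+})
+```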
+
+
+### Middleware handlers
+
+chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
+about them, which means the router and all the tooling is designed to be compatible and
+friendly with any middleware in the community. This offers much better extensibility and reuse
+of packages and is at the heart of chi's purpose.
+
+Here is an example of a standard net/http middleware handler using the new request context
+available in Go. This middleware sets a hypothetical user identifier on the request
+context and calls the next handler in the chain.
+
+```go
+// HTTP middleware setting a value on the request context
+func MyMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := context.WithValue(r.Context(), "user", "123")
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+```
+
+
+### Request handlers
+
+chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
+func that reads a user identifier from the request context - hypothetically, identifying
+the user sending an authenticated request, validated+set by a previous middleware handler.
+
+```go
+// HTTP handler accessing data from the request context.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ user := r.Context().Value("user").(string)
+ w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters right onto the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
+
+ ctx := r.Context()
+ key := ctx.Value("key").(string)
+
+ w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+-----------------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description |
+|:----------------------|:---------------------------------------------------------------------------------
+| AllowContentType | Explicit whitelist of accepted request Content-Types |
+| Compress | Gzip compression for clients that accept compressed responses |
+| GetHead | Automatically route undefined HEAD requests to GET handlers |
+| Heartbeat | Monitoring endpoint to check the servers pulse |
+| Logger | Logs the start and end of each request with the elapsed processing time |
+| NoCache | Sets response headers to prevent clients from caching |
+| Profiler | Easily attach net/http/pprof to your routers |
+| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP |
+| Recoverer | Gracefully absorb panics and prints the stack trace |
+| RequestID | Injects a request ID into the context of each request |
+| RedirectSlashes | Redirect slashes on routing paths |
+| SetHeader | Short-hand middleware to set a response header key/value |
+| StripSlashes | Strip slashes on routing paths |
+| Throttle | Puts a ceiling on the number of concurrent requests |
+| Timeout | Signals to the request context when the timeout deadline is reached |
+| URLFormat | Parse extension from url and put it on request context |
+| WithValue | Short-hand middleware to set a key/value on the request context |
+-----------------------------------------------------------------------------------------------------------
+
+### Auxiliary middlewares & packages
+
+Please see https://github.com/go-chi for additional packages.
+
+--------------------------------------------------------------------------------------------------------------------
+| package | description |
+|:---------------------------------------------------|:-------------------------------------------------------------
+| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
+| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
+| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
+| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
+| [httpcoala](https://github.com/go-chi/httpcoala) | HTTP request coalescer |
+| [chi-authz](https://github.com/casbin/chi-authz) | Request ACL via https://github.com/hsluoyz/casbin |
+| [phi](https://github.com/fate-lovely/phi) | Port chi to [fasthttp](https://github.com/valyala/fasthttp) |
+--------------------------------------------------------------------------------------------------------------------
+
+Please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware.
+
+
+## context?
+
+`context` is a tiny pkg that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and is available in stdlib since go1.7.
+
+Learn more at https://blog.golang.org/context
+
+and..
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop
+
+```shell
+BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op
+BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op
+BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op
+BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op
+BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+ * Parts of chi's thinking comes from goji, and chi's middleware package
+ sources from goji.
+* Armon Dadgar for https://github.com/armon/go-radix
+* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
+
+We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
+
+
+## Beyond REST
+
+chi is just an HTTP router that lets you decompose request handling into many smaller layers.
+Many companies, including Pressly.com (of course), use chi to write REST services for their public
+APIs. But REST is just a convention for managing state via HTTP, and there are a lot of other pieces
+required to write a complete client-server system or network of microservices.
+
+Looking ahead beyond REST, I also recommend some newer works in the field coming from
+[gRPC](https://github.com/grpc/grpc-go), [NATS](https://nats.io), [go-kit](https://github.com/go-kit/kit)
+and even [graphql](https://github.com/graphql-go/graphql). They're all pretty cool with their
+own unique approaches and benefits. Specifically, I'd look at gRPC since it makes client-server
+communication feel like a single program on a single computer: there's no need to hand-write a
+client library, and the request/response payloads are typed contracts. NATS is also impressive as
+a fast, lightweight pub-sub transport that can speak protobufs, with nice service discovery built
+in, which makes it an excellent combination with gRPC.
+
+
+## License
+
+Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
+
+Licensed under [MIT License](./LICENSE)
+
+[GoDoc]: https://godoc.org/github.com/go-chi/chi
+[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
+[Travis]: https://travis-ci.org/go-chi/chi
+[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go
new file mode 100644
index 000000000..88e684613
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chain.go
@@ -0,0 +1,49 @@
+package chi
+
+import "net/http"
+
+// Chain returns a Middlewares type from a slice of middleware handlers.
+func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
+ return Middlewares(middlewares)
+}
+
+// Handler builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) Handler(h http.Handler) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
+// with `h http.HandlerFunc` as the final handler.
+func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
+ return &ChainHandler{mws, h, chain(mws, h)}
+}
+
+// ChainHandler is a http.Handler with support for handler composition and
+// execution.
+type ChainHandler struct {
+ Middlewares Middlewares
+ Endpoint http.Handler
+ chain http.Handler
+}
+
+func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ c.chain.ServeHTTP(w, r)
+}
+
+// chain builds a http.Handler composed of an inline middleware stack and endpoint
+// handler in the order they are passed.
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
+ // Return ahead of time if there aren't any middlewares for the chain
+ if len(middlewares) == 0 {
+ return endpoint
+ }
+
+ // Wrap the end handler with the middleware chain
+ h := middlewares[len(middlewares)-1](endpoint)
+ for i := len(middlewares) - 2; i >= 0; i-- {
+ h = middlewares[i](h)
+ }
+
+ return h
+}
diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go
new file mode 100644
index 000000000..9962229d0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/chi.go
@@ -0,0 +1,134 @@
+//
+// Package chi is a small, idiomatic and composable router for building HTTP services.
+//
+// chi requires Go 1.7 or newer.
+//
+// Example:
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/go-chi/chi"
+// "github.com/go-chi/chi/middleware"
+// )
+//
+// func main() {
+// r := chi.NewRouter()
+// r.Use(middleware.Logger)
+// r.Use(middleware.Recoverer)
+//
+// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("root."))
+// })
+//
+// http.ListenAndServe(":3333", r)
+// }
+//
+// See github.com/go-chi/chi/_examples/ for more in-depth examples.
+//
+// URL patterns allow for easy matching of path components in HTTP
+// requests. The matching components can then be accessed using
+// chi.URLParam(). All patterns must begin with a slash.
+//
+// A simple named placeholder {name} matches any sequence of characters
+// up to the next / or the end of the URL. Trailing slashes on paths must
+// be handled explicitly.
+//
+// A placeholder with a name followed by a colon allows a regular
+// expression match, for example {number:\\d+}. The regular expression
+// syntax is Go's normal regexp RE2 syntax, except that regular expressions
+// including { or } are not supported, and / will never be
+// matched. An anonymous regexp pattern is allowed, using an empty string
+// before the colon in the placeholder, such as {:\\d+}
+//
+// The special placeholder of asterisk matches the rest of the requested
+// URL. Any trailing characters in the pattern are ignored. This is the only
+// placeholder which will match / characters.
+//
+// Examples:
+// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
+// "/user/{name}/info" matches "/user/jsmith/info"
+// "/page/*" matches "/page/intro/latest"
+// "/page/*/index" also matches "/page/intro/latest"
+// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
+//
+package chi
+
+import "net/http"
+
+// NewRouter returns a new Mux object that implements the Router interface.
+func NewRouter() *Mux {
+ return NewMux()
+}
+
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+	// Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc add routes for `pattern` that matches
+	// all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that matches
+	// the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds methods for router traversal, which are also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handler's.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go
new file mode 100644
index 000000000..229c9cbfb
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/context.go
@@ -0,0 +1,161 @@
+package chi
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+)
+
+var (
+ // RouteCtxKey is the context.Context key to store the request context.
+ RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+ Routes Routes
+
+ // Routing path/method override used during the route search.
+ // See Mux#routeHTTP method.
+ RoutePath string
+ RouteMethod string
+
+ // Routing pattern stack throughout the lifecycle of the request,
+ // across all connected routers. It is a record of all matching
+ // patterns across a stack of sub-routers.
+ RoutePatterns []string
+
+ // URLParams are the stack of routeParams captured during the
+ // routing lifecycle across a stack of sub-routers.
+ URLParams RouteParams
+
+ // The endpoint routing pattern that matched the request URI path
+ // or `RoutePath` of the current sub-router. This value will update
+ // during the lifecycle of a request passing through a stack of
+ // sub-routers.
+ routePattern string
+
+	// Route parameters matched for the current sub-router. It is
+	// intentionally unexported so it can't be tampered with.
+ routeParams RouteParams
+
+ // methodNotAllowed hint
+ methodNotAllowed bool
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+ return &Context{}
+}
+
+// Reset a routing context to its initial state.
+func (x *Context) Reset() {
+ x.Routes = nil
+ x.RoutePath = ""
+ x.RouteMethod = ""
+ x.RoutePatterns = x.RoutePatterns[:0]
+ x.URLParams.Keys = x.URLParams.Keys[:0]
+ x.URLParams.Values = x.URLParams.Values[:0]
+
+ x.routePattern = ""
+ x.routeParams.Keys = x.routeParams.Keys[:0]
+ x.routeParams.Values = x.routeParams.Values[:0]
+ x.methodNotAllowed = false
+}
+
+// URLParam returns the corresponding URL parameter value from the request
+// routing context.
+func (x *Context) URLParam(key string) string {
+ for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
+ if x.URLParams.Keys[k] == key {
+ return x.URLParams.Values[k]
+ }
+ }
+ return ""
+}
+
+// RoutePattern builds the routing pattern string for the particular
+// request, at the particular point during routing. This means the value
+// will change throughout the execution of a request in a router. That is
+// why it's advised to only use this value after calling the next handler.
+//
+// For example,
+//
+// func Instrument(next http.Handler) http.Handler {
+// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// next.ServeHTTP(w, r)
+// routePattern := chi.RouteContext(r.Context()).RoutePattern()
+// measure(w, r, routePattern)
+// })
+// }
+func (x *Context) RoutePattern() string {
+ routePattern := strings.Join(x.RoutePatterns, "")
+ return strings.Replace(routePattern, "/*/", "/", -1)
+}
+
+// RouteContext returns chi's routing Context object from a
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+ return ctx.Value(RouteCtxKey).(*Context)
+}
+
+// URLParam returns the url parameter from a http.Request object.
+func URLParam(r *http.Request, key string) string {
+ if rctx := RouteContext(r.Context()); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// URLParamFromCtx returns the url parameter from a http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+ if rctx := RouteContext(ctx); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// RouteParams is a structure to track URL routing parameters efficiently.
+type RouteParams struct {
+ Keys, Values []string
+}
+
+// Add will append a URL parameter to the end of the route param
+func (s *RouteParams) Add(key, value string) {
+ (*s).Keys = append((*s).Keys, key)
+ (*s).Values = append((*s).Values, value)
+}
+
+// ServerBaseContext wraps an http.Handler to set the request context to the
+// `baseCtx`.
+func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler {
+ fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ baseCtx := baseCtx
+
+ // Copy over default net/http server context keys
+ if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok {
+ baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v)
+ }
+ if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok {
+ baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v)
+ }
+
+ h.ServeHTTP(w, r.WithContext(baseCtx))
+ })
+ return fn
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/compress.go b/vendor/github.com/go-chi/chi/middleware/compress.go
new file mode 100644
index 000000000..966e5e375
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/compress.go
@@ -0,0 +1,275 @@
+package middleware
+
+import (
+ "bufio"
+ "compress/flate"
+ "compress/gzip"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+)
+
+var encoders = map[string]EncoderFunc{}
+
+var encodingPrecedence = []string{"br", "gzip", "deflate"}
+
+func init() {
+ // TODO:
+ // lzma: Opera.
+ // sdch: Chrome, Android. Gzip output + dictionary header.
+ // br: Brotli, see https://github.com/go-chi/chi/pull/326
+
+ // TODO: Exception for old MSIE browsers that can't handle non-HTML?
+ // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ SetEncoder("gzip", encoderGzip)
+
+ // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
+ // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
+ // checksum compared to CRC-32 used in "gzip" and thus is faster.
+ //
+ // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
+ // raw DEFLATE data only, without the mentioned zlib wrapper.
+ // Because of this major confusion, most modern browsers try it
+ // both ways, first looking for zlib headers.
+ // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
+ //
+ // The list of browsers having problems is quite big, see:
+ // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
+ //
+	// That's why we prefer gzip over deflate. It's just more reliable
+	// and not significantly slower than deflate.
+ SetEncoder("deflate", encoderDeflate)
+
+ // NOTE: Not implemented, intentionally:
+ // case "compress": // LZW. Deprecated.
+ // case "bzip2": // Too slow on-the-fly.
+ // case "zopfli": // Too slow on-the-fly.
+ // case "xz": // Too slow on-the-fly.
+}
+
+// An EncoderFunc is a function that wraps the provided ResponseWriter with a
+// streaming compression algorithm and returns it.
+//
+// In case of failure, the function should return nil.
+type EncoderFunc func(w http.ResponseWriter, level int) io.Writer
+
+// SetEncoder can be used to set the implementation of a compression algorithm.
+//
+// The encoding should be a standardised identifier. See:
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
+//
+// For example, add the Brotli algorithm:
+//
+// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
+//
+// middleware.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
+// params := brotli_enc.NewBrotliParams()
+// params.SetQuality(level)
+// return brotli_enc.NewBrotliWriter(params, w)
+// })
+func SetEncoder(encoding string, fn EncoderFunc) {
+ encoding = strings.ToLower(encoding)
+ if encoding == "" {
+ panic("the encoding can not be empty")
+ }
+ if fn == nil {
+ panic("attempted to set a nil encoder function")
+ }
+ encoders[encoding] = fn
+
+ var e string
+ for _, v := range encodingPrecedence {
+ if v == encoding {
+ e = v
+ }
+ }
+
+ if e == "" {
+		// The encoding is not yet in the precedence list; give it top priority.
+		encodingPrecedence = append([]string{encoding}, encodingPrecedence...)
+ }
+}
+
+var defaultContentTypes = map[string]struct{}{
+ "text/html": {},
+ "text/css": {},
+ "text/plain": {},
+ "text/javascript": {},
+ "application/javascript": {},
+ "application/x-javascript": {},
+ "application/json": {},
+ "application/atom+xml": {},
+ "application/rss+xml": {},
+ "image/svg+xml": {},
+}
+
+// DefaultCompress is a middleware that compresses response
+// body of predefined content types to a data format based
+// on Accept-Encoding request header. It uses a default
+// compression level.
+func DefaultCompress(next http.Handler) http.Handler {
+ return Compress(flate.DefaultCompression)(next)
+}
+
+// Compress is a middleware that compresses response
+// body of a given content types to a data format based
+// on Accept-Encoding request header. It uses a given
+// compression level.
+//
+// NOTE: make sure to set the Content-Type header on your response,
+// otherwise this middleware will not compress the response body. For example,
+// in your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
+// or set it manually.
+func Compress(level int, types ...string) func(next http.Handler) http.Handler {
+ contentTypes := defaultContentTypes
+ if len(types) > 0 {
+ contentTypes = make(map[string]struct{}, len(types))
+ for _, t := range types {
+ contentTypes[t] = struct{}{}
+ }
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ encoder, encoding := selectEncoder(r.Header)
+
+ cw := &compressResponseWriter{
+ ResponseWriter: w,
+ w: w,
+ contentTypes: contentTypes,
+ encoder: encoder,
+ encoding: encoding,
+ level: level,
+ }
+ defer cw.Close()
+
+ next.ServeHTTP(cw, r)
+ }
+
+ return http.HandlerFunc(fn)
+ }
+}
+
+func selectEncoder(h http.Header) (EncoderFunc, string) {
+ header := h.Get("Accept-Encoding")
+
+ // Parse the names of all accepted algorithms from the header.
+ accepted := strings.Split(strings.ToLower(header), ",")
+
+ // Find supported encoder by accepted list by precedence
+ for _, name := range encodingPrecedence {
+ if fn, ok := encoders[name]; ok && matchAcceptEncoding(accepted, name) {
+ return fn, name
+ }
+ }
+
+ // No encoder found to match the accepted encoding
+ return nil, ""
+}
+
+func matchAcceptEncoding(accepted []string, encoding string) bool {
+ for _, v := range accepted {
+ if strings.Index(v, encoding) >= 0 {
+ return true
+ }
+ }
+ return false
+}
+
+type compressResponseWriter struct {
+ http.ResponseWriter
+ w io.Writer
+ encoder EncoderFunc
+ encoding string
+ contentTypes map[string]struct{}
+ level int
+ wroteHeader bool
+}
+
+func (w *compressResponseWriter) WriteHeader(code int) {
+ if w.wroteHeader {
+ return
+ }
+ w.wroteHeader = true
+ defer w.ResponseWriter.WriteHeader(code)
+
+ // Already compressed data?
+ if w.Header().Get("Content-Encoding") != "" {
+ return
+ }
+
+ // Parse the first part of the Content-Type response header.
+ contentType := ""
+ parts := strings.Split(w.Header().Get("Content-Type"), ";")
+ if len(parts) > 0 {
+ contentType = parts[0]
+ }
+
+	// Is the content type compressible?
+ if _, ok := w.contentTypes[contentType]; !ok {
+ return
+ }
+
+ if w.encoder != nil && w.encoding != "" {
+ if wr := w.encoder(w.ResponseWriter, w.level); wr != nil {
+ w.w = wr
+ w.Header().Set("Content-Encoding", w.encoding)
+
+ // The content-length after compression is unknown
+ w.Header().Del("Content-Length")
+ }
+ }
+}
+
+func (w *compressResponseWriter) Write(p []byte) (int, error) {
+ if !w.wroteHeader {
+ w.ResponseWriter.WriteHeader(http.StatusOK)
+ }
+
+ return w.w.Write(p)
+}
+
+func (w *compressResponseWriter) Flush() {
+ if f, ok := w.w.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+func (w *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := w.w.(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
+}
+
+func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
+ if ps, ok := w.w.(http.Pusher); ok {
+ return ps.Push(target, opts)
+ }
+ return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
+}
+
+func (w *compressResponseWriter) Close() error {
+ if c, ok := w.w.(io.WriteCloser); ok {
+ return c.Close()
+ }
+ return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
+}
+
+func encoderGzip(w http.ResponseWriter, level int) io.Writer {
+ gw, err := gzip.NewWriterLevel(w, level)
+ if err != nil {
+ return nil
+ }
+ return gw
+}
+
+func encoderDeflate(w http.ResponseWriter, level int) io.Writer {
+ dw, err := flate.NewWriter(w, level)
+ if err != nil {
+ return nil
+ }
+ return dw
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_charset.go b/vendor/github.com/go-chi/chi/middleware/content_charset.go
new file mode 100644
index 000000000..07b5ce6f6
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_charset.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
+// An empty charset will allow requests with no Content-Type header or no specified charset.
+func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
+ for i, c := range charsets {
+ charsets[i] = strings.ToLower(c)
+ }
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
+
+// Check the charset of a Content-Type header value against a list of acceptable values.
+func contentEncoding(ce string, charsets ...string) bool {
+ _, ce = split(strings.ToLower(ce), ";")
+ _, ce = split(ce, "charset=")
+ ce, _ = split(ce, ";")
+ for _, c := range charsets {
+ if ce == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Split a string in two parts, cleaning any whitespace.
+func split(str, sep string) (string, string) {
+ var a, b string
+ var parts = strings.SplitN(str, sep, 2)
+ a = strings.TrimSpace(parts[0])
+ if len(parts) == 2 {
+ b = strings.TrimSpace(parts[1])
+ }
+
+ return a, b
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/content_type.go b/vendor/github.com/go-chi/chi/middleware/content_type.go
new file mode 100644
index 000000000..3a2dc20af
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/content_type.go
@@ -0,0 +1,45 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SetHeader is a convenience handler to set a response header key/value
+func SetHeader(key, value string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(key, value)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// AllowContentType enforces a whitelist of request Content-Types, otherwise
+// responding with a 415 Unsupported Media Type status.
+func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
+ cT := []string{}
+ for _, t := range contentTypes {
+ cT = append(cT, strings.ToLower(t))
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
+ if i := strings.Index(s, ";"); i > -1 {
+ s = s[0:i]
+ }
+
+ for _, t := range cT {
+ if t == s {
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/get_head.go b/vendor/github.com/go-chi/chi/middleware/get_head.go
new file mode 100644
index 000000000..86068a96d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/get_head.go
@@ -0,0 +1,39 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// GetHead automatically routes undefined HEAD requests to GET handlers.
+func GetHead(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "HEAD" {
+ rctx := chi.RouteContext(r.Context())
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Temporary routing context to look-ahead before routing the request
+ tctx := chi.NewRouteContext()
+
+			// Attempt to find a HEAD handler for the routing path; if none is found,
+			// traverse the router as though it's a GET route, but proceed with the
+			// request using the HEAD method.
+ if !rctx.Routes.Match(tctx, "HEAD", routePath) {
+ rctx.RouteMethod = "GET"
+ rctx.RoutePath = routePath
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
new file mode 100644
index 000000000..fe822fb53
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/heartbeat.go
@@ -0,0 +1,26 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Heartbeat is endpoint middleware useful for setting up a path like
+// `/ping` that load balancers or external uptime-testing services can
+// request before hitting any routes. It's also convenient to place this
+// above ACL middlewares.
+func Heartbeat(endpoint string) func(http.Handler) http.Handler {
+ f := func(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("."))
+ return
+ }
+ h.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+ return f
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/logger.go b/vendor/github.com/go-chi/chi/middleware/logger.go
new file mode 100644
index 000000000..9f119d565
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/logger.go
@@ -0,0 +1,158 @@
+package middleware
+
+import (
+ "bytes"
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "time"
+)
+
+var (
+ // LogEntryCtxKey is the context.Context key to store the request log entry.
+ LogEntryCtxKey = &contextKey{"LogEntry"}
+
+ // DefaultLogger is called by the Logger middleware handler to log each request.
+	// It's made a package-level variable so that it can be reconfigured for custom
+ // logging configurations.
+ DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
+)
+
+// Logger is a middleware that logs the start and end of each request, along
+// with some useful data about what was requested, what the response status was,
+// and how long it took to return. When standard output is a TTY, Logger will
+// print in color, otherwise it will print in black and white. Logger prints a
+// request ID if one is provided.
+//
+// Alternatively, look at https://github.com/pressly/lg and the `lg.RequestLogger`
+// middleware pkg.
+func Logger(next http.Handler) http.Handler {
+ return DefaultLogger(next)
+}
+
+// RequestLogger returns a logger handler using a custom LogFormatter.
+func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ entry := f.NewLogEntry(r)
+ ww := NewWrapResponseWriter(w, r.ProtoMajor)
+
+ t1 := time.Now()
+ defer func() {
+ entry.Write(ww.Status(), ww.BytesWritten(), time.Since(t1))
+ }()
+
+ next.ServeHTTP(ww, WithLogEntry(r, entry))
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// LogFormatter initiates the beginning of a new LogEntry per request.
+// See DefaultLogFormatter for an example implementation.
+type LogFormatter interface {
+ NewLogEntry(r *http.Request) LogEntry
+}
+
+// LogEntry records the final log when a request completes.
+// See defaultLogEntry for an example implementation.
+type LogEntry interface {
+ Write(status, bytes int, elapsed time.Duration)
+ Panic(v interface{}, stack []byte)
+}
+
+// GetLogEntry returns the in-context LogEntry for a request.
+func GetLogEntry(r *http.Request) LogEntry {
+ entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
+ return entry
+}
+
+// WithLogEntry sets the in-context LogEntry for a request.
+func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
+ r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
+ return r
+}
+
+// LoggerInterface accepts printing to the stdlib logger or any compatible logger.
+type LoggerInterface interface {
+ Print(v ...interface{})
+}
+
+// DefaultLogFormatter is a simple logger that implements a LogFormatter.
+type DefaultLogFormatter struct {
+ Logger LoggerInterface
+ NoColor bool
+}
+
+// NewLogEntry creates a new LogEntry for the request.
+func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
+ useColor := !l.NoColor
+ entry := &defaultLogEntry{
+ DefaultLogFormatter: l,
+ request: r,
+ buf: &bytes.Buffer{},
+ useColor: useColor,
+ }
+
+ reqID := GetReqID(r.Context())
+ if reqID != "" {
+ cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
+ }
+ cW(entry.buf, useColor, nCyan, "\"")
+ cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
+
+ scheme := "http"
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
+
+ entry.buf.WriteString("from ")
+ entry.buf.WriteString(r.RemoteAddr)
+ entry.buf.WriteString(" - ")
+
+ return entry
+}
+
+type defaultLogEntry struct {
+ *DefaultLogFormatter
+ request *http.Request
+ buf *bytes.Buffer
+ useColor bool
+}
+
+func (l *defaultLogEntry) Write(status, bytes int, elapsed time.Duration) {
+ switch {
+ case status < 200:
+ cW(l.buf, l.useColor, bBlue, "%03d", status)
+ case status < 300:
+ cW(l.buf, l.useColor, bGreen, "%03d", status)
+ case status < 400:
+ cW(l.buf, l.useColor, bCyan, "%03d", status)
+ case status < 500:
+ cW(l.buf, l.useColor, bYellow, "%03d", status)
+ default:
+ cW(l.buf, l.useColor, bRed, "%03d", status)
+ }
+
+ cW(l.buf, l.useColor, bBlue, " %dB", bytes)
+
+ l.buf.WriteString(" in ")
+ if elapsed < 500*time.Millisecond {
+ cW(l.buf, l.useColor, nGreen, "%s", elapsed)
+ } else if elapsed < 5*time.Second {
+ cW(l.buf, l.useColor, nYellow, "%s", elapsed)
+ } else {
+ cW(l.buf, l.useColor, nRed, "%s", elapsed)
+ }
+
+ l.Logger.Print(l.buf.String())
+}
+
+func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
+ panicEntry := l.NewLogEntry(l.request).(*defaultLogEntry)
+ cW(panicEntry.buf, l.useColor, bRed, "panic: %+v", v)
+ l.Logger.Print(panicEntry.buf.String())
+ l.Logger.Print(string(stack))
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/middleware.go b/vendor/github.com/go-chi/chi/middleware/middleware.go
new file mode 100644
index 000000000..be6a44fad
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/middleware.go
@@ -0,0 +1,12 @@
+package middleware
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi/middleware context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/nocache.go b/vendor/github.com/go-chi/chi/middleware/nocache.go
new file mode 100644
index 000000000..2412829e1
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/nocache.go
@@ -0,0 +1,58 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "time"
+)
+
+// Unix epoch time
+var epoch = time.Unix(0, 0).Format(time.RFC1123)
+
+// Taken from https://github.com/mytrile/nocache
+var noCacheHeaders = map[string]string{
+ "Expires": epoch,
+ "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
+ "Pragma": "no-cache",
+ "X-Accel-Expires": "0",
+}
+
+var etagHeaders = []string{
+ "ETag",
+ "If-Modified-Since",
+ "If-Match",
+ "If-None-Match",
+ "If-Range",
+ "If-Unmodified-Since",
+}
+
+// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
+// a router (or subrouter) from being cached by an upstream proxy and/or client.
+//
+// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
+// Expires: Thu, 01 Jan 1970 00:00:00 UTC
+// Cache-Control: no-cache, private, max-age=0
+// X-Accel-Expires: 0
+// Pragma: no-cache (for HTTP/1.0 proxies/clients)
+func NoCache(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+
+ // Delete any ETag headers that may have been set
+ for _, v := range etagHeaders {
+ if r.Header.Get(v) != "" {
+ r.Header.Del(v)
+ }
+ }
+
+ // Set our NoCache headers
+ for k, v := range noCacheHeaders {
+ w.Header().Set(k, v)
+ }
+
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/profiler.go b/vendor/github.com/go-chi/chi/middleware/profiler.go
new file mode 100644
index 000000000..1d44b8259
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/profiler.go
@@ -0,0 +1,55 @@
+package middleware
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+
+ "github.com/go-chi/chi"
+)
+
+// Profiler is a convenient subrouter used for mounting net/http/pprof. ie.
+//
+// func MyService() http.Handler {
+// r := chi.NewRouter()
+// // ..middlewares
+// r.Mount("/debug", middleware.Profiler())
+// // ..routes
+// return r
+// }
+func Profiler() http.Handler {
+ r := chi.NewRouter()
+ r.Use(NoCache)
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
+ })
+ r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/", 301)
+ })
+
+ r.HandleFunc("/pprof/*", pprof.Index)
+ r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
+ r.HandleFunc("/pprof/profile", pprof.Profile)
+ r.HandleFunc("/pprof/symbol", pprof.Symbol)
+ r.HandleFunc("/pprof/trace", pprof.Trace)
+ r.HandleFunc("/vars", expVars)
+
+ return r
+}
+
+// Replicated from expvar.go as not public.
+func expVars(w http.ResponseWriter, r *http.Request) {
+ first := true
+ w.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(w, "{\n")
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/realip.go b/vendor/github.com/go-chi/chi/middleware/realip.go
new file mode 100644
index 000000000..146c2b0a0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/realip.go
@@ -0,0 +1,54 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+
+// RealIP is a middleware that sets a http.Request's RemoteAddr to the results
+// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that
+// order).
+//
+// This middleware should be inserted fairly early in the middleware stack to
+// ensure that subsequent layers (e.g., request loggers) which examine the
+// RemoteAddr will see the intended value.
+//
+// You should only use this middleware if you can trust the headers passed to
+// you (in particular, the two headers this middleware uses), for example
+// because you have placed a reverse proxy like HAProxy or nginx in front of
+// chi. If your reverse proxies are configured to pass along arbitrary header
+// values from the client, or if you use this middleware without a reverse
+// proxy, malicious clients will be able to make you very sad (or, depending on
+// how you're using RemoteAddr, vulnerable to an attack of some sort).
+func RealIP(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if rip := realIP(r); rip != "" {
+ r.RemoteAddr = rip
+ }
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func realIP(r *http.Request) string {
+ var ip string
+
+ if xff := r.Header.Get(xForwardedFor); xff != "" {
+ i := strings.Index(xff, ", ")
+ if i == -1 {
+ i = len(xff)
+ }
+ ip = xff[:i]
+ } else if xrip := r.Header.Get(xRealIP); xrip != "" {
+ ip = xrip
+ }
+
+ return ip
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/recoverer.go b/vendor/github.com/go-chi/chi/middleware/recoverer.go
new file mode 100644
index 000000000..57fc3eb9d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/recoverer.go
@@ -0,0 +1,39 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "runtime/debug"
+)
+
+// Recoverer is a middleware that recovers from panics, logs the panic (and a
+// backtrace), and returns an HTTP 500 (Internal Server Error) status if
+// possible. Recoverer prints a request ID if one is provided.
+//
+// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
+func Recoverer(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rvr := recover(); rvr != nil {
+
+ logEntry := GetLogEntry(r)
+ if logEntry != nil {
+ logEntry.Panic(rvr, debug.Stack())
+ } else {
+ fmt.Fprintf(os.Stderr, "Panic: %+v\n", rvr)
+ debug.PrintStack()
+ }
+
+ http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+ }
+ }()
+
+ next.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/request_id.go b/vendor/github.com/go-chi/chi/middleware/request_id.go
new file mode 100644
index 000000000..65b58f633
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/request_id.go
@@ -0,0 +1,92 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync/atomic"
+)
+
+// Key to use when setting the request ID.
+type ctxKeyRequestID int
+
+// RequestIDKey is the key that holds the unique request ID in a request context.
+const RequestIDKey ctxKeyRequestID = 0
+
+var prefix string
+var reqid uint64
+
+// A quick note on the statistics here: we're trying to calculate the chance that
+// two randomly generated base62 prefixes will collide. We use the formula from
+// http://en.wikipedia.org/wiki/Birthday_problem
+//
+// P[m, n] \approx 1 - e^{-m^2/2n}
+//
+// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
+// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
+//
+// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
+//
+// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
+// our purposes, and is surely more than anyone would ever need in practice -- a
+// process that is rebooted a handful of times a day for a hundred years has less
+// than a millionth of a percent chance of generating two colliding IDs.
+
+func init() {
+ hostname, err := os.Hostname()
+ if hostname == "" || err != nil {
+ hostname = "localhost"
+ }
+ var buf [12]byte
+ var b64 string
+ for len(b64) < 10 {
+ rand.Read(buf[:])
+ b64 = base64.StdEncoding.EncodeToString(buf[:])
+ b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
+ }
+
+ prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
+}
+
+// RequestID is a middleware that injects a request ID into the context of each
+// request. A request ID is a string of the form "host.example.com/random-0001",
+// where "random" is a base62 random string that uniquely identifies this go
+// process, and where the last number is an atomically incremented request
+// counter.
+func RequestID(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ requestID := r.Header.Get("X-Request-Id")
+ if requestID == "" {
+ myid := atomic.AddUint64(&reqid, 1)
+ requestID = fmt.Sprintf("%s-%06d", prefix, myid)
+ }
+ ctx = context.WithValue(ctx, RequestIDKey, requestID)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ }
+ return http.HandlerFunc(fn)
+}
+
+// GetReqID returns a request ID from the given context if one is present.
+// Returns the empty string if a request ID cannot be found.
+func GetReqID(ctx context.Context) string {
+ if ctx == nil {
+ return ""
+ }
+ if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
+ return reqID
+ }
+ return ""
+}
+
+// NextRequestID generates the next request ID in the sequence.
+func NextRequestID() uint64 {
+ return atomic.AddUint64(&reqid, 1)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/strip.go b/vendor/github.com/go-chi/chi/middleware/strip.go
new file mode 100644
index 000000000..2b8b1842a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/strip.go
@@ -0,0 +1,56 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-chi/chi"
+)
+
+// StripSlashes is a middleware that matches request paths with a trailing
+// slash, strips the slash from the path, and continues routing through the mux;
+// if a route matches, it will serve the handler.
+func StripSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ rctx.RoutePath = path[:len(path)-1]
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
+
+// RedirectSlashes is a middleware that will match request paths with a trailing
+// slash and redirect to the same path, less the trailing slash.
+//
+// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
+// see https://github.com/go-chi/chi/issues/343
+func RedirectSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ if r.URL.RawQuery != "" {
+ path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
+ } else {
+ path = path[:len(path)-1]
+ }
+ http.Redirect(w, r, path, 301)
+ return
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/terminal.go b/vendor/github.com/go-chi/chi/middleware/terminal.go
new file mode 100644
index 000000000..a5d424100
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/terminal.go
@@ -0,0 +1,63 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ // Normal colors
+ nBlack = []byte{'\033', '[', '3', '0', 'm'}
+ nRed = []byte{'\033', '[', '3', '1', 'm'}
+ nGreen = []byte{'\033', '[', '3', '2', 'm'}
+ nYellow = []byte{'\033', '[', '3', '3', 'm'}
+ nBlue = []byte{'\033', '[', '3', '4', 'm'}
+ nMagenta = []byte{'\033', '[', '3', '5', 'm'}
+ nCyan = []byte{'\033', '[', '3', '6', 'm'}
+ nWhite = []byte{'\033', '[', '3', '7', 'm'}
+ // Bright colors
+ bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
+ bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
+ bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
+ bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
+ bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
+ bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
+ bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
+ bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
+
+ reset = []byte{'\033', '[', '0', 'm'}
+)
+
+var isTTY bool
+
+func init() {
+ // This is sort of cheating: if stdout is a character device, we assume
+ // that means it's a TTY. Unfortunately, there are many non-TTY
+ // character devices, but fortunately stdout is rarely set to any of
+ // them.
+ //
+ // We could solve this properly by pulling in a dependency on
+ // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
+ // heuristic for whether to print in color or in black-and-white, I'd
+ // really rather not.
+ fi, err := os.Stdout.Stat()
+ if err == nil {
+ m := os.ModeDevice | os.ModeCharDevice
+ isTTY = fi.Mode()&m == m
+ }
+}
+
+// colorWrite
+func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
+ if isTTY && useColor {
+ w.Write(color)
+ }
+ fmt.Fprintf(w, s, args...)
+ if isTTY && useColor {
+ w.Write(reset)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/throttle.go b/vendor/github.com/go-chi/chi/middleware/throttle.go
new file mode 100644
index 000000000..d935e2ce6
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/throttle.go
@@ -0,0 +1,101 @@
+package middleware
+
+import (
+ "net/http"
+ "time"
+)
+
+const (
+ errCapacityExceeded = "Server capacity exceeded."
+ errTimedOut = "Timed out while waiting for a pending request to complete."
+ errContextCanceled = "Context was canceled."
+)
+
+var (
+ defaultBacklogTimeout = time.Second * 60
+)
+
+// Throttle is a middleware that limits the number of currently processed
+// requests at a time.
+func Throttle(limit int) func(http.Handler) http.Handler {
+ return ThrottleBacklog(limit, 0, defaultBacklogTimeout)
+}
+
+// ThrottleBacklog is a middleware that limits the number of currently
+// processed requests at a time and provides a backlog for holding a finite
+// number of pending requests.
+func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
+ if limit < 1 {
+ panic("chi/middleware: Throttle expects limit > 0")
+ }
+
+ if backlogLimit < 0 {
+		panic("chi/middleware: Throttle expects backlogLimit to be non-negative")
+ }
+
+ t := throttler{
+ tokens: make(chan token, limit),
+ backlogTokens: make(chan token, limit+backlogLimit),
+ backlogTimeout: backlogTimeout,
+ }
+
+ // Filling tokens.
+ for i := 0; i < limit+backlogLimit; i++ {
+ if i < limit {
+ t.tokens <- token{}
+ }
+ t.backlogTokens <- token{}
+ }
+
+ fn := func(h http.Handler) http.Handler {
+ t.h = h
+ return &t
+ }
+
+ return fn
+}
+
+// token represents a request that is being processed.
+type token struct{}
+
+// throttler limits the number of currently processed requests at a time.
+type throttler struct {
+ h http.Handler
+ tokens chan token
+ backlogTokens chan token
+ backlogTimeout time.Duration
+}
+
+// ServeHTTP is the primary throttler request handler
+func (t *throttler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+ case btok := <-t.backlogTokens:
+ timer := time.NewTimer(t.backlogTimeout)
+
+ defer func() {
+ t.backlogTokens <- btok
+ }()
+
+ select {
+ case <-timer.C:
+ http.Error(w, errTimedOut, http.StatusServiceUnavailable)
+ return
+ case <-ctx.Done():
+ http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
+ return
+ case tok := <-t.tokens:
+ defer func() {
+ t.tokens <- tok
+ }()
+ t.h.ServeHTTP(w, r)
+ }
+ return
+ default:
+ http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
+ return
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/timeout.go b/vendor/github.com/go-chi/chi/middleware/timeout.go
new file mode 100644
index 000000000..8e373536c
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/timeout.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+// Timeout is a middleware that cancels ctx after a given timeout and returns
+// a 504 Gateway Timeout error to the client.
+//
+// It's required that you select on the ctx.Done() channel to check whether
+// the context has reached its deadline and return; otherwise, the timeout
+// signal will simply be ignored.
+//
+// ie. a route/handler may look like:
+//
+// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
+// ctx := r.Context()
+// processTime := time.Duration(rand.Intn(4)+1) * time.Second
+//
+// select {
+// case <-ctx.Done():
+// return
+//
+// case <-time.After(processTime):
+// // The above channel simulates some hard work.
+// }
+//
+// w.Write([]byte("done"))
+// })
+//
+func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), timeout)
+ defer func() {
+ cancel()
+ if ctx.Err() == context.DeadlineExceeded {
+ w.WriteHeader(http.StatusGatewayTimeout)
+ }
+ }()
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/url_format.go b/vendor/github.com/go-chi/chi/middleware/url_format.go
new file mode 100644
index 000000000..5749e4f32
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/url_format.go
@@ -0,0 +1,72 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/go-chi/chi"
+)
+
+var (
+ // URLFormatCtxKey is the context.Context key to store the URL format data
+ // for a request.
+ URLFormatCtxKey = &contextKey{"URLFormat"}
+)
+
+// URLFormat is a middleware that parses the url extension from a request path and stores it
+// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
+// trim the suffix from the routing path and continue routing.
+//
+// Routers should not include a url parameter for the suffix when using this middleware.
+//
+// Sample usage.. for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml`
+//
+// func routes() http.Handler {
+// r := chi.NewRouter()
+// r.Use(middleware.URLFormat)
+//
+// r.Get("/articles/{id}", ListArticles)
+//
+// return r
+// }
+//
+// func ListArticles(w http.ResponseWriter, r *http.Request) {
+// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
+//
+// switch urlFormat {
+// case "json":
+// render.JSON(w, r, articles)
+//	case "xml":
+// render.XML(w, r, articles)
+// default:
+// render.JSON(w, r, articles)
+// }
+// }
+//
+func URLFormat(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var format string
+ path := r.URL.Path
+
+ if strings.Index(path, ".") > 0 {
+ base := strings.LastIndex(path, "/")
+ idx := strings.Index(path[base:], ".")
+
+ if idx > 0 {
+ idx += base
+ format = path[idx+1:]
+
+ rctx := chi.RouteContext(r.Context())
+ rctx.RoutePath = path[:idx]
+ }
+ }
+
+ r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
+
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/value.go b/vendor/github.com/go-chi/chi/middleware/value.go
new file mode 100644
index 000000000..fbbd0393f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/value.go
@@ -0,0 +1,17 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+)
+
+// WithValue is a middleware that sets a given key/value in a context chain.
+func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(context.WithValue(r.Context(), key, val))
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
new file mode 100644
index 000000000..5e5594f8d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/middleware/wrap_writer.go
@@ -0,0 +1,183 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
+// hook into various parts of the response process.
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
+ _, fl := w.(http.Flusher)
+
+ bw := basicWriter{ResponseWriter: w}
+
+ if protoMajor == 2 {
+ _, ps := w.(http.Pusher)
+ if fl && ps {
+ return &http2FancyWriter{bw}
+ }
+ } else {
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ if fl && hj && rf {
+ return &httpFancyWriter{bw}
+ }
+ }
+ if fl {
+ return &flushWriter{bw}
+ }
+
+ return &bw
+}
+
+// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
+// into various parts of the response process.
+type WrapResponseWriter interface {
+ http.ResponseWriter
+ // Status returns the HTTP status of the request, or 0 if one has not
+ // yet been sent.
+ Status() int
+ // BytesWritten returns the total number of bytes sent to the client.
+ BytesWritten() int
+ // Tee causes the response body to be written to the given io.Writer in
+ // addition to proxying the writes through. Only one io.Writer can be
+ // tee'd to at once: setting a second one will overwrite the first.
+ // Writes will be sent to the proxy before being written to this
+ // io.Writer. It is illegal for the tee'd writer to be modified
+ // concurrently with writes.
+ Tee(io.Writer)
+ // Unwrap returns the original proxied target.
+ Unwrap() http.ResponseWriter
+}
+
+// basicWriter wraps an http.ResponseWriter, implementing only the minimal
+// http.ResponseWriter interface.
+type basicWriter struct {
+ http.ResponseWriter
+ wroteHeader bool
+ code int
+ bytes int
+ tee io.Writer
+}
+
+func (b *basicWriter) WriteHeader(code int) {
+ if !b.wroteHeader {
+ b.code = code
+ b.wroteHeader = true
+ b.ResponseWriter.WriteHeader(code)
+ }
+}
+
+func (b *basicWriter) Write(buf []byte) (int, error) {
+ b.WriteHeader(http.StatusOK)
+ n, err := b.ResponseWriter.Write(buf)
+ if b.tee != nil {
+ _, err2 := b.tee.Write(buf[:n])
+ // Prefer errors generated by the proxied writer.
+ if err == nil {
+ err = err2
+ }
+ }
+ b.bytes += n
+ return n, err
+}
+
+func (b *basicWriter) maybeWriteHeader() {
+ if !b.wroteHeader {
+ b.WriteHeader(http.StatusOK)
+ }
+}
+
+func (b *basicWriter) Status() int {
+ return b.code
+}
+
+func (b *basicWriter) BytesWritten() int {
+ return b.bytes
+}
+
+func (b *basicWriter) Tee(w io.Writer) {
+ b.tee = w
+}
+
+func (b *basicWriter) Unwrap() http.ResponseWriter {
+ return b.ResponseWriter
+}
+
+type flushWriter struct {
+ basicWriter
+}
+
+func (f *flushWriter) Flush() {
+ f.wroteHeader = true
+
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &flushWriter{}
+
+// httpFancyWriter is a HTTP writer that additionally satisfies
+// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the proxied object.
+type httpFancyWriter struct {
+ basicWriter
+}
+
+func (f *httpFancyWriter) Flush() {
+ f.wroteHeader = true
+
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
+ return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
+ if f.basicWriter.tee != nil {
+ n, err := io.Copy(&f.basicWriter, r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+ }
+ rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
+ f.basicWriter.maybeWriteHeader()
+ n, err := rf.ReadFrom(r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+}
+
+var _ http.Flusher = &httpFancyWriter{}
+var _ http.Hijacker = &httpFancyWriter{}
+var _ http.Pusher = &http2FancyWriter{}
+var _ io.ReaderFrom = &httpFancyWriter{}
+
+// http2FancyWriter is an HTTP/2 writer that additionally satisfies
+// http.Flusher and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the underlying writer.
+type http2FancyWriter struct {
+ basicWriter
+}
+
+func (f *http2FancyWriter) Flush() {
+ f.wroteHeader = true
+
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &http2FancyWriter{}
diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go
new file mode 100644
index 000000000..e553287e4
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/mux.go
@@ -0,0 +1,460 @@
+package chi
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var _ Router = &Mux{}
+
+// Mux is a simple HTTP route multiplexer that parses a request path,
+// records any URL params, and executes an end handler. It implements
+// the http.Handler interface and is friendly with the standard library.
+//
+// Mux is designed to be fast, minimal and offer a powerful API for building
+// modular and composable HTTP services with a large set of handlers. It's
+// particularly useful for writing large REST API services that break a handler
+// into many smaller parts composed of middlewares and end handlers.
+type Mux struct {
+ // The radix trie router
+ tree *node
+
+ // The middleware stack
+ middlewares []func(http.Handler) http.Handler
+
+ // Controls the behaviour of middleware chain generation when a mux
+ // is registered as an inline group inside another mux.
+ inline bool
+ parent *Mux
+
+ // The computed mux handler made of the chained middleware stack and
+ // the tree router
+ handler http.Handler
+
+ // Routing context pool
+ pool *sync.Pool
+
+ // Custom route not found handler
+ notFoundHandler http.HandlerFunc
+
+ // Custom method not allowed handler
+ methodNotAllowedHandler http.HandlerFunc
+}
+
+// NewMux returns a newly initialized Mux object that implements the Router
+// interface.
+func NewMux() *Mux {
+ mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
+ mux.pool.New = func() interface{} {
+ return NewRouteContext()
+ }
+ return mux
+}
+
+// ServeHTTP is the single method of the http.Handler interface that makes
+// Mux interoperable with the standard library. It uses a sync.Pool to get and
+// reuse routing contexts for each request.
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Ensure the mux has some routes defined.
+ if mx.handler == nil {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Check if a routing context already exists from a parent router.
+ rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
+ if rctx != nil {
+ mx.handler.ServeHTTP(w, r)
+ return
+ }
+
+ // Fetch a RouteContext object from the sync pool, and call the computed
+ // mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
+ // Once the request is finished, reset the routing context and put it back
+ // into the pool for reuse from another request.
+ rctx = mx.pool.Get().(*Context)
+ rctx.Reset()
+ rctx.Routes = mx
+ r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
+ mx.handler.ServeHTTP(w, r)
+ mx.pool.Put(rctx)
+}
+
+// Use appends a middleware handler to the Mux middleware stack.
+//
+// The middleware stack for any Mux will execute before searching for a matching
+// route to a specific handler, which provides opportunity to respond early,
+// change the course of the request execution, or set request-scoped values for
+// the next http.Handler.
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
+ if mx.handler != nil {
+ panic("chi: all middlewares must be defined before routes on a mux")
+ }
+ mx.middlewares = append(mx.middlewares, middlewares...)
+}
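
A minimal usage sketch, assuming the conventional github.com/go-chi/chi import path: middleware registered with Use runs for every request and, as the panic above enforces, must be added before any routes.

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi" // assumed import path
)

func main() {
	r := chi.NewRouter()

	// Middleware must be registered before any route; otherwise Use panics.
	r.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			w.Header().Set("X-Frame-Options", "DENY")
			next.ServeHTTP(w, req)
		})
	})

	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("hello"))
	})

	log.Fatal(http.ListenAndServe(":3000", r))
}
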
+
+// Handle adds the route `pattern` that matches any http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Handle(pattern string, handler http.Handler) {
+ mx.handle(mALL, pattern, handler)
+}
+
+// HandleFunc adds the route `pattern` that matches any http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mALL, pattern, handlerFn)
+}
+
+// Method adds the route `pattern` that matches `method` http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Method(method, pattern string, handler http.Handler) {
+ m, ok := methodMap[strings.ToUpper(method)]
+ if !ok {
+ panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
+ }
+ mx.handle(m, pattern, handler)
+}
+
+// MethodFunc adds the route `pattern` that matches `method` http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
+ mx.Method(method, pattern, handlerFn)
+}
+
+// Connect adds the route `pattern` that matches a CONNECT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mCONNECT, pattern, handlerFn)
+}
+
+// Delete adds the route `pattern` that matches a DELETE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mDELETE, pattern, handlerFn)
+}
+
+// Get adds the route `pattern` that matches a GET http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mGET, pattern, handlerFn)
+}
+
+// Head adds the route `pattern` that matches a HEAD http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mHEAD, pattern, handlerFn)
+}
+
+// Options adds the route `pattern` that matches a OPTIONS http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mOPTIONS, pattern, handlerFn)
+}
+
+// Patch adds the route `pattern` that matches a PATCH http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPATCH, pattern, handlerFn)
+}
+
+// Post adds the route `pattern` that matches a POST http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPOST, pattern, handlerFn)
+}
+
+// Put adds the route `pattern` that matches a PUT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPUT, pattern, handlerFn)
+}
+
+// Trace adds the route `pattern` that matches a TRACE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mTRACE, pattern, handlerFn)
+}
+
+// NotFound sets a custom http.HandlerFunc for routing paths that could
+// not be found. The default 404 handler is `http.NotFound`.
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
+ // Build NotFound handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the notFoundHandler from this point forward
+ m.notFoundHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.notFoundHandler == nil {
+ subMux.NotFound(hFn)
+ }
+ })
+}
+
+// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
+// method is unresolved. The default handler returns a 405 with an empty body.
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
+ // Build MethodNotAllowed handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the methodNotAllowedHandler from this point forward
+ m.methodNotAllowedHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.methodNotAllowedHandler == nil {
+ subMux.MethodNotAllowed(hFn)
+ }
+ })
+}
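
A short sketch of overriding the default 404 and 405 responders; the helper name and import path are assumptions.

package example // hypothetical package name for this sketch

import (
	"net/http"

	"github.com/go-chi/chi" // assumed import path
)

// registerErrorHandlers is a hypothetical helper that installs custom
// NotFound and MethodNotAllowed responders on a router.
func registerErrorHandlers(r chi.Router) {
	r.NotFound(func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "route does not exist", http.StatusNotFound)
	})
	r.MethodNotAllowed(func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "method is not allowed for this route", http.StatusMethodNotAllowed)
	})
}
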
+
+// With adds inline middlewares for an endpoint handler.
+func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
+ // As in handle(), the mux handler must be built once no further
+ // middleware registration is allowed for this stack, which is the case now.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Copy middlewares from parent inline muxes
+ var mws Middlewares
+ if mx.inline {
+ mws = make(Middlewares, len(mx.middlewares))
+ copy(mws, mx.middlewares)
+ }
+ mws = append(mws, middlewares...)
+
+ im := &Mux{pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws}
+
+ return im
+}
+
+// Group creates a new inline-Mux with a fresh middleware stack. It's useful
+// for a group of handlers along the same routing path that use an additional
+// set of middlewares. See _examples/.
+func (mx *Mux) Group(fn func(r Router)) Router {
+ im := mx.With().(*Mux)
+ if fn != nil {
+ fn(im)
+ }
+ return im
+}
+
+// Route creates a new Mux with a fresh middleware stack and mounts it
+// along the `pattern` as a subrouter. Effectively, this is a short-hand
+// call to Mount. See _examples/.
+func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
+ subRouter := NewRouter()
+ if fn != nil {
+ fn(subRouter)
+ }
+ mx.Mount(pattern, subRouter)
+ return subRouter
+}
+
+// Mount attaches another http.Handler or chi Router as a subrouter along a routing
+// path. It's very useful for splitting up a large API into many independent routers and
+// composing them into a single service using Mount. See _examples/.
+//
+// Note that Mount() simply sets a wildcard along the `pattern` that will continue
+// routing at the `handler`, which in most cases is another chi.Router. As a result,
+// if you define two Mount() routes on the exact same pattern the mount will panic.
+func (mx *Mux) Mount(pattern string, handler http.Handler) {
+ // Provide runtime safety for ensuring a pattern isn't mounted on an existing
+ // routing pattern.
+ if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
+ panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
+ }
+
+ // Assign sub-Router's with the parent not found & method not allowed handler if not specified.
+ subr, ok := handler.(*Mux)
+ if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
+ subr.NotFound(mx.notFoundHandler)
+ }
+ if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
+ subr.MethodNotAllowed(mx.methodNotAllowedHandler)
+ }
+
+ // Wrap the sub-router in a handlerFunc to scope the request path for routing.
+ mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rctx := RouteContext(r.Context())
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ handler.ServeHTTP(w, r)
+ })
+
+ if pattern == "" || pattern[len(pattern)-1] != '/' {
+ mx.handle(mALL|mSTUB, pattern, mountHandler)
+ mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
+ pattern += "/"
+ }
+
+ method := mALL
+ subroutes, _ := handler.(Routes)
+ if subroutes != nil {
+ method |= mSTUB
+ }
+ n := mx.handle(method, pattern+"*", mountHandler)
+
+ if subroutes != nil {
+ n.subroutes = subroutes
+ }
+}
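
A sketch of composing a service from sub-routers with Mount (Route would be the equivalent shorthand); the sub-router name and import path are assumptions.

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi" // assumed import path
)

// usersRouter is a hypothetical sub-router handling everything below /users.
func usersRouter() chi.Router {
	r := chi.NewRouter()
	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("all users"))
	})
	r.Get("/{id}", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("a single user"))
	})
	return r
}

func main() {
	r := chi.NewRouter()
	r.Mount("/users", usersRouter())
	log.Fatal(http.ListenAndServe(":3000", r))
}
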
+
+// Routes returns a slice of routing information from the tree,
+// useful for traversing available routes of a router.
+func (mx *Mux) Routes() []Route {
+ return mx.tree.routes()
+}
+
+// Middlewares returns a slice of middleware handler functions.
+func (mx *Mux) Middlewares() Middlewares {
+ return mx.middlewares
+}
+
+// Match searches the routing tree for a handler that matches the method/path.
+// It's similar to routing a http request, but without executing the handler
+// thereafter.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or use a fresh one from NewRouteContext().
+func (mx *Mux) Match(rctx *Context, method, path string) bool {
+ m, ok := methodMap[method]
+ if !ok {
+ return false
+ }
+
+ node, _, h := mx.tree.FindRoute(rctx, m, path)
+
+ if node != nil && node.subroutes != nil {
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ return node.subroutes.Match(rctx, method, rctx.RoutePath)
+ }
+
+ return h != nil
+}
+
+// NotFoundHandler returns the default Mux 404 responder whenever a route
+// cannot be found.
+func (mx *Mux) NotFoundHandler() http.HandlerFunc {
+ if mx.notFoundHandler != nil {
+ return mx.notFoundHandler
+ }
+ return http.NotFound
+}
+
+// MethodNotAllowedHandler returns the default Mux 405 responder whenever
+// a method cannot be resolved for a route.
+func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
+ if mx.methodNotAllowedHandler != nil {
+ return mx.methodNotAllowedHandler
+ }
+ return methodNotAllowedHandler
+}
+
+// buildRouteHandler builds the single mux handler that is a chain of the middleware
+// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
+// point, no other middlewares can be registered on this Mux's stack. But you can still
+// compose additional middlewares via Group() or by using a chained middleware handler.
+func (mx *Mux) buildRouteHandler() {
+ mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
+}
+
+// handle registers a http.Handler in the routing tree for a particular http method
+// and routing pattern.
+func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
+ if len(pattern) == 0 || pattern[0] != '/' {
+ panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
+ }
+
+ // Build the final routing handler for this Mux.
+ if !mx.inline && mx.handler == nil {
+ mx.buildRouteHandler()
+ }
+
+ // Build endpoint handler with inline middlewares for the route
+ var h http.Handler
+ if mx.inline {
+ mx.handler = http.HandlerFunc(mx.routeHTTP)
+ h = Chain(mx.middlewares...).Handler(handler)
+ } else {
+ h = handler
+ }
+
+ // Add the endpoint to the tree and return the node
+ return mx.tree.InsertRoute(method, pattern, h)
+}
+
+// routeHTTP routes a http.Request through the Mux routing tree to serve
+// the matching handler for a particular http method.
+func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Grab the route context object
+ rctx := r.Context().Value(RouteCtxKey).(*Context)
+
+ // The request routing path
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Check if method is supported by chi
+ if rctx.RouteMethod == "" {
+ rctx.RouteMethod = r.Method
+ }
+ method, ok := methodMap[rctx.RouteMethod]
+ if !ok {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Find the route
+ if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
+ h.ServeHTTP(w, r)
+ return
+ }
+ if rctx.methodNotAllowed {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ } else {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ }
+}
+
+func (mx *Mux) nextRoutePath(rctx *Context) string {
+ routePath := "/"
+ nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
+ if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
+ routePath += rctx.routeParams.Values[nx]
+ }
+ return routePath
+}
+
+// Recursively update data on child routers.
+func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
+ for _, r := range mx.tree.routes() {
+ subMux, ok := r.SubRoutes.(*Mux)
+ if !ok {
+ continue
+ }
+ fn(subMux)
+ }
+}
+
+// methodNotAllowedHandler is a helper function to respond with a 405,
+// method not allowed.
+func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(405)
+ w.Write(nil)
+}
diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go
new file mode 100644
index 000000000..4dce0a3d8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/tree.go
@@ -0,0 +1,847 @@
+package chi
+
+// The radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
+
+import (
+ "fmt"
+ "math"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type methodTyp int
+
+const (
+ mSTUB methodTyp = 1 << iota
+ mCONNECT
+ mDELETE
+ mGET
+ mHEAD
+ mOPTIONS
+ mPATCH
+ mPOST
+ mPUT
+ mTRACE
+)
+
+var mALL = mCONNECT | mDELETE | mGET | mHEAD |
+ mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
+
+var methodMap = map[string]methodTyp{
+ http.MethodConnect: mCONNECT,
+ http.MethodDelete: mDELETE,
+ http.MethodGet: mGET,
+ http.MethodHead: mHEAD,
+ http.MethodOptions: mOPTIONS,
+ http.MethodPatch: mPATCH,
+ http.MethodPost: mPOST,
+ http.MethodPut: mPUT,
+ http.MethodTrace: mTRACE,
+}
+
+// RegisterMethod adds support for custom HTTP method handlers, available
+// via Router#Method and Router#MethodFunc
+func RegisterMethod(method string) {
+ if method == "" {
+ return
+ }
+ method = strings.ToUpper(method)
+ if _, ok := methodMap[method]; ok {
+ return
+ }
+ n := len(methodMap)
+ if n > strconv.IntSize {
+ panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
+ }
+ mt := methodTyp(math.Exp2(float64(n)))
+ methodMap[method] = mt
+ mALL |= mt
+}
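
A sketch of registering a custom verb and routing on it; the PURGE method and the import path are illustrative assumptions.

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi" // assumed import path
)

func main() {
	// The custom verb must be registered before routes that use it,
	// otherwise Method/MethodFunc panic on the unknown method.
	chi.RegisterMethod("PURGE")

	r := chi.NewRouter()
	r.MethodFunc("PURGE", "/cache/{key}", func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})

	log.Fatal(http.ListenAndServe(":3000", r))
}
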
+
+type nodeTyp uint8
+
+const (
+ ntStatic nodeTyp = iota // /home
+ ntRegexp // /{id:[0-9]+}
+ ntParam // /{user}
+ ntCatchAll // /api/v1/*
+)
+
+type node struct {
+ // node type: static, regexp, param, catchAll
+ typ nodeTyp
+
+ // first byte of the prefix
+ label byte
+
+ // first byte of the child prefix
+ tail byte
+
+ // prefix is the common prefix we ignore
+ prefix string
+
+ // regexp matcher for regexp nodes
+ rex *regexp.Regexp
+
+ // HTTP handler endpoints on the leaf node
+ endpoints endpoints
+
+ // subroutes on the leaf node
+ subroutes Routes
+
+ // child nodes should be stored in-order for iteration,
+ // in groups of the node type.
+ children [ntCatchAll + 1]nodes
+}
+
+// endpoints is a mapping of http method constants to handlers
+// for a given route.
+type endpoints map[methodTyp]*endpoint
+
+type endpoint struct {
+ // endpoint handler
+ handler http.Handler
+
+ // pattern is the routing pattern for handler nodes
+ pattern string
+
+ // parameter keys recorded on handler nodes
+ paramKeys []string
+}
+
+func (s endpoints) Value(method methodTyp) *endpoint {
+ mh, ok := s[method]
+ if !ok {
+ mh = &endpoint{}
+ s[method] = mh
+ }
+ return mh
+}
+
+func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
+ var parent *node
+ search := pattern
+
+ for {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ // Insert or update the node's leaf handler
+ n.setEndpoint(method, handler, pattern)
+ return n
+ }
+
+ // We're going to be searching for a wild node next;
+ // in that case, we need to get the tail.
+ var label = search[0]
+ var segTail byte
+ var segEndIdx int
+ var segTyp nodeTyp
+ var segRexpat string
+ if label == '{' || label == '*' {
+ segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
+ }
+
+ var prefix string
+ if segTyp == ntRegexp {
+ prefix = segRexpat
+ }
+
+ // Look for the edge to attach to
+ parent = n
+ n = n.getEdge(segTyp, label, segTail, prefix)
+
+ // No edge, create one
+ if n == nil {
+ child := &node{label: label, tail: segTail, prefix: search}
+ hn := parent.addChild(child, search)
+ hn.setEndpoint(method, handler, pattern)
+
+ return hn
+ }
+
+ // Found an edge to match the pattern
+
+ if n.typ > ntStatic {
+ // We found a param node, trim the param from the search path and continue.
+ // This param/wild pattern segment would already be on the tree from a previous
+ // call to addChild when creating a new node.
+ search = search[segEndIdx:]
+ continue
+ }
+
+ // Static nodes fall below here.
+ // Determine longest prefix of the search key on match.
+ commonPrefix := longestPrefix(search, n.prefix)
+ if commonPrefix == len(n.prefix) {
+ // the common prefix is as long as the current node's prefix we're attempting to insert.
+ // keep the search going.
+ search = search[commonPrefix:]
+ continue
+ }
+
+ // Split the node
+ child := &node{
+ typ: ntStatic,
+ prefix: search[:commonPrefix],
+ }
+ parent.replaceChild(search[0], segTail, child)
+
+ // Restore the existing node
+ n.label = n.prefix[commonPrefix]
+ n.prefix = n.prefix[commonPrefix:]
+ child.addChild(n, n.prefix)
+
+ // If the new key is a subset, set the method/handler on this node and finish.
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ child.setEndpoint(method, handler, pattern)
+ return child
+ }
+
+ // Create a new edge for the node
+ subchild := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn := child.addChild(subchild, search)
+ hn.setEndpoint(method, handler, pattern)
+ return hn
+ }
+}
+
+// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
+// For a URL router like chi's, we split the static, param, regexp and wildcard segments
+// into different nodes. In addition, addChild will recursively call itself until every
+// pattern segment is added to the url pattern tree as individual nodes, depending on type.
+func (n *node) addChild(child *node, prefix string) *node {
+ search := prefix
+
+ // handler leaf node added to the tree is the child.
+ // this may be overridden later down the flow
+ hn := child
+
+ // Parse next segment
+ segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
+
+ // Add child depending on next up segment
+ switch segTyp {
+
+ case ntStatic:
+ // Search prefix is all static (that is, has no params in path)
+ // noop
+
+ default:
+ // Search prefix contains a param, regexp or wildcard
+
+ if segTyp == ntRegexp {
+ rex, err := regexp.Compile(segRexpat)
+ if err != nil {
+ panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
+ }
+ child.prefix = segRexpat
+ child.rex = rex
+ }
+
+ if segStartIdx == 0 {
+ // Route starts with a param
+ child.typ = segTyp
+
+ if segTyp == ntCatchAll {
+ segStartIdx = -1
+ } else {
+ segStartIdx = segEndIdx
+ }
+ if segStartIdx < 0 {
+ segStartIdx = len(search)
+ }
+ child.tail = segTail // for params, we set the tail
+
+ if segStartIdx != len(search) {
+ // add a static edge for the remaining part, splitting the end.
+ // It's not possible to have adjacent param nodes, so the next node
+ // is certainly going to be static.
+
+ search = search[segStartIdx:] // advance search position
+
+ nn := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn = child.addChild(nn, search)
+ }
+
+ } else if segStartIdx > 0 {
+ // Route has some param
+
+ // starts with a static segment
+ child.typ = ntStatic
+ child.prefix = search[:segStartIdx]
+ child.rex = nil
+
+ // add the param edge node
+ search = search[segStartIdx:]
+
+ nn := &node{
+ typ: segTyp,
+ label: search[0],
+ tail: segTail,
+ }
+ hn = child.addChild(nn, search)
+
+ }
+ }
+
+ n.children[child.typ] = append(n.children[child.typ], child)
+ n.children[child.typ].Sort()
+ return hn
+}
+
+func (n *node) replaceChild(label, tail byte, child *node) {
+ for i := 0; i < len(n.children[child.typ]); i++ {
+ if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
+ n.children[child.typ][i] = child
+ n.children[child.typ][i].label = label
+ n.children[child.typ][i].tail = tail
+ return
+ }
+ }
+ panic("chi: replacing missing child")
+}
+
+func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
+ nds := n.children[ntyp]
+ for i := 0; i < len(nds); i++ {
+ if nds[i].label == label && nds[i].tail == tail {
+ if ntyp == ntRegexp && nds[i].prefix != prefix {
+ continue
+ }
+ return nds[i]
+ }
+ }
+ return nil
+}
+
+func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
+ // Set the handler for the method type on the node
+ if n.endpoints == nil {
+ n.endpoints = make(endpoints, 0)
+ }
+
+ paramKeys := patParamKeys(pattern)
+
+ if method&mSTUB == mSTUB {
+ n.endpoints.Value(mSTUB).handler = handler
+ }
+ if method&mALL == mALL {
+ h := n.endpoints.Value(mALL)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ for _, m := range methodMap {
+ h := n.endpoints.Value(m)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+ } else {
+ h := n.endpoints.Value(method)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+}
+
+func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
+ // Reset the context routing pattern and params
+ rctx.routePattern = ""
+ rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
+ rctx.routeParams.Values = rctx.routeParams.Values[:0]
+
+ // Find the routing handlers for the path
+ rn := n.findRoute(rctx, method, path)
+ if rn == nil {
+ return nil, nil, nil
+ }
+
+ // Record the routing params in the request lifecycle
+ rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
+ rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
+
+ // Record the routing pattern in the request lifecycle
+ if rn.endpoints[method].pattern != "" {
+ rctx.routePattern = rn.endpoints[method].pattern
+ rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
+ }
+
+ return rn, rn.endpoints, rn.endpoints[method].handler
+}
+
+// Recursive edge traversal by checking all nodeTyp groups along the way.
+// It's like searching through a multi-dimensional radix trie.
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
+ nn := n
+ search := path
+
+ for t, nds := range nn.children {
+ ntyp := nodeTyp(t)
+ if len(nds) == 0 {
+ continue
+ }
+
+ var xn *node
+ xsearch := search
+
+ var label byte
+ if search != "" {
+ label = search[0]
+ }
+
+ switch ntyp {
+ case ntStatic:
+ xn = nds.findEdge(label)
+ if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
+ continue
+ }
+ xsearch = xsearch[len(xn.prefix):]
+
+ case ntParam, ntRegexp:
+ // short-circuit and return no matching route for empty param values
+ if xsearch == "" {
+ continue
+ }
+
+ // serially loop through each node grouped by the tail delimiter
+ for idx := 0; idx < len(nds); idx++ {
+ xn = nds[idx]
+
+ // label for param nodes is the delimiter byte
+ p := strings.IndexByte(xsearch, xn.tail)
+
+ if p < 0 {
+ if xn.tail == '/' {
+ p = len(xsearch)
+ } else {
+ continue
+ }
+ }
+
+ if ntyp == ntRegexp && xn.rex != nil {
+ if xn.rex.Match([]byte(xsearch[:p])) == false {
+ continue
+ }
+ } else if strings.IndexByte(xsearch[:p], '/') != -1 {
+ // avoid a match across path segments
+ continue
+ }
+
+ rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
+ xsearch = xsearch[p:]
+ break
+ }
+
+ default:
+ // catch-all nodes
+ rctx.routeParams.Values = append(rctx.routeParams.Values, search)
+ xn = nds[0]
+ xsearch = ""
+ }
+
+ if xn == nil {
+ continue
+ }
+
+ // did we find it yet?
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h, _ := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node..
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // Did not find final handler, let's remove the param here if it was set
+ if xn.typ > ntStatic {
+ if len(rctx.routeParams.Values) > 0 {
+ rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
+ nds := n.children[ntyp]
+ num := len(nds)
+ idx := 0
+
+ switch ntyp {
+ case ntStatic, ntParam, ntRegexp:
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > nds[idx].label {
+ i = idx + 1
+ } else if label < nds[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if nds[idx].label != label {
+ return nil
+ }
+ return nds[idx]
+
+ default: // catch all
+ return nds[idx]
+ }
+}
+
+func (n *node) isEmpty() bool {
+ for _, nds := range n.children {
+ if len(nds) > 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (n *node) isLeaf() bool {
+ return n.endpoints != nil
+}
+
+func (n *node) findPattern(pattern string) bool {
+ nn := n
+ for _, nds := range nn.children {
+ if len(nds) == 0 {
+ continue
+ }
+
+ n = nn.findEdge(nds[0].typ, pattern[0])
+ if n == nil {
+ continue
+ }
+
+ var idx int
+ var xpattern string
+
+ switch n.typ {
+ case ntStatic:
+ idx = longestPrefix(pattern, n.prefix)
+ if idx < len(n.prefix) {
+ continue
+ }
+
+ case ntParam, ntRegexp:
+ idx = strings.IndexByte(pattern, '}') + 1
+
+ case ntCatchAll:
+ idx = longestPrefix(pattern, "*")
+
+ default:
+ panic("chi: unknown node type")
+ }
+
+ xpattern = pattern[idx:]
+ if len(xpattern) == 0 {
+ return true
+ }
+
+ return n.findPattern(xpattern)
+ }
+ return false
+}
+
+func (n *node) routes() []Route {
+ rts := []Route{}
+
+ n.walk(func(eps endpoints, subroutes Routes) bool {
+ if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
+ return false
+ }
+
+ // Group methodHandlers by unique patterns
+ pats := make(map[string]endpoints, 0)
+
+ for mt, h := range eps {
+ if h.pattern == "" {
+ continue
+ }
+ p, ok := pats[h.pattern]
+ if !ok {
+ p = endpoints{}
+ pats[h.pattern] = p
+ }
+ p[mt] = h
+ }
+
+ for p, mh := range pats {
+ hs := make(map[string]http.Handler, 0)
+ if mh[mALL] != nil && mh[mALL].handler != nil {
+ hs["*"] = mh[mALL].handler
+ }
+
+ for mt, h := range mh {
+ if h.handler == nil {
+ continue
+ }
+ m := methodTypString(mt)
+ if m == "" {
+ continue
+ }
+ hs[m] = h.handler
+ }
+
+ rt := Route{p, hs, subroutes}
+ rts = append(rts, rt)
+ }
+
+ return false
+ })
+
+ return rts
+}
+
+func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
+ // Visit the leaf values if any
+ if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, ns := range n.children {
+ for _, cn := range ns {
+ if cn.walk(fn) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// patNextSegment returns the next segment details from a pattern:
+// node type, param key, regexp string, param tail byte, param starting index, param ending index
+func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
+ ps := strings.Index(pattern, "{")
+ ws := strings.Index(pattern, "*")
+
+ if ps < 0 && ws < 0 {
+ return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
+ }
+
+ // Sanity check
+ if ws >= 0 && ws != len(pattern)-1 {
+ panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
+ }
+ if ps >= 0 && ws >= 0 && ws < ps {
+ panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
+ }
+
+ var tail byte = '/' // Default endpoint tail to / byte
+
+ if ps >= 0 {
+ // Param/Regexp pattern is next
+ nt := ntParam
+
+ // Read to the closing }, taking nested opens and closes into account via the curly count (cc)
+ cc := 0
+ pe := ps
+ for i, c := range pattern[ps:] {
+ if c == '{' {
+ cc++
+ } else if c == '}' {
+ cc--
+ if cc == 0 {
+ pe = ps + i
+ break
+ }
+ }
+ }
+ if pe == ps {
+ panic("chi: route param closing delimiter '}' is missing")
+ }
+
+ key := pattern[ps+1 : pe]
+ pe++ // set end to next position
+
+ if pe < len(pattern) {
+ tail = pattern[pe]
+ }
+
+ var rexpat string
+ if idx := strings.Index(key, ":"); idx >= 0 {
+ nt = ntRegexp
+ rexpat = key[idx+1:]
+ key = key[:idx]
+ }
+
+ if len(rexpat) > 0 {
+ if rexpat[0] != '^' {
+ rexpat = "^" + rexpat
+ }
+ if rexpat[len(rexpat)-1] != '$' {
+ rexpat = rexpat + "$"
+ }
+ }
+
+ return nt, key, rexpat, tail, ps, pe
+ }
+
+ // Wildcard pattern as finale
+ // TODO: should we panic if there is stuff after the * ???
+ return ntCatchAll, "*", "", 0, ws, len(pattern)
+}
+
+func patParamKeys(pattern string) []string {
+ pat := pattern
+ paramKeys := []string{}
+ for {
+ ptyp, paramKey, _, _, _, e := patNextSegment(pat)
+ if ptyp == ntStatic {
+ return paramKeys
+ }
+ for i := 0; i < len(paramKeys); i++ {
+ if paramKeys[i] == paramKey {
+ panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
+ }
+ }
+ paramKeys = append(paramKeys, paramKey)
+ pat = pat[e:]
+ }
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+func methodTypString(method methodTyp) string {
+ for s, t := range methodMap {
+ if method == t {
+ return s
+ }
+ }
+ return ""
+}
+
+type nodes []*node
+
+// Sort the list of nodes by label
+func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
+func (ns nodes) Len() int { return len(ns) }
+func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
+
+// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
+// The list order determines the traversal order.
+func (ns nodes) tailSort() {
+ for i := len(ns) - 1; i >= 0; i-- {
+ if ns[i].typ > ntStatic && ns[i].tail == '/' {
+ ns.Swap(i, len(ns)-1)
+ return
+ }
+ }
+}
+
+func (ns nodes) findEdge(label byte) *node {
+ num := len(ns)
+ idx := 0
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > ns[idx].label {
+ i = idx + 1
+ } else if label < ns[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if ns[idx].label != label {
+ return nil
+ }
+ return ns[idx]
+}
+
+// Route describes the details of a routing handler.
+type Route struct {
+ Pattern string
+ Handlers map[string]http.Handler
+ SubRoutes Routes
+}
+
+// WalkFunc is the type of the function called for each method and route visited by Walk.
+type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
+
+// Walk walks any router tree that implements Routes interface.
+func Walk(r Routes, walkFn WalkFunc) error {
+ return walk(r, walkFn, "")
+}
+
+func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
+ for _, route := range r.Routes() {
+ mws := make([]func(http.Handler) http.Handler, len(parentMw))
+ copy(mws, parentMw)
+ mws = append(mws, r.Middlewares()...)
+
+ if route.SubRoutes != nil {
+ if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
+ return err
+ }
+ continue
+ }
+
+ for method, handler := range route.Handlers {
+ if method == "*" {
+ // Ignore a "catchAll" method, since we pass down all the specific methods for each route.
+ continue
+ }
+
+ fullRoute := parentRoute + route.Pattern
+
+ if chain, ok := handler.(*ChainHandler); ok {
+ if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
+ return err
+ }
+ } else {
+ if err := walkFn(method, fullRoute, handler, mws...); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
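
A sketch of using Walk to enumerate a router's registered method/route pairs; the import path is assumed.

package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi" // assumed import path
)

func main() {
	r := chi.NewRouter()
	r.Get("/", func(w http.ResponseWriter, req *http.Request) {})
	r.Post("/items", func(w http.ResponseWriter, req *http.Request) {})

	// Print every method/route pair known to the router.
	walkFn := func(method, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {
		fmt.Printf("%-6s %s\n", method, route)
		return nil
	}
	if err := chi.Walk(r, walkFn); err != nil {
		fmt.Println("walk error:", err)
	}
}
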
diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore
new file mode 100644
index 000000000..ac710204f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml
new file mode 100644
index 000000000..3d8d29cf3
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/.travis.yml
@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+ include:
+ - go: 1.4
+ - go: 1.5
+ - go: 1.6
+ - go: 1.7
+ - go: 1.8
+ - go: tip
+ allow_failures:
+ - go: tip
+
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d .)
+ - go vet $(go list ./... | grep -v /vendor/)
+ - go test -v -race ./...
diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS
new file mode 100644
index 000000000..b003eca0c
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/AUTHORS
@@ -0,0 +1,8 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd
+Joachim Bauch
+
diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE
new file mode 100644
index 000000000..9171c9722
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md
new file mode 100644
index 000000000..33c3d2be3
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/README.md
@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+[Build Status](https://travis-ci.org/gorilla/websocket)
+[GoDoc](https://godoc.org/github.com/gorilla/websocket)
+
+### Documentation
+
+* [API Reference](http://godoc.org/github.com/gorilla/websocket)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+ go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+
+
+Notes:
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+ a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+ function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+ Read returns when the input buffer is full or a frame boundary is
+ encountered. Each call to Write sends a single frame message. The Gorilla
+ io.Reader and io.WriteCloser operate on a single WebSocket message.
+
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go
new file mode 100644
index 000000000..43a87c753
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client.go
@@ -0,0 +1,392 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "encoding/base64"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+ d := Dialer{
+ ReadBufferSize: readBufSize,
+ WriteBufferSize: writeBufSize,
+ NetDial: func(net, addr string) (net.Conn, error) {
+ return netConn, nil
+ },
+ }
+ return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+type Dialer struct {
+ // NetDial specifies the dial function for creating TCP connections. If
+ // NetDial is nil, net.Dial is used.
+ NetDial func(network, addr string) (net.Conn, error)
+
+ // Proxy specifies a function to return a proxy for a given
+ // Request. If the function returns a non-nil error, the
+ // request is aborted with the provided error.
+ // If Proxy is nil or returns a nil *URL, no proxy is used.
+ Proxy func(*http.Request) (*url.URL, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with tls.Client.
+ // If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+ // size is zero, then a useful default size is used. The I/O buffer sizes
+ // do not limit the size of the messages that can be sent or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // Subprotocols specifies the client's requested subprotocols.
+ Subprotocols []string
+
+ // EnableCompression specifies if the client should attempt to negotiate
+ // per message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+
+ // Jar specifies the cookie jar.
+ // If Jar is nil, cookies are not sent in requests and ignored
+ // in responses.
+ Jar http.CookieJar
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+// parseURL parses the URL.
+//
+// This function is a replacement for the standard library url.Parse function.
+// In Go 1.4 and earlier, url.Parse loses information from the path.
+func parseURL(s string) (*url.URL, error) {
+ // From the RFC:
+ //
+ // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
+ // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
+ var u url.URL
+ switch {
+ case strings.HasPrefix(s, "ws://"):
+ u.Scheme = "ws"
+ s = s[len("ws://"):]
+ case strings.HasPrefix(s, "wss://"):
+ u.Scheme = "wss"
+ s = s[len("wss://"):]
+ default:
+ return nil, errMalformedURL
+ }
+
+ if i := strings.Index(s, "?"); i >= 0 {
+ u.RawQuery = s[i+1:]
+ s = s[:i]
+ }
+
+ if i := strings.Index(s, "/"); i >= 0 {
+ u.Opaque = s[i:]
+ s = s[:i]
+ } else {
+ u.Opaque = "/"
+ }
+
+ u.Host = s
+
+ if strings.Contains(u.Host, "@") {
+ // Don't bother parsing user information because user information is
+ // not allowed in websocket URIs.
+ return nil, errMalformedURL
+ }
+
+ return &u, nil
+}
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+ hostPort = u.Host
+ hostNoPort = u.Host
+ if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+ hostNoPort = hostNoPort[:i]
+ } else {
+ switch u.Scheme {
+ case "wss":
+ hostPort += ":443"
+ case "https":
+ hostPort += ":443"
+ default:
+ hostPort += ":80"
+ }
+ }
+ return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default zero values.
+var DefaultDialer = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+}
+
+// Dial creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+
+ if d == nil {
+ d = &Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ }
+ }
+
+ challengeKey, err := generateChallengeKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ u, err := parseURL(urlStr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ default:
+ return nil, nil, errMalformedURL
+ }
+
+ if u.User != nil {
+ // User name and password are not allowed in websocket URIs.
+ return nil, nil, errMalformedURL
+ }
+
+ req := &http.Request{
+ Method: "GET",
+ URL: u,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Host: u.Host,
+ }
+
+ // Set the cookies present in the cookie jar of the dialer
+ if d.Jar != nil {
+ for _, cookie := range d.Jar.Cookies(u) {
+ req.AddCookie(cookie)
+ }
+ }
+
+ // Set the request headers using the capitalization for names and values in
+ // RFC examples. Although the capitalization shouldn't matter, there are
+ // servers that depend on it. The Header.Set method is not used because the
+ // method canonicalizes the header names.
+ req.Header["Upgrade"] = []string{"websocket"}
+ req.Header["Connection"] = []string{"Upgrade"}
+ req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+ req.Header["Sec-WebSocket-Version"] = []string{"13"}
+ if len(d.Subprotocols) > 0 {
+ req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+ }
+ for k, vs := range requestHeader {
+ switch {
+ case k == "Host":
+ if len(vs) > 0 {
+ req.Host = vs[0]
+ }
+ case k == "Upgrade" ||
+ k == "Connection" ||
+ k == "Sec-Websocket-Key" ||
+ k == "Sec-Websocket-Version" ||
+ k == "Sec-Websocket-Extensions" ||
+ (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+ return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+ default:
+ req.Header[k] = vs
+ }
+ }
+
+ if d.EnableCompression {
+ req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
+ }
+
+ hostPort, hostNoPort := hostPortNoPort(u)
+
+ var proxyURL *url.URL
+ // Check whether the proxy method has been configured
+ if d.Proxy != nil {
+ proxyURL, err = d.Proxy(req)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var targetHostPort string
+ if proxyURL != nil {
+ targetHostPort, _ = hostPortNoPort(proxyURL)
+ } else {
+ targetHostPort = hostPort
+ }
+
+ var deadline time.Time
+ if d.HandshakeTimeout != 0 {
+ deadline = time.Now().Add(d.HandshakeTimeout)
+ }
+
+ netDial := d.NetDial
+ if netDial == nil {
+ netDialer := &net.Dialer{Deadline: deadline}
+ netDial = netDialer.Dial
+ }
+
+ netConn, err := netDial("tcp", targetHostPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer func() {
+ if netConn != nil {
+ netConn.Close()
+ }
+ }()
+
+ if err := netConn.SetDeadline(deadline); err != nil {
+ return nil, nil, err
+ }
+
+ if proxyURL != nil {
+ connectHeader := make(http.Header)
+ if user := proxyURL.User; user != nil {
+ proxyUser := user.Username()
+ if proxyPassword, passwordSet := user.Password(); passwordSet {
+ credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+ connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+ }
+ }
+ connectReq := &http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{Opaque: hostPort},
+ Host: hostPort,
+ Header: connectHeader,
+ }
+
+ connectReq.Write(netConn)
+
+ // Read response.
+ // Okay to use and discard buffered reader here, because
+ // TLS server will not speak until spoken to.
+ br := bufio.NewReader(netConn)
+ resp, err := http.ReadResponse(br, connectReq)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode != 200 {
+ f := strings.SplitN(resp.Status, " ", 2)
+ return nil, nil, errors.New(f[1])
+ }
+ }
+
+ if u.Scheme == "https" {
+ cfg := cloneTLSConfig(d.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = hostNoPort
+ }
+ tlsConn := tls.Client(netConn, cfg)
+ netConn = tlsConn
+ if err := tlsConn.Handshake(); err != nil {
+ return nil, nil, err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
+
+ if err := req.Write(netConn); err != nil {
+ return nil, nil, err
+ }
+
+ resp, err := http.ReadResponse(conn.br, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if d.Jar != nil {
+ if rc := resp.Cookies(); len(rc) > 0 {
+ d.Jar.SetCookies(u, rc)
+ }
+ }
+
+ if resp.StatusCode != 101 ||
+ !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+ !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+ resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+ // Before closing the network connection on return from this
+ // function, slurp up some of the response to aid application
+ // debugging.
+ buf := make([]byte, 1024)
+ n, _ := io.ReadFull(resp.Body, buf)
+ resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+ return nil, resp, ErrBadHandshake
+ }
+
+ for _, ext := range parseExtensions(resp.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ _, snct := ext["server_no_context_takeover"]
+ _, cnct := ext["client_no_context_takeover"]
+ if !snct || !cnct {
+ return nil, resp, errInvalidCompression
+ }
+ conn.newCompressionWriter = compressNoContextTakeover
+ conn.newDecompressionReader = decompressNoContextTakeover
+ break
+ }
+
+ resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+ conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+ netConn.SetDeadline(time.Time{})
+ netConn = nil // to avoid close in defer.
+ return conn, resp, nil
+}
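
A minimal client sketch against a placeholder endpoint. WriteMessage and ReadMessage come from the package's Conn API in conn.go; the URL and timeout are assumptions.

package main

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{HandshakeTimeout: 10 * time.Second}

	// The endpoint is a placeholder; any RFC 6455 server will do.
	conn, _, err := d.Dial("wss://example.com/ws", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("received: %s", msg)
}
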
diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go
new file mode 100644
index 000000000..4f0d94372
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
new file mode 100644
index 000000000..babb007fb
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go
@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go
new file mode 100644
index 000000000..813ffb1e8
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/compression.go
@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "compress/flate"
+ "errors"
+ "io"
+ "strings"
+ "sync"
+)
+
+const (
+ minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
+ maxCompressionLevel = flate.BestCompression
+ defaultCompressionLevel = 1
+)
+
+var (
+ flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+ flateReaderPool = sync.Pool{New: func() interface{} {
+ return flate.NewReader(nil)
+ }}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+ const tail =
+ // Add four bytes as specified in RFC
+ "\x00\x00\xff\xff" +
+ // Add final block to squelch unexpected EOF error from flate reader.
+ "\x01\x00\x00\xff\xff"
+
+ fr, _ := flateReaderPool.Get().(io.ReadCloser)
+ fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+ return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+ return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+ p := &flateWriterPools[level-minCompressionLevel]
+ tw := &truncWriter{w: w}
+ fw, _ := p.Get().(*flate.Writer)
+ if fw == nil {
+ fw, _ = flate.NewWriter(tw, level)
+ } else {
+ fw.Reset(tw)
+ }
+ return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+ w io.WriteCloser
+ n int
+ p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+ n := 0
+
+ // fill buffer first for simplicity.
+ if w.n < len(w.p) {
+ n = copy(w.p[w.n:], p)
+ p = p[n:]
+ w.n += n
+ if len(p) == 0 {
+ return n, nil
+ }
+ }
+
+ m := len(p)
+ if m > len(w.p) {
+ m = len(w.p)
+ }
+
+ if nn, err := w.w.Write(w.p[:m]); err != nil {
+ return n + nn, err
+ }
+
+ copy(w.p[:], w.p[m:])
+ copy(w.p[len(w.p)-m:], p[len(p)-m:])
+ nn, err := w.w.Write(p[:len(p)-m])
+ return n + nn, err
+}
+
+type flateWriteWrapper struct {
+ fw *flate.Writer
+ tw *truncWriter
+ p *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+ if w.fw == nil {
+ return 0, errWriteClosed
+ }
+ return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+ if w.fw == nil {
+ return errWriteClosed
+ }
+ err1 := w.fw.Flush()
+ w.p.Put(w.fw)
+ w.fw = nil
+ if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+ return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+ }
+ err2 := w.tw.w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+type flateReadWrapper struct {
+ fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+ if r.fr == nil {
+ return 0, io.ErrClosedPipe
+ }
+ n, err := r.fr.Read(p)
+ if err == io.EOF {
+ // Preemptively place the reader back in the pool. This helps with
+ // scenarios where the application does not call NextReader() soon after
+ // this final read.
+ r.Close()
+ }
+ return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+ if r.fr == nil {
+ return io.ErrClosedPipe
+ }
+ err := r.fr.Close()
+ flateReaderPool.Put(r.fr)
+ r.fr = nil
+ return err
+}
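
The tail handling above can be hard to follow from the comments alone. Below is a minimal standalone sketch (illustrative only, not part of this patch; the package and function names are invented) of why appending "\x00\x00\xff\xff" plus a final empty block lets a flate reader terminate cleanly on a payload whose sync-flush tail was stripped, assuming the standard library's compress/flate ends Flush output with those four bytes (the same assumption flateWriteWrapper.Close checks).

// sketch_compression.go — illustrative only.
package sketch

import (
	"bytes"
	"compress/flate"
	"io"
	"io/ioutil"
	"strings"
)

// roundTrip compresses msg, strips the 4-byte sync-flush tail (as a
// permessage-deflate sender without context takeover does), then restores the
// tail and a final empty stored block before inflating, mirroring
// decompressNoContextTakeover above.
func roundTrip(msg []byte) ([]byte, error) {
	var buf bytes.Buffer
	fw, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		return nil, err
	}
	if _, err := fw.Write(msg); err != nil {
		return nil, err
	}
	if err := fw.Flush(); err != nil {
		return nil, err
	}
	payload := buf.Bytes()[:buf.Len()-4] // drop 0x00 0x00 0xff 0xff

	const tail = "\x00\x00\xff\xff" + "\x01\x00\x00\xff\xff"
	fr := flate.NewReader(io.MultiReader(bytes.NewReader(payload), strings.NewReader(tail)))
	defer fr.Close()
	return ioutil.ReadAll(fr)
}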
diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go
new file mode 100644
index 000000000..97e1dbacb
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn.go
@@ -0,0 +1,1149 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+ "unicode/utf8"
+)
+
+const (
+ // Frame header byte 0 bits from Section 5.2 of RFC 6455
+ finalBit = 1 << 7
+ rsv1Bit = 1 << 6
+ rsv2Bit = 1 << 5
+ rsv3Bit = 1 << 4
+
+ // Frame header byte 1 bits from Section 5.2 of RFC 6455
+ maskBit = 1 << 7
+
+ maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask
+ maxControlFramePayloadSize = 125
+
+ writeWait = time.Second
+
+ defaultReadBufferSize = 4096
+ defaultWriteBufferSize = 4096
+
+ continuationFrame = 0
+ noFrame = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+ CloseNormalClosure = 1000
+ CloseGoingAway = 1001
+ CloseProtocolError = 1002
+ CloseUnsupportedData = 1003
+ CloseNoStatusReceived = 1005
+ CloseAbnormalClosure = 1006
+ CloseInvalidFramePayloadData = 1007
+ ClosePolicyViolation = 1008
+ CloseMessageTooBig = 1009
+ CloseMandatoryExtension = 1010
+ CloseInternalServerErr = 1011
+ CloseServiceRestart = 1012
+ CloseTryAgainLater = 1013
+ CloseTLSHandshake = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+ // TextMessage denotes a text data message. The text message payload is
+ // interpreted as UTF-8 encoded text data.
+ TextMessage = 1
+
+ // BinaryMessage denotes a binary data message.
+ BinaryMessage = 2
+
+ // CloseMessage denotes a close control message. The optional message
+ // payload contains a numeric code and text. Use the FormatCloseMessage
+ // function to format a close message payload.
+ CloseMessage = 8
+
+ // PingMessage denotes a ping control message. The optional message payload
+ // is UTF-8 encoded text.
+ PingMessage = 9
+
+ // PongMessage denotes a pong control message. The optional message payload
+ // is UTF-8 encoded text.
+ PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+ msg string
+ temporary bool
+ timeout bool
+}
+
+func (e *netError) Error() string { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool { return e.timeout }
+
+// CloseError represents a close frame.
+type CloseError struct {
+
+ // Code is defined in RFC 6455, section 11.7.
+ Code int
+
+ // Text is the optional text payload.
+ Text string
+}
+
+func (e *CloseError) Error() string {
+ s := []byte("websocket: close ")
+ s = strconv.AppendInt(s, int64(e.Code), 10)
+ switch e.Code {
+ case CloseNormalClosure:
+ s = append(s, " (normal)"...)
+ case CloseGoingAway:
+ s = append(s, " (going away)"...)
+ case CloseProtocolError:
+ s = append(s, " (protocol error)"...)
+ case CloseUnsupportedData:
+ s = append(s, " (unsupported data)"...)
+ case CloseNoStatusReceived:
+ s = append(s, " (no status)"...)
+ case CloseAbnormalClosure:
+ s = append(s, " (abnormal closure)"...)
+ case CloseInvalidFramePayloadData:
+ s = append(s, " (invalid payload data)"...)
+ case ClosePolicyViolation:
+ s = append(s, " (policy violation)"...)
+ case CloseMessageTooBig:
+ s = append(s, " (message too big)"...)
+ case CloseMandatoryExtension:
+ s = append(s, " (mandatory extension missing)"...)
+ case CloseInternalServerErr:
+ s = append(s, " (internal server error)"...)
+ case CloseTLSHandshake:
+ s = append(s, " (TLS handshake error)"...)
+ }
+ if e.Text != "" {
+ s = append(s, ": "...)
+ s = append(s, e.Text...)
+ }
+ return string(s)
+}
+
+// IsCloseError returns a boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range codes {
+ if e.Code == code {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IsUnexpectedCloseError returns a boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+ if e, ok := err.(*CloseError); ok {
+ for _, code := range expectedCodes {
+ if e.Code == code {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+var (
+ errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+ errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+ errBadWriteOpCode = errors.New("websocket: bad write message type")
+ errWriteClosed = errors.New("websocket: write closed")
+ errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+ n := rand.Uint32()
+ return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+ if e, ok := err.(net.Error); ok && e.Temporary() {
+ err = &netError{msg: e.Error(), timeout: e.Timeout()}
+ }
+ return err
+}
+
+func isControl(frameType int) bool {
+ return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+ return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+ // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+ CloseNormalClosure: true,
+ CloseGoingAway: true,
+ CloseProtocolError: true,
+ CloseUnsupportedData: true,
+ CloseNoStatusReceived: false,
+ CloseAbnormalClosure: false,
+ CloseInvalidFramePayloadData: true,
+ ClosePolicyViolation: true,
+ CloseMessageTooBig: true,
+ CloseMandatoryExtension: true,
+ CloseInternalServerErr: true,
+ CloseServiceRestart: true,
+ CloseTryAgainLater: true,
+ CloseTLSHandshake: false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+ return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+ conn net.Conn
+ isServer bool
+ subprotocol string
+
+ // Write fields
+ mu chan bool // used as mutex to protect write to conn
+ writeBuf []byte // frame is constructed in this buffer.
+ writeDeadline time.Time
+ writer io.WriteCloser // the current writer returned to the application
+ isWriting bool // for best-effort concurrent write detection
+
+ writeErrMu sync.Mutex
+ writeErr error
+
+ enableWriteCompression bool
+ compressionLevel int
+ newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
+
+ // Read fields
+ reader io.ReadCloser // the current reader returned to the application
+ readErr error
+ br *bufio.Reader
+ readRemaining int64 // bytes remaining in current frame.
+ readFinal bool // true if the final fragment of the current message has been read.
+ readLength int64 // Message size.
+ readLimit int64 // Maximum message size.
+ readMaskPos int
+ readMaskKey [4]byte
+ handlePong func(string) error
+ handlePing func(string) error
+ handleClose func(int, string) error
+ readErrCount int
+ messageReader *messageReader // the current low-level reader
+
+ readDecompress bool // whether last read frame had RSV1 set
+ newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
+ return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
+}
+
+type writeHook struct {
+ p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+ wh.p = p
+ return len(p), nil
+}
+
+func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
+ mu := make(chan bool, 1)
+ mu <- true
+
+ var br *bufio.Reader
+ if readBufferSize == 0 && brw != nil && brw.Reader != nil {
+ // Reuse the supplied bufio.Reader if the buffer has a useful size.
+ // This code assumes that peek on a reader returns
+ // bufio.Reader.buf[:0].
+ brw.Reader.Reset(conn)
+ if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
+ br = brw.Reader
+ }
+ }
+ if br == nil {
+ if readBufferSize == 0 {
+ readBufferSize = defaultReadBufferSize
+ }
+ if readBufferSize < maxControlFramePayloadSize {
+ readBufferSize = maxControlFramePayloadSize
+ }
+ br = bufio.NewReaderSize(conn, readBufferSize)
+ }
+
+ var writeBuf []byte
+ if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
+ // Use the bufio.Writer's buffer if the buffer has a useful size. This
+ // code assumes that bufio.Writer.buf[:1] is passed to the
+ // bufio.Writer's underlying writer.
+ var wh writeHook
+ brw.Writer.Reset(&wh)
+ brw.Writer.WriteByte(0)
+ brw.Flush()
+ if cap(wh.p) >= maxFrameHeaderSize+256 {
+ writeBuf = wh.p[:cap(wh.p)]
+ }
+ }
+
+ if writeBuf == nil {
+ if writeBufferSize == 0 {
+ writeBufferSize = defaultWriteBufferSize
+ }
+ writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
+ }
+
+ c := &Conn{
+ isServer: isServer,
+ br: br,
+ conn: conn,
+ mu: mu,
+ readFinal: true,
+ writeBuf: writeBuf,
+ enableWriteCompression: true,
+ compressionLevel: defaultCompressionLevel,
+ }
+ c.SetCloseHandler(nil)
+ c.SetPingHandler(nil)
+ c.SetPongHandler(nil)
+ return c
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting for a close frame.
+func (c *Conn) Close() error {
+ return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+ err = hideTempErr(err)
+ c.writeErrMu.Lock()
+ if c.writeErr == nil {
+ c.writeErr = err
+ }
+ c.writeErrMu.Unlock()
+ return err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
+ <-c.mu
+ defer func() { c.mu <- true }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ for _, buf := range bufs {
+ if len(buf) > 0 {
+ _, err := c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ }
+ }
+
+ if frameType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+ if !isControl(messageType) {
+ return errBadWriteOpCode
+ }
+ if len(data) > maxControlFramePayloadSize {
+ return errInvalidControlFrame
+ }
+
+ b0 := byte(messageType) | finalBit
+ b1 := byte(len(data))
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+ buf = append(buf, b0, b1)
+
+ if c.isServer {
+ buf = append(buf, data...)
+ } else {
+ key := newMaskKey()
+ buf = append(buf, key[:]...)
+ buf = append(buf, data...)
+ maskBytes(key, 0, buf[6:])
+ }
+
+ d := time.Hour * 1000
+ if !deadline.IsZero() {
+ d = deadline.Sub(time.Now())
+ if d < 0 {
+ return errWriteTimeout
+ }
+ }
+
+ timer := time.NewTimer(d)
+ select {
+ case <-c.mu:
+ timer.Stop()
+ case <-timer.C:
+ return errWriteTimeout
+ }
+ defer func() { c.mu <- true }()
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ c.conn.SetWriteDeadline(deadline)
+ _, err = c.conn.Write(buf)
+ if err != nil {
+ return c.writeFatal(err)
+ }
+ if messageType == CloseMessage {
+ c.writeFatal(ErrCloseSent)
+ }
+ return err
+}
+
+func (c *Conn) prepWrite(messageType int) error {
+ // Close previous writer if not already closed by the application. It's
+ // probably better to return an error in this situation, but we cannot
+ // change this without breaking existing applications.
+ if c.writer != nil {
+ c.writer.Close()
+ c.writer = nil
+ }
+
+ if !isControl(messageType) && !isData(messageType) {
+ return errBadWriteOpCode
+ }
+
+ c.writeErrMu.Lock()
+ err := c.writeErr
+ c.writeErrMu.Unlock()
+ return err
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+ if err := c.prepWrite(messageType); err != nil {
+ return nil, err
+ }
+
+ mw := &messageWriter{
+ c: c,
+ frameType: messageType,
+ pos: maxFrameHeaderSize,
+ }
+ c.writer = mw
+ if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+ w := c.newCompressionWriter(c.writer, c.compressionLevel)
+ mw.compress = true
+ c.writer = w
+ }
+ return c.writer, nil
+}
+
+type messageWriter struct {
+ c *Conn
+ compress bool // whether next call to flushFrame should set RSV1
+ pos int // end of data in writeBuf.
+ frameType int // type of the current frame.
+ err error
+}
+
+func (w *messageWriter) fatal(err error) error {
+ if w.err != nil {
+ w.err = err
+ w.c.writer = nil
+ }
+ return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+ c := w.c
+ length := w.pos - maxFrameHeaderSize + len(extra)
+
+ // Check for invalid control frames.
+ if isControl(w.frameType) &&
+ (!final || length > maxControlFramePayloadSize) {
+ return w.fatal(errInvalidControlFrame)
+ }
+
+ b0 := byte(w.frameType)
+ if final {
+ b0 |= finalBit
+ }
+ if w.compress {
+ b0 |= rsv1Bit
+ }
+ w.compress = false
+
+ b1 := byte(0)
+ if !c.isServer {
+ b1 |= maskBit
+ }
+
+ // Assume that the frame starts at beginning of c.writeBuf.
+ framePos := 0
+ if c.isServer {
+ // Adjust up if mask not included in the header.
+ framePos = 4
+ }
+
+ switch {
+ case length >= 65536:
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 127
+ binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+ case length > 125:
+ framePos += 6
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | 126
+ binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+ default:
+ framePos += 8
+ c.writeBuf[framePos] = b0
+ c.writeBuf[framePos+1] = b1 | byte(length)
+ }
+
+ if !c.isServer {
+ key := newMaskKey()
+ copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+ maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+ if len(extra) > 0 {
+ return c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))
+ }
+ }
+
+ // Write the buffers to the connection with best-effort detection of
+ // concurrent writes. See the concurrency section in the package
+ // documentation for more info.
+
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+
+ err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+
+ if err != nil {
+ return w.fatal(err)
+ }
+
+ if final {
+ c.writer = nil
+ return nil
+ }
+
+ // Setup for next frame.
+ w.pos = maxFrameHeaderSize
+ w.frameType = continuationFrame
+ return nil
+}
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+ n := len(w.c.writeBuf) - w.pos
+ if n <= 0 {
+ if err := w.flushFrame(false, nil); err != nil {
+ return 0, err
+ }
+ n = len(w.c.writeBuf) - w.pos
+ }
+ if n > max {
+ n = max
+ }
+ return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+ // Don't buffer large messages.
+ err := w.flushFrame(false, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+
+ nn := len(p)
+ for len(p) > 0 {
+ n, err := w.ncopy(len(p))
+ if err != nil {
+ return 0, err
+ }
+ copy(w.c.writeBuf[w.pos:], p[:n])
+ w.pos += n
+ p = p[n:]
+ }
+ return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for {
+ if w.pos == len(w.c.writeBuf) {
+ err = w.flushFrame(false, nil)
+ if err != nil {
+ break
+ }
+ }
+ var n int
+ n, err = r.Read(w.c.writeBuf[w.pos:])
+ w.pos += n
+ nn += int64(n)
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ break
+ }
+ }
+ return nn, err
+}
+
+func (w *messageWriter) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+ if err := w.flushFrame(true, nil); err != nil {
+ return err
+ }
+ w.err = errWriteClosed
+ return nil
+}
+
+// WritePreparedMessage writes the prepared message to the connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+ frameType, frameData, err := pm.frame(prepareKey{
+ isServer: c.isServer,
+ compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+ compressionLevel: c.compressionLevel,
+ })
+ if err != nil {
+ return err
+ }
+ if c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = true
+ err = c.write(frameType, c.writeDeadline, frameData, nil)
+ if !c.isWriting {
+ panic("concurrent write to websocket connection")
+ }
+ c.isWriting = false
+ return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+ if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+ // Fast path with no allocations and single frame.
+
+ if err := c.prepWrite(messageType); err != nil {
+ return err
+ }
+ mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize}
+ n := copy(c.writeBuf[mw.pos:], data)
+ mw.pos += n
+ data = data[n:]
+ return mw.flushFrame(true, data)
+ }
+
+ w, err := c.NextWriter(messageType)
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(data); err != nil {
+ return err
+ }
+ return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ c.writeDeadline = t
+ return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+
+ // 1. Skip remainder of previous frame.
+
+ if c.readRemaining > 0 {
+ if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+ return noFrame, err
+ }
+ }
+
+ // 2. Read and parse first two bytes of frame header.
+
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+
+ final := p[0]&finalBit != 0
+ frameType := int(p[0] & 0xf)
+ mask := p[1]&maskBit != 0
+ c.readRemaining = int64(p[1] & 0x7f)
+
+ c.readDecompress = false
+ if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+ c.readDecompress = true
+ p[0] &^= rsv1Bit
+ }
+
+ if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+ return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+ }
+
+ switch frameType {
+ case CloseMessage, PingMessage, PongMessage:
+ if c.readRemaining > maxControlFramePayloadSize {
+ return noFrame, c.handleProtocolError("control frame length > 125")
+ }
+ if !final {
+ return noFrame, c.handleProtocolError("control frame not final")
+ }
+ case TextMessage, BinaryMessage:
+ if !c.readFinal {
+ return noFrame, c.handleProtocolError("message start before final message frame")
+ }
+ c.readFinal = final
+ case continuationFrame:
+ if c.readFinal {
+ return noFrame, c.handleProtocolError("continuation after final message frame")
+ }
+ c.readFinal = final
+ default:
+ return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+ }
+
+ // 3. Read and parse frame length.
+
+ switch c.readRemaining {
+ case 126:
+ p, err := c.read(2)
+ if err != nil {
+ return noFrame, err
+ }
+ c.readRemaining = int64(binary.BigEndian.Uint16(p))
+ case 127:
+ p, err := c.read(8)
+ if err != nil {
+ return noFrame, err
+ }
+ c.readRemaining = int64(binary.BigEndian.Uint64(p))
+ }
+
+ // 4. Handle frame masking.
+
+ if mask != c.isServer {
+ return noFrame, c.handleProtocolError("incorrect mask flag")
+ }
+
+ if mask {
+ c.readMaskPos = 0
+ p, err := c.read(len(c.readMaskKey))
+ if err != nil {
+ return noFrame, err
+ }
+ copy(c.readMaskKey[:], p)
+ }
+
+ // 5. For text and binary messages, enforce read limit and return.
+
+ if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+ c.readLength += c.readRemaining
+ if c.readLimit > 0 && c.readLength > c.readLimit {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+ return noFrame, ErrReadLimit
+ }
+
+ return frameType, nil
+ }
+
+ // 6. Read control frame payload.
+
+ var payload []byte
+ if c.readRemaining > 0 {
+ payload, err = c.read(int(c.readRemaining))
+ c.readRemaining = 0
+ if err != nil {
+ return noFrame, err
+ }
+ if c.isServer {
+ maskBytes(c.readMaskKey, 0, payload)
+ }
+ }
+
+ // 7. Process control frame payload.
+
+ switch frameType {
+ case PongMessage:
+ if err := c.handlePong(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case PingMessage:
+ if err := c.handlePing(string(payload)); err != nil {
+ return noFrame, err
+ }
+ case CloseMessage:
+ closeCode := CloseNoStatusReceived
+ closeText := ""
+ if len(payload) >= 2 {
+ closeCode = int(binary.BigEndian.Uint16(payload))
+ if !isValidReceivedCloseCode(closeCode) {
+ return noFrame, c.handleProtocolError("invalid close code")
+ }
+ closeText = string(payload[2:])
+ if !utf8.ValidString(closeText) {
+ return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+ }
+ }
+ if err := c.handleClose(closeCode, closeText); err != nil {
+ return noFrame, err
+ }
+ return noFrame, &CloseError{Code: closeCode, Text: closeText}
+ }
+
+ return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+ c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+ return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+ // Close previous reader, only relevant for decompression.
+ if c.reader != nil {
+ c.reader.Close()
+ c.reader = nil
+ }
+
+ c.messageReader = nil
+ c.readLength = 0
+
+ for c.readErr == nil {
+ frameType, err := c.advanceFrame()
+ if err != nil {
+ c.readErr = hideTempErr(err)
+ break
+ }
+ if frameType == TextMessage || frameType == BinaryMessage {
+ c.messageReader = &messageReader{c}
+ c.reader = c.messageReader
+ if c.readDecompress {
+ c.reader = c.newDecompressionReader(c.reader)
+ }
+ return frameType, c.reader, nil
+ }
+ }
+
+ // Applications that do not handle the error returned from this method can
+ // spin in a tight loop on connection failure. To help application developers
+ // detect this condition, panic on repeated reads from the failed connection.
+ c.readErrCount++
+ if c.readErrCount >= 1000 {
+ panic("repeated read on failed websocket connection")
+ }
+
+ return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+ c := r.c
+ if c.messageReader != r {
+ return 0, io.EOF
+ }
+
+ for c.readErr == nil {
+
+ if c.readRemaining > 0 {
+ if int64(len(b)) > c.readRemaining {
+ b = b[:c.readRemaining]
+ }
+ n, err := c.br.Read(b)
+ c.readErr = hideTempErr(err)
+ if c.isServer {
+ c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+ }
+ c.readRemaining -= int64(n)
+ if c.readRemaining > 0 && c.readErr == io.EOF {
+ c.readErr = errUnexpectedEOF
+ }
+ return n, c.readErr
+ }
+
+ if c.readFinal {
+ c.messageReader = nil
+ return 0, io.EOF
+ }
+
+ frameType, err := c.advanceFrame()
+ switch {
+ case err != nil:
+ c.readErr = hideTempErr(err)
+ case frameType == TextMessage || frameType == BinaryMessage:
+ c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+ }
+ }
+
+ err := c.readErr
+ if err == io.EOF && c.messageReader == r {
+ err = errUnexpectedEOF
+ }
+ return 0, err
+}
+
+func (r *messageReader) Close() error {
+ return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+ var r io.Reader
+ messageType, r, err = c.NextReader()
+ if err != nil {
+ return messageType, nil, err
+ }
+ p, err = ioutil.ReadAll(r)
+ return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close frame to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+ c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+ return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close frame
+// back to the peer.
+//
+// The application must read the connection to process close messages as
+// described in the section on Control Frames above.
+//
+// The connection read methods return a CloseError when a close frame is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close frame back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+ if h == nil {
+ h = func(code int, text string) error {
+ message := []byte{}
+ if code != CloseNoStatusReceived {
+ message = FormatCloseMessage(code, "")
+ }
+ c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+ return nil
+ }
+ }
+ c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+ return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING frame application data. The default
+// ping handler sends a pong to the peer.
+//
+// The application must read the connection to process ping messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(message string) error {
+ err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+ if err == ErrCloseSent {
+ return nil
+ } else if e, ok := err.(net.Error); ok && e.Temporary() {
+ return nil
+ }
+ return err
+ }
+ }
+ c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+ return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG frame application data. The default
+// pong handler does nothing.
+//
+// The application must read the connection to process pong messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+ if h == nil {
+ h = func(string) error { return nil }
+ }
+ c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific settings.
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+ c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+ if !isValidCompressionLevel(level) {
+ return errors.New("websocket: invalid compression level")
+ }
+ c.compressionLevel = level
+ return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+func FormatCloseMessage(closeCode int, text string) []byte {
+ buf := make([]byte, 2+len(text))
+ binary.BigEndian.PutUint16(buf, uint16(closeCode))
+ copy(buf[2:], text)
+ return buf
+}
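
The read-side controls defined above (SetReadLimit, SetReadDeadline, SetPongHandler and WriteControl) are typically combined into a keepalive read loop. A minimal sketch follows, assuming application code that already holds a *websocket.Conn; the constants and the readLoop name are illustrative, not part of the package.

package sketch

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

const (
	pongWait   = 60 * time.Second // close if no pong arrives within this window
	pingPeriod = 50 * time.Second // must be shorter than pongWait
	maxMsgSize = 512              // bytes; larger messages return ErrReadLimit
)

func readLoop(c *websocket.Conn) {
	defer c.Close()
	c.SetReadLimit(maxMsgSize)
	c.SetReadDeadline(time.Now().Add(pongWait))
	c.SetPongHandler(func(string) error {
		// Each pong from the peer extends the read deadline.
		return c.SetReadDeadline(time.Now().Add(pongWait))
	})

	// WriteControl may be called concurrently with the read methods, so pings
	// can be sent from a separate goroutine.
	go func() {
		t := time.NewTicker(pingPeriod)
		defer t.Stop()
		for range t.C {
			if err := c.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second)); err != nil {
				return
			}
		}
	}()

	for {
		_, msg, err := c.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Printf("unexpected close: %v", err)
			}
			return
		}
		log.Printf("read %d bytes", len(msg))
	}
}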
diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go
new file mode 100644
index 000000000..1ea15059e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_read.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package websocket
+
+import "io"
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ c.br.Discard(len(p))
+ return p, err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go
new file mode 100644
index 000000000..018541cf6
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package websocket
+
+import "io"
+
+func (c *Conn) read(n int) ([]byte, error) {
+ p, err := c.br.Peek(n)
+ if err == io.EOF {
+ err = errUnexpectedEOF
+ }
+ if len(p) > 0 {
+ // advance over the bytes just read
+ io.ReadFull(c.br, p)
+ }
+ return p, err
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 000000000..e291a952c
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,180 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application uses
+// the Upgrade function from an Upgrader object with an HTTP request handler
+// to get a pointer to a Conn:
+//
+// var upgrader = websocket.Upgrader{
+// ReadBufferSize: 1024,
+// WriteBufferSize: 1024,
+// }
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// conn, err := upgrader.Upgrade(w, r, nil)
+// if err != nil {
+// log.Println(err)
+// return
+// }
+// ... Use conn to send and receive messages.
+// }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+// for {
+// messageType, p, err := conn.ReadMessage()
+// if err != nil {
+// return
+// }
+// if err = conn.WriteMessage(messageType, p); err != nil {
+// return err
+// }
+// }
+//
+// In the above snippet, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+// for {
+// messageType, r, err := conn.NextReader()
+// if err != nil {
+// return
+// }
+// w, err := conn.NextWriter(messageType)
+// if err != nil {
+// return err
+// }
+// if _, err := io.Copy(w, r); err != nil {
+// return err
+// }
+// if err := w.Close(); err != nil {
+// return err
+// }
+// }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by sending a close message to the
+// peer and returning a *CloseError from the NextReader, ReadMessage or the
+// message Read method.
+//
+// Connections handle received ping and pong messages by invoking callback
+// functions set with SetPingHandler and SetPongHandler methods. The callback
+// functions are called from the NextReader, ReadMessage and the message Read
+// methods.
+//
+// The default ping handler sends a pong to the peer. The application's reading
+// goroutine can block for a short time while the handler writes the pong data
+// to the connection.
+//
+// The application must read the connection to process ping, pong and close
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+// func readLoop(c *websocket.Conn) {
+// for {
+// if _, _, err := c.NextReader(); err != nil {
+// c.Close()
+// break
+// }
+// }
+// }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and not equal to the
+// Host request header.
+//
+// An application can allow connections from any origin by specifying a
+// function that always returns true:
+//
+// var upgrader = websocket.Upgrader{
+// CheckOrigin: func(r *http.Request) bool { return true },
+// }
+//
+// The deprecated Upgrade function does not enforce an origin policy. It's the
+// application's responsibility to check the Origin header before calling
+// Upgrade.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+// var upgrader = websocket.Upgrader{
+// EnableCompression: true,
+// }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+// conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
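
The concurrency rules documented above (at most one goroutine calling the write methods) are commonly enforced by funneling all outbound messages through a single goroutine. A minimal sketch, assuming application code; the client type and out channel are illustrative, not part of the package.

package sketch

import "github.com/gorilla/websocket"

// client owns the write side of one connection; all other goroutines send on
// the out channel instead of touching the Conn directly.
type client struct {
	conn *websocket.Conn
	out  chan []byte
}

// writeLoop is the only goroutine that calls the connection's write methods,
// which satisfies the package's concurrency contract.
func (c *client) writeLoop() {
	defer c.conn.Close()
	for msg := range c.out {
		if err := c.conn.WriteMessage(websocket.TextMessage, msg); err != nil {
			return
		}
	}
	// Channel closed: tell the peer we are going away before disconnecting.
	c.conn.WriteMessage(websocket.CloseMessage,
		websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
}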
diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go
new file mode 100644
index 000000000..4f0e36875
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/json.go
@@ -0,0 +1,55 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// WriteJSON is deprecated, use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+ return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v to the connection.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+ w, err := c.NextWriter(TextMessage)
+ if err != nil {
+ return err
+ }
+ err1 := json.NewEncoder(w).Encode(v)
+ err2 := w.Close()
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+// ReadJSON is deprecated, use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+ return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+ _, r, err := c.NextReader()
+ if err != nil {
+ return err
+ }
+ err = json.NewDecoder(r).Decode(v)
+ if err == io.EOF {
+ // One value is expected in the message.
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+}
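
A brief usage sketch for the JSON helpers above, assuming application code on both ends exchanging a small struct; the Message type and echoJSON name are illustrative.

package sketch

import "github.com/gorilla/websocket"

// Message is an illustrative payload type.
type Message struct {
	Kind string `json:"kind"`
	Body string `json:"body"`
}

// echoJSON reads one JSON message and writes it back. ReadJSON decodes into
// the pointed-to value; WriteJSON sends v as a single text message.
func echoJSON(c *websocket.Conn) error {
	var m Message
	if err := c.ReadJSON(&m); err != nil {
		return err
	}
	return c.WriteJSON(m)
}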
diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go
new file mode 100644
index 000000000..6a88bbc74
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+
+ // Mask one byte at a time for small buffers.
+ if len(b) < 2*wordSize {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+ }
+
+ // Mask one byte at a time to word boundary.
+ if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+ n = wordSize - n
+ for i := range b[:n] {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ b = b[n:]
+ }
+
+ // Create aligned word size key.
+ var k [wordSize]byte
+ for i := range k {
+ k[i] = key[(pos+i)&3]
+ }
+ kw := *(*uintptr)(unsafe.Pointer(&k))
+
+ // Mask one word at a time.
+ n := (len(b) / wordSize) * wordSize
+ for i := 0; i < n; i += wordSize {
+ *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+ }
+
+ // Mask one byte at a time for remaining bytes.
+ b = b[n:]
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 000000000..2aac060e5
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+ for i := range b {
+ b[i] ^= key[pos&3]
+ pos++
+ }
+ return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 000000000..1efffbd1e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,103 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bytes"
+ "net"
+ "sync"
+ "time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+ messageType int
+ data []byte
+ err error
+ mu sync.Mutex
+ frames map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+ isServer bool
+ compress bool
+ compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+ once sync.Once
+ data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. A valid wire
+// representation is calculated lazily, only once for a given set of
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+ pm := &PreparedMessage{
+ messageType: messageType,
+ frames: make(map[prepareKey]*preparedFrame),
+ data: data,
+ }
+
+ // Prepare a plain server frame.
+ _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+ if err != nil {
+ return nil, err
+ }
+
+ // To protect against the caller modifying the data argument, keep only the
+ // copy of the data held in the plain server frame.
+ pm.data = frameData[len(frameData)-len(data):]
+ return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+ pm.mu.Lock()
+ frame, ok := pm.frames[key]
+ if !ok {
+ frame = &preparedFrame{}
+ pm.frames[key] = frame
+ }
+ pm.mu.Unlock()
+
+ var err error
+ frame.once.Do(func() {
+ // Prepare a frame using a 'fake' connection.
+ // TODO: Refactor code in conn.go to allow more direct construction of
+ // the frame.
+ mu := make(chan bool, 1)
+ mu <- true
+ var nc prepareConn
+ c := &Conn{
+ conn: &nc,
+ mu: mu,
+ isServer: key.isServer,
+ compressionLevel: key.compressionLevel,
+ enableWriteCompression: true,
+ writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+ }
+ if key.compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ }
+ err = c.WriteMessage(pm.messageType, pm.data)
+ frame.data = nc.buf.Bytes()
+ })
+ return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+ buf bytes.Buffer
+ net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
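
A minimal sketch of the intended broadcast use case for PreparedMessage, assuming application code that tracks a set of connections (the conns slice and broadcast name are illustrative): the payload is framed, and compressed where negotiated, once per connection-option set rather than once per connection.

package sketch

import (
	"log"

	"github.com/gorilla/websocket"
)

// broadcast sends one payload to every connection. The expensive framing and
// compression work happens lazily inside pm and is shared across connections
// that use the same options.
func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Println("prepare:", err)
		return
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
}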
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 000000000..3495e0f1a
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,291 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "errors"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+ message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+ // HandshakeTimeout specifies the duration for the handshake to complete.
+ HandshakeTimeout time.Duration
+
+ // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+ // size is zero, then buffers allocated by the HTTP server are used. The
+ // I/O buffer sizes do not limit the size of the messages that can be sent
+ // or received.
+ ReadBufferSize, WriteBufferSize int
+
+ // Subprotocols specifies the server's supported protocols in order of
+ // preference. If this field is set, then the Upgrade method negotiates a
+ // subprotocol by selecting the first match in this list with a protocol
+ // requested by the client.
+ Subprotocols []string
+
+ // Error specifies the function for generating HTTP error responses. If Error
+ // is nil, then http.Error is used to generate the HTTP response.
+ Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+ // CheckOrigin returns true if the request Origin header is acceptable. If
+ // CheckOrigin is nil, the host in the Origin header must not be set or
+ // must match the host of the request.
+ CheckOrigin func(r *http.Request) bool
+
+ // EnableCompression specifies whether the server should attempt to negotiate
+ // per-message compression (RFC 7692). Setting this value to true does not
+ // guarantee that compression will be supported. Currently only "no context
+ // takeover" modes are supported.
+ EnableCompression bool
+}
+
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+ err := HandshakeError{reason}
+ if u.Error != nil {
+ u.Error(w, r, status, err)
+ } else {
+ w.Header().Set("Sec-Websocket-Version", "13")
+ http.Error(w, http.StatusText(status), status)
+ }
+ return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+func checkSameOrigin(r *http.Request) bool {
+ origin := r.Header["Origin"]
+ if len(origin) == 0 {
+ return true
+ }
+ u, err := url.Parse(origin[0])
+ if err != nil {
+ return false
+ }
+ return u.Host == r.Host
+}
+
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+ if u.Subprotocols != nil {
+ clientProtocols := Subprotocols(r)
+ for _, serverProtocol := range u.Subprotocols {
+ for _, clientProtocol := range clientProtocols {
+ if clientProtocol == serverProtocol {
+ return clientProtocol
+ }
+ }
+ }
+ } else if responseHeader != nil {
+ return responseHeader.Get("Sec-Websocket-Protocol")
+ }
+ return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+ if r.Method != "GET" {
+ return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
+ }
+
+ if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
+ }
+
+ if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
+ }
+
+ if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+ }
+
+ checkOrigin := u.CheckOrigin
+ if checkOrigin == nil {
+ checkOrigin = checkSameOrigin
+ }
+ if !checkOrigin(r) {
+ return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
+ }
+
+ challengeKey := r.Header.Get("Sec-Websocket-Key")
+ if challengeKey == "" {
+ return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
+ }
+
+ subprotocol := u.selectSubprotocol(r, responseHeader)
+
+ // Negotiate PMCE
+ var compress bool
+ if u.EnableCompression {
+ for _, ext := range parseExtensions(r.Header) {
+ if ext[""] != "permessage-deflate" {
+ continue
+ }
+ compress = true
+ break
+ }
+ }
+
+ var (
+ netConn net.Conn
+ err error
+ )
+
+ h, ok := w.(http.Hijacker)
+ if !ok {
+ return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+ }
+ var brw *bufio.ReadWriter
+ netConn, brw, err = h.Hijack()
+ if err != nil {
+ return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+ }
+
+ if brw.Reader.Buffered() > 0 {
+ netConn.Close()
+ return nil, errors.New("websocket: client sent data before handshake is complete")
+ }
+
+ c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
+ c.subprotocol = subprotocol
+
+ if compress {
+ c.newCompressionWriter = compressNoContextTakeover
+ c.newDecompressionReader = decompressNoContextTakeover
+ }
+
+ p := c.writeBuf[:0]
+ p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+ p = append(p, computeAcceptKey(challengeKey)...)
+ p = append(p, "\r\n"...)
+ if c.subprotocol != "" {
+ p = append(p, "Sec-Websocket-Protocol: "...)
+ p = append(p, c.subprotocol...)
+ p = append(p, "\r\n"...)
+ }
+ if compress {
+ p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+ }
+ for k, vs := range responseHeader {
+ if k == "Sec-Websocket-Protocol" {
+ continue
+ }
+ for _, v := range vs {
+ p = append(p, k...)
+ p = append(p, ": "...)
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if b <= 31 {
+ // prevent response splitting.
+ b = ' '
+ }
+ p = append(p, b)
+ }
+ p = append(p, "\r\n"...)
+ }
+ }
+ p = append(p, "\r\n"...)
+
+ // Clear deadlines set by HTTP server.
+ netConn.SetDeadline(time.Time{})
+
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+ }
+ if _, err = netConn.Write(p); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ if u.HandshakeTimeout > 0 {
+ netConn.SetWriteDeadline(time.Time{})
+ }
+
+ return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// This function is deprecated, use websocket.Upgrader instead.
+//
+// The application is responsible for checking the request origin before
+// calling Upgrade. An example implementation of the same origin policy is:
+//
+// if req.Header.Get("Origin") != "http://"+req.Host {
+// http.Error(w, "Origin not allowed", 403)
+// return
+// }
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+ u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+ u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+ // don't return errors to maintain backwards compatibility
+ }
+ u.CheckOrigin = func(r *http.Request) bool {
+ // allow all connections by default
+ return true
+ }
+ return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+ h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+ if h == "" {
+ return nil
+ }
+ protocols := strings.Split(h, ",")
+ for i := range protocols {
+ protocols[i] = strings.TrimSpace(protocols[i])
+ }
+ return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+ return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+ tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
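
The comments above describe the Upgrader-based handshake path that replaces the deprecated `Upgrade` function. Below is a minimal, hypothetical sketch (not part of this diff) of an HTTP handler wired up that way; the route, buffer sizes, and the "chat" subprotocol are illustrative.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// upgrader follows the documented Upgrader path; buffer sizes and the
// "chat" subprotocol are illustrative choices.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	Subprotocols:    []string{"chat"},
}

func wsHandler(w http.ResponseWriter, r *http.Request) {
	if !websocket.IsWebSocketUpgrade(r) {
		http.Error(w, "expected a WebSocket upgrade request", http.StatusBadRequest)
		return
	}
	// Upgrade hijacks the connection and writes the 101 response built by
	// the handshake code above.
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already replied to the client with an HTTP error.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()

	// Echo messages back to the client.
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/ws", wsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

If `CheckOrigin` is left nil, the Upgrader applies a same-origin policy much like the example shown in the deprecated function's documentation.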
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 000000000..9a4908df2
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,214 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "io"
+ "net/http"
+ "strings"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+ h := sha1.New()
+ h.Write([]byte(challengeKey))
+ h.Write(keyGUID)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+ p := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, p); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Octet types from RFC 2616.
+var octetTypes [256]byte
+
+const (
+ isTokenOctet = 1 << iota
+ isSpaceOctet
+)
+
+func init() {
+ // From RFC 2616
+ //
+ // OCTET      = <any 8-bit sequence of data>
+ // CHAR       = <any US-ASCII character (octets 0 - 127)>
+ // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR         = <US-ASCII CR, carriage return (13)>
+ // LF         = <US-ASCII LF, linefeed (10)>
+ // SP         = <US-ASCII SP, space (32)>
+ // HT         = <US-ASCII HT, horizontal-tab (9)>
+ // <">        = <US-ASCII double-quote mark (34)>
+ // CRLF       = CR LF
+ // LWS        = [CRLF] 1*( SP | HT )
+ // TEXT       = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token      = 1*<any CHAR except CTLs or separators>
+ // qdtext     = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t byte
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpaceOctet
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isTokenOctet
+ }
+ octetTypes[c] = t
+ }
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpaceOctet == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func nextToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isTokenOctet == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func nextTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return nextToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j += 1
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j += 1
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains a token equal to value (compared case-insensitively).
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+ for _, s := range header[name] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ if strings.EqualFold(t, value) {
+ return true
+ }
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+
+ // From RFC 6455:
+ //
+ // Sec-WebSocket-Extensions = extension-list
+ // extension-list = 1#extension
+ // extension = extension-token *( ";" extension-param )
+ // extension-token = registered-token
+ // registered-token = token
+ // extension-param = token [ "=" (token | quoted-string) ]
+ // ;When using the quoted-string syntax variant, the value
+ // ;after quoted-string unescaping MUST conform to the
+ // ;'token' ABNF.
+
+ var result []map[string]string
+headers:
+ for _, s := range header["Sec-Websocket-Extensions"] {
+ for {
+ var t string
+ t, s = nextToken(skipSpace(s))
+ if t == "" {
+ continue headers
+ }
+ ext := map[string]string{"": t}
+ for {
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ";") {
+ break
+ }
+ var k string
+ k, s = nextToken(skipSpace(s[1:]))
+ if k == "" {
+ continue headers
+ }
+ s = skipSpace(s)
+ var v string
+ if strings.HasPrefix(s, "=") {
+ v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+ s = skipSpace(s)
+ }
+ if s != "" && s[0] != ',' && s[0] != ';' {
+ continue headers
+ }
+ ext[k] = v
+ }
+ if s != "" && s[0] != ',' {
+ continue headers
+ }
+ result = append(result, ext)
+ if s == "" {
+ continue headers
+ }
+ s = s[1:]
+ }
+ }
+ return result
+}
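
parseExtensions stores the extension token under the empty-string key and each parameter under its own key. The following is a hypothetical in-package test sketch (not part of the vendored code) illustrating the shape of the result for a typical header value.

```go
package websocket

import (
	"net/http"
	"reflect"
	"testing"
)

// TestParseExtensionsSketch is illustrative only; the header value and
// expected maps are assumptions chosen to show the result shape.
func TestParseExtensionsSketch(t *testing.T) {
	h := http.Header{}
	h.Add("Sec-Websocket-Extensions",
		"permessage-deflate; client_max_window_bits, x-custom; key=\"quoted value\"")

	got := parseExtensions(h)
	want := []map[string]string{
		// The extension token lives under the "" key; bare parameters map to "".
		{"": "permessage-deflate", "client_max_window_bits": ""},
		{"": "x-custom", "key": "quoted value"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
```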
diff --git a/vendor/github.com/h2non/filetype/.editorconfig b/vendor/github.com/h2non/filetype/.editorconfig
new file mode 100644
index 000000000..000dc0a7a
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 2
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+
+[*.md]
+trim_trailing_whitespace = false
diff --git a/vendor/github.com/h2non/filetype/.gitignore b/vendor/github.com/h2non/filetype/.gitignore
new file mode 100644
index 000000000..6fefe6cce
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/.gitignore
@@ -0,0 +1,2 @@
+bin
+.DS_Store
diff --git a/vendor/github.com/h2non/filetype/.travis.yml b/vendor/github.com/h2non/filetype/.travis.yml
new file mode 100644
index 000000000..370dc0472
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - "1.11"
+ - "1.10"
+ - "tip"
+
+before_install:
+ - go get -u -v golang.org/x/lint/golint
+
+script:
+ - diff -u <(echo -n) <(gofmt -s -d ./)
+ - diff -u <(echo -n) <(go vet ./...)
+ - diff -u <(echo -n) <(golint)
+ - go test -v -race ./...
diff --git a/vendor/github.com/h2non/filetype/History.md b/vendor/github.com/h2non/filetype/History.md
new file mode 100644
index 000000000..58ca1555f
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/History.md
@@ -0,0 +1,87 @@
+
+v1.0.6 / 2019-01-22
+===================
+
+ * Merge pull request #55 from ivanlemeshev/master
+ * Added ftypmp4v to MP4 matcher
+ * Merge pull request #54 from aofei/master
+ * chore: add support for Go modules
+ * feat: add support for AAC (audio/aac)
+ * Merge pull request #53 from lynxbyorion/check-for-docoments
+ * Added checks for documents.
+ * Merge pull request #51 from eriken/master
+ * fixed bad mime and import paths
+ * Merge pull request #50 from eriken/jpeg2000_support
+ * fix import paths
+ * jpeg2000 support
+ * Merge pull request #47 from Ma124/master
+ * Merge pull request #49 from amoore614/master
+ * more robust check for .mov files
+ * bugfix: reverse order of matcher key list so user registered matchers appear first
+ * bugfix: store ptr to MatcherKeys in case user registered matchers are used.
+ * update comment
+ * Bump buffer size to 8K to allow for more custom file matching
+ * refactor(readme): update package import path
+ * Merge pull request #48 from kumakichi/support_msooxml
+ * do not use v1
+ * ok, master already changed travis
+ * add fixtures, but MatchReader may not work for some msooxml files, 4096 bytes maybe not enough
+ * support ms ooxml, #40
+ * Fixed misspells
+ * fix(travis): use string notation for matrix items
+ * Merge pull request #42 from bruth/patch-2
+ * refactor(travis): remove Go 1.6, add Go 1.10
+ * Change maximum bytes required for detection
+ * Merge pull request #36 from yiiTT/patch-1
+ * Add MP4 dash and additional ISO formats
+ * Merge pull request #34 from RangelReale/fix-mp4-case
+ * Merge pull request #32 from yiiTT/fix-m4v
+ * Fixed mp4 detection case-sensitivity according to http://www.ftyps.com/
+ * Fix M4v matcher
+
+v1.0.5 / 2017-12-12
+===================
+
+ * Merge pull request #30 from RangelReale/fix_mp4
+ * Fix duplicated item in mp4 fix
+ * Fix MP4 matcher, with information from http://www.file-recovery.com/mp4-signature-format.htm
+ * Merge pull request #28 from ikovic/master
+ * Updated file header example.
+
+v1.0.4 / 2017-11-29
+===================
+
+ * fix: tests and document types matchers
+ * refactor(docs): remove codesponsor
+ * Merge pull request #26 from bienkma/master
+ * Add support check file type: .doc, .docx, .pptx, .ppt, .xls, .xlsx
+ * feat(docs): add code sponsor banner
+ * feat(travis): add go 1.9
+ * Merge pull request #24 from strazzere/patch-1
+ * Fix typo in unknown
+
+v1.0.3 / 2017-08-03
+===================
+
+ * Merge pull request #21 from elemeta/master
+ * Add Elf file as supported matcher archive type
+
+v1.0.2 / 2017-07-26
+===================
+
+ * Merge pull request #20 from marshyski/master
+ * Added RedHat RPM as supported matcher archive type
+ * Merge pull request #19 from nlamirault/patch-1
+ * Fix typo in documentation
+
+v1.0.1 / 2017-02-24
+===================
+
+ * Merge pull request #18 from Impyy/enable-webm
+ * Enable the webm matcher
+ * feat(docs): add Go version badge
+
+1.0.0 / 2016-12-11
+==================
+
+- Initial stable version (v1.0.0).
diff --git a/vendor/github.com/h2non/filetype/LICENSE b/vendor/github.com/h2non/filetype/LICENSE
new file mode 100644
index 000000000..30ede59b6
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/LICENSE
@@ -0,0 +1,24 @@
+The MIT License
+
+Copyright (c) Tomas Aparicio
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/h2non/filetype/README.md b/vendor/github.com/h2non/filetype/README.md
new file mode 100644
index 000000000..d816dc8ea
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/README.md
@@ -0,0 +1,281 @@
+# filetype [Build Status](https://travis-ci.org/h2non/filetype) [GoDoc](https://godoc.org/github.com/h2non/filetype) [Go Report Card](http://goreportcard.com/report/h2non/filetype) [Go Version](https://github.com/h2non/gentleman)
+
+Small and dependency-free [Go](https://golang.org) package to infer the file and MIME type by checking the [magic numbers](https://en.wikipedia.org/wiki/Magic_number_(programming)#Magic_numbers_in_files) signature.
+
+For SVG file type checking, see [go-is-svg](https://github.com/h2non/go-is-svg) package.
+
+## Features
+
+- Supports a [wide range](#supported-types) of file types
+- Provides file extension and proper MIME type
+- File discovery by extension or MIME type
+- File discovery by class (image, video, audio...)
+- Provides a bunch of helpers and file matching shortcuts
+- [Pluggable](#add-additional-file-type-matchers): add custom new types and matchers
+- Simple and semantic API
+- [Blazing fast](#benchmarks), even processing large files
+- Only the first 262 bytes, representing the maximum file header, are required, so you can just [pass a slice](#file-header)
+- Dependency free (just Go code, no C compilation needed)
+- Cross-platform file recognition
+
+## Installation
+
+```bash
+go get github.com/h2non/filetype
+```
+
+## API
+
+See [Godoc](https://godoc.org/github.com/h2non/filetype) reference.
+
+### Subpackages
+
+- [`github.com/h2non/filetype/types`](https://godoc.org/github.com/h2non/filetype/types)
+- [`github.com/h2non/filetype/matchers`](https://godoc.org/github.com/h2non/filetype/matchers)
+
+## Examples
+
+#### Simple file type checking
+
+```go
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/h2non/filetype"
+)
+
+func main() {
+ buf, _ := ioutil.ReadFile("sample.jpg")
+
+ kind, unknown := filetype.Match(buf)
+ if unknown != nil {
+ fmt.Printf("Unknown: %s", unknown)
+ return
+ }
+
+ fmt.Printf("File type: %s. MIME: %s\n", kind.Extension, kind.MIME.Value)
+}
+```
+
+#### Check type class
+
+```go
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/h2non/filetype"
+)
+
+func main() {
+ buf, _ := ioutil.ReadFile("sample.jpg")
+
+ if filetype.IsImage(buf) {
+ fmt.Println("File is an image")
+ } else {
+ fmt.Println("Not an image")
+ }
+}
+```
+
+#### Supported type
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/h2non/filetype"
+)
+
+func main() {
+ // Check if file is supported by extension
+ if filetype.IsSupported("jpg") {
+ fmt.Println("Extension supported")
+ } else {
+ fmt.Println("Extension not supported")
+ }
+
+ // Check if file is supported by MIME type
+ if filetype.IsMIMESupported("image/jpeg") {
+ fmt.Println("MIME type supported")
+ } else {
+ fmt.Println("MIME type not supported")
+ }
+}
+```
+
+#### File header
+
+```go
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/h2non/filetype"
+)
+
+func main() {
+ // Open a file descriptor
+ file, _ := os.Open("movie.mp4")
+
+ // We only have to pass the file header = first 261 bytes
+ head := make([]byte, 261)
+ file.Read(head)
+
+ if filetype.IsImage(head) {
+ fmt.Println("File is an image")
+ } else {
+ fmt.Println("Not an image")
+ }
+}
+```
+
+#### Add additional file type matchers
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/h2non/filetype"
+)
+
+var fooType = filetype.NewType("foo", "foo/foo")
+
+func fooMatcher(buf []byte) bool {
+ return len(buf) > 1 && buf[0] == 0x01 && buf[1] == 0x02
+}
+
+func main() {
+ // Register the new matcher and its type
+ filetype.AddMatcher(fooType, fooMatcher)
+
+ // Check if the new type is supported by extension
+ if filetype.IsSupported("foo") {
+ fmt.Println("New supported type: foo")
+ }
+
+ // Check if the new type is supported by MIME
+ if filetype.IsMIMESupported("foo/foo") {
+ fmt.Println("New supported MIME type: foo/foo")
+ }
+
+ // Try to match the file
+ fooFile := []byte{0x01, 0x02}
+ kind, _ := filetype.Match(fooFile)
+ if kind == filetype.Unknown {
+ fmt.Println("Unknown file type")
+ } else {
+ fmt.Printf("File type matched: %s\n", kind.Extension)
+ }
+}
+```
+
+## Supported types
+
+#### Image
+
+- **jpg** - `image/jpeg`
+- **png** - `image/png`
+- **gif** - `image/gif`
+- **webp** - `image/webp`
+- **cr2** - `image/x-canon-cr2`
+- **tif** - `image/tiff`
+- **bmp** - `image/bmp`
+- **jxr** - `image/vnd.ms-photo`
+- **psd** - `image/vnd.adobe.photoshop`
+- **ico** - `image/x-icon`
+
+#### Video
+
+- **mp4** - `video/mp4`
+- **m4v** - `video/x-m4v`
+- **mkv** - `video/x-matroska`
+- **webm** - `video/webm`
+- **mov** - `video/quicktime`
+- **avi** - `video/x-msvideo`
+- **wmv** - `video/x-ms-wmv`
+- **mpg** - `video/mpeg`
+- **flv** - `video/x-flv`
+
+#### Audio
+
+- **mid** - `audio/midi`
+- **mp3** - `audio/mpeg`
+- **m4a** - `audio/m4a`
+- **ogg** - `audio/ogg`
+- **flac** - `audio/x-flac`
+- **wav** - `audio/x-wav`
+- **amr** - `audio/amr`
+- **aac** - `audio/aac`
+
+#### Archive
+
+- **epub** - `application/epub+zip`
+- **zip** - `application/zip`
+- **tar** - `application/x-tar`
+- **rar** - `application/x-rar-compressed`
+- **gz** - `application/gzip`
+- **bz2** - `application/x-bzip2`
+- **7z** - `application/x-7z-compressed`
+- **xz** - `application/x-xz`
+- **pdf** - `application/pdf`
+- **exe** - `application/x-msdownload`
+- **swf** - `application/x-shockwave-flash`
+- **rtf** - `application/rtf`
+- **eot** - `application/octet-stream`
+- **ps** - `application/postscript`
+- **sqlite** - `application/x-sqlite3`
+- **nes** - `application/x-nintendo-nes-rom`
+- **crx** - `application/x-google-chrome-extension`
+- **cab** - `application/vnd.ms-cab-compressed`
+- **deb** - `application/x-deb`
+- **ar** - `application/x-unix-archive`
+- **Z** - `application/x-compress`
+- **lz** - `application/x-lzip`
+- **rpm** - `application/x-rpm`
+- **elf** - `application/x-executable`
+
+#### Documents
+
+- **doc** - `application/msword`
+- **docx** - `application/vnd.openxmlformats-officedocument.wordprocessingml.document`
+- **xls** - `application/vnd.ms-excel`
+- **xlsx** - `application/vnd.openxmlformats-officedocument.spreadsheetml.sheet`
+- **ppt** - `application/vnd.ms-powerpoint`
+- **pptx** - `application/vnd.openxmlformats-officedocument.presentationml.presentation`
+
+#### Font
+
+- **woff** - `application/font-woff`
+- **woff2** - `application/font-woff`
+- **ttf** - `application/font-sfnt`
+- **otf** - `application/font-sfnt`
+
+## Benchmarks
+
+Measured using [real files](https://github.com/h2non/filetype/tree/master/fixtures).
+
+Environment: OSX x64 i7 2.7 GHz
+
+```bash
+BenchmarkMatchTar-8 1000000 1083 ns/op
+BenchmarkMatchZip-8 1000000 1162 ns/op
+BenchmarkMatchJpeg-8 1000000 1280 ns/op
+BenchmarkMatchGif-8 1000000 1315 ns/op
+BenchmarkMatchPng-8 1000000 1121 ns/op
+```
+
+## License
+
+MIT - Tomas Aparicio
diff --git a/vendor/github.com/h2non/filetype/filetype.go b/vendor/github.com/h2non/filetype/filetype.go
new file mode 100644
index 000000000..933058c85
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/filetype.go
@@ -0,0 +1,87 @@
+package filetype
+
+import (
+ "errors"
+
+ "github.com/h2non/filetype/matchers"
+ "github.com/h2non/filetype/types"
+)
+
+// Types stores a map of supported types
+var Types = types.Types
+
+// NewType creates and registers a new type
+var NewType = types.NewType
+
+// Unknown represents an unknown file type
+var Unknown = types.Unknown
+
+// ErrEmptyBuffer represents an empty buffer error
+var ErrEmptyBuffer = errors.New("Empty buffer")
+
+// ErrUnknownBuffer represents an unknown buffer error
+var ErrUnknownBuffer = errors.New("Unknown buffer type")
+
+// AddType registers a new file type
+func AddType(ext, mime string) types.Type {
+ return types.NewType(ext, mime)
+}
+
+// Is checks if a given buffer matches with the given file type extension
+func Is(buf []byte, ext string) bool {
+ kind, ok := types.Types[ext]
+ if ok {
+ return IsType(buf, kind)
+ }
+ return false
+}
+
+// IsExtension semantic alias to Is()
+func IsExtension(buf []byte, ext string) bool {
+ return Is(buf, ext)
+}
+
+// IsType checks if a given buffer matches with the given file type
+func IsType(buf []byte, kind types.Type) bool {
+ matcher := matchers.Matchers[kind]
+ if matcher == nil {
+ return false
+ }
+ return matcher(buf) != types.Unknown
+}
+
+// IsMIME checks if a given buffer matches with the given MIME type
+func IsMIME(buf []byte, mime string) bool {
+ for _, kind := range types.Types {
+ if kind.MIME.Value == mime {
+ matcher := matchers.Matchers[kind]
+ return matcher(buf) != types.Unknown
+ }
+ }
+ return false
+}
+
+// IsSupported checks if a given file extension is supported
+func IsSupported(ext string) bool {
+ for name := range Types {
+ if name == ext {
+ return true
+ }
+ }
+ return false
+}
+
+// IsMIMESupported checks if a given MIME type is supported
+func IsMIMESupported(mime string) bool {
+ for _, m := range Types {
+ if m.MIME.Value == mime {
+ return true
+ }
+ }
+ return false
+}
+
+// GetType retrieves a Type by file extension
+func GetType(ext string) types.Type {
+ return types.Get(ext)
+}
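
The helpers above all key off the global `Types` registry, so extension and MIME lookups stay consistent with the registered matchers. A small usage sketch (the file name is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/h2non/filetype"
)

func main() {
	buf, err := ioutil.ReadFile("sample.pdf") // illustrative path
	if err != nil {
		panic(err)
	}

	// Match against a specific registered extension.
	fmt.Println("is pdf:", filetype.Is(buf, "pdf"))

	// Match against a MIME value instead of an extension.
	fmt.Println("is application/pdf:", filetype.IsMIME(buf, "application/pdf"))

	// Look up the registered type for an extension.
	kind := filetype.GetType("pdf")
	fmt.Println(kind.Extension, kind.MIME.Value)
}
```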
diff --git a/vendor/github.com/h2non/filetype/go.mod b/vendor/github.com/h2non/filetype/go.mod
new file mode 100644
index 000000000..765d393f8
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/go.mod
@@ -0,0 +1 @@
+module github.com/h2non/filetype
diff --git a/vendor/github.com/h2non/filetype/kind.go b/vendor/github.com/h2non/filetype/kind.go
new file mode 100644
index 000000000..de8473507
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/kind.go
@@ -0,0 +1,80 @@
+package filetype
+
+import (
+ "github.com/h2non/filetype/matchers"
+ "github.com/h2non/filetype/types"
+)
+
+// Image tries to match a file as image type
+func Image(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Image)
+}
+
+// IsImage checks if the given buffer is an image type
+func IsImage(buf []byte) bool {
+ kind, _ := Image(buf)
+ return kind != types.Unknown
+}
+
+// Audio tries to match a file as audio type
+func Audio(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Audio)
+}
+
+// IsAudio checks if the given buffer is an audio type
+func IsAudio(buf []byte) bool {
+ kind, _ := Audio(buf)
+ return kind != types.Unknown
+}
+
+// Video tries to match a file as video type
+func Video(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Video)
+}
+
+// IsVideo checks if the given buffer is a video type
+func IsVideo(buf []byte) bool {
+ kind, _ := Video(buf)
+ return kind != types.Unknown
+}
+
+// Font tries to match a file as text font type
+func Font(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Font)
+}
+
+// IsFont checks if the given buffer is a font type
+func IsFont(buf []byte) bool {
+ kind, _ := Font(buf)
+ return kind != types.Unknown
+}
+
+// Archive tries to match a file as generic archive type
+func Archive(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Archive)
+}
+
+// IsArchive checks if the given buffer is an archive type
+func IsArchive(buf []byte) bool {
+ kind, _ := Archive(buf)
+ return kind != types.Unknown
+}
+
+// Document tries to match a file as document type
+func Document(buf []byte) (types.Type, error) {
+ return doMatchMap(buf, matchers.Document)
+}
+
+// IsDocument checks if the given buffer is a document type
+func IsDocument(buf []byte) bool {
+ kind, _ := Document(buf)
+ return kind != types.Unknown
+}
+
+func doMatchMap(buf []byte, matchers matchers.Map) (types.Type, error) {
+ kind := MatchMap(buf, matchers)
+ if kind != types.Unknown {
+ return kind, nil
+ }
+ return kind, ErrUnknownBuffer
+}
diff --git a/vendor/github.com/h2non/filetype/match.go b/vendor/github.com/h2non/filetype/match.go
new file mode 100644
index 000000000..82cf80468
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/match.go
@@ -0,0 +1,90 @@
+package filetype
+
+import (
+ "io"
+ "os"
+
+ "github.com/h2non/filetype/matchers"
+ "github.com/h2non/filetype/types"
+)
+
+// Matchers is an alias to matchers.Matchers
+var Matchers = matchers.Matchers
+
+// MatcherKeys is an alias to matchers.MatcherKeys
+var MatcherKeys = &matchers.MatcherKeys
+
+// NewMatcher is an alias to matchers.NewMatcher
+var NewMatcher = matchers.NewMatcher
+
+// Match infers the file type of a given buffer inspecting its magic numbers signature
+func Match(buf []byte) (types.Type, error) {
+ length := len(buf)
+ if length == 0 {
+ return types.Unknown, ErrEmptyBuffer
+ }
+
+ for _, kind := range *MatcherKeys {
+ checker := Matchers[kind]
+ match := checker(buf)
+ if match != types.Unknown && match.Extension != "" {
+ return match, nil
+ }
+ }
+
+ return types.Unknown, nil
+}
+
+// Get is an alias to Match()
+func Get(buf []byte) (types.Type, error) {
+ return Match(buf)
+}
+
+// MatchFile infers a file type for a file
+func MatchFile(filepath string) (types.Type, error) {
+ file, err := os.Open(filepath)
+ if err != nil {
+ return types.Unknown, err
+ }
+ defer file.Close()
+
+ return MatchReader(file)
+}
+
+// MatchReader is a convenient wrapper to Match() for any io.Reader
+func MatchReader(reader io.Reader) (types.Type, error) {
+ buffer := make([]byte, 8192) // 8K makes msooxml tests happy and allows for expanded custom file checks
+
+ _, err := reader.Read(buffer)
+ if err != nil && err != io.EOF {
+ return types.Unknown, err
+ }
+
+ return Match(buffer)
+}
+
+// AddMatcher registers a new matcher type
+func AddMatcher(fileType types.Type, matcher matchers.Matcher) matchers.TypeMatcher {
+ return matchers.NewMatcher(fileType, matcher)
+}
+
+// Matches checks if the given buffer matches with some supported file type
+func Matches(buf []byte) bool {
+ kind, _ := Match(buf)
+ return kind != types.Unknown
+}
+
+// MatchMap performs a file matching against a map of match functions
+func MatchMap(buf []byte, matchers matchers.Map) types.Type {
+ for kind, matcher := range matchers {
+ if matcher(buf) {
+ return kind
+ }
+ }
+ return types.Unknown
+}
+
+// MatchesMap is an alias to Matches() but using matching against a map of match functions
+func MatchesMap(buf []byte, matchers matchers.Map) bool {
+ return MatchMap(buf, matchers) != types.Unknown
+}
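
MatchFile and MatchReader only read the first 8192 bytes of their input, so matching stays cheap even for large files. A short usage sketch, with an illustrative path:

```go
package main

import (
	"fmt"
	"os"

	"github.com/h2non/filetype"
)

func main() {
	// MatchFile opens the file and reads the header internally.
	kind, err := filetype.MatchFile("movie.mp4") // illustrative path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if kind == filetype.Unknown {
		fmt.Println("unknown file type")
		return
	}
	fmt.Printf("type: %s, mime: %s\n", kind.Extension, kind.MIME.Value)

	// MatchReader works against any io.Reader.
	f, err := os.Open("movie.mp4")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer f.Close()
	kind, _ = filetype.MatchReader(f)
	fmt.Println("via reader:", kind.Extension)
}
```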
diff --git a/vendor/github.com/h2non/filetype/matchers/archive.go b/vendor/github.com/h2non/filetype/matchers/archive.go
new file mode 100644
index 000000000..9c1270ffb
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/archive.go
@@ -0,0 +1,217 @@
+package matchers
+
+var (
+ TypeEpub = newType("epub", "application/epub+zip")
+ TypeZip = newType("zip", "application/zip")
+ TypeTar = newType("tar", "application/x-tar")
+ TypeRar = newType("rar", "application/x-rar-compressed")
+ TypeGz = newType("gz", "application/gzip")
+ TypeBz2 = newType("bz2", "application/x-bzip2")
+ Type7z = newType("7z", "application/x-7z-compressed")
+ TypeXz = newType("xz", "application/x-xz")
+ TypePdf = newType("pdf", "application/pdf")
+ TypeExe = newType("exe", "application/x-msdownload")
+ TypeSwf = newType("swf", "application/x-shockwave-flash")
+ TypeRtf = newType("rtf", "application/rtf")
+ TypeEot = newType("eot", "application/octet-stream")
+ TypePs = newType("ps", "application/postscript")
+ TypeSqlite = newType("sqlite", "application/x-sqlite3")
+ TypeNes = newType("nes", "application/x-nintendo-nes-rom")
+ TypeCrx = newType("crx", "application/x-google-chrome-extension")
+ TypeCab = newType("cab", "application/vnd.ms-cab-compressed")
+ TypeDeb = newType("deb", "application/x-deb")
+ TypeAr = newType("ar", "application/x-unix-archive")
+ TypeZ = newType("Z", "application/x-compress")
+ TypeLz = newType("lz", "application/x-lzip")
+ TypeRpm = newType("rpm", "application/x-rpm")
+ TypeElf = newType("elf", "application/x-executable")
+)
+
+var Archive = Map{
+ TypeEpub: Epub,
+ TypeZip: Zip,
+ TypeTar: Tar,
+ TypeRar: Rar,
+ TypeGz: Gz,
+ TypeBz2: Bz2,
+ Type7z: SevenZ,
+ TypeXz: Xz,
+ TypePdf: Pdf,
+ TypeExe: Exe,
+ TypeSwf: Swf,
+ TypeRtf: Rtf,
+ TypeEot: Eot,
+ TypePs: Ps,
+ TypeSqlite: Sqlite,
+ TypeNes: Nes,
+ TypeCrx: Crx,
+ TypeCab: Cab,
+ TypeDeb: Deb,
+ TypeAr: Ar,
+ TypeZ: Z,
+ TypeLz: Lz,
+ TypeRpm: Rpm,
+ TypeElf: Elf,
+}
+
+func Epub(buf []byte) bool {
+ return len(buf) > 57 &&
+ buf[0] == 0x50 && buf[1] == 0x4B && buf[2] == 0x3 && buf[3] == 0x4 &&
+ buf[30] == 0x6D && buf[31] == 0x69 && buf[32] == 0x6D && buf[33] == 0x65 &&
+ buf[34] == 0x74 && buf[35] == 0x79 && buf[36] == 0x70 && buf[37] == 0x65 &&
+ buf[38] == 0x61 && buf[39] == 0x70 && buf[40] == 0x70 && buf[41] == 0x6C &&
+ buf[42] == 0x69 && buf[43] == 0x63 && buf[44] == 0x61 && buf[45] == 0x74 &&
+ buf[46] == 0x69 && buf[47] == 0x6F && buf[48] == 0x6E && buf[49] == 0x2F &&
+ buf[50] == 0x65 && buf[51] == 0x70 && buf[52] == 0x75 && buf[53] == 0x62 &&
+ buf[54] == 0x2B && buf[55] == 0x7A && buf[56] == 0x69 && buf[57] == 0x70
+}
+
+func Zip(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x50 && buf[1] == 0x4B &&
+ (buf[2] == 0x3 || buf[2] == 0x5 || buf[2] == 0x7) &&
+ (buf[3] == 0x4 || buf[3] == 0x6 || buf[3] == 0x8)
+}
+
+func Tar(buf []byte) bool {
+ return len(buf) > 261 &&
+ buf[257] == 0x75 && buf[258] == 0x73 &&
+ buf[259] == 0x74 && buf[260] == 0x61 &&
+ buf[261] == 0x72
+}
+
+func Rar(buf []byte) bool {
+ return len(buf) > 6 &&
+ buf[0] == 0x52 && buf[1] == 0x61 && buf[2] == 0x72 &&
+ buf[3] == 0x21 && buf[4] == 0x1A && buf[5] == 0x7 &&
+ (buf[6] == 0x0 || buf[6] == 0x1)
+}
+
+func Gz(buf []byte) bool {
+ return len(buf) > 2 &&
+ buf[0] == 0x1F && buf[1] == 0x8B && buf[2] == 0x8
+}
+
+func Bz2(buf []byte) bool {
+ return len(buf) > 2 &&
+ buf[0] == 0x42 && buf[1] == 0x5A && buf[2] == 0x68
+}
+
+func SevenZ(buf []byte) bool {
+ return len(buf) > 5 &&
+ buf[0] == 0x37 && buf[1] == 0x7A && buf[2] == 0xBC &&
+ buf[3] == 0xAF && buf[4] == 0x27 && buf[5] == 0x1C
+}
+
+func Pdf(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x25 && buf[1] == 0x50 &&
+ buf[2] == 0x44 && buf[3] == 0x46
+}
+
+func Exe(buf []byte) bool {
+ return len(buf) > 1 &&
+ buf[0] == 0x4D && buf[1] == 0x5A
+}
+
+func Swf(buf []byte) bool {
+ return len(buf) > 2 &&
+ (buf[0] == 0x43 || buf[0] == 0x46) &&
+ buf[1] == 0x57 && buf[2] == 0x53
+}
+
+func Rtf(buf []byte) bool {
+ return len(buf) > 4 &&
+ buf[0] == 0x7B && buf[1] == 0x5C &&
+ buf[2] == 0x72 && buf[3] == 0x74 &&
+ buf[4] == 0x66
+}
+
+func Nes(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x4E && buf[1] == 0x45 &&
+ buf[2] == 0x53 && buf[3] == 0x1A
+}
+
+func Crx(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x43 && buf[1] == 0x72 &&
+ buf[2] == 0x32 && buf[3] == 0x34
+}
+
+func Cab(buf []byte) bool {
+ return len(buf) > 3 &&
+ ((buf[0] == 0x4D && buf[1] == 0x53 && buf[2] == 0x43 && buf[3] == 0x46) ||
+ (buf[0] == 0x49 && buf[1] == 0x53 && buf[2] == 0x63 && buf[3] == 0x28))
+}
+
+func Eot(buf []byte) bool {
+ return len(buf) > 35 &&
+ buf[34] == 0x4C && buf[35] == 0x50 &&
+ ((buf[8] == 0x02 && buf[9] == 0x00 &&
+ buf[10] == 0x01) || (buf[8] == 0x01 &&
+ buf[9] == 0x00 && buf[10] == 0x00) ||
+ (buf[8] == 0x02 && buf[9] == 0x00 &&
+ buf[10] == 0x02))
+}
+
+func Ps(buf []byte) bool {
+ return len(buf) > 1 &&
+ buf[0] == 0x25 && buf[1] == 0x21
+}
+
+func Xz(buf []byte) bool {
+ return len(buf) > 5 &&
+ buf[0] == 0xFD && buf[1] == 0x37 &&
+ buf[2] == 0x7A && buf[3] == 0x58 &&
+ buf[4] == 0x5A && buf[5] == 0x00
+}
+
+func Sqlite(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x53 && buf[1] == 0x51 &&
+ buf[2] == 0x4C && buf[3] == 0x69
+}
+
+func Deb(buf []byte) bool {
+ return len(buf) > 20 &&
+ buf[0] == 0x21 && buf[1] == 0x3C && buf[2] == 0x61 &&
+ buf[3] == 0x72 && buf[4] == 0x63 && buf[5] == 0x68 &&
+ buf[6] == 0x3E && buf[7] == 0x0A && buf[8] == 0x64 &&
+ buf[9] == 0x65 && buf[10] == 0x62 && buf[11] == 0x69 &&
+ buf[12] == 0x61 && buf[13] == 0x6E && buf[14] == 0x2D &&
+ buf[15] == 0x62 && buf[16] == 0x69 && buf[17] == 0x6E &&
+ buf[18] == 0x61 && buf[19] == 0x72 && buf[20] == 0x79
+}
+
+func Ar(buf []byte) bool {
+ return len(buf) > 6 &&
+ buf[0] == 0x21 && buf[1] == 0x3C &&
+ buf[2] == 0x61 && buf[3] == 0x72 &&
+ buf[4] == 0x63 && buf[5] == 0x68 &&
+ buf[6] == 0x3E
+}
+
+func Z(buf []byte) bool {
+ return len(buf) > 1 &&
+ ((buf[0] == 0x1F && buf[1] == 0xA0) ||
+ (buf[0] == 0x1F && buf[1] == 0x9D))
+}
+
+func Lz(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x4C && buf[1] == 0x5A &&
+ buf[2] == 0x49 && buf[3] == 0x50
+}
+
+func Rpm(buf []byte) bool {
+ return len(buf) > 96 &&
+ buf[0] == 0xED && buf[1] == 0xAB &&
+ buf[2] == 0xEE && buf[3] == 0xDB
+}
+
+func Elf(buf []byte) bool {
+ return len(buf) > 52 &&
+ buf[0] == 0x7F && buf[1] == 0x45 &&
+ buf[2] == 0x4C && buf[3] == 0x46
+}
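
Each matcher above is exported, so a caller can probe a specific signature directly instead of going through `filetype.Match`. A sketch under that assumption (the path is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/h2non/filetype/matchers"
)

func main() {
	f, err := os.Open("archive.zip") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The archive signatures above all live in the first few hundred
	// bytes (Tar needs offsets 257-261), so a 262-byte header is enough.
	head := make([]byte, 262)
	if _, err := f.Read(head); err != nil {
		panic(err)
	}

	fmt.Println("zip:", matchers.Zip(head))
	fmt.Println("gzip:", matchers.Gz(head))
	fmt.Println("pdf:", matchers.Pdf(head))
}
```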
diff --git a/vendor/github.com/h2non/filetype/matchers/audio.go b/vendor/github.com/h2non/filetype/matchers/audio.go
new file mode 100644
index 000000000..6d532630a
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/audio.go
@@ -0,0 +1,75 @@
+package matchers
+
+var (
+ TypeMidi = newType("mid", "audio/midi")
+ TypeMp3 = newType("mp3", "audio/mpeg")
+ TypeM4a = newType("m4a", "audio/m4a")
+ TypeOgg = newType("ogg", "audio/ogg")
+ TypeFlac = newType("flac", "audio/x-flac")
+ TypeWav = newType("wav", "audio/x-wav")
+ TypeAmr = newType("amr", "audio/amr")
+ TypeAac = newType("aac", "audio/aac")
+)
+
+var Audio = Map{
+ TypeMidi: Midi,
+ TypeMp3: Mp3,
+ TypeM4a: M4a,
+ TypeOgg: Ogg,
+ TypeFlac: Flac,
+ TypeWav: Wav,
+ TypeAmr: Amr,
+ TypeAac: Aac,
+}
+
+func Midi(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x4D && buf[1] == 0x54 &&
+ buf[2] == 0x68 && buf[3] == 0x64
+}
+
+func Mp3(buf []byte) bool {
+ return len(buf) > 2 &&
+ ((buf[0] == 0x49 && buf[1] == 0x44 && buf[2] == 0x33) ||
+ (buf[0] == 0xFF && buf[1] == 0xfb))
+}
+
+func M4a(buf []byte) bool {
+ return len(buf) > 10 &&
+ ((buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 &&
+ buf[7] == 0x70 && buf[8] == 0x4D && buf[9] == 0x34 && buf[10] == 0x41) ||
+ (buf[0] == 0x4D && buf[1] == 0x34 && buf[2] == 0x41 && buf[3] == 0x20))
+}
+
+func Ogg(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x4F && buf[1] == 0x67 &&
+ buf[2] == 0x67 && buf[3] == 0x53
+}
+
+func Flac(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x66 && buf[1] == 0x4C &&
+ buf[2] == 0x61 && buf[3] == 0x43
+}
+
+func Wav(buf []byte) bool {
+ return len(buf) > 11 &&
+ buf[0] == 0x52 && buf[1] == 0x49 &&
+ buf[2] == 0x46 && buf[3] == 0x46 &&
+ buf[8] == 0x57 && buf[9] == 0x41 &&
+ buf[10] == 0x56 && buf[11] == 0x45
+}
+
+func Amr(buf []byte) bool {
+ return len(buf) > 11 &&
+ buf[0] == 0x23 && buf[1] == 0x21 &&
+ buf[2] == 0x41 && buf[3] == 0x4D &&
+ buf[4] == 0x52 && buf[5] == 0x0A
+}
+
+func Aac(buf []byte) bool {
+ return len(buf) > 1 &&
+ ((buf[0] == 0xFF && buf[1] == 0xF1) ||
+ (buf[0] == 0xFF && buf[1] == 0xF9))
+}
diff --git a/vendor/github.com/h2non/filetype/matchers/document.go b/vendor/github.com/h2non/filetype/matchers/document.go
new file mode 100644
index 000000000..7e1ef851d
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/document.go
@@ -0,0 +1,182 @@
+package matchers
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ TypeDoc = newType("doc", "application/msword")
+ TypeDocx = newType("docx", "application/vnd.openxmlformats-officedocument.wordprocessingml.document")
+ TypeXls = newType("xls", "application/vnd.ms-excel")
+ TypeXlsx = newType("xlsx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
+ TypePpt = newType("ppt", "application/vnd.ms-powerpoint")
+ TypePptx = newType("pptx", "application/vnd.openxmlformats-officedocument.presentationml.presentation")
+)
+
+var Document = Map{
+ TypeDoc: Doc,
+ TypeDocx: Docx,
+ TypeXls: Xls,
+ TypeXlsx: Xlsx,
+ TypePpt: Ppt,
+ TypePptx: Pptx,
+}
+
+type docType int
+
+const (
+ TYPE_DOC docType = iota
+ TYPE_DOCX
+ TYPE_XLS
+ TYPE_XLSX
+ TYPE_PPT
+ TYPE_PPTX
+ TYPE_OOXML
+)
+
+func Doc(buf []byte) bool {
+ return len(buf) > 7 &&
+ buf[0] == 0xD0 && buf[1] == 0xCF &&
+ buf[2] == 0x11 && buf[3] == 0xE0 &&
+ buf[4] == 0xA1 && buf[5] == 0xB1 &&
+ buf[6] == 0x1A && buf[7] == 0xE1
+}
+
+func Docx(buf []byte) bool {
+ typ, ok := msooxml(buf)
+ return ok && typ == TYPE_DOCX
+}
+
+func Xls(buf []byte) bool {
+ return len(buf) > 7 &&
+ buf[0] == 0xD0 && buf[1] == 0xCF &&
+ buf[2] == 0x11 && buf[3] == 0xE0 &&
+ buf[4] == 0xA1 && buf[5] == 0xB1 &&
+ buf[6] == 0x1A && buf[7] == 0xE1
+}
+
+func Xlsx(buf []byte) bool {
+ typ, ok := msooxml(buf)
+ return ok && typ == TYPE_XLSX
+}
+
+func Ppt(buf []byte) bool {
+ return len(buf) > 7 &&
+ buf[0] == 0xD0 && buf[1] == 0xCF &&
+ buf[2] == 0x11 && buf[3] == 0xE0 &&
+ buf[4] == 0xA1 && buf[5] == 0xB1 &&
+ buf[6] == 0x1A && buf[7] == 0xE1
+}
+
+func Pptx(buf []byte) bool {
+ typ, ok := msooxml(buf)
+ return ok && typ == TYPE_PPTX
+}
+
+func msooxml(buf []byte) (typ docType, found bool) {
+ signature := []byte{'P', 'K', 0x03, 0x04}
+
+ // start by checking for ZIP local file header signature
+ if ok := compareBytes(buf, signature, 0); !ok {
+ return
+ }
+
+ // make sure the first file is correct
+ if v, ok := checkMSOoml(buf, 0x1E); ok {
+ return v, ok
+ }
+
+ if !compareBytes(buf, []byte("[Content_Types].xml"), 0x1E) && !compareBytes(buf, []byte("_rels/.rels"), 0x1E) {
+ return
+ }
+
+ // skip to the second local file header
+ // since some documents include a 520-byte extra field following the file
+ // header, we need to scan for the next header
+ startOffset := int(binary.LittleEndian.Uint32(buf[18:22]) + 49)
+ idx := search(buf, startOffset, 6000)
+ if idx == -1 {
+ return
+ }
+
+ // now skip to the *third* local file header; again, we need to scan due to a
+ // 520-byte extra field following the file header
+ startOffset += idx + 4 + 26
+ idx = search(buf, startOffset, 6000)
+ if idx == -1 {
+ return
+ }
+
+ // and check the subdirectory name to determine which type of OOXML
+ // file we have. Correct the mimetype with the registered ones:
+ // http://technet.microsoft.com/en-us/library/cc179224.aspx
+ startOffset += idx + 4 + 26
+ if typ, ok := checkMSOoml(buf, startOffset); ok {
+ return typ, ok
+ }
+
+ // OpenOffice/LibreOffice orders ZIP entries differently, so check the 4th file
+ startOffset += 26
+ idx = search(buf, startOffset, 6000)
+ if idx == -1 {
+ return TYPE_OOXML, true
+ }
+
+ startOffset += idx + 4 + 26
+ if typ, ok := checkMSOoml(buf, startOffset); ok {
+ return typ, ok
+ } else {
+ return TYPE_OOXML, true
+ }
+}
+
+func compareBytes(slice, subSlice []byte, startOffset int) bool {
+ sl := len(subSlice)
+
+ if startOffset+sl > len(slice) {
+ return false
+ }
+
+ s := slice[startOffset : startOffset+sl]
+ for i := range s {
+ if subSlice[i] != s[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func checkMSOoml(buf []byte, offset int) (typ docType, ok bool) {
+ ok = true
+
+ switch {
+ case compareBytes(buf, []byte("word/"), offset):
+ typ = TYPE_DOCX
+ case compareBytes(buf, []byte("ppt/"), offset):
+ typ = TYPE_PPTX
+ case compareBytes(buf, []byte("xl/"), offset):
+ typ = TYPE_XLSX
+ default:
+ ok = false
+ }
+
+ return
+}
+
+func search(buf []byte, start, rangeNum int) int {
+ length := len(buf)
+ end := start + rangeNum
+ signature := []byte{'P', 'K', 0x03, 0x04}
+
+ if end > length {
+ end = length
+ }
+
+ if start >= end {
+ return -1
+ }
+
+ return bytes.Index(buf[start:end], signature)
+}
diff --git a/vendor/github.com/h2non/filetype/matchers/font.go b/vendor/github.com/h2non/filetype/matchers/font.go
new file mode 100644
index 000000000..f39171675
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/font.go
@@ -0,0 +1,45 @@
+package matchers
+
+var (
+ TypeWoff = newType("woff", "application/font-woff")
+ TypeWoff2 = newType("woff2", "application/font-woff")
+ TypeTtf = newType("ttf", "application/font-sfnt")
+ TypeOtf = newType("otf", "application/font-sfnt")
+)
+
+var Font = Map{
+ TypeWoff: Woff,
+ TypeWoff2: Woff2,
+ TypeTtf: Ttf,
+ TypeOtf: Otf,
+}
+
+func Woff(buf []byte) bool {
+ return len(buf) > 7 &&
+ buf[0] == 0x77 && buf[1] == 0x4F &&
+ buf[2] == 0x46 && buf[3] == 0x46 &&
+ buf[4] == 0x00 && buf[5] == 0x01 &&
+ buf[6] == 0x00 && buf[7] == 0x00
+}
+
+func Woff2(buf []byte) bool {
+ return len(buf) > 7 &&
+ buf[0] == 0x77 && buf[1] == 0x4F &&
+ buf[2] == 0x46 && buf[3] == 0x32 &&
+ buf[4] == 0x00 && buf[5] == 0x01 &&
+ buf[6] == 0x00 && buf[7] == 0x00
+}
+
+func Ttf(buf []byte) bool {
+ return len(buf) > 4 &&
+ buf[0] == 0x00 && buf[1] == 0x01 &&
+ buf[2] == 0x00 && buf[3] == 0x00 &&
+ buf[4] == 0x00
+}
+
+func Otf(buf []byte) bool {
+ return len(buf) > 4 &&
+ buf[0] == 0x4F && buf[1] == 0x54 &&
+ buf[2] == 0x54 && buf[3] == 0x4F &&
+ buf[4] == 0x00
+}
diff --git a/vendor/github.com/h2non/filetype/matchers/image.go b/vendor/github.com/h2non/filetype/matchers/image.go
new file mode 100644
index 000000000..edb7fbc70
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/image.go
@@ -0,0 +1,108 @@
+package matchers
+
+var (
+ TypeJpeg = newType("jpg", "image/jpeg")
+ TypeJpeg2000 = newType("jp2", "image/jp2")
+ TypePng = newType("png", "image/png")
+ TypeGif = newType("gif", "image/gif")
+ TypeWebp = newType("webp", "image/webp")
+ TypeCR2 = newType("cr2", "image/x-canon-cr2")
+ TypeTiff = newType("tif", "image/tiff")
+ TypeBmp = newType("bmp", "image/bmp")
+ TypeJxr = newType("jxr", "image/vnd.ms-photo")
+ TypePsd = newType("psd", "image/vnd.adobe.photoshop")
+ TypeIco = newType("ico", "image/x-icon")
+)
+
+var Image = Map{
+ TypeJpeg: Jpeg,
+ TypeJpeg2000: Jpeg2000,
+ TypePng: Png,
+ TypeGif: Gif,
+ TypeWebp: Webp,
+ TypeCR2: CR2,
+ TypeTiff: Tiff,
+ TypeBmp: Bmp,
+ TypeJxr: Jxr,
+ TypePsd: Psd,
+ TypeIco: Ico,
+}
+
+func Jpeg(buf []byte) bool {
+ return len(buf) > 2 &&
+ buf[0] == 0xFF &&
+ buf[1] == 0xD8 &&
+ buf[2] == 0xFF
+}
+
+func Jpeg2000(buf []byte) bool {
+ return len(buf) > 12 &&
+ buf[0] == 0x0 &&
+ buf[1] == 0x0 &&
+ buf[2] == 0x0 &&
+ buf[3] == 0xC &&
+ buf[4] == 0x6A &&
+ buf[5] == 0x50 &&
+ buf[6] == 0x20 &&
+ buf[7] == 0x20 &&
+ buf[8] == 0xD &&
+ buf[9] == 0xA &&
+ buf[10] == 0x87 &&
+ buf[11] == 0xA &&
+ buf[12] == 0x0
+}
+
+func Png(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x89 && buf[1] == 0x50 &&
+ buf[2] == 0x4E && buf[3] == 0x47
+}
+
+func Gif(buf []byte) bool {
+ return len(buf) > 2 &&
+ buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46
+}
+
+func Webp(buf []byte) bool {
+ return len(buf) > 11 &&
+ buf[8] == 0x57 && buf[9] == 0x45 &&
+ buf[10] == 0x42 && buf[11] == 0x50
+}
+
+func CR2(buf []byte) bool {
+ return len(buf) > 9 &&
+ ((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) ||
+ (buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) &&
+ buf[8] == 0x43 && buf[9] == 0x52
+}
+
+func Tiff(buf []byte) bool {
+ return len(buf) > 3 &&
+ ((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) ||
+ (buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A))
+}
+
+func Bmp(buf []byte) bool {
+ return len(buf) > 1 &&
+ buf[0] == 0x42 &&
+ buf[1] == 0x4D
+}
+
+func Jxr(buf []byte) bool {
+ return len(buf) > 2 &&
+ buf[0] == 0x49 &&
+ buf[1] == 0x49 &&
+ buf[2] == 0xBC
+}
+
+func Psd(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x38 && buf[1] == 0x42 &&
+ buf[2] == 0x50 && buf[3] == 0x53
+}
+
+func Ico(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x00 && buf[1] == 0x00 &&
+ buf[2] == 0x01 && buf[3] == 0x00
+}
diff --git a/vendor/github.com/h2non/filetype/matchers/matchers.go b/vendor/github.com/h2non/filetype/matchers/matchers.go
new file mode 100644
index 000000000..e9462df8e
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/matchers.go
@@ -0,0 +1,51 @@
+package matchers
+
+import (
+ "github.com/h2non/filetype/types"
+)
+
+// Internal shortcut to NewType
+var newType = types.NewType
+
+// Matcher function interface as type alias
+type Matcher func([]byte) bool
+
+// Map stores pairs of file types with their matcher functions
+type Map map[types.Type]Matcher
+
+// Type specific matcher function interface
+type TypeMatcher func([]byte) types.Type
+
+// Store registered file type matchers
+var Matchers = make(map[types.Type]TypeMatcher)
+var MatcherKeys []types.Type
+
+// Create and register a new type matcher function
+func NewMatcher(kind types.Type, fn Matcher) TypeMatcher {
+ matcher := func(buf []byte) types.Type {
+ if fn(buf) {
+ return kind
+ }
+ return types.Unknown
+ }
+
+ Matchers[kind] = matcher
+ // prepend here so any user defined matchers get added first
+ MatcherKeys = append([]types.Type{kind}, MatcherKeys...)
+ return matcher
+}
+
+func register(matchers ...Map) {
+ MatcherKeys = MatcherKeys[:0]
+ for _, m := range matchers {
+ for kind, matcher := range m {
+ NewMatcher(kind, matcher)
+ }
+ }
+}
+
+func init() {
+ // Arguments order is intentional
+ // Archive files will be checked last due to prepend above in func NewMatcher
+ register(Archive, Document, Font, Audio, Video, Image)
+}
diff --git a/vendor/github.com/h2non/filetype/matchers/video.go b/vendor/github.com/h2non/filetype/matchers/video.go
new file mode 100644
index 000000000..2c7803810
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/matchers/video.go
@@ -0,0 +1,129 @@
+package matchers
+
+var (
+ TypeMp4 = newType("mp4", "video/mp4")
+ TypeM4v = newType("m4v", "video/x-m4v")
+ TypeMkv = newType("mkv", "video/x-matroska")
+ TypeWebm = newType("webm", "video/webm")
+ TypeMov = newType("mov", "video/quicktime")
+ TypeAvi = newType("avi", "video/x-msvideo")
+ TypeWmv = newType("wmv", "video/x-ms-wmv")
+ TypeMpeg = newType("mpg", "video/mpeg")
+ TypeFlv = newType("flv", "video/x-flv")
+)
+
+var Video = Map{
+ TypeMp4: Mp4,
+ TypeM4v: M4v,
+ TypeMkv: Mkv,
+ TypeWebm: Webm,
+ TypeMov: Mov,
+ TypeAvi: Avi,
+ TypeWmv: Wmv,
+ TypeMpeg: Mpeg,
+ TypeFlv: Flv,
+}
+
+func M4v(buf []byte) bool {
+ return len(buf) > 10 &&
+ buf[4] == 0x66 && buf[5] == 0x74 &&
+ buf[6] == 0x79 && buf[7] == 0x70 &&
+ buf[8] == 0x4D && buf[9] == 0x34 &&
+ buf[10] == 0x56
+}
+
+func Mkv(buf []byte) bool {
+ return (len(buf) > 15 &&
+ buf[0] == 0x1A && buf[1] == 0x45 &&
+ buf[2] == 0xDF && buf[3] == 0xA3 &&
+ buf[4] == 0x93 && buf[5] == 0x42 &&
+ buf[6] == 0x82 && buf[7] == 0x88 &&
+ buf[8] == 0x6D && buf[9] == 0x61 &&
+ buf[10] == 0x74 && buf[11] == 0x72 &&
+ buf[12] == 0x6F && buf[13] == 0x73 &&
+ buf[14] == 0x6B && buf[15] == 0x61) ||
+ (len(buf) > 38 &&
+ buf[31] == 0x6D && buf[32] == 0x61 &&
+ buf[33] == 0x74 && buf[34] == 0x72 &&
+ buf[35] == 0x6f && buf[36] == 0x73 &&
+ buf[37] == 0x6B && buf[38] == 0x61)
+}
+
+func Webm(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x1A && buf[1] == 0x45 &&
+ buf[2] == 0xDF && buf[3] == 0xA3
+}
+
+func Mov(buf []byte) bool {
+ return len(buf) > 15 && ((buf[0] == 0x0 && buf[1] == 0x0 &&
+ buf[2] == 0x0 && buf[3] == 0x14 &&
+ buf[4] == 0x66 && buf[5] == 0x74 &&
+ buf[6] == 0x79 && buf[7] == 0x70) ||
+ (buf[4] == 0x6d && buf[5] == 0x6f && buf[6] == 0x6f && buf[7] == 0x76) ||
+ (buf[4] == 0x6d && buf[5] == 0x64 && buf[6] == 0x61 && buf[7] == 0x74) ||
+ (buf[12] == 0x6d && buf[13] == 0x64 && buf[14] == 0x61 && buf[15] == 0x74))
+}
+
+func Avi(buf []byte) bool {
+ return len(buf) > 10 &&
+ buf[0] == 0x52 && buf[1] == 0x49 &&
+ buf[2] == 0x46 && buf[3] == 0x46 &&
+ buf[8] == 0x41 && buf[9] == 0x56 &&
+ buf[10] == 0x49
+}
+
+func Wmv(buf []byte) bool {
+ return len(buf) > 9 &&
+ buf[0] == 0x30 && buf[1] == 0x26 &&
+ buf[2] == 0xB2 && buf[3] == 0x75 &&
+ buf[4] == 0x8E && buf[5] == 0x66 &&
+ buf[6] == 0xCF && buf[7] == 0x11 &&
+ buf[8] == 0xA6 && buf[9] == 0xD9
+}
+
+func Mpeg(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x0 && buf[1] == 0x0 &&
+ buf[2] == 0x1 && buf[3] >= 0xb0 &&
+ buf[3] <= 0xbf
+}
+
+func Flv(buf []byte) bool {
+ return len(buf) > 3 &&
+ buf[0] == 0x46 && buf[1] == 0x4C &&
+ buf[2] == 0x56 && buf[3] == 0x01
+}
+
+func Mp4(buf []byte) bool {
+ return len(buf) > 11 &&
+ (buf[4] == 'f' && buf[5] == 't' && buf[6] == 'y' && buf[7] == 'p') &&
+ ((buf[8] == 'a' && buf[9] == 'v' && buf[10] == 'c' && buf[11] == '1') ||
+ (buf[8] == 'd' && buf[9] == 'a' && buf[10] == 's' && buf[11] == 'h') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '2') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '3') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '4') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '5') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '6') ||
+ (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == 'm') ||
+ (buf[8] == 'm' && buf[9] == 'm' && buf[10] == 'p' && buf[11] == '4') ||
+ (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == '1') ||
+ (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == '2') ||
+ (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == 'v') ||
+ (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '7' && buf[11] == '1') ||
+ (buf[8] == 'M' && buf[9] == 'S' && buf[10] == 'N' && buf[11] == 'V') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'A' && buf[11] == 'S') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'C') ||
+ (buf[8] == 'N' && buf[9] == 'S' && buf[10] == 'D' && buf[11] == 'C') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'H') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'M') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'P') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'S') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'C') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'H') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'M') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'P') ||
+ (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'S') ||
+ (buf[8] == 'F' && buf[9] == '4' && buf[10] == 'V' && buf[11] == ' ') ||
+ (buf[8] == 'F' && buf[9] == '4' && buf[10] == 'P' && buf[11] == ' '))
+}
diff --git a/vendor/github.com/h2non/filetype/types/defaults.go b/vendor/github.com/h2non/filetype/types/defaults.go
new file mode 100644
index 000000000..0d985a05d
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/types/defaults.go
@@ -0,0 +1,4 @@
+package types
+
+// Unknown default type
+var Unknown = NewType("unknown", "")
diff --git a/vendor/github.com/h2non/filetype/types/mime.go b/vendor/github.com/h2non/filetype/types/mime.go
new file mode 100644
index 000000000..fe8ea822e
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/types/mime.go
@@ -0,0 +1,14 @@
+package types
+
+// MIME stores the file MIME type values
+type MIME struct {
+ Type string
+ Subtype string
+ Value string
+}
+
+// Creates a new MIME type
+func NewMIME(mime string) MIME {
+ kind, subtype := splitMime(mime)
+ return MIME{Type: kind, Subtype: subtype, Value: mime}
+}
diff --git a/vendor/github.com/h2non/filetype/types/split.go b/vendor/github.com/h2non/filetype/types/split.go
new file mode 100644
index 000000000..68a5a8b3b
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/types/split.go
@@ -0,0 +1,11 @@
+package types
+
+import "strings"
+
+func splitMime(s string) (string, string) {
+ x := strings.Split(s, "/")
+ if len(x) > 1 {
+ return x[0], x[1]
+ }
+ return x[0], ""
+}
diff --git a/vendor/github.com/h2non/filetype/types/type.go b/vendor/github.com/h2non/filetype/types/type.go
new file mode 100644
index 000000000..5cf7dfc4b
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/types/type.go
@@ -0,0 +1,16 @@
+package types
+
+// Type represents a file MIME type and its extension
+type Type struct {
+ MIME MIME
+ Extension string
+}
+
+// NewType creates a new Type
+func NewType(ext, mime string) Type {
+ t := Type{
+ MIME: NewMIME(mime),
+ Extension: ext,
+ }
+ return Add(t)
+}
diff --git a/vendor/github.com/h2non/filetype/types/types.go b/vendor/github.com/h2non/filetype/types/types.go
new file mode 100644
index 000000000..27d433eec
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/types/types.go
@@ -0,0 +1,18 @@
+package types
+
+var Types = make(map[string]Type)
+
+// Add registers a new type in the package
+func Add(t Type) Type {
+ Types[t.Extension] = t
+ return t
+}
+
+// Get retrieves a Type by extension
+func Get(ext string) Type {
+ kind := Types[ext]
+ if kind.Extension != "" {
+ return kind
+ }
+ return Unknown
+}
diff --git a/vendor/github.com/h2non/filetype/version.go b/vendor/github.com/h2non/filetype/version.go
new file mode 100644
index 000000000..20a4b26e3
--- /dev/null
+++ b/vendor/github.com/h2non/filetype/version.go
@@ -0,0 +1,4 @@
+package filetype
+
+// Version exposes the current package version.
+const Version = "1.0.6"
diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go
new file mode 100644
index 000000000..e474cd075
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/2q.go
@@ -0,0 +1,223 @@
+package lru
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // Default2QRecentRatio is the ratio of the 2Q cache dedicated
+ // to recently added entries that have only been accessed once.
+ Default2QRecentRatio = 0.25
+
+ // Default2QGhostEntries is the default ratio of ghost
+ // entries kept to track entries recently evicted
+ Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata
+// overhead. The ARCCache is similar, but does not require setting any
+// parameters.
+type TwoQueueCache struct {
+ size int
+ recentSize int
+
+ recent simplelru.LRUCache
+ frequent simplelru.LRUCache
+ recentEvict simplelru.LRUCache
+ lock sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q(size int) (*TwoQueueCache, error) {
+ return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
+ if size <= 0 {
+ return nil, fmt.Errorf("invalid size")
+ }
+ if recentRatio < 0.0 || recentRatio > 1.0 {
+ return nil, fmt.Errorf("invalid recent ratio")
+ }
+ if ghostRatio < 0.0 || ghostRatio > 1.0 {
+ return nil, fmt.Errorf("invalid ghost ratio")
+ }
+
+ // Determine the sub-sizes
+ recentSize := int(float64(size) * recentRatio)
+ evictSize := int(float64(size) * ghostRatio)
+
+ // Allocate the LRUs
+ recent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ frequent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ recentEvict, err := simplelru.NewLRU(evictSize, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the cache
+ c := &TwoQueueCache{
+ size: size,
+ recentSize: recentSize,
+ recent: recent,
+ frequent: frequent,
+ recentEvict: recentEvict,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if this is a frequent value
+ if val, ok := c.frequent.Get(key); ok {
+ return val, ok
+ }
+
+ // If the value is contained in recent, then we
+ // promote it to frequent
+ if val, ok := c.recent.Peek(key); ok {
+ c.recent.Remove(key)
+ c.frequent.Add(key, val)
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *TwoQueueCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is frequently used already,
+ // and just update the value
+ if c.frequent.Contains(key) {
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Check if the value is recently used, and promote
+ // the value into the frequent list
+ if c.recent.Contains(key) {
+ c.recent.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // If the value was recently evicted, add it to the
+ // frequently used list
+ if c.recentEvict.Contains(key) {
+ c.ensureSpace(true)
+ c.recentEvict.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Add to the recently seen list
+ c.ensureSpace(false)
+ c.recent.Add(key, value)
+ return
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
+ // If we have space, nothing to do
+ recentLen := c.recent.Len()
+ freqLen := c.frequent.Len()
+ if recentLen+freqLen < c.size {
+ return
+ }
+
+ // If the recent buffer is larger than
+ // the target, evict from there
+ if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+ k, _, _ := c.recent.RemoveOldest()
+ c.recentEvict.Add(k, nil)
+ return
+ }
+
+ // Remove from the frequent list otherwise
+ c.frequent.RemoveOldest()
+}
+
+// Len returns the number of items in the cache.
+func (c *TwoQueueCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.recent.Len() + c.frequent.Len()
+}
+
+// Keys returns a slice of the keys in the cache.
+// The frequently used keys are first in the returned slice.
+func (c *TwoQueueCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.frequent.Keys()
+ k2 := c.recent.Keys()
+ return append(k1, k2...)
+}
+
+// Remove removes the provided key from the cache.
+func (c *TwoQueueCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.frequent.Remove(key) {
+ return
+ }
+ if c.recent.Remove(key) {
+ return
+ }
+ if c.recentEvict.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to completely clear the cache.
+func (c *TwoQueueCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.recent.Purge()
+ c.frequent.Purge()
+ c.recentEvict.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *TwoQueueCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.frequent.Peek(key); ok {
+ return val, ok
+ }
+ return c.recent.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 000000000..be2cc4dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
new file mode 100644
index 000000000..33e58cfaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/README.md
@@ -0,0 +1,25 @@
+golang-lru
+==========
+
+This provides the `lru` package, which implements a fixed-size,
+thread-safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
+
+Example
+=======
+
+Using the LRU is very simple:
+
+```go
+l, _ := New(128)
+for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+}
+if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+}
+```
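+
+The package also provides `TwoQueueCache` and `ARCCache`; a minimal sketch of
+their use, in the same style as the example above (error handling elided):
+
+```go
+// Two-queue cache with the default recent/ghost ratios.
+l2q, _ := New2Q(128)
+l2q.Add("a", 1)
+if v, ok := l2q.Get("a"); ok {
+	fmt.Println(v) // prints 1
+}
+
+// Adaptive replacement cache of the same size.
+arc, _ := NewARC(128)
+arc.Add("b", 2)
+if v, ok := arc.Peek("b"); ok { // Peek does not update recency or frequency
+	fmt.Println(v) // prints 2
+}
+```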
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
new file mode 100644
index 000000000..555225a21
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/arc.go
@@ -0,0 +1,257 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
+// ARC is an enhancement over the standard LRU cache in that it tracks both
+// frequency and recency of use. This avoids a burst in access to new
+// entries from evicting the frequently used older entries. It adds some
+// additional tracking overhead to a standard LRU cache; computationally
+// it is roughly 2x the cost, and the extra memory overhead is linear
+// with the size of the cache. ARC has been patented by IBM, but it is
+// similar to the TwoQueueCache (2Q), which requires setting parameters.
+type ARCCache struct {
+ size int // Size is the total capacity of the cache
+ p int // P is the dynamic preference towards T1 or T2
+
+ t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
+ b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
+
+ t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
+ b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
+
+ lock sync.RWMutex
+}
+
+// NewARC creates an ARC of the given size
+func NewARC(size int) (*ARCCache, error) {
+ // Create the sub LRUs
+ b1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the ARC
+ c := &ARCCache{
+ size: size,
+ p: 0,
+ t1: t1,
+ b1: b1,
+ t2: t2,
+ b2: b2,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // If the value is contained in T1 (recent), then
+ // promote it to T2 (frequent)
+ if val, ok := c.t1.Peek(key); ok {
+ c.t1.Remove(key)
+ c.t2.Add(key, val)
+ return val, ok
+ }
+
+ // Check if the value is contained in T2 (frequent)
+ if val, ok := c.t2.Get(key); ok {
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *ARCCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is contained in T1 (recent), and potentially
+ // promote it to frequent T2
+ if c.t1.Contains(key) {
+ c.t1.Remove(key)
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if the value is already in T2 (frequent) and update it
+ if c.t2.Contains(key) {
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // recently used list
+ if c.b1.Contains(key) {
+ // T1 set is too small, increase P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b2Len > b1Len {
+ delta = b2Len / b1Len
+ }
+ if c.p+delta >= c.size {
+ c.p = c.size
+ } else {
+ c.p += delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Remove from B1
+ c.b1.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // frequently used list
+ if c.b2.Contains(key) {
+ // T2 set is too small, decrease P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b1Len > b2Len {
+ delta = b1Len / b2Len
+ }
+ if delta >= c.p {
+ c.p = 0
+ } else {
+ c.p -= delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(true)
+ }
+
+ // Remove from B2
+ c.b2.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Keep the size of the ghost buffers trim
+ if c.b1.Len() > c.size-c.p {
+ c.b1.RemoveOldest()
+ }
+ if c.b2.Len() > c.p {
+ c.b2.RemoveOldest()
+ }
+
+ // Add to the recently seen list
+ c.t1.Add(key, value)
+ return
+}
+
+// replace is used to adaptively evict from either T1 or T2
+// based on the current learned value of P
+func (c *ARCCache) replace(b2ContainsKey bool) {
+ t1Len := c.t1.Len()
+ if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
+ k, _, ok := c.t1.RemoveOldest()
+ if ok {
+ c.b1.Add(k, nil)
+ }
+ } else {
+ k, _, ok := c.t2.RemoveOldest()
+ if ok {
+ c.b2.Add(k, nil)
+ }
+ }
+}
+
+// Len returns the number of cached entries
+func (c *ARCCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Len() + c.t2.Len()
+}
+
+// Keys returns all the cached keys
+func (c *ARCCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.t1.Keys()
+ k2 := c.t2.Keys()
+ return append(k1, k2...)
+}
+
+// Remove is used to purge a key from the cache
+func (c *ARCCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.t1.Remove(key) {
+ return
+ }
+ if c.t2.Remove(key) {
+ return
+ }
+ if c.b1.Remove(key) {
+ return
+ }
+ if c.b2.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to clear the cache
+func (c *ARCCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.t1.Purge()
+ c.t2.Purge()
+ c.b1.Purge()
+ c.b2.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *ARCCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Contains(key) || c.t2.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.t1.Peek(key); ok {
+ return val, ok
+ }
+ return c.t2.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go
new file mode 100644
index 000000000..2547df979
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/doc.go
@@ -0,0 +1,21 @@
+// Package lru provides three different LRU caches of varying sophistication.
+//
+// Cache is a simple LRU cache. It is based on the
+// LRU implementation in groupcache:
+// https://github.com/golang/groupcache/tree/master/lru
+//
+// TwoQueueCache tracks frequently used and recently used entries separately.
+// This avoids a burst of accesses from taking out frequently used entries,
+// at the cost of about 2x computational overhead and some extra bookkeeping.
+//
+// ARCCache is an adaptive replacement cache. It tracks recent evictions as
+// well as recent usage in both the frequent and recent caches. Its
+// computational overhead is comparable to TwoQueueCache, but the memory
+// overhead is linear with the size of the cache.
+//
+// ARC has been patented by IBM, so do not use it if that is problematic for
+// your program.
+//
+// All caches in this package take locks while operating, and are therefore
+// thread-safe for consumers.
+package lru
diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod
new file mode 100644
index 000000000..824cb97e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/golang-lru
diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go
new file mode 100644
index 000000000..c8d9b0a23
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/lru.go
@@ -0,0 +1,110 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+ lru simplelru.LRUCache
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size.
+func New(size int) (*Cache, error) {
+ return NewWithEvict(size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
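+//
+// A minimal usage sketch; the callback fires for each entry evicted once the
+// cache is over capacity:
+//
+//	c, _ := NewWithEvict(2, func(k, v interface{}) {
+//		fmt.Printf("evicted %v=%v\n", k, v)
+//	})
+//	c.Add(1, "a")
+//	c.Add(2, "b")
+//	c.Add(3, "c") // exceeds size 2, so key 1 is evicted and the callback runs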
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
+ lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
+ if err != nil {
+ return nil, err
+ }
+ c := &Cache{
+ lru: lru,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *Cache) Purge() {
+ c.lock.Lock()
+ c.lru.Purge()
+ c.lock.Unlock()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *Cache) Add(key, value interface{}) (evicted bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Add(key, value)
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Get(key)
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *Cache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Contains(key)
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Peek(key)
+}
+
+// ContainsOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.lru.Contains(key) {
+ return true, false
+ }
+ evicted = c.lru.Add(key, value)
+ return false, evicted
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key interface{}) {
+ c.lock.Lock()
+ c.lru.Remove(key)
+ c.lock.Unlock()
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ c.lock.Lock()
+ c.lru.RemoveOldest()
+ c.lock.Unlock()
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *Cache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Keys()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Len()
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 000000000..5673773b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,161 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread-safe fixed-size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("Must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ entry := c.evictList.PushFront(ent)
+ c.items[key] = entry
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ var ent *list.Element
+ if ent, ok = c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 000000000..74c707744
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,36 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+ // Adds a value to the cache, returns true if an eviction occurred and
+ // updates the "recently used"-ness of the key.
+ Add(key, value interface{}) bool
+
+ // Returns key's value from the cache and
+ // updates the "recently used"-ness of the key. #value, isFound
+ Get(key interface{}) (value interface{}, ok bool)
+
+	// Check if a key exists in the cache without updating the recent-ness.
+ Contains(key interface{}) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key interface{}) (value interface{}, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key interface{}) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (interface{}, interface{}, bool)
+
+ // Returns the oldest entry from the cache. #key, value, isFound
+ GetOldest() (interface{}, interface{}, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []interface{}
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clear all cache entries
+ Purge()
+}
diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore
new file mode 100644
index 000000000..529841cf1
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+tags
+environ
diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml
new file mode 100644
index 000000000..6bc68d67f
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/.travis.yml
@@ -0,0 +1,27 @@
+# vim: ft=yaml sw=2 ts=2
+
+language: go
+
+# enable database services
+services:
+ - mysql
+ - postgresql
+
+# create test database
+before_install:
+ - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;'
+ - psql -c 'create database sqlxtest;' -U postgres
+ - go get github.com/mattn/goveralls
+ - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true"
+ - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable"
+ - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db"
+
+# go versions to test
+go:
+ - "1.8"
+ - "1.9"
+ - "1.10.x"
+
+# run tests w/ coverage
+script:
+ - travis_retry $GOPATH/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
new file mode 100644
index 000000000..0d31edfa7
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/LICENSE
@@ -0,0 +1,23 @@
+ Copyright (c) 2013, Jason Moiron
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
new file mode 100644
index 000000000..839034365
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -0,0 +1,187 @@
+# sqlx
+
+[Build Status](https://travis-ci.org/jmoiron/sqlx) [Coverage Status](https://coveralls.io/github/jmoiron/sqlx?branch=master) [Godoc](https://godoc.org/github.com/jmoiron/sqlx) [License](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
+
+sqlx is a library which provides a set of extensions on Go's standard
+`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
+et al. all leave the underlying interfaces untouched, so that their interfaces
+are a superset of the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+* The [introduction](https://github.com/jmoiron/sqlx/pull/387) of `sql.ColumnType` sets the required minimum Go version to 1.8.
+
+* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
+
+This breaks backwards compatibility, but it's in a way that is trivially fixable
+(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
+active development currently.
+
+* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
+
+### Backwards Compatibility
+
+There is no Go1-like promise of absolute stability, but I take the issue seriously
+and will maintain the library in a compatible state unless vital bugs prevent me
+from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
+[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
+a wider API cleanup was done at the time of fixing. It's possible this will happen
+in future; if it does, a git tag will be provided for users requiring the old
+behavior to continue to use it until such a time as they can migrate.
+
+## install
+
+ go get github.com/jmoiron/sqlx
+
+## issues
+
+Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
+`Columns()` does not fully qualify column names in queries like:
+
+```sql
+SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
+```
+
+making a struct or map destination ambiguous. Use `AS` in your queries
+to give columns distinct names, `rows.Scan` to scan them manually, or
+`SliceScan` to get a slice of results.
+
+## usage
+
+Below is an example which shows some common use cases for sqlx. Check
+[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
+usage.
+
+
+```go
+package main
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+
+ _ "github.com/lib/pq"
+ "github.com/jmoiron/sqlx"
+)
+
+var schema = `
+CREATE TABLE person (
+ first_name text,
+ last_name text,
+ email text
+);
+
+CREATE TABLE place (
+ country text,
+ city text NULL,
+ telcode integer
+)`
+
+type Person struct {
+ FirstName string `db:"first_name"`
+ LastName string `db:"last_name"`
+ Email string
+}
+
+type Place struct {
+ Country string
+ City sql.NullString
+ TelCode int
+}
+
+func main() {
+ // this Pings the database trying to connect, panics on error
+ // use sqlx.Open() for sql.Open() semantics
+ db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // exec the schema or fail; multi-statement Exec behavior varies between
+ // database drivers; pq will exec them all, sqlite3 won't, ymmv
+ db.MustExec(schema)
+
+ tx := db.MustBegin()
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
+ tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
+ // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
+ tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
+ tx.Commit()
+
+ // Query the database, storing results in a []Person (wrapped in []interface{})
+ people := []Person{}
+ db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
+ jason, john := people[0], people[1]
+
+ fmt.Printf("%#v\n%#v", jason, john)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+ // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
+
+ // You can also get a single result, a la QueryRow
+ jason = Person{}
+ err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
+ fmt.Printf("%#v\n", jason)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+
+ // if you have null fields and use SELECT *, you must use sql.Null* in your struct
+ places := []Place{}
+ err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ usa, singsing, honkers := places[0], places[1], places[2]
+
+ fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+
+ // Loop through rows using only one struct
+ place := Place{}
+ rows, err := db.Queryx("SELECT * FROM place")
+ for rows.Next() {
+ err := rows.StructScan(&place)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("%#v\n", place)
+ }
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+
+ // Named queries, using `:name` as the bindvar. Automatic bindvar support
+ // which takes into account the dbtype based on the driverName on sqlx.Open/Connect
+ _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
+ map[string]interface{}{
+ "first": "Bin",
+ "last": "Smuth",
+ "email": "bensmith@allblacks.nz",
+ })
+
+ // Selects Mr. Smith from the database
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
+
+ // Named queries can also use structs. Their bind names follow the same rules
+ // as the name -> db mapping, so struct fields are lowercased and the `db` tag
+ // is taken into consideration.
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
+}
+```
+
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
new file mode 100644
index 000000000..0a48252a0
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/bind.go
@@ -0,0 +1,217 @@
+package sqlx
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Bindvar types supported by Rebind, BindMap and BindStruct.
+const (
+ UNKNOWN = iota
+ QUESTION
+ DOLLAR
+ NAMED
+ AT
+)
+
+// BindType returns the bindtype for a given database given a drivername.
+func BindType(driverName string) int {
+ switch driverName {
+ case "postgres", "pgx", "pq-timeouts", "cloudsqlpostgres":
+ return DOLLAR
+ case "mysql":
+ return QUESTION
+ case "sqlite3":
+ return QUESTION
+ case "oci8", "ora", "goracle":
+ return NAMED
+ case "sqlserver":
+ return AT
+ }
+ return UNKNOWN
+}
+
+// FIXME: this should be able to be tolerant of escaped ?'s in queries without
+// losing much speed, and should be, to avoid confusion.
+
+// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
+func Rebind(bindType int, query string) string {
+ switch bindType {
+ case QUESTION, UNKNOWN:
+ return query
+ }
+
+ // Add space enough for 10 params before we have to allocate
+ rqb := make([]byte, 0, len(query)+10)
+
+ var i, j int
+
+ for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
+ rqb = append(rqb, query[:i]...)
+
+ switch bindType {
+ case DOLLAR:
+ rqb = append(rqb, '$')
+ case NAMED:
+ rqb = append(rqb, ':', 'a', 'r', 'g')
+ case AT:
+ rqb = append(rqb, '@', 'p')
+ }
+
+ j++
+ rqb = strconv.AppendInt(rqb, int64(j), 10)
+
+ query = query[i+1:]
+ }
+
+ return string(append(rqb, query...))
+}
+
+// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
+// much simpler and should be more resistant to odd unicode, but it is twice as
+// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
+// problems arise with its somewhat naive handling of unicode.
+func rebindBuff(bindType int, query string) string {
+ if bindType != DOLLAR {
+ return query
+ }
+
+ b := make([]byte, 0, len(query))
+ rqb := bytes.NewBuffer(b)
+ j := 1
+ for _, r := range query {
+ if r == '?' {
+ rqb.WriteRune('$')
+ rqb.WriteString(strconv.Itoa(j))
+ j++
+ } else {
+ rqb.WriteRune(r)
+ }
+ }
+
+ return rqb.String()
+}
+
+// In expands slice values in args, returning the modified query string
+// and a new arg list that can be executed by a database. The `query` should
+// use the `?` bindVar. The return value uses the `?` bindVar.
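+//
+// A minimal sketch of the intended use; pair In with Rebind when the target
+// driver does not use the `?` bindVar:
+//
+//	q, args, err := In("SELECT * FROM users WHERE id IN (?)", []int{1, 2, 3})
+//	if err != nil {
+//		// handle the error
+//	}
+//	// q    -> "SELECT * FROM users WHERE id IN (?, ?, ?)"
+//	// args -> []interface{}{1, 2, 3}
+//	q = Rebind(DOLLAR, q) // "SELECT * FROM users WHERE id IN ($1, $2, $3)"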
+func In(query string, args ...interface{}) (string, []interface{}, error) {
+ // argMeta stores reflect.Value and length for slices and
+ // the value itself for non-slice arguments
+ type argMeta struct {
+ v reflect.Value
+ i interface{}
+ length int
+ }
+
+ var flatArgsCount int
+ var anySlices bool
+
+ meta := make([]argMeta, len(args))
+
+ for i, arg := range args {
+ if a, ok := arg.(driver.Valuer); ok {
+ arg, _ = a.Value()
+ }
+ v := reflect.ValueOf(arg)
+ t := reflectx.Deref(v.Type())
+
+ // []byte is a driver.Value type so it should not be expanded
+ if t.Kind() == reflect.Slice && t != reflect.TypeOf([]byte{}) {
+ meta[i].length = v.Len()
+ meta[i].v = v
+
+ anySlices = true
+ flatArgsCount += meta[i].length
+
+ if meta[i].length == 0 {
+ return "", nil, errors.New("empty slice passed to 'in' query")
+ }
+ } else {
+ meta[i].i = arg
+ flatArgsCount++
+ }
+ }
+
+ // don't do any parsing if there aren't any slices; note that this means
+ // some errors that we might have caught below will not be returned.
+ if !anySlices {
+ return query, args, nil
+ }
+
+ newArgs := make([]interface{}, 0, flatArgsCount)
+ buf := make([]byte, 0, len(query)+len(", ?")*flatArgsCount)
+
+ var arg, offset int
+
+ for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
+ if arg >= len(meta) {
+ // if an argument wasn't passed, lets return an error; this is
+ // not actually how database/sql Exec/Query works, but since we are
+ // creating an argument list programmatically, we want to be able
+ // to catch these programmer errors earlier.
+ return "", nil, errors.New("number of bindVars exceeds arguments")
+ }
+
+ argMeta := meta[arg]
+ arg++
+
+ // not a slice, continue.
+ // our questionmark will either be written before the next expansion
+ // of a slice or after the loop when writing the rest of the query
+ if argMeta.length == 0 {
+ offset = offset + i + 1
+ newArgs = append(newArgs, argMeta.i)
+ continue
+ }
+
+ // write everything up to and including our ? character
+ buf = append(buf, query[:offset+i+1]...)
+
+ for si := 1; si < argMeta.length; si++ {
+ buf = append(buf, ", ?"...)
+ }
+
+ newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
+
+ // slice the query and reset the offset. this avoids some bookkeeping for
+ // the write after the loop
+ query = query[offset+i+1:]
+ offset = 0
+ }
+
+ buf = append(buf, query...)
+
+ if arg < len(meta) {
+ return "", nil, errors.New("number of bindVars less than number arguments")
+ }
+
+ return string(buf), newArgs, nil
+}
+
+func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
+ switch val := v.Interface().(type) {
+ case []interface{}:
+ args = append(args, val...)
+ case []int:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ case []string:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ default:
+ for si := 0; si < vlen; si++ {
+ args = append(args, v.Index(si).Interface())
+ }
+ }
+
+ return args
+}
diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go
new file mode 100644
index 000000000..e2b4e60b2
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/doc.go
@@ -0,0 +1,12 @@
+// Package sqlx provides general purpose extensions to database/sql.
+//
+// It is intended to seamlessly wrap database/sql and provide convenience
+// methods which are useful in the development of database driven applications.
+// None of the underlying database/sql methods are changed. Instead all extended
+// behavior is implemented through new methods defined on wrapper types.
+//
+// Additions include scanning into structs, named query support, rebinding
+// queries for different drivers, convenient shorthands for common error handling
+// and more.
+//
+package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/go.mod b/vendor/github.com/jmoiron/sqlx/go.mod
new file mode 100644
index 000000000..66c67561c
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/go.mod
@@ -0,0 +1,7 @@
+module github.com/jmoiron/sqlx
+
+require (
+ github.com/go-sql-driver/mysql v1.4.0
+ github.com/lib/pq v1.0.0
+ github.com/mattn/go-sqlite3 v1.9.0
+)
diff --git a/vendor/github.com/jmoiron/sqlx/go.sum b/vendor/github.com/jmoiron/sqlx/go.sum
new file mode 100644
index 000000000..a3239ada7
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/go.sum
@@ -0,0 +1,6 @@
+github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
new file mode 100644
index 000000000..fa82b5609
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named.go
@@ -0,0 +1,356 @@
+package sqlx
+
+// Named Query Support
+//
+// * BindMap - bind query bindvars to map/struct args
+// * NamedExec, NamedQuery - named query w/ struct or map
+// * NamedStmt - a pre-compiled named query which is a prepared statement
+//
+// Internal Interfaces:
+//
+// * compileNamedQuery - rebind a named query, returning a query and list of names
+// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
+//
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unicode"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// NamedStmt is a prepared statement that executes named queries. Prepare it
+// how you would execute a NamedQuery, but pass in a struct or map when executing.
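+//
+// A minimal sketch of typical use, assuming the statement was prepared via a
+// helper such as DB.PrepareNamed (defined elsewhere in this package):
+//
+//	stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :first_name")
+//	if err != nil {
+//		// handle the error
+//	}
+//	var p Person
+//	err = stmt.Get(&p, map[string]interface{}{"first_name": "Jason"})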
+type NamedStmt struct {
+ Params []string
+ QueryString string
+ Stmt *Stmt
+}
+
+// Close closes the named statement.
+func (n *NamedStmt) Close() error {
+ return n.Stmt.Close()
+}
+
+// Exec executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.Exec(args...)
+}
+
+// Query executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.Query(args...)
+}
+
+// QueryRow executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRow(arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowx(args...)
+}
+
+// MustExec execs a NamedStmt, panicking on error
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
+ res, err := n.Exec(arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// Queryx using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
+ r, err := n.Query(arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
+ return n.QueryRow(arg)
+}
+
+// Select using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
+ rows, err := n.Queryx(arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
+ r := n.QueryRowx(arg)
+ return r.scanAny(dest, false)
+}
+
+// Unsafe creates an unsafe version of the NamedStmt
+func (n *NamedStmt) Unsafe() *NamedStmt {
+ r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
+ r.Stmt.unsafe = true
+ return r
+}
+
+// A union interface of preparer and binder, required to be able to prepare
+// named statements (as the bindtype must be determined).
+type namedPreparer interface {
+ Preparer
+ binder
+}
+
+func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := Preparex(p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ if maparg, ok := arg.(map[string]interface{}); ok {
+ return bindMapArgs(names, maparg)
+ }
+ return bindArgs(names, arg, m)
+}
+
+// private interface to generate a list of interfaces from a given struct
+// type, given a list of names to pull out of the struct. Used by public
+// BindStruct interface.
+func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ // grab the indirected value of arg
+ v := reflect.ValueOf(arg)
+ for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
+ v = v.Elem()
+ }
+
+ err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
+ if len(t) == 0 {
+ return fmt.Errorf("could not find name %s in %#v", names[i], arg)
+ }
+
+ val := reflectx.FieldByIndexesReadOnly(v, t)
+ arglist = append(arglist, val.Interface())
+
+ return nil
+ })
+
+ return arglist, err
+}
+
+// like bindArgs, but for maps.
+func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ for _, name := range names {
+ val, ok := arg[name]
+ if !ok {
+ return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
+ }
+ arglist = append(arglist, val)
+ }
+ return arglist, nil
+}
+
+// bindStruct binds a named parameter query with fields from a struct argument.
+// The rules for binding field names to parameter names follow the same
+// conventions as for StructScan, including obeying the `db` struct tags.
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindArgs(names, arg, m)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ return bound, arglist, nil
+}
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindMapArgs(names, args)
+ return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower. Hopefully the
+// addition of the prepared NamedStmt (which will only do this once) will make
+// up for the slightly slower ad-hoc NamedExec/NamedQuery.
+
+// compile a NamedQuery into an unbound query (using the '?' bindvar) and
+// a list of names.
+func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
+ names = make([]string, 0, 10)
+ rebound := make([]byte, 0, len(qs))
+
+ inName := false
+ last := len(qs) - 1
+ currentVar := 1
+ name := make([]byte, 0, 10)
+
+ for i, b := range qs {
+ // a ':' while we're in a name is an error
+ if b == ':' {
+ // if this is the second ':' in a '::' escape sequence, append a ':'
+ if inName && i > 0 && qs[i-1] == ':' {
+ rebound = append(rebound, ':')
+ inName = false
+ continue
+ } else if inName {
+ err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
+ return query, names, err
+ }
+ inName = true
+ name = []byte{}
+ } else if inName && i > 0 && b == '=' {
+ rebound = append(rebound, ':', '=')
+ inName = false
+ continue
+ // if we're in a name, and this is an allowed character, continue
+ } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
+ // append the byte to the name if we are in a name and not on the last byte
+ name = append(name, b)
+ // if we're in a name and it's not an allowed character, the name is done
+ } else if inName {
+ inName = false
+ // if this is the final byte of the string and it is part of the name, then
+ // make sure to add it to the name
+ if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ name = append(name, b)
+ }
+ // add the string representation to the names list
+ names = append(names, string(name))
+ // add a proper bindvar for the bindType
+ switch bindType {
+ // oracle only supports named type bind vars even for positional
+ case NAMED:
+ rebound = append(rebound, ':')
+ rebound = append(rebound, name...)
+ case QUESTION, UNKNOWN:
+ rebound = append(rebound, '?')
+ case DOLLAR:
+ rebound = append(rebound, '$')
+ for _, b := range strconv.Itoa(currentVar) {
+ rebound = append(rebound, byte(b))
+ }
+ currentVar++
+ case AT:
+ rebound = append(rebound, '@', 'p')
+ for _, b := range strconv.Itoa(currentVar) {
+ rebound = append(rebound, byte(b))
+ }
+ currentVar++
+ }
+ // append this byte to the rebound query unless it was the final byte and was already consumed as part of the name
+ if i != last {
+ rebound = append(rebound, b)
+ } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ rebound = append(rebound, b)
+ }
+ } else {
+ // this is a normal byte and should just go onto the rebound query
+ rebound = append(rebound, b)
+ }
+ }
+
+ return string(rebound), names, err
+}
+
+// BindNamed binds a struct or a map to a query with named parameters.
+// DEPRECATED: use sqlx.Named instead of this; it may be removed in future.
+func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(bindType, query, arg, mapper())
+}
+
+// Named takes a query using named parameters and an argument and
+// returns a new query with a list of args that can be executed by
+// a database. The return value uses the `?` bindvar.
+func Named(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(QUESTION, query, arg, mapper())
+}
+
+func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ if maparg, ok := arg.(map[string]interface{}); ok {
+ return bindMap(bindType, query, maparg)
+ }
+ return bindStruct(bindType, query, arg, m)
+}
+
+// NamedQuery binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Queryx(q, args...)
+}
+
+// NamedExec uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Exec(q, args...)
+}
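For reference, here is a minimal, illustrative sketch (not part of the vendored files) of how the named-query helpers above behave. The Person type and the person table are hypothetical; sqlx.Named is the package-level helper defined in this file and always emits '?' bindvars, so no database connection is needed:

    package main

    import (
        "fmt"
        "log"

        "github.com/jmoiron/sqlx"
    )

    // Person is a hypothetical type; the db tags drive the :name -> field binding.
    type Person struct {
        FirstName string `db:"first_name"`
        LastName  string `db:"last_name"`
    }

    func main() {
        // Named rewrites :first_name/:last_name to '?' and pulls the values out
        // of the struct in placeholder order.
        q, args, err := sqlx.Named(
            "INSERT INTO person (first_name, last_name) VALUES (:first_name, :last_name)",
            Person{FirstName: "Ada", LastName: "Lovelace"},
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(q)    // INSERT INTO person (first_name, last_name) VALUES (?, ?)
        fmt.Println(args) // [Ada Lovelace]

        // A map[string]interface{} works the same way; keys stand in for fields.
        q, args, err = sqlx.Named(
            "SELECT * FROM person WHERE first_name = :first_name",
            map[string]interface{}{"first_name": "Ada"},
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(q, args)
    }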
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
new file mode 100644
index 000000000..9405007e2
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named_context.go
@@ -0,0 +1,132 @@
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+)
+
+// A union interface of contextPreparer and binder, required to be able to
+// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+ PreparerContext
+ binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := PreparexContext(ctx, p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+ res, err := n.ExecContext(ctx, arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+ r, err := n.QueryContext(ctx, arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRowContext.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+ return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ rows, err := n.QueryxContext(ctx, arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ r := n.QueryRowxContext(ctx, arg)
+ return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.ExecContext(ctx, q, args...)
+}
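A similar sketch for the context-aware variants added in this file, assuming db is an already-open *sqlx.DB and reusing the hypothetical person table; the timeout value is arbitrary:

    package example

    import (
        "context"
        "time"

        "github.com/jmoiron/sqlx"
    )

    // insertPerson is a sketch only: db is assumed to be an open *sqlx.DB, and the
    // person table (first_name, last_name) is hypothetical.
    func insertPerson(db *sqlx.DB, first, last string) error {
        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()

        // NamedExecContext binds the :name parameters from the map, rebinds the
        // query for the driver via BindType(db.DriverName()), and runs ExecContext.
        _, err := sqlx.NamedExecContext(ctx, db,
            "INSERT INTO person (first_name, last_name) VALUES (:first_name, :last_name)",
            map[string]interface{}{"first_name": first, "last_name": last},
        )
        return err
    }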
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 000000000..f01d3d1f0
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* allow user-specified name -> field mapping functions
+
+These behaviors mimic those of the standard library marshallers and of standard
+Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
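To make the mapping behavior described above concrete, a small sketch (again not part of the vendored files) with hypothetical Employee and Address types:

    package main

    import (
        "fmt"
        "reflect"
        "strings"

        "github.com/jmoiron/sqlx/reflectx"
    )

    type Address struct {
        City string `db:"city"`
    }

    type Employee struct {
        Name string `db:"name"`
        Address     // embedded: its fields are promoted into the mapping
    }

    func main() {
        // Map fields by the db tag, falling back to the lower-cased field name.
        m := reflectx.NewMapperFunc("db", strings.ToLower)
        e := Employee{Name: "Grace", Address: Address{City: "Arlington"}}

        // FieldByName resolves a mapped name to the underlying reflect.Value,
        // including fields reached through embedded structs.
        fmt.Println(m.FieldByName(reflect.ValueOf(e), "city").Interface()) // Arlington

        // TraversalsByName returns the field index path for each requested name.
        fmt.Println(m.TraversalsByName(reflect.TypeOf(e), []string{"name", "city"}))
    }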
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 000000000..73c21eb39
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,441 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+//
+package reflectx
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+ Index []int
+ Path string
+ Field reflect.StructField
+ Zero reflect.Value
+ Name string
+ Options map[string]string
+ Embedded bool
+ Children []*FieldInfo
+ Parent *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+ Tree *FieldInfo
+ Index []*FieldInfo
+ Paths map[string]*FieldInfo
+ Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+ return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+ if len(index) == 0 {
+ return nil
+ }
+
+ tree := f.Tree
+ for _, i := range index {
+ if i >= len(tree.Children) || tree.Children[i] == nil {
+ return nil
+ }
+ tree = tree.Children[i]
+ }
+ return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+ cache map[reflect.Type]*StructMap
+ tagName string
+ tagMapFunc func(string) string
+ mapFunc func(string) string
+ mutex sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ }
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: mapFunc,
+ tagMapFunc: tagMapFunc,
+ }
+}
+
+// NewMapperFunc returns a new mapper which optionally obeys a field tag and
+// a struct field name mapper func given by f. Tags will take precedence, but
+// for any other field, the mapped name will be f(field.Name)
+func NewMapperFunc(tagName string, f func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: f,
+ }
+}
+
+// TypeMap returns a mapping of field strings to int slices representing
+// the traversal down the struct to reach the field.
+func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
+ m.mutex.Lock()
+ mapping, ok := m.cache[t]
+ if !ok {
+ mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
+ m.cache[t] = mapping
+ }
+ m.mutex.Unlock()
+ return mapping
+}
+
+// FieldMap returns the mapper's mapping of field names to reflect values. Panics
+// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
+func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ r := map[string]reflect.Value{}
+ tm := m.TypeMap(v.Type())
+ for tagName, fi := range tm.Names {
+ r[tagName] = FieldByIndexes(v, fi.Index)
+ }
+ return r
+}
+
+// FieldByName returns a field by its mapped name as a reflect.Value.
+// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
+// Returns zero Value if the name is not found.
+func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ fi, ok := tm.Names[name]
+ if !ok {
+ return v
+ }
+ return FieldByIndexes(v, fi.Index)
+}
+
+// FieldsByName returns a slice of values corresponding to the slice of names
+// for the value. Panics if v's Kind is not Struct or v is not Indirectable
+// to a struct Kind. Returns zero Value for each name not found.
+func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ vals := make([]reflect.Value, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ vals = append(vals, *new(reflect.Value))
+ } else {
+ vals = append(vals, FieldByIndexes(v, fi.Index))
+ }
+ }
+ return vals
+}
+
+// TraversalsByName returns a slice of int slices which represent the struct
+// traversals for each mapped name. Panics if t is not a struct or Indirectable
+// to a struct. Returns empty int slice for each name not found.
+func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ r := make([][]int, 0, len(names))
+ m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
+ if i == nil {
+ r = append(r, []int{})
+ } else {
+ r = append(r, i)
+ }
+
+ return nil
+ })
+ return r
+}
+
+// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
+// each name and the struct traversal represented by that name. Panics if t is not
+// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
+ t = Deref(t)
+ mustBe(t, reflect.Struct)
+ tm := m.TypeMap(t)
+ for i, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ if err := fn(i, nil); err != nil {
+ return err
+ }
+ } else {
+ if err := fn(i, fi.Index); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ // if this is a pointer and it's nil, allocate a new value and set it
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ alloc := reflect.New(Deref(v.Type()))
+ v.Set(alloc)
+ }
+ if v.Kind() == reflect.Map && v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ }
+ return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ }
+ return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+ Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+ if k := v.Kind(); k != expected {
+ panic(&reflect.ValueError{Method: methodName(), Kind: k})
+ }
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+type typeQueue struct {
+ t reflect.Type
+ fi *FieldInfo
+ pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
+func apnd(is []int, i int) []int {
+ x := make([]int, len(is)+1)
+ for p, n := range is {
+ x[p] = n
+ }
+ x[len(x)-1] = i
+ return x
+}
+
+type mapf func(string) string
+
+// parseName parses the tag and the target name for the given field using
+// the tagName (e.g. 'json' for `json:"foo"` tags), mapFunc for mapping the
+// field's name to a target name, and tagMapFunc for mapping the tag to
+// a target name.
+func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
+ // first, set the fieldName to the field's name
+ fieldName = field.Name
+ // if a mapFunc is set, use that to override the fieldName
+ if mapFunc != nil {
+ fieldName = mapFunc(fieldName)
+ }
+
+ // if there's no tag to look for, return the field name
+ if tagName == "" {
+ return "", fieldName
+ }
+
+ // if this tag is not set using the normal convention in the tag,
+ // then return the fieldName. This check is done because, according
+ // to the reflect documentation:
+ // If the tag does not have the conventional format,
+ // the value returned by Get is unspecified.
+ // which doesn't sound great.
+ if !strings.Contains(string(field.Tag), tagName+":") {
+ return "", fieldName
+ }
+
+ // at this point we're fairly sure that we have a tag, so lets pull it out
+ tag = field.Tag.Get(tagName)
+
+ // if we have a mapper function, call it on the whole tag
+ // XXX: this is a change from the old version, which pulled out the name
+ // before the tagMapFunc could be run, but I think this is the right way
+ if tagMapFunc != nil {
+ tag = tagMapFunc(tag)
+ }
+
+ // finally, split the options from the name
+ parts := strings.Split(tag, ",")
+ fieldName = parts[0]
+
+ return tag, fieldName
+}
+
+// parseOptions parses options out of a tag string, skipping the name
+func parseOptions(tag string) map[string]string {
+ parts := strings.Split(tag, ",")
+ options := make(map[string]string, len(parts))
+ if len(parts) > 1 {
+ for _, opt := range parts[1:] {
+ // short circuit potentially expensive split op
+ if strings.Contains(opt, "=") {
+ kv := strings.Split(opt, "=")
+ options[kv[0]] = kv[1]
+ continue
+ }
+ options[opt] = ""
+ }
+ }
+ return options
+}
+
+// getMapping returns a mapping for the t type, using the tagName, mapFunc and
+// tagMapFunc to determine the canonical names of fields.
+func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
+ m := []*FieldInfo{}
+
+ root := &FieldInfo{}
+ queue := []typeQueue{}
+ queue = append(queue, typeQueue{Deref(t), root, ""})
+
+QueueLoop:
+ for len(queue) != 0 {
+ // pop the first item off of the queue
+ tq := queue[0]
+ queue = queue[1:]
+
+ // ignore recursive field
+ for p := tq.fi.Parent; p != nil; p = p.Parent {
+ if tq.fi.Field.Type == p.Field.Type {
+ continue QueueLoop
+ }
+ }
+
+ nChildren := 0
+ if tq.t.Kind() == reflect.Struct {
+ nChildren = tq.t.NumField()
+ }
+ tq.fi.Children = make([]*FieldInfo, nChildren)
+
+ // iterate through all of its fields
+ for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
+
+ f := tq.t.Field(fieldPos)
+
+ // parse the tag and the target name using the mapping options for this field
+ tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
+
+ // if the name is "-", disabled via a tag, skip it
+ if name == "-" {
+ continue
+ }
+
+ fi := FieldInfo{
+ Field: f,
+ Name: name,
+ Zero: reflect.New(f.Type).Elem(),
+ Options: parseOptions(tag),
+ }
+
+ // if the path is empty this path is just the name
+ if tq.pp == "" {
+ fi.Path = fi.Name
+ } else {
+ fi.Path = tq.pp + "." + fi.Name
+ }
+
+ // skip unexported fields
+ if len(f.PkgPath) != 0 && !f.Anonymous {
+ continue
+ }
+
+ // bfs search of anonymous embedded structs
+ if f.Anonymous {
+ pp := tq.pp
+ if tag != "" {
+ pp = fi.Path
+ }
+
+ fi.Embedded = true
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ nChildren := 0
+ ft := Deref(f.Type)
+ if ft.Kind() == reflect.Struct {
+ nChildren = ft.NumField()
+ }
+ fi.Children = make([]*FieldInfo, nChildren)
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
+ } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
+ }
+
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Parent = tq.fi
+ tq.fi.Children[fieldPos] = &fi
+ m = append(m, &fi)
+ }
+ }
+
+ flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
+ for _, fi := range flds.Index {
+ flds.Paths[fi.Path] = fi
+ if fi.Name != "" && !fi.Embedded {
+ flds.Names[fi.Path] = fi
+ }
+ }
+
+ return flds
+}
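As a quick illustration of the StructMap that getMapping builds, a sketch with hypothetical Outer and Inner types (map iteration order will vary):

    package main

    import (
        "fmt"
        "reflect"

        "github.com/jmoiron/sqlx/reflectx"
    )

    type Inner struct {
        B int `db:"b"`
    }

    type Outer struct {
        A     int   `db:"a"`
        Inner Inner `db:"inner"` // named (not embedded), so its children get an "inner." prefix
    }

    func main() {
        m := reflectx.NewMapper("db")
        tm := m.TypeMap(reflect.TypeOf(Outer{}))

        // Each path maps to the index traversal used by FieldByIndexes.
        for path, fi := range tm.Paths {
            fmt.Printf("%-8s -> index %v\n", path, fi.Index)
        }
        // Expected entries (in some order): a -> [0], inner -> [1], inner.b -> [1 0]
    }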
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
new file mode 100644
index 000000000..3f000f47c
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx.go
@@ -0,0 +1,1045 @@
+package sqlx
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Although the NameMapper is convenient, in practice it should not
+// be relied on except for application code. If you are writing a library
+// that uses sqlx, you should be aware that the name mappings you expect
+// can be overridden by your user's application.
+
+// NameMapper is used to map column names to struct field names. By default,
+// it uses strings.ToLower to lowercase struct field names. It can be set
+// to whatever you want, but it is encouraged to be set before sqlx is used
+// as name-to-field mappings are cached after first use on a type.
+var NameMapper = strings.ToLower
+var origMapper = reflect.ValueOf(NameMapper)
+
+// Rather than creating on init, this is created when necessary so that
+// importers have time to customize the NameMapper.
+var mpr *reflectx.Mapper
+
+// mprMu protects mpr.
+var mprMu sync.Mutex
+
+// mapper returns a valid mapper using the configured NameMapper func.
+func mapper() *reflectx.Mapper {
+ mprMu.Lock()
+ defer mprMu.Unlock()
+
+ if mpr == nil {
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ } else if origMapper != reflect.ValueOf(NameMapper) {
+ // if NameMapper has changed, create a new mapper
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ origMapper = reflect.ValueOf(NameMapper)
+ }
+ return mpr
+}
+
+// isScannable takes the reflect.Type of the dest value and returns whether or
+// not it is scannable. Something is scannable if any of the following holds:
+// * it is not a struct
+// * it implements sql.Scanner
+// * it has no exported fields
+func isScannable(t reflect.Type) bool {
+ if reflect.PtrTo(t).Implements(_scannerInterface) {
+ return true
+ }
+ if t.Kind() != reflect.Struct {
+ return true
+ }
+
+ // it's not important that we use the right mapper for this particular object,
+ // we're only concerned with how many exported fields this struct has
+ m := mapper()
+ if len(m.TypeMap(t).Index) == 0 {
+ return true
+ }
+ return false
+}
+
+// ColScanner is an interface used by MapScan and SliceScan
+type ColScanner interface {
+ Columns() ([]string, error)
+ Scan(dest ...interface{}) error
+ Err() error
+}
+
+// Queryer is an interface used by Get and Select
+type Queryer interface {
+ Query(query string, args ...interface{}) (*sql.Rows, error)
+ Queryx(query string, args ...interface{}) (*Rows, error)
+ QueryRowx(query string, args ...interface{}) *Row
+}
+
+// Execer is an interface used by MustExec and LoadFile
+type Execer interface {
+ Exec(query string, args ...interface{}) (sql.Result, error)
+}
+
+// Binder is an interface for something which can bind queries (Tx, DB)
+type binder interface {
+ DriverName() string
+ Rebind(string) string
+ BindNamed(string, interface{}) (string, []interface{}, error)
+}
+
+// Ext is a union interface which can bind, query, and exec, used by
+// NamedQuery and NamedExec.
+type Ext interface {
+ binder
+ Queryer
+ Execer
+}
+
+// Preparer is an interface used by Preparex.
+type Preparer interface {
+ Prepare(query string) (*sql.Stmt, error)
+}
+
+// determine if any of our extensions are unsafe
+func isUnsafe(i interface{}) bool {
+ switch v := i.(type) {
+ case Row:
+ return v.unsafe
+ case *Row:
+ return v.unsafe
+ case Rows:
+ return v.unsafe
+ case *Rows:
+ return v.unsafe
+ case NamedStmt:
+ return v.Stmt.unsafe
+ case *NamedStmt:
+ return v.Stmt.unsafe
+ case Stmt:
+ return v.unsafe
+ case *Stmt:
+ return v.unsafe
+ case qStmt:
+ return v.unsafe
+ case *qStmt:
+ return v.unsafe
+ case DB:
+ return v.unsafe
+ case *DB:
+ return v.unsafe
+ case Tx:
+ return v.unsafe
+ case *Tx:
+ return v.unsafe
+ case sql.Rows, *sql.Rows:
+ return false
+ default:
+ return false
+ }
+}
+
+func mapperFor(i interface{}) *reflectx.Mapper {
+ switch i.(type) {
+ case DB:
+ return i.(DB).Mapper
+ case *DB:
+ return i.(*DB).Mapper
+ case Tx:
+ return i.(Tx).Mapper
+ case *Tx:
+ return i.(*Tx).Mapper
+ default:
+ return mapper()
+ }
+}
+
+var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// Row is a reimplementation of sql.Row in order to gain access to the underlying
+// sql.Rows.Columns() data, necessary for StructScan.
+type Row struct {
+ err error
+ unsafe bool
+ rows *sql.Rows
+ Mapper *reflectx.Mapper
+}
+
+// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
+// underlying error from the internal rows object if it exists.
+func (r *Row) Scan(dest ...interface{}) error {
+ if r.err != nil {
+ return r.err
+ }
+
+ // TODO(bradfitz): for now we need to defensively clone all
+ // []byte that the driver returned (not permitting
+ // *RawBytes in Rows.Scan), since we're about to close
+ // the Rows in our defer, when we return from this function.
+ // the contract with the driver.Next(...) interface is that it
+ // can return slices into read-only temporary memory that's
+ // only valid until the next Scan/Close. But the TODO is that
+ // for a lot of drivers, this copy will be unnecessary. We
+ // should provide an optional interface for drivers to
+ // implement to say, "don't worry, the []bytes that I return
+ // from Next will not be modified again." (for instance, if
+ // they were obtained from the network anyway) But for now we
+ // don't care.
+ defer r.rows.Close()
+ for _, dp := range dest {
+ if _, ok := dp.(*sql.RawBytes); ok {
+ return errors.New("sql: RawBytes isn't allowed on Row.Scan")
+ }
+ }
+
+ if !r.rows.Next() {
+ if err := r.rows.Err(); err != nil {
+ return err
+ }
+ return sql.ErrNoRows
+ }
+ err := r.rows.Scan(dest...)
+ if err != nil {
+ return err
+ }
+ // Make sure the query can be processed to completion with no errors.
+ if err := r.rows.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
+// returned by Row.Scan()
+func (r *Row) Columns() ([]string, error) {
+ if r.err != nil {
+ return []string{}, r.err
+ }
+ return r.rows.Columns()
+}
+
+// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error
+func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) {
+ if r.err != nil {
+ return []*sql.ColumnType{}, r.err
+ }
+ return r.rows.ColumnTypes()
+}
+
+// Err returns the error encountered while scanning.
+func (r *Row) Err() error {
+ return r.err
+}
+
+// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
+// used mostly to automatically bind named queries using the right bindvars.
+type DB struct {
+ *sql.DB
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
+// driverName of the original database is required for named query support.
+func NewDb(db *sql.DB, driverName string) *DB {
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}
+}
+
+// DriverName returns the driverName passed to the Open function for this DB.
+func (db *DB) DriverName() string {
+ return db.driverName
+}
+
+// Open is the same as sql.Open, but returns an *sqlx.DB instead.
+func Open(driverName, dataSourceName string) (*DB, error) {
+ db, err := sql.Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
+}
+
+// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
+func MustOpen(driverName, dataSourceName string) *DB {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// MapperFunc sets a new mapper for this db using the default sqlx struct tag
+// and the provided mapper function.
+func (db *DB) MapperFunc(mf func(string) string) {
+ db.Mapper = reflectx.NewMapperFunc("db", mf)
+}
+
+// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
+func (db *DB) Rebind(query string) string {
+ return Rebind(BindType(db.driverName), query)
+}
+
+// Unsafe returns a version of DB which will silently succeed to scan when
+// columns in the SQL result have no fields in the destination struct.
+// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
+// safety behavior.
+func (db *DB) Unsafe() *DB {
+ return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
+}
+
+// BindNamed binds a query using the DB driver's bindvar type.
+func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
+}
+
+// NamedQuery using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(db, query, arg)
+}
+
+// NamedExec using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(db, query, arg)
+}
+
+// Select using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(db, dest, query, args...)
+}
+
+// Get using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(db, dest, query, args...)
+}
+
+// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+func (db *DB) MustBegin() *Tx {
+ tx, err := db.Beginx()
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
+func (db *DB) Beginx() (*Tx, error) {
+ tx, err := db.DB.Begin()
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// Queryx queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowx queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := db.DB.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustExec (panic) runs MustExec using this database.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(db, query, args...)
+}
+
+// Preparex returns an sqlx.Stmt instead of a sql.Stmt
+func (db *DB) Preparex(query string) (*Stmt, error) {
+ return Preparex(db, query)
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(db, query)
+}
+
+// Tx is an sqlx wrapper around sql.Tx with extra functionality
+type Tx struct {
+ *sql.Tx
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// DriverName returns the driverName used by the DB which began this transaction.
+func (tx *Tx) DriverName() string {
+ return tx.driverName
+}
+
+// Rebind a query within a transaction's bindvar type.
+func (tx *Tx) Rebind(query string) string {
+ return Rebind(BindType(tx.driverName), query)
+}
+
+// Unsafe returns a version of Tx which will silently succeed to scan when
+// columns in the SQL result have no fields in the destination struct.
+func (tx *Tx) Unsafe() *Tx {
+ return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
+}
+
+// BindNamed binds a query within a transaction's bindvar type.
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
+}
+
+// NamedQuery within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(tx, query, arg)
+}
+
+// NamedExec a named query within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(tx, query, arg)
+}
+
+// Select within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(tx, dest, query, args...)
+}
+
+// Queryx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// QueryRowx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// Get within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(tx, dest, query, args...)
+}
+
+// MustExec runs MustExec within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(tx, query, args...)
+}
+
+// Preparex a statement within a transaction.
+func (tx *Tx) Preparex(query string) (*Stmt, error) {
+ return Preparex(tx, query)
+}
+
+// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
+// stmt can be either *sql.Stmt or *sqlx.Stmt.
+func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
+}
+
+// NamedStmt returns a version of the prepared statement which runs within a transaction.
+func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.Stmtx(stmt.Stmt),
+ }
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(tx, query)
+}
+
+// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
+type Stmt struct {
+ *sql.Stmt
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// Unsafe returns a version of Stmt which will silently succeed to scan when
+// columns in the SQL result have no fields in the destination struct.
+func (s *Stmt) Unsafe() *Stmt {
+ return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
+}
+
+// Select using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
+ return Select(&qStmt{s}, dest, "", args...)
+}
+
+// Get using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
+ return Get(&qStmt{s}, dest, "", args...)
+}
+
+// MustExec (panic) using this statement. Note that the query portion of the error
+// output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExec(args ...interface{}) sql.Result {
+ return MustExec(&qStmt{s}, "", args...)
+}
+
+// QueryRowx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowx(args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowx("", args...)
+}
+
+// Queryx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.Queryx("", args...)
+}
+
+// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
+// implementing those interfaces and ignoring the `query` argument.
+type qStmt struct{ *Stmt }
+
+func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.Query(args...)
+}
+
+func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.Query(args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.Query(args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.Exec(args...)
+}
+
+// Rows is a wrapper around sql.Rows which caches costly reflect operations
+// during a looped StructScan
+type Rows struct {
+ *sql.Rows
+ unsafe bool
+ Mapper *reflectx.Mapper
+ // these fields cache memory use for a rows during iteration w/ structScan
+ started bool
+ fields [][]int
+ values []interface{}
+}
+
+// SliceScan using this Rows.
+func (r *Rows) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Rows.
+func (r *Rows) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
+// Use this and iterate over Rows manually when the memory load of Select() might be
+// prohibitive. *Rows.StructScan caches the reflect work of matching up column
+// positions to fields to avoid that overhead per scan, which means it is not safe
+// to run StructScan on the same Rows instance with different struct types.
+func (r *Rows) StructScan(dest interface{}) error {
+ v := reflect.ValueOf(dest)
+
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+
+ v = v.Elem()
+
+ if !r.started {
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+ m := r.Mapper
+
+ r.fields = m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(r.fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ r.values = make([]interface{}, len(columns))
+ r.started = true
+ }
+
+ err := fieldsByTraversal(v, r.fields, r.values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ err = r.Scan(r.values...)
+ if err != nil {
+ return err
+ }
+ return r.Err()
+}
+
+// Connect to a database and verify with a ping.
+func Connect(driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ err = db.Ping()
+ if err != nil {
+ db.Close()
+ return nil, err
+ }
+ return db, nil
+}
+
+// MustConnect connects to a database and panics on error.
+func MustConnect(driverName, dataSourceName string) *DB {
+ db, err := Connect(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// Preparex prepares a statement.
+func Preparex(p Preparer, query string) (*Stmt, error) {
+ s, err := p.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// Select executes a query using the provided Queryer, and StructScans each row
+// into dest, which must be a slice. If the slice elements are scannable, then
+// the result set must have only one column. Otherwise, StructScan is used.
+// The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.Queryx(query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get does a QueryRow using the provided Queryer, and scans the resulting row
+// to dest. If dest is scannable, the result must only have one column. Otherwise,
+// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowx(query, args...)
+ return r.scanAny(dest, false)
+}
+
+// LoadFile exec's every statement in a file (as a single call to Exec).
+// LoadFile may return a nil *sql.Result if errors are encountered locating or
+// reading the file at path. LoadFile reads the entire file into memory, so it
+// is not suitable for loading large data dumps, but can be useful for initializing
+// schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFile(e Execer, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.Exec(string(contents))
+ return &res, err
+}
+
+// MustExec execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExec(e Execer, query string, args ...interface{}) sql.Result {
+ res, err := e.Exec(query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// SliceScan using this Rows.
+func (r *Row) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Rows.
+func (r *Row) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+func (r *Row) scanAny(dest interface{}, structOnly bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ if r.rows == nil {
+ r.err = sql.ErrNoRows
+ return r.err
+ }
+ defer r.rows.Close()
+
+ v := reflect.ValueOf(dest)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if v.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+
+ base := reflectx.Deref(v.Type())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
+ }
+
+ if scannable {
+ return r.Scan(dest)
+ }
+
+ m := r.Mapper
+
+ fields := m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values := make([]interface{}, len(columns))
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ return r.Scan(values...)
+}
+
+// StructScan a single Row into dest.
+func (r *Row) StructScan(dest interface{}) error {
+ return r.scanAny(dest, true)
+}
+
+// SliceScan a row, returning a []interface{} with values similar to MapScan.
+// This function is primarily intended for use where the number of columns
+// is not known. Because you can pass an []interface{} directly to Scan,
+// it's recommended that you do that as it will not have to allocate new
+// slices per row.
+func SliceScan(r ColScanner) ([]interface{}, error) {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return []interface{}{}, err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+
+ if err != nil {
+ return values, err
+ }
+
+ for i := range columns {
+ values[i] = *(values[i].(*interface{}))
+ }
+
+ return values, r.Err()
+}
+
+// MapScan scans a single Row into the dest map[string]interface{}.
+// Use this to get results for SQL that might not be under your control
+// (for instance, if you're building an interface for an SQL server that
+// executes SQL from input). Please do not use this as a primary interface!
+// This will modify the map sent to it in place, so reuse the same map with
+// care. Columns which occur more than once in the result will overwrite
+// each other!
+func MapScan(r ColScanner, dest map[string]interface{}) error {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ for i, column := range columns {
+ dest[column] = *(values[i].(*interface{}))
+ }
+
+ return r.Err()
+}
+
+type rowsi interface {
+ Close() error
+ Columns() ([]string, error)
+ Err() error
+ Next() bool
+ Scan(...interface{}) error
+}
+
+// structOnlyError returns an error appropriate for type when a non-scannable
+// struct is expected but something else is given
+func structOnlyError(t reflect.Type) error {
+ isStruct := t.Kind() == reflect.Struct
+ isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
+ if !isStruct {
+ return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
+ }
+ if isScanner {
+ return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
+ }
+ return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
+}
+
+// scanAll scans all rows into a destination, which must be a slice of any
+// type. If the destination slice type is a Struct, then StructScan will be
+// used on each row. If the destination is some other kind of base type, then
+// each row must only have one column which can scan into that type. This
+// allows you to do something like:
+//
+// rows, _ := db.Query("select id from people;")
+// var ids []int
+// scanAll(rows, &ids, false)
+//
+// and ids will be a list of the id results. I realize that this is a desirable
+// interface to expose to users, but for now it will only be exposed via changes
+// to `Get` and `Select`. The reason that this has been implemented like this is
+// this is the only way to not duplicate reflect work in the new API while
+// maintaining backwards compatibility.
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
+ var v, vp reflect.Value
+
+ value := reflect.ValueOf(dest)
+
+ // json.Unmarshal returns errors for these
+ if value.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if value.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+ direct := reflect.Indirect(value)
+
+ slice, err := baseType(value.Type(), reflect.Slice)
+ if err != nil {
+ return err
+ }
+
+ isPtr := slice.Elem().Kind() == reflect.Ptr
+ base := reflectx.Deref(slice.Elem())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ // if it's a base type make sure it only has 1 column; if not return an error
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
+ }
+
+ if !scannable {
+ var values []interface{}
+ var m *reflectx.Mapper
+
+ switch rows.(type) {
+ case *Rows:
+ m = rows.(*Rows).Mapper
+ default:
+ m = mapper()
+ }
+
+ fields := m.TraversalsByName(base, columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values = make([]interface{}, len(columns))
+
+ for rows.Next() {
+ // create a new struct type (which returns PtrTo) and indirect it
+ vp = reflect.New(base)
+ v = reflect.Indirect(vp)
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+
+ // scan into the struct field pointers and append to our results
+ err = rows.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, v))
+ }
+ }
+ } else {
+ for rows.Next() {
+ vp = reflect.New(base)
+ err = rows.Scan(vp.Interface())
+ if err != nil {
+ return err
+ }
+ // append
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
+ }
+ }
+ }
+
+ return rows.Err()
+}
+
+// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
+// it doesn't really feel like it's named properly. There is an incongruency
+// between this and the way that StructScan (which might better be ScanStruct
+// anyway) works on a rows object.
+
+// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
+// StructScan will scan in the entire rows result, so if you do not want to
+// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
+// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
+func StructScan(rows rowsi, dest interface{}) error {
+ return scanAll(rows, dest, true)
+
+}
+
+// reflect helpers
+
+func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
+ t = reflectx.Deref(t)
+ if t.Kind() != expected {
+ return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
+ }
+ return t, nil
+}
+
+// fieldsByTraversal fills a values interface with fields from the passed value based
+// on the given traversals. If ptrs is true, return addresses instead of values.
+// We write this instead of using FieldsByName to save allocations and map lookups
+// when iterating over many rows. Empty traversals will get an interface pointer.
+// Because of the necessity of requesting ptrs or values, it's considered a bit too
+// specialized for inclusion in reflectx itself.
+func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
+ v = reflect.Indirect(v)
+ if v.Kind() != reflect.Struct {
+ return errors.New("argument not a struct")
+ }
+
+ for i, traversal := range traversals {
+ if len(traversal) == 0 {
+ values[i] = new(interface{})
+ continue
+ }
+ f := reflectx.FieldByIndexes(v, traversal)
+ if ptrs {
+ values[i] = f.Addr().Interface()
+ } else {
+ values[i] = f.Interface()
+ }
+ }
+ return nil
+}
+
+func missingFields(traversals [][]int) (field int, err error) {
+ for i, t := range traversals {
+ if len(t) == 0 {
+ return i, errors.New("missing field")
+ }
+ }
+ return 0, nil
+}
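
The scanAll helper vendored above is what backs sqlx.Select and the package-level StructScan: it reflects on the destination slice, takes the single-column path when the element type is directly scannable, uses the struct field mapper otherwise, and appends values or pointers depending on the slice's element kind. A rough sketch of how calling code exercises it; the Performer type, table, and column names are invented for illustration and are not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// Performer is a made-up struct; its db tags drive the field mapper.
type Performer struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	db, err := sqlx.Connect("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.MustExec(`CREATE TABLE performers (id INTEGER PRIMARY KEY, name TEXT)`)
	db.MustExec(`INSERT INTO performers (name) VALUES (?), (?)`, "alice", "bob")

	// A slice of structs goes through the struct-mapping branch of scanAll;
	// []*Performer would also work and exercises the isPtr append path.
	var performers []Performer
	if err := db.Select(&performers, `SELECT id, name FROM performers ORDER BY id`); err != nil {
		log.Fatal(err)
	}

	// A slice of a scannable base type must map to exactly one column,
	// mirroring the "non-struct dest type ... with >1 columns" check above.
	var names []string
	if err := db.Select(&names, `SELECT name FROM performers ORDER BY id`); err != nil {
		log.Fatal(err)
	}

	fmt.Println(performers, names)
}
```
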
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
new file mode 100644
index 000000000..06033111a
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
@@ -0,0 +1,346 @@
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+)
+
+// ConnectContext to a database and verify with a ping.
+func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return db, err
+ }
+ err = db.PingContext(ctx)
+ return db, err
+}
+
+// QueryerContext is an interface used by GetContext and SelectContext
+type QueryerContext interface {
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+ QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
+ QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
+}
+
+// PreparerContext is an interface used by PreparexContext.
+type PreparerContext interface {
+ PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
+}
+
+// ExecerContext is an interface used by MustExecContext and LoadFileContext
+type ExecerContext interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+}
+
+// ExtContext is a union interface which can bind, query, and exec, with Context
+// used by NamedQueryContext and NamedExecContext.
+type ExtContext interface {
+ binder
+ QueryerContext
+ ExecerContext
+}
+
+// SelectContext executes a query using the provided Queryer, and StructScans
+// each row into dest, which must be a slice. If the slice elements are
+// scannable, then the result set must have only one column. Otherwise,
+// StructScan is used. The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.QueryxContext(ctx, query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// PreparexContext prepares a statement.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
+ s, err := p.PrepareContext(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// GetContext does a QueryRow using the provided Queryer, and scans the
+// resulting row to dest. If dest is scannable, the result must only have one
+// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
+// row.Scan would. Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowxContext(ctx, query, args...)
+ return r.scanAny(dest, false)
+}
+
+// LoadFileContext exec's every statement in a file (as a single call to Exec).
+// LoadFileContext may return a nil *sql.Result if errors are encountered
+// locating or reading the file at path. LoadFile reads the entire file into
+// memory, so it is not suitable for loading large data dumps, but can be useful
+// for initializing schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.ExecContext(ctx, string(contents))
+ return &res, err
+}
+
+// MustExecContext execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
+ res, err := e.ExecContext(ctx, query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+ return prepareNamedContext(ctx, db, query)
+}
+
+// NamedQueryContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
+ return NamedQueryContext(ctx, db, query, arg)
+}
+
+// NamedExecContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, db, query, arg)
+}
+
+// SelectContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, db, dest, query, args...)
+}
+
+// GetContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, db, dest, query, args...)
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, db, query)
+}
+
+// QueryxContext queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowxContext queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := db.DB.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// MustBeginContext is canceled.
+func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
+ tx, err := db.BeginTxx(ctx, opts)
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// MustExecContext (panic) runs MustExec using this database.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, db, query, args...)
+}
+
+// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
+// *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// BeginxContext is canceled.
+func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+ tx, err := db.DB.BeginTx(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// StmtxContext returns a version of the prepared statement which runs within a
+// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
+func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
+}
+
+// NamedStmtContext returns a version of the prepared statement which runs
+// within a transaction.
+func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.StmtxContext(ctx, stmt.Stmt),
+ }
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, tx, query)
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+ return prepareNamedContext(ctx, tx, query)
+}
+
+// MustExecContext runs MustExecContext within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, tx, query, args...)
+}
+
+// QueryxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// SelectContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, tx, dest, query, args...)
+}
+
+// GetContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, tx, dest, query, args...)
+}
+
+// QueryRowxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// NamedExecContext using this Tx.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, tx, query, arg)
+}
+
+// SelectContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return SelectContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// GetContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return GetContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// MustExecContext (panic) using this statement. Note that the query portion of
+// the error output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, &qStmt{s}, "", args...)
+}
+
+// QueryRowxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowxContext(ctx, "", args...)
+}
+
+// QueryxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.QueryxContext(ctx, "", args...)
+}
+
+func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.QueryContext(ctx, args...)
+}
+
+func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.QueryContext(ctx, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.QueryContext(ctx, args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.ExecContext(ctx, args...)
+}
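
The vendored sqlx_context.go above adds context-aware counterparts (ConnectContext, SelectContext, GetContext, BeginTxx, MustExecContext, and the Tx/Stmt variants). A minimal sketch of how they compose, assuming an in-memory SQLite database; the Scene struct and table are invented for the example:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// Scene is a placeholder struct used only for this sketch.
type Scene struct {
	ID    int    `db:"id"`
	Title string `db:"title"`
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	db, err := sqlx.ConnectContext(ctx, "sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.MustExecContext(ctx, `CREATE TABLE scenes (id INTEGER PRIMARY KEY, title TEXT)`)

	// BeginTxx ties the transaction to ctx: cancelling the context rolls it back.
	tx, err := db.BeginTxx(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	tx.MustExecContext(ctx, `INSERT INTO scenes (title) VALUES (?)`, "intro")
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	// GetContext scans a single row; SelectContext scans every row into a slice.
	var first Scene
	if err := db.GetContext(ctx, &first, `SELECT id, title FROM scenes LIMIT 1`); err != nil {
		log.Fatal(err)
	}
	var all []Scene
	if err := db.SelectContext(ctx, &all, `SELECT id, title FROM scenes`); err != nil {
		log.Fatal(err)
	}
	log.Println(first, all)
}
```

The context governs query execution and the lifetime of the transaction, which is the main reason to reach for these variants over the plain helpers.
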
diff --git a/vendor/github.com/mattn/go-sqlite3/.travis.yml b/vendor/github.com/mattn/go-sqlite3/.travis.yml
index 2ae08beb4..a21c8170e 100644
--- a/vendor/github.com/mattn/go-sqlite3/.travis.yml
+++ b/vendor/github.com/mattn/go-sqlite3/.travis.yml
@@ -12,18 +12,18 @@ env:
matrix:
- GOTAGS=
- GOTAGS=libsqlite3
- - GOTAGS="sqlite_allow_uri_authority sqlite_app_armor sqlite_foreign_keys sqlite_fts5 sqlite_icu sqlite_introspect sqlite_json sqlite_secure_delete sqlite_see sqlite_stat4 sqlite_trace sqlite_userauth sqlite_vacuum_incr sqlite_vtable sqlite_unlock_notify"
+ - GOTAGS="sqlite_allow_uri_authority sqlite_app_armor sqlite_foreign_keys sqlite_fts5 sqlite_icu sqlite_introspect sqlite_json sqlite_secure_delete sqlite_see sqlite_stat4 sqlite_trace sqlite_userauth sqlite_vacuum_incr sqlite_vtable"
- GOTAGS=sqlite_vacuum_full
go:
- 1.9.x
- 1.10.x
- - 1.11.x
before_install:
- |
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
+ brew upgrade icu4c
fi
- |
go get github.com/smartystreets/goconvey
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
index 207f1cd1e..37d017a1d 100644
--- a/vendor/github.com/mattn/go-sqlite3/README.md
+++ b/vendor/github.com/mattn/go-sqlite3/README.md
@@ -67,7 +67,6 @@ This is also known as a DSN string. (Data Source Name).
Options are appended after the filename of the SQLite database.
The database filename and options are separated by a `?` (Question Mark).
-Options should be URL-encoded (see [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)).
This also applies when using an in-memory database instead of a file.
@@ -199,7 +198,7 @@ Additional information:
# Google Cloud Platform
-Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed.
+Building on GCP is not possible because Google Cloud Platform does not allow `gcc` to be executed.
Please work only with compiled final binaries.
@@ -291,7 +290,7 @@ For example the TDM-GCC Toolchain can be found [here](ttps://sourceforge.net/pro
When receiving a compile time error referencing recompile with `-FPIC` then you
are probably using a hardened system.
- You can compile the library on a hardend system with the following command.
+ You can compile the library on a hardened system with the following command.
```bash
go build -ldflags '-extldflags=-fno-PIC'
@@ -474,7 +473,7 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
For more information see [#289](https://github.com/mattn/go-sqlite3/issues/289)
-- Trying to execute a `.` (dot) command throws an error.
+- Trying to execute a `.` (dot) command throws an error.
Error: `Error: near ".": syntax error`
Dot commands are part of the SQLite3 CLI, not of this library.
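
The README hunks above concern the DSN format: driver options follow the database filename after a `?`. A small sketch of opening a database with options through database/sql; the stash.db filename is a placeholder, and `cache=shared` / `_foreign_keys=on` are go-sqlite3 connection parameters as I understand this vintage of the driver, so treat them as assumptions rather than a definitive reference:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Options are appended to the filename after a '?', as the README describes.
	dsn := "file:stash.db?cache=shared&_foreign_keys=on"

	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open is lazy; Ping forces a real connection so bad DSN options surface here.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("opened with DSN options")
}
```
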
diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go
index 2c68973b8..5a735c034 100644
--- a/vendor/github.com/mattn/go-sqlite3/callback.go
+++ b/vendor/github.com/mattn/go-sqlite3/callback.go
@@ -77,12 +77,6 @@ func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, row
callback(op, C.GoString(db), C.GoString(table), rowid)
}
-//export authorizerTrampoline
-func authorizerTrampoline(handle uintptr, op int, arg1 *C.char, arg2 *C.char, arg3 *C.char) int {
- callback := lookupHandle(handle).(func(int, string, string, string) int)
- return callback(op, C.GoString(arg1), C.GoString(arg2), C.GoString(arg3))
-}
-
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
index 776319750..f077152a9 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
@@ -1,7 +1,7 @@
#ifndef USE_LIBSQLITE3
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.25.2. By combining all the individual C code files into this
+** version 3.24.0. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -56,12 +56,6 @@
#define CTIMEOPT_VAL_(opt) #opt
#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt)
-/* Like CTIMEOPT_VAL, but especially for SQLITE_DEFAULT_LOOKASIDE. This
-** option requires a separate macro because legal values contain a single
-** comma. e.g. (-DSQLITE_DEFAULT_LOOKASIDE="100,100") */
-#define CTIMEOPT_VAL2_(opt1,opt2) #opt1 "," #opt2
-#define CTIMEOPT_VAL2(opt) CTIMEOPT_VAL2_(opt)
-
/*
** An array of names of all compile-time options. This array should
** be sorted A-Z.
@@ -145,7 +139,7 @@ static const char * const sqlite3azCompileOpt[] = {
"DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE),
#endif
#ifdef SQLITE_DEFAULT_LOOKASIDE
- "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL2(SQLITE_DEFAULT_LOOKASIDE),
+ "DEFAULT_LOOKASIDE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOOKASIDE),
#endif
#if SQLITE_DEFAULT_MEMSTATUS
"DEFAULT_MEMSTATUS",
@@ -1157,9 +1151,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.25.2"
-#define SQLITE_VERSION_NUMBER 3025002
-#define SQLITE_SOURCE_ID "2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7"
+#define SQLITE_VERSION "3.24.0"
+#define SQLITE_VERSION_NUMBER 3024000
+#define SQLITE_SOURCE_ID "2018-06-04 19:24:41 c7ee0833225bfd8c5ec2f9bf62b97c4e04d03bd9566366d5221ac8fb199a87ca"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -1506,7 +1500,6 @@ SQLITE_API int sqlite3_exec(
*/
#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8))
#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8))
-#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8))
#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
@@ -1546,7 +1539,6 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
-#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */
#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8))
#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
@@ -1922,8 +1914,7 @@ struct sqlite3_io_methods {
** <li>[[SQLITE_FCNTL_PERSIST_WAL]]
** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
-** write ahead log ([WAL file]) and shared memory
-** files used for transaction control
+** write ahead log and shared memory files used for transaction control
** are automatically deleted when the latest connection to the database
** closes. Setting persistent WAL mode causes those files to persist after
** close. Persisting the files is useful when other processes that do not
@@ -2109,26 +2100,6 @@ struct sqlite3_io_methods {
** a file lock using the xLock or xShmLock methods of the VFS to wait
** for up to M milliseconds before failing, where M is the single
** unsigned integer parameter.
-**
-** <li>[[SQLITE_FCNTL_DATA_VERSION]]
-** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to
-** a database file. The argument is a pointer to a 32-bit unsigned integer.
-** The "data version" for the pager is written into the pointer. The
-** "data version" changes whenever any change occurs to the corresponding
-** database file, either through SQL statements on the same database
-** connection or through transactions committed by separate database
-** connections possibly in other processes. The [sqlite3_total_changes()]
-** interface can be used to find if any database on the connection has changed,
-** but that interface responds to changes on TEMP as well as MAIN and does
-** not provide a mechanism to detect changes to MAIN only. Also, the
-** [sqlite3_total_changes()] interface responds to internal changes only and
-** omits changes made by other database connections. The
-** [PRAGMA data_version] command provide a mechanism to detect changes to
-** a single attached database that occur due to other database connections,
-** but omits changes implemented by the database connection on which it is
-** called. This file control is the only mechanism to detect changes that
-** happen either internally or externally and that are associated with
-** a particular attached database.
**
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -2164,7 +2135,6 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32
#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
#define SQLITE_FCNTL_LOCK_TIMEOUT 34
-#define SQLITE_FCNTL_DATA_VERSION 35
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -3179,12 +3149,6 @@ struct sqlite3_mem_methods {
** with no schema and no content. The following process works even for
** a badly corrupted database file:
**
-** <li> If the database connection is newly opened, make sure it has read the
-** database schema by preparing then discarding some query against the
-** database, or calling sqlite3_table_column_metadata(), ignoring any
-** errors. This step is only necessary if the application desires to keep
-** the database in WAL mode after the reset if it was in WAL mode before
-** the reset.
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
@@ -3333,17 +3297,12 @@ SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64);
** program, the value returned reflects the number of rows modified by the
** previous INSERT, UPDATE or DELETE statement within the same trigger.
**
+** See also the [sqlite3_total_changes()] interface, the
+** [count_changes pragma], and the [changes() SQL function].
+**
** If a separate thread makes changes on the same database connection
** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful.
-**
-** See also:
-** <ul>
-** <li> the [sqlite3_total_changes()] interface
-** <li> the [count_changes pragma]
-** <li> the [changes() SQL function]
-** <li> the [data_version pragma]
-** </ul>
*/
SQLITE_API int sqlite3_changes(sqlite3*);
@@ -3361,26 +3320,13 @@ SQLITE_API int sqlite3_changes(sqlite3*);
** count, but those made as part of REPLACE constraint resolution are
** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
** are not counted.
-**
-** This the [sqlite3_total_changes(D)] interface only reports the number
-** of rows that changed due to SQL statement run against database
-** connection D. Any changes by other database connections are ignored.
-** To detect changes against a database file from other database
-** connections use the [PRAGMA data_version] command or the
-** [SQLITE_FCNTL_DATA_VERSION] [file control].
**
+** See also the [sqlite3_changes()] interface, the
+** [count_changes pragma], and the [total_changes() SQL function].
+**
** If a separate thread makes changes on the same database connection
** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful.
-**
-** See also:
-** <ul>
-** <li> the [sqlite3_changes()] interface
-** <li> the [count_changes pragma]
-** <li> the [changes() SQL function]
-** <li> the [data_version pragma]
-** <li> the [SQLITE_FCNTL_DATA_VERSION] [file control]
-** </ul>
*/
SQLITE_API int sqlite3_total_changes(sqlite3*);
@@ -4436,24 +4382,13 @@ SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int
** [database connection] D failed, then the sqlite3_errcode(D) interface
** returns the numeric [result code] or [extended result code] for that
** API call.
+** If the most recent API call was successful,
+** then the return value from sqlite3_errcode() is undefined.
** ^The sqlite3_extended_errcode()
** interface is the same except that it always returns the
** [extended result code] even when extended result codes are
** disabled.
**
-** The values returned by sqlite3_errcode() and/or
-** sqlite3_extended_errcode() might change with each API call.
-** Except, there are some interfaces that are guaranteed to never
-** change the value of the error code. The error-code preserving
-** interfaces are:
-**
-** <ul>
-** <li> sqlite3_errcode()
-** <li> sqlite3_extended_errcode()
-** <li> sqlite3_errmsg()
-** <li> sqlite3_errmsg16()
-** </ul>
-**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
** text that describes the error, as either UTF-8 or UTF-16 respectively.
** ^(Memory to hold the error message string is managed internally.
@@ -5607,25 +5542,11 @@ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
** [sqlite3_free()].
**
-** As long as the input parameters are correct, these routines will only
-** fail if an out-of-memory error occurs during a format conversion.
-** Only the following subset of interfaces are subject to out-of-memory
-** errors:
-**
-** <ul>
-** <li> sqlite3_column_blob()
-** <li> sqlite3_column_text()
-** <li> sqlite3_column_text16()
-** <li> sqlite3_column_bytes()
-** <li> sqlite3_column_bytes16()
-** </ul>
-**
-** If an out-of-memory error occurs, then the return value from these
-** routines is the same as if the column had contained an SQL NULL value.
-** Valid SQL NULL returns can be distinguished from out-of-memory errors
-** by invoking the [sqlite3_errcode()] immediately after the suspect
-** return value is obtained and before any
-** other SQLite interface is called on the same [database connection].
+** ^(If a memory allocation error occurs during the evaluation of any
+** of these routines, a default value is returned. The default value
+** is either the integer 0, the floating point number 0.0, or a NULL
+** pointer. Subsequent calls to [sqlite3_errcode()] will return
+** [SQLITE_NOMEM].)^
*/
SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
@@ -5702,13 +5623,11 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
**
** ^These functions (collectively known as "function creation routines")
** are used to add SQL functions or aggregates or to redefine the behavior
-** of existing SQL functions or aggregates. The only differences between
-** the three "sqlite3_create_function*" routines are the text encoding
-** expected for the second parameter (the name of the function being
-** created) and the presence or absence of a destructor callback for
-** the application data pointer. Function sqlite3_create_window_function()
-** is similar, but allows the user to supply the extra callback functions
-** needed by [aggregate window functions].
+** of existing SQL functions or aggregates. The only differences between
+** these routines are the text encoding expected for
+** the second parameter (the name of the function being created)
+** and the presence or absence of a destructor callback for
+** the application data pointer.
**
** ^The first parameter is the [database connection] to which the SQL
** function is to be added. ^If an application uses more than one database
@@ -5754,8 +5673,7 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
** ^(The fifth parameter is an arbitrary pointer. The implementation of the
** function can gain access to this pointer using [sqlite3_user_data()].)^
**
-** ^The sixth, seventh and eighth parameters passed to the three
-** "sqlite3_create_function*" functions, xFunc, xStep and xFinal, are
+** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are
** pointers to C-language functions that implement the SQL function or
** aggregate. ^A scalar SQL function requires an implementation of the xFunc
** callback only; NULL pointers must be passed as the xStep and xFinal
@@ -5764,24 +5682,15 @@ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
** SQL function or aggregate, pass NULL pointers for all three function
** callbacks.
**
-** ^The sixth, seventh, eighth and ninth parameters (xStep, xFinal, xValue
-** and xInverse) passed to sqlite3_create_window_function are pointers to
-** C-language callbacks that implement the new function. xStep and xFinal
-** must both be non-NULL. xValue and xInverse may either both be NULL, in
-** which case a regular aggregate function is created, or must both be
-** non-NULL, in which case the new function may be used as either an aggregate
-** or aggregate window function. More details regarding the implementation
-** of aggregate window functions are
-** [user-defined window functions|available here].
-**
-** ^(If the final parameter to sqlite3_create_function_v2() or
-** sqlite3_create_window_function() is not NULL, then it is destructor for
-** the application data pointer. The destructor is invoked when the function
-** is deleted, either by being overloaded or when the database connection
-** closes.)^ ^The destructor is also invoked if the call to
-** sqlite3_create_function_v2() fails. ^When the destructor callback is
-** invoked, it is passed a single argument which is a copy of the application
-** data pointer which was the fifth parameter to sqlite3_create_function_v2().
+** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL,
+** then it is destructor for the application data pointer.
+** The destructor is invoked when the function is deleted, either by being
+** overloaded or when the database connection closes.)^
+** ^The destructor is also invoked if the call to
+** sqlite3_create_function_v2() fails.
+** ^When the destructor callback of the tenth parameter is invoked, it
+** is passed a single argument which is a copy of the application data
+** pointer which was the fifth parameter to sqlite3_create_function_v2().
**
** ^It is permitted to register multiple implementations of the same
** functions with the same name but with either differing numbers of
@@ -5834,18 +5743,6 @@ SQLITE_API int sqlite3_create_function_v2(
void (*xFinal)(sqlite3_context*),
void(*xDestroy)(void*)
);
-SQLITE_API int sqlite3_create_window_function(
- sqlite3 *db,
- const char *zFunctionName,
- int nArg,
- int eTextRep,
- void *pApp,
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
- void (*xFinal)(sqlite3_context*),
- void (*xValue)(sqlite3_context*),
- void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
- void(*xDestroy)(void*)
-);
/*
** CAPI3REF: Text Encodings
@@ -5988,28 +5885,6 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
**
** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters.
-**
-** As long as the input parameter is correct, these routines can only
-** fail if an out-of-memory error occurs during a format conversion.
-** Only the following subset of interfaces are subject to out-of-memory
-** errors:
-**
-** <ul>
-** <li> sqlite3_value_blob()
-** <li> sqlite3_value_text()
-** <li> sqlite3_value_text16()
-** <li> sqlite3_value_text16le()
-** <li> sqlite3_value_text16be()
-** <li> sqlite3_value_bytes()
-** <li> sqlite3_value_bytes16()
-** </ul>
-**
-** If an out-of-memory error occurs, then the return value from these
-** routines is the same as if the column had contained an SQL NULL value.
-** Valid SQL NULL returns can be distinguished from out-of-memory errors
-** by invoking the [sqlite3_errcode()] immediately after the suspect
-** return value is obtained and before any
-** other SQLite interface is called on the same [database connection].
*/
SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
SQLITE_API double sqlite3_value_double(sqlite3_value*);
@@ -7476,7 +7351,6 @@ struct sqlite3_index_info {
#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70
#define SQLITE_INDEX_CONSTRAINT_ISNULL 71
#define SQLITE_INDEX_CONSTRAINT_IS 72
-#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150
/*
** CAPI3REF: Register A Virtual Table Implementation
@@ -8153,7 +8027,6 @@ SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/*
** CAPI3REF: Low-Level Control Of Database Files
** METHOD: sqlite3
-** KEYWORDS: {file control}
**
** ^The [sqlite3_file_control()] interface makes a direct call to the
** xFileControl method for the [sqlite3_io_methods] object associated
@@ -8168,18 +8041,11 @@ SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
** the xFileControl method. ^The return value of the xFileControl
** method becomes the return value of this routine.
**
-** A few opcodes for [sqlite3_file_control()] are handled directly
-** by the SQLite core and never invoke the
-** sqlite3_io_methods.xFileControl method.
** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes
** a pointer to the underlying [sqlite3_file] object to be written into
-** the space pointed to by the 4th parameter. The
-** [SQLITE_FCNTL_JOURNAL_POINTER] works similarly except that it returns
-** the [sqlite3_file] object associated with the journal file instead of
-** the main database. The [SQLITE_FCNTL_VFS_POINTER] opcode returns
-** a pointer to the underlying [sqlite3_vfs] object for the file.
-** The [SQLITE_FCNTL_DATA_VERSION] returns the data version counter
-** from the pager.
+** the space pointed to by the 4th parameter. ^The [SQLITE_FCNTL_FILE_POINTER]
+** case is a short-circuit path which does not actually invoke the
+** underlying sqlite3_io_methods.xFileControl method.
**
** ^If the second parameter (zDbName) does not match the name of any
** open database file, then SQLITE_ERROR is returned. ^This error
@@ -9998,6 +9864,7 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
/*
** CAPI3REF: Database Snapshot
** KEYWORDS: {snapshot} {sqlite3_snapshot}
+** EXPERIMENTAL
**
** An instance of the snapshot object records the state of a [WAL mode]
** database for some specific point in history.
@@ -10014,6 +9881,11 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
** version of the database file so that it is possible to later open a new read
** transaction that sees that historical version of the database rather than
** the most recent version.
+**
+** The constructor for this object is [sqlite3_snapshot_get()]. The
+** [sqlite3_snapshot_open()] method causes a fresh read transaction to refer
+** to an historical snapshot (if possible). The destructor for
+** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
*/
typedef struct sqlite3_snapshot {
unsigned char hidden[48];
@@ -10021,7 +9893,7 @@ typedef struct sqlite3_snapshot {
/*
** CAPI3REF: Record A Database Snapshot
-** CONSTRUCTOR: sqlite3_snapshot
+** EXPERIMENTAL
**
** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
** new [sqlite3_snapshot] object that records the current state of
@@ -10037,7 +9909,7 @@ typedef struct sqlite3_snapshot {
** in this case.
**
** <ul>
-** <li> The database handle must not be in [autocommit mode].
+** <li> The database handle must be in [autocommit mode].
**
** <li> Schema S of [database connection] D must be a [WAL mode] database.
**
@@ -10060,7 +9932,7 @@ typedef struct sqlite3_snapshot {
** to avoid a memory leak.
**
** The [sqlite3_snapshot_get()] interface is only available when the
-** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
sqlite3 *db,
@@ -10070,35 +9942,24 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
/*
** CAPI3REF: Start a read transaction on an historical snapshot
-** METHOD: sqlite3_snapshot
+** EXPERIMENTAL
**
-** ^The [sqlite3_snapshot_open(D,S,P)] interface either starts a new read
-** transaction or upgrades an existing one for schema S of
-** [database connection] D such that the read transaction refers to
-** historical [snapshot] P, rather than the most recent change to the
-** database. ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK
-** on success or an appropriate [error code] if it fails.
-**
-** ^In order to succeed, the database connection must not be in
-** [autocommit mode] when [sqlite3_snapshot_open(D,S,P)] is called. If there
-** is already a read transaction open on schema S, then the database handle
-** must have no active statements (SELECT statements that have been passed
-** to sqlite3_step() but not sqlite3_reset() or sqlite3_finalize()).
-** SQLITE_ERROR is returned if either of these conditions is violated, or
-** if schema S does not exist, or if the snapshot object is invalid.
-**
-** ^A call to sqlite3_snapshot_open() will fail to open if the specified
-** snapshot has been overwritten by a [checkpoint]. In this case
-** SQLITE_ERROR_SNAPSHOT is returned.
-**
-** If there is already a read transaction open when this function is
-** invoked, then the same read transaction remains open (on the same
-** database snapshot) if SQLITE_ERROR, SQLITE_BUSY or SQLITE_ERROR_SNAPSHOT
-** is returned. If another error code - for example SQLITE_PROTOCOL or an
-** SQLITE_IOERR error code - is returned, then the final state of the
-** read transaction is undefined. If SQLITE_OK is returned, then the
-** read transaction is now open on database snapshot P.
+** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a
+** read transaction for schema S of
+** [database connection] D such that the read transaction
+** refers to historical [snapshot] P, rather than the most
+** recent change to the database.
+** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success
+** or an appropriate [error code] if it fails.
**
+** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be
+** the first operation following the [BEGIN] that takes the schema S
+** out of [autocommit mode].
+** ^In other words, schema S must not currently be in
+** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the
+** database connection D must be out of [autocommit mode].
+** ^A [snapshot] will fail to open if it has been overwritten by a
+** [checkpoint].
** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the
** database connection D does not know that the database file for
** schema S is in [WAL mode]. A database connection might not know
@@ -10109,7 +9970,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
** database connection in order to make it ready to use snapshots.)
**
** The [sqlite3_snapshot_open()] interface is only available when the
-** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
sqlite3 *db,
@@ -10119,20 +9980,20 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
/*
** CAPI3REF: Destroy a snapshot
-** DESTRUCTOR: sqlite3_snapshot
+** EXPERIMENTAL
**
** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
** The application must eventually free every [sqlite3_snapshot] object
** using this routine to avoid a memory leak.
**
** The [sqlite3_snapshot_free()] interface is only available when the
-** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used.
+** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
/*
** CAPI3REF: Compare the ages of two snapshot handles.
-** METHOD: sqlite3_snapshot
+** EXPERIMENTAL
**
** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages
** of two valid snapshot handles.
@@ -10151,9 +10012,6 @@ SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
** Otherwise, this API returns a negative value if P1 refers to an older
** snapshot than P2, zero if the two handles refer to the same database
** snapshot, and a positive value if P1 is a newer snapshot than P2.
-**
-** This interface is only available if SQLite is compiled with the
-** [SQLITE_ENABLE_SNAPSHOT] option.
*/
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
sqlite3_snapshot *p1,
@@ -10162,26 +10020,23 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
/*
** CAPI3REF: Recover snapshots from a wal file
-** METHOD: sqlite3_snapshot
+** EXPERIMENTAL
**
-** If a [WAL file] remains on disk after all database connections close
-** (either through the use of the [SQLITE_FCNTL_PERSIST_WAL] [file control]
-** or because the last process to have the database opened exited without
-** calling [sqlite3_close()]) and a new connection is subsequently opened
-** on that database and [WAL file], the [sqlite3_snapshot_open()] interface
-** will only be able to open the last transaction added to the WAL file
-** even though the WAL file contains other valid transactions.
+** If all connections disconnect from a database file but do not perform
+** a checkpoint, the existing wal file is opened along with the database
+** file the next time the database is opened. At this point it is only
+** possible to successfully call sqlite3_snapshot_open() to open the most
+** recent snapshot of the database (the one at the head of the wal file),
+** even though the wal file may contain other valid snapshots for which
+** clients have sqlite3_snapshot handles.
**
-** This function attempts to scan the WAL file associated with database zDb
+** This function attempts to scan the wal file associated with database zDb
** of database handle db and make all valid snapshots available to
** sqlite3_snapshot_open(). It is an error if there is already a read
-** transaction open on the database, or if the database is not a WAL mode
+** transaction open on the database, or if the database is not a wal mode
** database.
**
** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
-**
-** This interface is only available if SQLite is compiled with the
-** [SQLITE_ENABLE_SNAPSHOT] option.
*/
SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
@@ -10292,7 +10147,7 @@ SQLITE_API int sqlite3_deserialize(
** in the P argument is held in memory obtained from [sqlite3_malloc64()]
** and that SQLite should take ownership of this memory and automatically
** free it when it has finished using it. Without this flag, the caller
-** is responsible for freeing any dynamically allocated memory.
+** is responsible for freeing any dynamically allocated memory.
**
** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to
** grow the size of the database using calls to [sqlite3_realloc64()]. This
@@ -12470,7 +12325,7 @@ struct Fts5ExtensionApi {
** This way, even if the tokenizer does not provide synonyms
** when tokenizing query text (it should not - to do would be
** inefficient), it doesn't matter if the user queries for
-** 'first + place' or '1st + place', as there are entries in the
+** 'first + place' or '1st + place', as there are entries in the
** FTS index corresponding to both forms of the first token.
**
**
@@ -12498,7 +12353,7 @@ struct Fts5ExtensionApi {
** extra data to the FTS index or require FTS5 to query for multiple terms,
** so it is efficient in terms of disk space and query speed. However, it
** does not support prefix queries very well. If, as suggested above, the
-** token "first" is substituted for "1st" by the tokenizer, then the query:
+** token "first" is subsituted for "1st" by the tokenizer, then the query:
**
**
** ... MATCH '1s*'
@@ -13362,104 +13217,94 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#define TK_REPLACE 73
#define TK_RESTRICT 74
#define TK_ROW 75
-#define TK_ROWS 76
-#define TK_TRIGGER 77
-#define TK_VACUUM 78
-#define TK_VIEW 79
-#define TK_VIRTUAL 80
-#define TK_WITH 81
-#define TK_CURRENT 82
-#define TK_FOLLOWING 83
-#define TK_PARTITION 84
-#define TK_PRECEDING 85
-#define TK_RANGE 86
-#define TK_UNBOUNDED 87
-#define TK_REINDEX 88
-#define TK_RENAME 89
-#define TK_CTIME_KW 90
-#define TK_ANY 91
-#define TK_BITAND 92
-#define TK_BITOR 93
-#define TK_LSHIFT 94
-#define TK_RSHIFT 95
-#define TK_PLUS 96
-#define TK_MINUS 97
-#define TK_STAR 98
-#define TK_SLASH 99
-#define TK_REM 100
-#define TK_CONCAT 101
-#define TK_COLLATE 102
-#define TK_BITNOT 103
-#define TK_ON 104
-#define TK_INDEXED 105
-#define TK_STRING 106
-#define TK_JOIN_KW 107
-#define TK_CONSTRAINT 108
-#define TK_DEFAULT 109
-#define TK_NULL 110
-#define TK_PRIMARY 111
-#define TK_UNIQUE 112
-#define TK_CHECK 113
-#define TK_REFERENCES 114
-#define TK_AUTOINCR 115
-#define TK_INSERT 116
-#define TK_DELETE 117
-#define TK_UPDATE 118
-#define TK_SET 119
-#define TK_DEFERRABLE 120
-#define TK_FOREIGN 121
-#define TK_DROP 122
-#define TK_UNION 123
-#define TK_ALL 124
-#define TK_EXCEPT 125
-#define TK_INTERSECT 126
-#define TK_SELECT 127
-#define TK_VALUES 128
-#define TK_DISTINCT 129
-#define TK_DOT 130
-#define TK_FROM 131
-#define TK_JOIN 132
-#define TK_USING 133
-#define TK_ORDER 134
-#define TK_GROUP 135
-#define TK_HAVING 136
-#define TK_LIMIT 137
-#define TK_WHERE 138
-#define TK_INTO 139
-#define TK_NOTHING 140
-#define TK_FLOAT 141
-#define TK_BLOB 142
-#define TK_INTEGER 143
-#define TK_VARIABLE 144
-#define TK_CASE 145
-#define TK_WHEN 146
-#define TK_THEN 147
-#define TK_ELSE 148
-#define TK_INDEX 149
-#define TK_ALTER 150
-#define TK_ADD 151
-#define TK_WINDOW 152
-#define TK_OVER 153
-#define TK_FILTER 154
-#define TK_TRUEFALSE 155
-#define TK_ISNOT 156
-#define TK_FUNCTION 157
-#define TK_COLUMN 158
-#define TK_AGG_FUNCTION 159
-#define TK_AGG_COLUMN 160
-#define TK_UMINUS 161
-#define TK_UPLUS 162
-#define TK_TRUTH 163
-#define TK_REGISTER 164
-#define TK_VECTOR 165
-#define TK_SELECT_COLUMN 166
-#define TK_IF_NULL_ROW 167
-#define TK_ASTERISK 168
-#define TK_SPAN 169
-#define TK_END_OF_FILE 170
-#define TK_UNCLOSED_STRING 171
-#define TK_SPACE 172
-#define TK_ILLEGAL 173
+#define TK_TRIGGER 76
+#define TK_VACUUM 77
+#define TK_VIEW 78
+#define TK_VIRTUAL 79
+#define TK_WITH 80
+#define TK_REINDEX 81
+#define TK_RENAME 82
+#define TK_CTIME_KW 83
+#define TK_ANY 84
+#define TK_BITAND 85
+#define TK_BITOR 86
+#define TK_LSHIFT 87
+#define TK_RSHIFT 88
+#define TK_PLUS 89
+#define TK_MINUS 90
+#define TK_STAR 91
+#define TK_SLASH 92
+#define TK_REM 93
+#define TK_CONCAT 94
+#define TK_COLLATE 95
+#define TK_BITNOT 96
+#define TK_ON 97
+#define TK_INDEXED 98
+#define TK_STRING 99
+#define TK_JOIN_KW 100
+#define TK_CONSTRAINT 101
+#define TK_DEFAULT 102
+#define TK_NULL 103
+#define TK_PRIMARY 104
+#define TK_UNIQUE 105
+#define TK_CHECK 106
+#define TK_REFERENCES 107
+#define TK_AUTOINCR 108
+#define TK_INSERT 109
+#define TK_DELETE 110
+#define TK_UPDATE 111
+#define TK_SET 112
+#define TK_DEFERRABLE 113
+#define TK_FOREIGN 114
+#define TK_DROP 115
+#define TK_UNION 116
+#define TK_ALL 117
+#define TK_EXCEPT 118
+#define TK_INTERSECT 119
+#define TK_SELECT 120
+#define TK_VALUES 121
+#define TK_DISTINCT 122
+#define TK_DOT 123
+#define TK_FROM 124
+#define TK_JOIN 125
+#define TK_USING 126
+#define TK_ORDER 127
+#define TK_GROUP 128
+#define TK_HAVING 129
+#define TK_LIMIT 130
+#define TK_WHERE 131
+#define TK_INTO 132
+#define TK_NOTHING 133
+#define TK_FLOAT 134
+#define TK_BLOB 135
+#define TK_INTEGER 136
+#define TK_VARIABLE 137
+#define TK_CASE 138
+#define TK_WHEN 139
+#define TK_THEN 140
+#define TK_ELSE 141
+#define TK_INDEX 142
+#define TK_ALTER 143
+#define TK_ADD 144
+#define TK_TRUEFALSE 145
+#define TK_ISNOT 146
+#define TK_FUNCTION 147
+#define TK_COLUMN 148
+#define TK_AGG_FUNCTION 149
+#define TK_AGG_COLUMN 150
+#define TK_UMINUS 151
+#define TK_UPLUS 152
+#define TK_TRUTH 153
+#define TK_REGISTER 154
+#define TK_VECTOR 155
+#define TK_SELECT_COLUMN 156
+#define TK_IF_NULL_ROW 157
+#define TK_ASTERISK 158
+#define TK_SPAN 159
+#define TK_END_OF_FILE 160
+#define TK_UNCLOSED_STRING 161
+#define TK_SPACE 162
+#define TK_ILLEGAL 163
/* The token codes above must all fit in 8 bits */
#define TKFLG_MASK 0xff
@@ -13733,8 +13578,7 @@ typedef INT16_TYPE LogEst;
# if defined(__SIZEOF_POINTER__)
# define SQLITE_PTRSIZE __SIZEOF_POINTER__
# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
- defined(_M_ARM) || defined(__arm__) || defined(__x86) || \
- (defined(__TOS_AIX__) && !defined(__64BIT__))
+ defined(_M_ARM) || defined(__arm__) || defined(__x86)
# define SQLITE_PTRSIZE 4
# else
# define SQLITE_PTRSIZE 8
@@ -13775,7 +13619,7 @@ typedef INT16_TYPE LogEst;
# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
- defined(__arm__) || defined(_M_ARM64)
+ defined(__arm__)
# define SQLITE_BYTEORDER 1234
# elif defined(sparc) || defined(__ppc__)
# define SQLITE_BYTEORDER 4321
@@ -14030,7 +13874,6 @@ typedef struct NameContext NameContext;
typedef struct Parse Parse;
typedef struct PreUpdate PreUpdate;
typedef struct PrintfArguments PrintfArguments;
-typedef struct RenameToken RenameToken;
typedef struct RowSet RowSet;
typedef struct Savepoint Savepoint;
typedef struct Select Select;
@@ -14051,35 +13894,8 @@ typedef struct VTable VTable;
typedef struct VtabCtx VtabCtx;
typedef struct Walker Walker;
typedef struct WhereInfo WhereInfo;
-typedef struct Window Window;
typedef struct With With;
-
-/*
-** The bitmask datatype defined below is used for various optimizations.
-**
-** Changing this from a 64-bit to a 32-bit type limits the number of
-** tables in a join to 32 instead of 64. But it also reduces the size
-** of the library by 738 bytes on ix86.
-*/
-#ifdef SQLITE_BITMASK_TYPE
- typedef SQLITE_BITMASK_TYPE Bitmask;
-#else
- typedef u64 Bitmask;
-#endif
-
-/*
-** The number of bits in a Bitmask. "BMS" means "BitMask Size".
-*/
-#define BMS ((int)(sizeof(Bitmask)*8))
-
-/*
-** A bit in a Bitmask
-*/
-#define MASKBIT(n) (((Bitmask)1)<<(n))
-#define MASKBIT32(n) (((unsigned int)1)<<(n))
-#define ALLBITS ((Bitmask)-1)
-
/* A VList object records a mapping between parameters/variables/wildcards
** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer
** variable number associated with that parameter. See the format description
@@ -14175,7 +13991,7 @@ SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree*);
SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p);
SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int);
SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *);
-SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int,int*);
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int);
SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster);
SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int);
SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*);
@@ -14398,9 +14214,6 @@ struct BtreePayload {
SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload,
int flags, int seekResult);
SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes);
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE void sqlite3BtreeSkipNext(BtCursor*);
-#endif
SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes);
SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int flags);
SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*);
@@ -14568,8 +14381,7 @@ struct VdbeOp {
u64 cycles; /* Total time spent executing this instruction */
#endif
#ifdef SQLITE_VDBE_COVERAGE
- u32 iSrcLine; /* Source-code line that generated this opcode
- ** with flags in the upper 8 bits */
+ int iSrcLine; /* Source-code line that generated this opcode */
#endif
};
typedef struct VdbeOp VdbeOp;
@@ -14670,52 +14482,52 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_AutoCommit 1
#define OP_Transaction 2
#define OP_SorterNext 3 /* jump */
-#define OP_Prev 4 /* jump */
-#define OP_Next 5 /* jump */
-#define OP_Checkpoint 6
-#define OP_JournalMode 7
-#define OP_Vacuum 8
-#define OP_VFilter 9 /* jump, synopsis: iplan=r[P3] zplan='P4' */
-#define OP_VUpdate 10 /* synopsis: data=r[P3@P2] */
-#define OP_Goto 11 /* jump */
-#define OP_Gosub 12 /* jump */
-#define OP_InitCoroutine 13 /* jump */
-#define OP_Yield 14 /* jump */
-#define OP_MustBeInt 15 /* jump */
-#define OP_Jump 16 /* jump */
-#define OP_Once 17 /* jump */
-#define OP_If 18 /* jump */
+#define OP_PrevIfOpen 4 /* jump */
+#define OP_NextIfOpen 5 /* jump */
+#define OP_Prev 6 /* jump */
+#define OP_Next 7 /* jump */
+#define OP_Checkpoint 8
+#define OP_JournalMode 9
+#define OP_Vacuum 10
+#define OP_VFilter 11 /* jump, synopsis: iplan=r[P3] zplan='P4' */
+#define OP_VUpdate 12 /* synopsis: data=r[P3@P2] */
+#define OP_Goto 13 /* jump */
+#define OP_Gosub 14 /* jump */
+#define OP_InitCoroutine 15 /* jump */
+#define OP_Yield 16 /* jump */
+#define OP_MustBeInt 17 /* jump */
+#define OP_Jump 18 /* jump */
#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
-#define OP_IfNot 20 /* jump */
-#define OP_IfNullRow 21 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */
-#define OP_SeekLT 22 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekLE 23 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekGE 24 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekGT 25 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IfNoHope 26 /* jump, synopsis: key=r[P3@P4] */
-#define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */
-#define OP_NotFound 28 /* jump, synopsis: key=r[P3@P4] */
-#define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */
-#define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */
-#define OP_Last 32 /* jump */
-#define OP_IfSmaller 33 /* jump */
-#define OP_SorterSort 34 /* jump */
-#define OP_Sort 35 /* jump */
-#define OP_Rewind 36 /* jump */
-#define OP_IdxLE 37 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxGT 38 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxLT 39 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxGE 40 /* jump, synopsis: key=r[P3@P4] */
-#define OP_RowSetRead 41 /* jump, synopsis: r[P3]=rowset(P1) */
-#define OP_RowSetTest 42 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Once 20 /* jump */
+#define OP_If 21 /* jump */
+#define OP_IfNot 22 /* jump */
+#define OP_IfNullRow 23 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */
+#define OP_SeekLT 24 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekLE 25 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGE 26 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGT 27 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NoConflict 28 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NotFound 29 /* jump, synopsis: key=r[P3@P4] */
+#define OP_Found 30 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekRowid 31 /* jump, synopsis: intkey=r[P3] */
+#define OP_NotExists 32 /* jump, synopsis: intkey=r[P3] */
+#define OP_Last 33 /* jump */
+#define OP_IfSmaller 34 /* jump */
+#define OP_SorterSort 35 /* jump */
+#define OP_Sort 36 /* jump */
+#define OP_Rewind 37 /* jump */
+#define OP_IdxLE 38 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxGT 39 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxLT 40 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxGE 41 /* jump, synopsis: key=r[P3@P4] */
+#define OP_RowSetRead 42 /* jump, synopsis: r[P3]=rowset(P1) */
#define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
#define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
-#define OP_Program 45 /* jump */
-#define OP_FkIfZero 46 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_IfPos 47 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
-#define OP_IfNotZero 48 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
-#define OP_DecrJumpZero 49 /* jump, synopsis: if (--r[P1])==0 goto P2 */
+#define OP_RowSetTest 45 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program 46 /* jump */
+#define OP_FkIfZero 47 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
+#define OP_IfPos 48 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 49 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
#define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
#define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
#define OP_Ne 52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */
@@ -14725,121 +14537,119 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Lt 56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
#define OP_Ge 57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
#define OP_ElseNotEq 58 /* jump, same as TK_ESCAPE */
-#define OP_IncrVacuum 59 /* jump */
-#define OP_VNext 60 /* jump */
-#define OP_Init 61 /* jump, synopsis: Start at P2 */
-#define OP_PureFunc0 62
-#define OP_Function0 63 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_PureFunc 64
-#define OP_Function 65 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_Return 66
-#define OP_EndCoroutine 67
-#define OP_HaltIfNull 68 /* synopsis: if r[P3]=null halt */
-#define OP_Halt 69
-#define OP_Integer 70 /* synopsis: r[P2]=P1 */
-#define OP_Int64 71 /* synopsis: r[P2]=P4 */
-#define OP_String 72 /* synopsis: r[P2]='P4' (len=P1) */
-#define OP_Null 73 /* synopsis: r[P2..P3]=NULL */
-#define OP_SoftNull 74 /* synopsis: r[P1]=NULL */
-#define OP_Blob 75 /* synopsis: r[P2]=P4 (len=P1) */
-#define OP_Variable 76 /* synopsis: r[P2]=parameter(P1,P4) */
-#define OP_Move 77 /* synopsis: r[P2@P3]=r[P1@P3] */
-#define OP_Copy 78 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
-#define OP_SCopy 79 /* synopsis: r[P2]=r[P1] */
-#define OP_IntCopy 80 /* synopsis: r[P2]=r[P1] */
-#define OP_ResultRow 81 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq 82
-#define OP_AddImm 83 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_RealAffinity 84
-#define OP_Cast 85 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation 86
-#define OP_Compare 87 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_IsTrue 88 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
-#define OP_Offset 89 /* synopsis: r[P3] = sqlite_offset(P1) */
-#define OP_Column 90 /* synopsis: r[P3]=PX */
-#define OP_Affinity 91 /* synopsis: affinity(r[P1@P2]) */
-#define OP_BitAnd 92 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
-#define OP_BitOr 93 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
-#define OP_ShiftLeft 94 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
-#define OP_ShiftRight 95 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
-#define OP_Add 96 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
-#define OP_Subtract 97 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
-#define OP_Multiply 98 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
-#define OP_Divide 99 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
-#define OP_Remainder 100 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
-#define OP_Concat 101 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
-#define OP_MakeRecord 102 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_BitNot 103 /* same as TK_BITNOT, synopsis: r[P2]= ~r[P1] */
-#define OP_Count 104 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie 105
-#define OP_String8 106 /* same as TK_STRING, synopsis: r[P2]='P4' */
-#define OP_SetCookie 107
-#define OP_ReopenIdx 108 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead 109 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenWrite 110 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenDup 111
-#define OP_OpenAutoindex 112 /* synopsis: nColumn=P2 */
-#define OP_OpenEphemeral 113 /* synopsis: nColumn=P2 */
-#define OP_SorterOpen 114
-#define OP_SequenceTest 115 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
-#define OP_OpenPseudo 116 /* synopsis: P3 columns in r[P2] */
-#define OP_Close 117
-#define OP_ColumnsUsed 118
-#define OP_SeekHit 119 /* synopsis: seekHit=P2 */
-#define OP_Sequence 120 /* synopsis: r[P2]=cursor[P1].ctr++ */
-#define OP_NewRowid 121 /* synopsis: r[P2]=rowid */
-#define OP_Insert 122 /* synopsis: intkey=r[P3] data=r[P2] */
-#define OP_InsertInt 123 /* synopsis: intkey=P3 data=r[P2] */
-#define OP_Delete 124
-#define OP_ResetCount 125
-#define OP_SorterCompare 126 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
-#define OP_SorterData 127 /* synopsis: r[P2]=data */
-#define OP_RowData 128 /* synopsis: r[P2]=data */
-#define OP_Rowid 129 /* synopsis: r[P2]=rowid */
-#define OP_NullRow 130
-#define OP_SeekEnd 131
-#define OP_SorterInsert 132 /* synopsis: key=r[P2] */
-#define OP_IdxInsert 133 /* synopsis: key=r[P2] */
-#define OP_IdxDelete 134 /* synopsis: key=r[P2@P3] */
-#define OP_DeferredSeek 135 /* synopsis: Move P3 to P1.rowid if needed */
-#define OP_IdxRowid 136 /* synopsis: r[P2]=rowid */
-#define OP_Destroy 137
-#define OP_Clear 138
-#define OP_ResetSorter 139
-#define OP_CreateBtree 140 /* synopsis: r[P2]=root iDb=P1 flags=P3 */
-#define OP_Real 141 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_SqlExec 142
-#define OP_ParseSchema 143
-#define OP_LoadAnalysis 144
-#define OP_DropTable 145
-#define OP_DropIndex 146
-#define OP_DropTrigger 147
-#define OP_IntegrityCk 148
-#define OP_RowSetAdd 149 /* synopsis: rowset(P1)=r[P2] */
-#define OP_Param 150
-#define OP_FkCounter 151 /* synopsis: fkctr[P1]+=P2 */
-#define OP_MemMax 152 /* synopsis: r[P1]=max(r[P1],r[P2]) */
-#define OP_OffsetLimit 153 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
-#define OP_AggInverse 154 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */
-#define OP_AggStep 155 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep1 156 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggValue 157 /* synopsis: r[P3]=value N=P2 */
-#define OP_AggFinal 158 /* synopsis: accum=r[P1] N=P2 */
-#define OP_Expire 159
-#define OP_TableLock 160 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 161
-#define OP_VCreate 162
-#define OP_VDestroy 163
-#define OP_VOpen 164
-#define OP_VColumn 165 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 166
-#define OP_Pagecount 167
-#define OP_MaxPgcnt 168
-#define OP_Trace 169
-#define OP_CursorHint 170
-#define OP_Noop 171
-#define OP_Explain 172
-#define OP_Abortable 173
+#define OP_DecrJumpZero 59 /* jump, synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum 60 /* jump */
+#define OP_VNext 61 /* jump */
+#define OP_Init 62 /* jump, synopsis: Start at P2 */
+#define OP_Return 63
+#define OP_EndCoroutine 64
+#define OP_HaltIfNull 65 /* synopsis: if r[P3]=null halt */
+#define OP_Halt 66
+#define OP_Integer 67 /* synopsis: r[P2]=P1 */
+#define OP_Int64 68 /* synopsis: r[P2]=P4 */
+#define OP_String 69 /* synopsis: r[P2]='P4' (len=P1) */
+#define OP_Null 70 /* synopsis: r[P2..P3]=NULL */
+#define OP_SoftNull 71 /* synopsis: r[P1]=NULL */
+#define OP_Blob 72 /* synopsis: r[P2]=P4 (len=P1) */
+#define OP_Variable 73 /* synopsis: r[P2]=parameter(P1,P4) */
+#define OP_Move 74 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_Copy 75 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
+#define OP_SCopy 76 /* synopsis: r[P2]=r[P1] */
+#define OP_IntCopy 77 /* synopsis: r[P2]=r[P1] */
+#define OP_ResultRow 78 /* synopsis: output=r[P1@P2] */
+#define OP_CollSeq 79
+#define OP_AddImm 80 /* synopsis: r[P1]=r[P1]+P2 */
+#define OP_RealAffinity 81
+#define OP_Cast 82 /* synopsis: affinity(r[P1]) */
+#define OP_Permutation 83
+#define OP_Compare 84 /* synopsis: r[P1@P3] <-> r[P2@P3] */
+#define OP_BitAnd 85 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr 86 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft 87 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight 88 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add 89 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract 90 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply 91 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide 92 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder 93 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat 94 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
+#define OP_IsTrue 95 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
+#define OP_BitNot 96 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
+#define OP_Offset 97 /* synopsis: r[P3] = sqlite_offset(P1) */
+#define OP_Column 98 /* synopsis: r[P3]=PX */
+#define OP_String8 99 /* same as TK_STRING, synopsis: r[P2]='P4' */
+#define OP_Affinity 100 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord 101 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count 102 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie 103
+#define OP_SetCookie 104
+#define OP_ReopenIdx 105 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenRead 106 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenWrite 107 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenDup 108
+#define OP_OpenAutoindex 109 /* synopsis: nColumn=P2 */
+#define OP_OpenEphemeral 110 /* synopsis: nColumn=P2 */
+#define OP_SorterOpen 111
+#define OP_SequenceTest 112 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
+#define OP_OpenPseudo 113 /* synopsis: P3 columns in r[P2] */
+#define OP_Close 114
+#define OP_ColumnsUsed 115
+#define OP_Sequence 116 /* synopsis: r[P2]=cursor[P1].ctr++ */
+#define OP_NewRowid 117 /* synopsis: r[P2]=rowid */
+#define OP_Insert 118 /* synopsis: intkey=r[P3] data=r[P2] */
+#define OP_InsertInt 119 /* synopsis: intkey=P3 data=r[P2] */
+#define OP_Delete 120
+#define OP_ResetCount 121
+#define OP_SorterCompare 122 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
+#define OP_SorterData 123 /* synopsis: r[P2]=data */
+#define OP_RowData 124 /* synopsis: r[P2]=data */
+#define OP_Rowid 125 /* synopsis: r[P2]=rowid */
+#define OP_NullRow 126
+#define OP_SeekEnd 127
+#define OP_SorterInsert 128 /* synopsis: key=r[P2] */
+#define OP_IdxInsert 129 /* synopsis: key=r[P2] */
+#define OP_IdxDelete 130 /* synopsis: key=r[P2@P3] */
+#define OP_DeferredSeek 131 /* synopsis: Move P3 to P1.rowid if needed */
+#define OP_IdxRowid 132 /* synopsis: r[P2]=rowid */
+#define OP_Destroy 133
+#define OP_Real 134 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
+#define OP_Clear 135
+#define OP_ResetSorter 136
+#define OP_CreateBtree 137 /* synopsis: r[P2]=root iDb=P1 flags=P3 */
+#define OP_SqlExec 138
+#define OP_ParseSchema 139
+#define OP_LoadAnalysis 140
+#define OP_DropTable 141
+#define OP_DropIndex 142
+#define OP_DropTrigger 143
+#define OP_IntegrityCk 144
+#define OP_RowSetAdd 145 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Param 146
+#define OP_FkCounter 147 /* synopsis: fkctr[P1]+=P2 */
+#define OP_MemMax 148 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit 149 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggStep0 150 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggStep 151 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggFinal 152 /* synopsis: accum=r[P1] N=P2 */
+#define OP_Expire 153
+#define OP_TableLock 154 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 155
+#define OP_VCreate 156
+#define OP_VDestroy 157
+#define OP_VOpen 158
+#define OP_VColumn 159 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 160
+#define OP_Pagecount 161
+#define OP_MaxPgcnt 162
+#define OP_PureFunc0 163
+#define OP_Function0 164 /* synopsis: r[P3]=func(r[P2@P5]) */
+#define OP_PureFunc 165
+#define OP_Function 166 /* synopsis: r[P3]=func(r[P2@P5]) */
+#define OP_Trace 167
+#define OP_CursorHint 168
+#define OP_Noop 169
+#define OP_Explain 170
+#define OP_Abortable 171
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -14852,28 +14662,28 @@ typedef struct VdbeOpList VdbeOpList;
#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x10,\
-/* 8 */ 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x03,\
-/* 16 */ 0x01, 0x01, 0x03, 0x12, 0x03, 0x01, 0x09, 0x09,\
+/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01,\
+/* 8 */ 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01,\
+/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x03, 0x03, 0x01,\
/* 24 */ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,\
-/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
-/* 40 */ 0x01, 0x23, 0x0b, 0x26, 0x26, 0x01, 0x01, 0x03,\
+/* 32 */ 0x09, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
+/* 40 */ 0x01, 0x01, 0x23, 0x26, 0x26, 0x0b, 0x01, 0x01,\
/* 48 */ 0x03, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/* 56 */ 0x0b, 0x0b, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,\
-/* 64 */ 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10, 0x10,\
-/* 72 */ 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10,\
-/* 80 */ 0x10, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00,\
-/* 88 */ 0x12, 0x20, 0x00, 0x00, 0x26, 0x26, 0x26, 0x26,\
-/* 96 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00, 0x12,\
-/* 104 */ 0x10, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 112 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 120 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 128 */ 0x00, 0x10, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00,\
-/* 136 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
-/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\
-/* 152 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\
-/* 168 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,}
+/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x01, 0x01, 0x01, 0x02,\
+/* 64 */ 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00,\
+/* 72 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
+/* 80 */ 0x02, 0x02, 0x02, 0x00, 0x00, 0x26, 0x26, 0x26,\
+/* 88 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x12,\
+/* 96 */ 0x12, 0x20, 0x00, 0x10, 0x00, 0x00, 0x10, 0x10,\
+/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 112 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
+/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,\
+/* 128 */ 0x04, 0x04, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\
+/* 136 */ 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 144 */ 0x00, 0x06, 0x10, 0x00, 0x04, 0x1a, 0x00, 0x00,\
+/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 160 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 168 */ 0x00, 0x00, 0x00, 0x00,}
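/* Editor's illustrative sketch (not part of the patch): the byte table
** produced by OPFLG_INITIALIZER is indexed by opcode number and tested
** against the OPFLG_* masks above.  The array exOpcodeProperty and the
** helper exOpcodeIsJump are hypothetical stand-ins used only to show the
** lookup pattern; the amalgamation wires the real table up elsewhere. */
static const unsigned char exOpcodeProperty[] = OPFLG_INITIALIZER;
static int exOpcodeIsJump(int op){
  /* 0x01 is the OPFLG_JUMP bit: set for opcodes whose P2 is a jump target */
  return (exOpcodeProperty[op] & 0x01)!=0;
}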
/* The sqlite3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -14881,7 +14691,7 @@ typedef struct VdbeOpList VdbeOpList;
** generated this include file strives to group all JUMP opcodes
** together near the beginning of the list.
*/
-#define SQLITE_MX_JUMP_OPCODE 61 /* Maximum JUMP opcode */
+#define SQLITE_MX_JUMP_OPCODE 62 /* Maximum JUMP opcode */
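/* Editor's illustrative sketch (not part of the patch): because the code
** generator groups all jump opcodes at the low end of the numbering,
** "might this opcode jump?" reduces to a single comparison.  The helper
** name exOpcodeMayJump is hypothetical. */
static int exOpcodeMayJump(int op){
  return op<=SQLITE_MX_JUMP_OPCODE;
}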
/************** End of opcodes.h *********************************************/
/************** Continuing where we left off in vdbe.h ***********************/
@@ -14955,6 +14765,9 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*);
SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int);
+#ifdef SQLITE_COVERAGE_TEST
+SQLITE_PRIVATE int sqlite3VdbeLabelHasBeenResolved(Vdbe*,int);
+#endif
SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*);
#ifdef SQLITE_DEBUG
SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int);
@@ -14976,7 +14789,6 @@ SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int);
SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*);
#endif
SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
-SQLITE_PRIVATE int sqlite3BlobCompare(const Mem*, const Mem*);
SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
@@ -15032,52 +14844,23 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
**
** VdbeCoverageNeverTaken(v) // Previous branch is never taken
**
-** VdbeCoverageNeverNull(v) // Previous three-way branch is only
-** // taken on the first two ways. The
-** // NULL option is not possible
-**
-** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested
-** // in distinguishing equal and not-equal.
-**
** Every VDBE branch operation must be tagged with one of the macros above.
** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
** routine in vdbe.c, alerting the developer to the missed tag.
-**
-** During testing, the test application will invoke
-** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback
-** routine that is invoked as each bytecode branch is taken. The callback
-** contains the sqlite3.c source line number of the VdbeCoverage macro and
-** flags to indicate whether or not the branch was taken. The test application
-** is responsible for keeping track of this and reporting byte-code branches
-** that are never taken.
-**
-** See the VdbeBranchTaken() macro and vdbeTakeBranch() function in the
-** vdbe.c source file for additional information.
*/
#ifdef SQLITE_VDBE_COVERAGE
SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__)
# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__)
-# define VdbeCoverageAlwaysTaken(v) \
- sqlite3VdbeSetLineNumber(v,__LINE__|0x5000000);
-# define VdbeCoverageNeverTaken(v) \
- sqlite3VdbeSetLineNumber(v,__LINE__|0x6000000);
-# define VdbeCoverageNeverNull(v) \
- sqlite3VdbeSetLineNumber(v,__LINE__|0x4000000);
-# define VdbeCoverageNeverNullIf(v,x) \
- if(x)sqlite3VdbeSetLineNumber(v,__LINE__|0x4000000);
-# define VdbeCoverageEqNe(v) \
- sqlite3VdbeSetLineNumber(v,__LINE__|0x8000000);
+# define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2);
+# define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1);
# define VDBE_OFFSET_LINENO(x) (__LINE__+x)
#else
# define VdbeCoverage(v)
# define VdbeCoverageIf(v,x)
# define VdbeCoverageAlwaysTaken(v)
# define VdbeCoverageNeverTaken(v)
-# define VdbeCoverageNeverNull(v)
-# define VdbeCoverageNeverNullIf(v,x)
-# define VdbeCoverageEqNe(v)
# define VDBE_OFFSET_LINENO(x) 0
#endif
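/* Editor's illustrative sketch (not part of the patch): a minimal
** code-generator fragment showing how a branch opcode is tagged right
** after it is emitted, so a -DSQLITE_VDBE_COVERAGE build can report
** branches that are never taken.  The function and parameter names
** (exCodeIsNullJump, regVal, addrNull) are hypothetical. */
static void exCodeIsNullJump(Vdbe *v, int regVal, int addrNull){
  sqlite3VdbeAddOp2(v, OP_IsNull, regVal, addrNull); VdbeCoverage(v);
}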
@@ -15087,10 +14870,6 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const ch
# define sqlite3VdbeScanStatus(a,b,c,d,e)
#endif
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
-SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*);
-#endif
-
#endif /* SQLITE_VDBE_H */
/************** End of vdbe.h ************************************************/
@@ -15285,8 +15064,6 @@ SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno);
SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot);
SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot);
SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerSnapshotCheck(Pager *pPager, sqlite3_snapshot *pSnapshot);
-SQLITE_PRIVATE void sqlite3PagerSnapshotUnlock(Pager *pPager);
# endif
#else
# define sqlite3PagerUseWal(x,y) 0
@@ -16270,7 +16047,6 @@ struct sqlite3 {
#define SQLITE_EnableQPSG 0x00800000 /* Query Planner Stability Guarantee*/
#define SQLITE_TriggerEQP 0x01000000 /* Show trigger EXPLAIN QUERY PLAN */
#define SQLITE_ResetDatabase 0x02000000 /* Reset the database */
-#define SQLITE_LegacyAlter 0x04000000 /* Legacy ALTER TABLE behaviour */
/* Flags used only if debugging */
#ifdef SQLITE_DEBUG
@@ -16295,7 +16071,7 @@ struct sqlite3 {
** selectively disable various optimizations.
*/
#define SQLITE_QueryFlattener 0x0001 /* Query flattening */
- /* 0x0002 available for reuse */
+#define SQLITE_ColumnCache 0x0002 /* Column cache */
#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */
#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */
#define SQLITE_DistinctOpt 0x0010 /* DISTINCT using indexes */
@@ -16309,8 +16085,6 @@ struct sqlite3 {
/* TH3 expects the Stat34 ^^^^^^ value to be 0x0800. Don't change it */
#define SQLITE_PushDown 0x1000 /* The push-down optimization */
#define SQLITE_SimplifyJoin 0x2000 /* Convert LEFT JOIN to JOIN */
-#define SQLITE_SkipScan 0x4000 /* Skip-scans */
-#define SQLITE_PropagateConst 0x8000 /* The constant propagation opt */
#define SQLITE_AllOpts 0xffff /* All optimizations */
/*
@@ -16349,13 +16123,11 @@ struct sqlite3 {
*/
struct FuncDef {
i8 nArg; /* Number of arguments. -1 means unlimited */
- u32 funcFlags; /* Some combination of SQLITE_FUNC_* */
+ u16 funcFlags; /* Some combination of SQLITE_FUNC_* */
void *pUserData; /* User data parameter */
FuncDef *pNext; /* Next function with same name */
void (*xSFunc)(sqlite3_context*,int,sqlite3_value**); /* func or agg-step */
void (*xFinalize)(sqlite3_context*); /* Agg finalizer */
- void (*xValue)(sqlite3_context*); /* Current agg value */
- void (*xInverse)(sqlite3_context*,int,sqlite3_value**); /* inverse agg-step */
const char *zName; /* SQL name of the function. */
union {
FuncDef *pHash; /* Next with a different name but the same hash */
@@ -16412,8 +16184,6 @@ struct FuncDestructor {
** single query - might change over time */
#define SQLITE_FUNC_AFFINITY 0x4000 /* Built-in affinity() function */
#define SQLITE_FUNC_OFFSET 0x8000 /* Built-in sqlite_offset() function */
-#define SQLITE_FUNC_WINDOW 0x10000 /* Built-in window-only function */
-#define SQLITE_FUNC_WINDOW_SIZE 0x20000 /* Requires partition size as arg. */
/*
** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
@@ -16448,12 +16218,6 @@ struct FuncDestructor {
** are interpreted in the same way as the first 4 parameters to
** FUNCTION().
**
-** WFUNCTION(zName, nArg, iArg, xStep, xFinal, xValue, xInverse)
-** Used to create an aggregate function definition implemented by
-** the C functions xStep and xFinal. The first four parameters
-** are interpreted in the same way as the first 4 parameters to
-** FUNCTION().
-**
** LIKEFUNC(zName, nArg, pArg, flags)
** Used to create a scalar function definition of a function zName
** that accepts nArg arguments and is implemented by a call to C
@@ -16464,35 +16228,31 @@ struct FuncDestructor {
*/
#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \
{nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \
{nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
#define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \
{nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8, \
- 0, 0, xFunc, 0, 0, 0, #zName, {0} }
+ 0, 0, xFunc, 0, #zName, {0} }
#define PURE_DATE(zName, nArg, iArg, bNC, xFunc) \
{nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|SQLITE_FUNC_CONSTANT, \
- (void*)&sqlite3Config, 0, xFunc, 0, 0, 0, #zName, {0} }
+ (void*)&sqlite3Config, 0, xFunc, 0, #zName, {0} }
#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
{nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
{nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- pArg, 0, xFunc, 0, 0, 0, #zName, }
+ pArg, 0, xFunc, 0, #zName, }
#define LIKEFUNC(zName, nArg, arg, flags) \
{nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \
- (void *)arg, 0, likeFunc, 0, 0, 0, #zName, {0} }
-#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal, xValue) \
+ (void *)arg, 0, likeFunc, 0, #zName, {0} }
+#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \
{nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,xValue,0,#zName, {0}}
+ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
#define AGGREGATE2(zName, nArg, arg, nc, xStep, xFinal, extraFlags) \
{nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|extraFlags, \
- SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,xFinal,0,#zName, {0}}
-
-#define WAGGREGATE(zName, nArg, arg, nc, xStep, xFinal, xValue, xInverse, f) \
- {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|f, \
- SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,xValue,xInverse,#zName, {0}}
+ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
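/* Editor's illustrative sketch (not part of the patch): how the macros
** above are typically used to build a table of built-in FuncDef entries.
** exAbsFunc, exCountStep, exCountFinal and exBuiltinFuncs are hypothetical
** placeholders, not symbols defined in this file. */
static void exAbsFunc(sqlite3_context *ctx, int n, sqlite3_value **ap){ (void)ctx; (void)n; (void)ap; }
static void exCountStep(sqlite3_context *ctx, int n, sqlite3_value **ap){ (void)ctx; (void)n; (void)ap; }
static void exCountFinal(sqlite3_context *ctx){ (void)ctx; }
static FuncDef exBuiltinFuncs[] = {
  FUNCTION(abs,    1, 0, 0, exAbsFunc),                    /* scalar abs(X) */
  AGGREGATE(count, 0, 0, 0, exCountStep, exCountFinal),    /* count(*) aggregate */
};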
/*
** All current savepoints are stored in a linked list starting at
@@ -16978,7 +16738,6 @@ struct Index {
tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
#endif
- Bitmask colNotIdxed; /* 0 for unindexed columns in pTab */
};
/*
@@ -17017,11 +16776,9 @@ struct IndexSample {
** Each token coming out of the lexer is an instance of
** this structure. Tokens are also used as part of an expression.
**
-** The memory that "z" points to is owned by other objects. Take care
-** that the owner of the "z" string does not deallocate the string before
-** the Token goes out of scope! Very often, the "z" points to some place
-** in the middle of the Parse.zSql text. But it might also point to a
-** static string.
+** Note if Token.z==0 then Token.dyn and Token.n are undefined and
+** may contain random values. Do not make any assumptions about Token.dyn
+** and Token.n when Token.z==0.
*/
struct Token {
const char *z; /* Text of the token. Not NULL-terminated! */
@@ -17196,9 +16953,6 @@ struct Expr {
AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */
Table *pTab; /* Table for TK_COLUMN expressions. Can be NULL
** for a column of an index on an expression */
-#ifndef SQLITE_OMIT_WINDOWFUNC
- Window *pWin; /* Window definition for window functions */
-#endif
};
/*
@@ -17207,7 +16961,7 @@ struct Expr {
#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */
#define EP_Agg 0x000002 /* Contains one or more aggregate functions */
#define EP_HasFunc 0x000004 /* Contains one or more functions of any kind */
-#define EP_FixedCol 0x000008 /* TK_Column with a known fixed value */
+ /* 0x000008 // available for use */
#define EP_Distinct 0x000010 /* Aggregate function with DISTINCT keyword */
#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */
#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */
@@ -17329,6 +17083,31 @@ struct IdList {
int nId; /* Number of identifiers on the list */
};
+/*
+** The bitmask datatype defined below is used for various optimizations.
+**
+** Changing this from a 64-bit to a 32-bit type limits the number of
+** tables in a join to 32 instead of 64. But it also reduces the size
+** of the library by 738 bytes on ix86.
+*/
+#ifdef SQLITE_BITMASK_TYPE
+ typedef SQLITE_BITMASK_TYPE Bitmask;
+#else
+ typedef u64 Bitmask;
+#endif
+
+/*
+** The number of bits in a Bitmask. "BMS" means "BitMask Size".
+*/
+#define BMS ((int)(sizeof(Bitmask)*8))
+
+/*
+** A bit in a Bitmask
+*/
+#define MASKBIT(n) (((Bitmask)1)<<(n))
+#define MASKBIT32(n) (((unsigned int)1)<<(n))
+#define ALLBITS ((Bitmask)-1)
+
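/* Editor's illustrative sketch (not part of the patch): the usual way a
** Bitmask is used during WHERE-clause analysis - one bit per FROM-clause
** table, combined and tested with MASKBIT().  The function name
** exTablesReferenced and the table indexes are hypothetical. */
static Bitmask exTablesReferenced(void){
  Bitmask m = 0;
  m |= MASKBIT(0);              /* first table in the join is referenced */
  m |= MASKBIT(3);              /* so is the fourth */
  assert( (m & MASKBIT(3))!=0 );
  assert( (m & MASKBIT(1))==0 );
  return m;
}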
/*
** The following structure describes the FROM clause of a SELECT statement.
** Each table or subquery in the FROM clause is a separate element of
@@ -17460,7 +17239,6 @@ struct NameContext {
int nRef; /* Number of names resolved by this context */
int nErr; /* Number of errors encountered while resolving names */
u16 ncFlags; /* Zero or more NC_* flags defined below */
- Select *pWinSelect; /* SELECT statement for any window functions */
};
/*
@@ -17483,7 +17261,6 @@ struct NameContext {
#define NC_UUpsert 0x0200 /* True if uNC.pUpsert is used */
#define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. See note above */
#define NC_Complex 0x2000 /* True if a function or subquery seen */
-#define NC_AllowWin 0x4000 /* Window functions are allowed here */
/*
** An instance of the following object describes a single ON CONFLICT
@@ -17538,7 +17315,9 @@ struct Select {
LogEst nSelectRow; /* Estimated number of result rows */
u32 selFlags; /* Various SF_* values */
int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
- u32 selId; /* Unique identifier number for this SELECT */
+#if SELECTTRACE_ENABLED
+ char zSelName[12]; /* Symbolic name of this SELECT use for debugging */
+#endif
int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */
SrcList *pSrc; /* The FROM clause */
Expr *pWhere; /* The WHERE clause */
@@ -17549,10 +17328,6 @@ struct Select {
Select *pNext; /* Next select to the left in a compound */
Expr *pLimit; /* LIMIT expression. NULL means not used. */
With *pWith; /* WITH clause attached to this select. Or NULL. */
-#ifndef SQLITE_OMIT_WINDOWFUNC
- Window *pWin; /* List of window functions */
- Window *pWinDefn; /* List of named window definitions */
-#endif
};
/*
@@ -17696,6 +17471,13 @@ struct AutoincInfo {
int regCtr; /* Memory register holding the rowid counter */
};
+/*
+** Size of the column cache
+*/
+#ifndef SQLITE_N_COLCACHE
+# define SQLITE_N_COLCACHE 10
+#endif
+
/*
** At least one instance of the following structure is created for each
** trigger that may be fired while parsing an INSERT, UPDATE or DELETE
@@ -17771,6 +17553,7 @@ struct Parse {
u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */
u8 okConstFactor; /* OK to factor out constants */
u8 disableLookaside; /* Number of times lookaside has been disabled */
+ u8 nColCache; /* Number of entries in aColCache[] */
int nRangeReg; /* Size of the temporary register block */
int iRangeReg; /* First register in temporary register block */
int nErr; /* Number of errors seen */
@@ -17780,6 +17563,8 @@ struct Parse {
int szOpAlloc; /* Bytes of memory space allocated for Vdbe.aOp[] */
int iSelfTab; /* Table associated with an index on expr, or negative
** of the base register during check-constraint eval */
+ int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */
+ int iCacheCnt; /* Counter used to generate aColCache[].lru values */
int nLabel; /* Number of labels used */
int *aLabel; /* Space to hold the labels */
ExprList *pConstExpr;/* Constant expressions */
@@ -17789,7 +17574,9 @@ struct Parse {
int regRowid; /* Register holding rowid of CREATE TABLE entry */
int regRoot; /* Register holding root page number for new objects */
int nMaxArg; /* Max args passed to user function by sub-program */
- int nSelect; /* Number of SELECT stmts. Counter for Select.selId */
+#if SELECTTRACE_ENABLED
+ int nSelect; /* Number of SELECT statements seen */
+#endif
#ifndef SQLITE_OMIT_SHARED_CACHE
int nTableLock; /* Number of locks in aTableLock */
TableLock *aTableLock; /* Required table locks for shared-cache mode */
@@ -17809,9 +17596,17 @@ struct Parse {
** Fields above must be initialized to zero. The fields that follow,
** down to the beginning of the recursive section, do not need to be
** initialized as they will be set before being used. The boundary is
- ** determined by offsetof(Parse,aTempReg).
+ ** determined by offsetof(Parse,aColCache).
**************************************************************************/
+ struct yColCache {
+ int iTable; /* Table cursor number */
+ i16 iColumn; /* Table column number */
+ u8 tempReg; /* iReg is a temp register that needs to be freed */
+ int iLevel; /* Nesting level */
+ int iReg; /* Reg with value of this column. 0 means none. */
+ int lru; /* Least recently used entry has the smallest value */
+ } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
int aTempReg[8]; /* Holding area for temporary registers */
Token sNameToken; /* Token with unqualified schema object name */
@@ -17826,10 +17621,8 @@ struct Parse {
ynVar nVar; /* Number of '?' variables seen in the SQL so far */
u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */
u8 explain; /* True if the EXPLAIN flag is found on the query */
-#if !(defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_OMIT_ALTERTABLE))
- u8 eParseMode; /* PARSE_MODE_XXX constant */
-#endif
#ifndef SQLITE_OMIT_VIRTUALTABLE
+ u8 declareVtab; /* True if inside sqlite3_declare_vtab() */
int nVtabLock; /* Number of virtual tables to lock */
#endif
int nHeight; /* Expression tree height of current sub-select */
@@ -17840,7 +17633,6 @@ struct Parse {
Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */
const char *zTail; /* All SQL text past the last semicolon parsed */
Table *pNewTable; /* A table being constructed by CREATE TABLE */
- Index *pNewIndex; /* An index being constructed by CREATE INDEX */
Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */
const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */
#ifndef SQLITE_OMIT_VIRTUALTABLE
@@ -17851,20 +17643,12 @@ struct Parse {
TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */
With *pWith; /* Current WITH clause, or NULL */
With *pWithToFree; /* Free this WITH object at the end of the parse */
-#ifndef SQLITE_OMIT_ALTERTABLE
- RenameToken *pRename; /* Tokens subject to renaming by ALTER TABLE */
-#endif
};
-#define PARSE_MODE_NORMAL 0
-#define PARSE_MODE_DECLARE_VTAB 1
-#define PARSE_MODE_RENAME_COLUMN 2
-#define PARSE_MODE_RENAME_TABLE 3
-
/*
** Sizes and pointers of various parts of the Parse object.
*/
-#define PARSE_HDR_SZ offsetof(Parse,aTempReg) /* Recursive part w/o aColCache*/
+#define PARSE_HDR_SZ offsetof(Parse,aColCache) /* Recursive part w/o aColCache*/
#define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Recursive part */
#define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */
#define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */
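/* Editor's illustrative sketch (not part of the patch): the size/offset
** macros above exist so that only the leading portion of a Parse object
** (the fields that must start out zero) plus the non-recursive tail need
** to be cleared before use.  The helper name exParseZeroInit is
** hypothetical. */
static void exParseZeroInit(Parse *p){
  memset(p, 0, PARSE_HDR_SZ);               /* fields before aColCache */
  memset(PARSE_TAIL(p), 0, PARSE_TAIL_SZ);  /* non-recursive tail */
}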
@@ -17875,19 +17659,7 @@ struct Parse {
#ifdef SQLITE_OMIT_VIRTUALTABLE
#define IN_DECLARE_VTAB 0
#else
- #define IN_DECLARE_VTAB (pParse->eParseMode==PARSE_MODE_DECLARE_VTAB)
-#endif
-
-#if defined(SQLITE_OMIT_ALTERTABLE)
- #define IN_RENAME_OBJECT 0
-#else
- #define IN_RENAME_OBJECT (pParse->eParseMode>=PARSE_MODE_RENAME_COLUMN)
-#endif
-
-#if defined(SQLITE_OMIT_VIRTUALTABLE) && defined(SQLITE_OMIT_ALTERTABLE)
- #define IN_SPECIAL_PARSE 0
-#else
- #define IN_SPECIAL_PARSE (pParse->eParseMode!=PARSE_MODE_NORMAL)
+ #define IN_DECLARE_VTAB (pParse->declareVtab)
#endif
/*
@@ -18066,14 +17838,8 @@ typedef struct {
char **pzErrMsg; /* Error message stored here */
int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */
int rc; /* Result code stored here */
- u32 mInitFlags; /* Flags controlling error messages */
} InitData;
-/*
-** Allowed values for mInitFlags
-*/
-#define INITFLAG_AlterTable 0x0001 /* This is a reparse after ALTER TABLE */
-
/*
** Structure containing global configuration data for the SQLite library.
**
@@ -18124,7 +17890,7 @@ struct Sqlite3Config {
/* The following callback (if not NULL) is invoked on every VDBE branch
** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE.
*/
- void (*xVdbeBranch)(void*,unsigned iSrcLine,u8 eThis,u8 eMx); /* Callback */
+ void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */
void *pVdbeBranchArg; /* 1st argument */
#endif
#ifndef SQLITE_UNTESTABLE
@@ -18175,9 +17941,6 @@ struct Walker {
struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */
ExprList *pGroupBy; /* GROUP BY clause */
Select *pSelect; /* HAVING to WHERE clause ctx */
- struct WindowRewrite *pRewrite; /* Window rewrite context */
- struct WhereConst *pConst; /* WHERE clause constants */
- struct RenameCtx *pRename; /* RENAME COLUMN context */
} u;
};
@@ -18228,68 +17991,6 @@ struct TreeView {
};
#endif /* SQLITE_DEBUG */
-/*
-** This object is used in various ways, all related to window functions
-**
-** (1) A single instance of this structure is attached to the
-** Expr.pWin field for each window function in an expression tree.
-** This object holds the information contained in the OVER clause,
-** plus additional fields used during code generation.
-**
-** (2) All window functions in a single SELECT form a linked-list
-** attached to Select.pWin. The Window.pFunc and Window.pExpr
-** fields point back to the expression that is the window function.
-**
-** (3) The terms of the WINDOW clause of a SELECT are instances of this
-** object on a linked list attached to Select.pWinDefn.
-**
-** The uses (1) and (2) are really the same Window object that just happens
-** to be accessible in two different ways. Use (3) is a separate object.
-*/
-struct Window {
- char *zName; /* Name of window (may be NULL) */
- ExprList *pPartition; /* PARTITION BY clause */
- ExprList *pOrderBy; /* ORDER BY clause */
- u8 eType; /* TK_RANGE or TK_ROWS */
- u8 eStart; /* UNBOUNDED, CURRENT, PRECEDING or FOLLOWING */
- u8 eEnd; /* UNBOUNDED, CURRENT, PRECEDING or FOLLOWING */
- Expr *pStart; /* Expression for "<expr> PRECEDING" */
- Expr *pEnd; /* Expression for "<expr> FOLLOWING" */
- Window *pNextWin; /* Next window function belonging to this SELECT */
- Expr *pFilter; /* The FILTER expression */
- FuncDef *pFunc; /* The function */
- int iEphCsr; /* Partition buffer or Peer buffer */
- int regAccum;
- int regResult;
- int csrApp; /* Function cursor (used by min/max) */
- int regApp; /* Function register (also used by min/max) */
- int regPart; /* First in a set of registers holding PARTITION BY
- ** and ORDER BY values for the window */
- Expr *pOwner; /* Expression object this window is attached to */
- int nBufferCol; /* Number of columns in buffer table */
- int iArgCol; /* Offset of first argument for this function */
-};
-
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3*, Window*);
-SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p);
-SQLITE_PRIVATE Window *sqlite3WindowAlloc(Parse*, int, int, Expr*, int , Expr*);
-SQLITE_PRIVATE void sqlite3WindowAttach(Parse*, Expr*, Window*);
-SQLITE_PRIVATE int sqlite3WindowCompare(Parse*, Window*, Window*);
-SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse*, Window*);
-SQLITE_PRIVATE void sqlite3WindowCodeStep(Parse*, Select*, WhereInfo*, int, int);
-SQLITE_PRIVATE int sqlite3WindowRewrite(Parse*, Select*);
-SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse*, struct SrcList_item*);
-SQLITE_PRIVATE void sqlite3WindowUpdate(Parse*, Window*, Window*, FuncDef*);
-SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p);
-SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p);
-SQLITE_PRIVATE void sqlite3WindowFunctions(void);
-#else
-# define sqlite3WindowDelete(a,b)
-# define sqlite3WindowFunctions()
-# define sqlite3WindowAttach(a,b,c)
-#endif
-
/*
** Assuming zIn points to the first byte of a UTF-8 character,
** advance zIn to point to the first byte of the next UTF-8 character.
@@ -18377,7 +18078,9 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno);
# define sqlite3Tolower(x) tolower((unsigned char)(x))
# define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`')
#endif
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
SQLITE_PRIVATE int sqlite3IsIdChar(u8);
+#endif
/*
** Internal function prototypes
@@ -18502,10 +18205,6 @@ SQLITE_PRIVATE void sqlite3TreeViewBareExprList(TreeView*, const ExprList*, co
SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8);
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView*, const Window*, u8);
-SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView*, const Window*, u8);
-#endif
#endif
@@ -18530,7 +18229,7 @@ SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*);
SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*);
SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*);
-SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*);
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
@@ -18542,7 +18241,6 @@ SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**);
-SQLITE_PRIVATE int sqlite3InitOne(sqlite3*, int, char**, u32);
SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int);
#ifndef SQLITE_OMIT_VIRTUALTABLE
SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3*,const char *zName);
@@ -18592,9 +18290,8 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*);
SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*);
#endif
-SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*);
-SQLITE_PRIVATE void sqlite3RowSetDelete(void*);
-SQLITE_PRIVATE void sqlite3RowSetClear(void*);
+SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int);
+SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*);
SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64);
SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64);
SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*);
@@ -18613,7 +18310,6 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
-SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*);
#ifndef SQLITE_OMIT_AUTOINCREMENT
SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse);
SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse);
@@ -18623,7 +18319,7 @@ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse);
#endif
SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int, Upsert*);
SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*);
-SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse*, IdList*, Token*);
+SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*);
SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*);
SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int);
SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*);
@@ -18658,7 +18354,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*);
SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo*);
+SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*);
SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*);
@@ -18668,8 +18364,15 @@ SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*);
#define ONEPASS_MULTI 2 /* ONEPASS is valid for multiple rows */
SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int);
SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8);
+SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg(Parse*, Table*, int, int, int);
SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int);
SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int);
+SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int);
+SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*);
+SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*);
+SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int);
+SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*);
+SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int);
SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
@@ -18757,6 +18460,11 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int);
SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int);
SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*);
SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int);
+#if SELECTTRACE_ENABLED
+SQLITE_PRIVATE void sqlite3SelectSetName(Select*,const char*);
+#else
+# define sqlite3SelectSetName(A,B)
+#endif
SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int);
SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8);
SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void);
@@ -18785,12 +18493,12 @@ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, i
SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*);
SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*,
const char*,const char*);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(Parse*,Token*, IdList*,
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*,
Select*,u8,Upsert*,
const char*,const char*);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(Parse*,Token*,ExprList*, Expr*, u8,
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8,
const char*,const char*);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(Parse*,Token*, Expr*,
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*,
const char*,const char*);
SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*);
SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*);
@@ -18905,7 +18613,6 @@ SQLITE_PRIVATE int sqlite3MemdbInit(void);
SQLITE_PRIVATE const char *sqlite3ErrStr(int);
SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
-SQLITE_PRIVATE int sqlite3IsBinary(const CollSeq*);
SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName);
SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr);
SQLITE_PRIVATE CollSeq *sqlite3ExprNNCollSeq(Parse *pParse, Expr *pExpr);
@@ -18958,10 +18665,9 @@ SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int);
SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*);
SQLITE_PRIVATE void sqlite3AlterFunctions(void);
SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*);
-SQLITE_PRIVATE void sqlite3AlterRenameColumn(Parse*, SrcList*, Token*, Token*);
SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *);
SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...);
-SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*, int);
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*);
SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr *, int, int);
SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*);
SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p);
@@ -18974,10 +18680,6 @@ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const
SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int);
SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *);
SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *);
-SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse*, void*, Token*);
-SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse*, void *pTo, void *pFrom);
-SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse*, Expr*);
-SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse*, ExprList*);
SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*);
SQLITE_PRIVATE char sqlite3AffinityType(const char*, Column*);
SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*);
@@ -18996,17 +18698,12 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int);
SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*);
SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*);
SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*);
-SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(Parse*, ExprList*, int, int);
-
#ifdef SQLITE_DEBUG
SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*);
#endif
SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *,
void (*)(sqlite3_context*,int,sqlite3_value **),
- void (*)(sqlite3_context*,int,sqlite3_value **),
- void (*)(sqlite3_context*),
- void (*)(sqlite3_context*),
- void (*)(sqlite3_context*,int,sqlite3_value **),
+ void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*),
FuncDestructor *pDestructor
);
SQLITE_PRIVATE void sqlite3NoopDestructor(void*);
@@ -19047,7 +18744,6 @@ SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64), Parse*);
SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
#endif
SQLITE_PRIVATE void sqlite3Parser(void*, int, Token);
-SQLITE_PRIVATE int sqlite3ParserFallback(int);
#ifdef YYTRACKMAXSTACKDEPTH
SQLITE_PRIVATE int sqlite3ParserStackPeak(void*);
#endif
@@ -19748,7 +19444,6 @@ struct VdbeCursor {
Bool isEphemeral:1; /* True for an ephemeral table */
Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
- Bool seekHit:1; /* See the OP_SeekHit and OP_IfNoHope opcodes */
Btree *pBtx; /* Separate file holding temporary table */
i64 seqCount; /* Sequence counter */
int *aAltMap; /* Mapping from table to index column numbers */
@@ -19832,9 +19527,6 @@ struct VdbeFrame {
void *token; /* Copy of SubProgram.token */
i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */
AuxData *pAuxData; /* Linked list of auxdata allocations */
-#if SQLITE_DEBUG
- u32 iFrameMagic; /* magic number for sanity checking */
-#endif
int nCursor; /* Number of entries in apCsr */
int pc; /* Program Counter in parent (calling) frame */
int nOp; /* Size of aOp array */
@@ -19845,13 +19537,6 @@ struct VdbeFrame {
int nDbChange; /* Value of db->nChange */
};
-/* Magic number for sanity checking on VdbeFrame objects */
-#define SQLITE_FRAME_MAGIC 0x879fb71e
-
-/*
-** Return a pointer to the array of registers allocated for use
-** by a VdbeFrame.
-*/
#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))])
/*
@@ -19866,6 +19551,8 @@ struct sqlite3_value {
int nZero; /* Extra zero bytes when MEM_Zero and MEM_Blob set */
const char *zPType; /* Pointer type when MEM_Term|MEM_Subtype|MEM_Null */
FuncDef *pDef; /* Used only when flags==MEM_Agg */
+ RowSet *pRowSet; /* Used only when flags==MEM_RowSet */
+ VdbeFrame *pFrame; /* Used when flags==MEM_Frame */
} u;
u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */
@@ -19880,7 +19567,7 @@ struct sqlite3_value {
void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */
#ifdef SQLITE_DEBUG
Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */
- u16 mScopyFlags; /* flags value immediately after the shallow copy */
+ void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */
#endif
};
@@ -19909,8 +19596,8 @@ struct sqlite3_value {
#define MEM_Real 0x0008 /* Value is a real number */
#define MEM_Blob 0x0010 /* Value is a BLOB */
#define MEM_AffMask 0x001f /* Mask of affinity bits */
-/* Available 0x0020 */
-/* Available 0x0040 */
+#define MEM_RowSet 0x0020 /* Value is a RowSet object */
+#define MEM_Frame 0x0040 /* Value is a VdbeFrame object */
#define MEM_Undefined 0x0080 /* Value is undefined */
#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */
#define MEM_TypeMask 0xc1ff /* Mask of type bits */
@@ -19937,7 +19624,7 @@ struct sqlite3_value {
** that needs to be deallocated to avoid a leak.
*/
#define VdbeMemDynamic(X) \
- (((X)->flags&(MEM_Agg|MEM_Dyn))!=0)
+ (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0)
/*
** Clear any existing type flags from a Mem and replace them with f
@@ -20057,9 +19744,9 @@ struct Vdbe {
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
u8 prepFlags; /* SQLITE_PREPARE_* flags */
- bft expired:2; /* 1: recompile VM immediately 2: when convenient */
- bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft expired:1; /* True if the VM needs to be recompiled */
bft doingRerun:1; /* True if rerunning after an auto-reprepare */
+ bft explain:2; /* True if EXPLAIN present on SQL command */
bft changeCntOn:1; /* True to update the change-counter */
bft runOnlyOnce:1; /* Automatically expire on reset */
bft usesStmtJournal:1; /* True if uses a statement journal */
@@ -20120,6 +19807,9 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*);
void sqliteVdbePopStack(Vdbe*,int);
SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor**, int*);
SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*);
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*);
+#endif
SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32);
SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8);
SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int, u32*);
@@ -20150,10 +19840,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetPointer(Mem*, void*, const char*, void(*)(v
SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16);
SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*);
-#endif
-SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*);
+SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*);
@@ -20167,18 +19854,11 @@ SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8);
SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem*, Mem*, FuncDef*);
-#endif
SQLITE_PRIVATE const char *sqlite3OpcodeName(int);
SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve);
SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n);
SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3VdbeFrameIsValid(VdbeFrame*);
-#endif
-SQLITE_PRIVATE void sqlite3VdbeFrameMemDel(void*); /* Destructor on Mem */
-SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); /* Actually deletes the Frame */
+SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*);
SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *);
#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int);
@@ -22276,12 +21956,9 @@ SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
** Unregister a VFS so that it is no longer accessible.
*/
SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
- MUTEX_LOGIC(sqlite3_mutex *mutex;)
-#ifndef SQLITE_OMIT_AUTOINIT
- int rc = sqlite3_initialize();
- if( rc ) return rc;
+#if SQLITE_THREADSAFE
+ sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
- MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
sqlite3_mutex_enter(mutex);
vfsUnlink(pVfs);
sqlite3_mutex_leave(mutex);
@@ -27612,12 +27289,7 @@ SQLITE_API void sqlite3_str_vappendf(
if( bufpt==0 ){
bufpt = "";
}else if( xtype==etDYNSTRING ){
- if( pAccum->nChar==0
- && pAccum->mxAlloc
- && width==0
- && precision<0
- && pAccum->accError==0
- ){
+ if( pAccum->nChar==0 && pAccum->mxAlloc && width==0 && precision<0 ){
/* Special optimization for sqlite3_mprintf("%z..."):
** Extend an existing memory allocation rather than creating
** a new one. */
@@ -28315,13 +27987,21 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
sqlite3TreeViewPush(pView, 1);
}
do{
+#if SELECTTRACE_ENABLED
sqlite3TreeViewLine(pView,
- "SELECT%s%s (%u/%p) selFlags=0x%x nSelectRow=%d",
+ "SELECT%s%s (%s/%p) selFlags=0x%x nSelectRow=%d",
((p->selFlags & SF_Distinct) ? " DISTINCT" : ""),
((p->selFlags & SF_Aggregate) ? " agg_flag" : ""),
- p->selId, p, p->selFlags,
+ p->zSelName, p, p->selFlags,
(int)p->nSelectRow
);
+#else
+ sqlite3TreeViewLine(pView, "SELECT%s%s (0x%p) selFlags=0x%x nSelectRow=%d",
+ ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""),
+ ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""), p, p->selFlags,
+ (int)p->nSelectRow
+ );
+#endif
if( cnt++ ) sqlite3TreeViewPop(pView);
if( p->pPrior ){
n = 1000;
@@ -28333,23 +28013,8 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
if( p->pHaving ) n++;
if( p->pOrderBy ) n++;
if( p->pLimit ) n++;
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( p->pWin ) n++;
- if( p->pWinDefn ) n++;
-#endif
}
sqlite3TreeViewExprList(pView, p->pEList, (n--)>0, "result-set");
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( p->pWin ){
- Window *pX;
- pView = sqlite3TreeViewPush(pView, (n--)>0);
- sqlite3TreeViewLine(pView, "window-functions");
- for(pX=p->pWin; pX; pX=pX->pNextWin){
- sqlite3TreeViewWinFunc(pView, pX, pX->pNextWin!=0);
- }
- sqlite3TreeViewPop(pView);
- }
-#endif
if( p->pSrc && p->pSrc->nSrc ){
int i;
pView = sqlite3TreeViewPush(pView, (n--)>0);
@@ -28399,16 +28064,6 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
sqlite3TreeViewExpr(pView, p->pHaving, 0);
sqlite3TreeViewPop(pView);
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( p->pWinDefn ){
- Window *pX;
- sqlite3TreeViewItem(pView, "WINDOW", (n--)>0);
- for(pX=p->pWinDefn; pX; pX=pX->pNextWin){
- sqlite3TreeViewWindow(pView, pX, pX->pNextWin!=0);
- }
- sqlite3TreeViewPop(pView);
- }
-#endif
if( p->pOrderBy ){
sqlite3TreeViewExprList(pView, p->pOrderBy, (n--)>0, "ORDERBY");
}
@@ -28436,83 +28091,6 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
sqlite3TreeViewPop(pView);
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-/*
-** Generate a description of starting or stopping bounds
-*/
-SQLITE_PRIVATE void sqlite3TreeViewBound(
- TreeView *pView, /* View context */
- u8 eBound, /* UNBOUNDED, CURRENT, PRECEDING, FOLLOWING */
- Expr *pExpr, /* Value for PRECEDING or FOLLOWING */
- u8 moreToFollow /* True if more to follow */
-){
- switch( eBound ){
- case TK_UNBOUNDED: {
- sqlite3TreeViewItem(pView, "UNBOUNDED", moreToFollow);
- sqlite3TreeViewPop(pView);
- break;
- }
- case TK_CURRENT: {
- sqlite3TreeViewItem(pView, "CURRENT", moreToFollow);
- sqlite3TreeViewPop(pView);
- break;
- }
- case TK_PRECEDING: {
- sqlite3TreeViewItem(pView, "PRECEDING", moreToFollow);
- sqlite3TreeViewExpr(pView, pExpr, 0);
- sqlite3TreeViewPop(pView);
- break;
- }
- case TK_FOLLOWING: {
- sqlite3TreeViewItem(pView, "FOLLOWING", moreToFollow);
- sqlite3TreeViewExpr(pView, pExpr, 0);
- sqlite3TreeViewPop(pView);
- break;
- }
- }
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
-#ifndef SQLITE_OMIT_WINDOWFUNC
-/*
-** Generate a human-readable explanation for a Window object
-*/
-SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u8 more){
- pView = sqlite3TreeViewPush(pView, more);
- if( pWin->zName ){
- sqlite3TreeViewLine(pView, "OVER %s", pWin->zName);
- }else{
- sqlite3TreeViewLine(pView, "OVER");
- }
- if( pWin->pPartition ){
- sqlite3TreeViewExprList(pView, pWin->pPartition, 1, "PARTITION-BY");
- }
- if( pWin->pOrderBy ){
- sqlite3TreeViewExprList(pView, pWin->pOrderBy, 1, "ORDER-BY");
- }
- if( pWin->eType ){
- sqlite3TreeViewItem(pView, pWin->eType==TK_RANGE ? "RANGE" : "ROWS", 0);
- sqlite3TreeViewBound(pView, pWin->eStart, pWin->pStart, 1);
- sqlite3TreeViewBound(pView, pWin->eEnd, pWin->pEnd, 0);
- sqlite3TreeViewPop(pView);
- }
- sqlite3TreeViewPop(pView);
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
-#ifndef SQLITE_OMIT_WINDOWFUNC
-/*
-** Generate a human-readable explanation for a Window Function object
-*/
-SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView *pView, const Window *pWin, u8 more){
- pView = sqlite3TreeViewPush(pView, more);
- sqlite3TreeViewLine(pView, "WINFUNC %s(%d)",
- pWin->pFunc->zName, pWin->pFunc->nArg);
- sqlite3TreeViewWindow(pView, pWin, 0);
- sqlite3TreeViewPop(pView);
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
/*
** Generate a human-readable explanation of an expression tree.
*/
@@ -28550,9 +28128,6 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewLine(pView, "{%d:%d}%s",
pExpr->iTable, pExpr->iColumn, zFlgs);
}
- if( ExprHasProperty(pExpr, EP_FixedCol) ){
- sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
- }
break;
}
case TK_INTEGER: {
@@ -28666,17 +28241,10 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
case TK_AGG_FUNCTION:
case TK_FUNCTION: {
ExprList *pFarg; /* List of function arguments */
- Window *pWin;
if( ExprHasProperty(pExpr, EP_TokenOnly) ){
pFarg = 0;
- pWin = 0;
}else{
pFarg = pExpr->x.pList;
-#ifndef SQLITE_OMIT_WINDOWFUNC
- pWin = pExpr->pWin;
-#else
- pWin = 0;
-#endif
}
if( pExpr->op==TK_AGG_FUNCTION ){
sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q",
@@ -28685,13 +28253,8 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewLine(pView, "FUNCTION %Q", pExpr->u.zToken);
}
if( pFarg ){
- sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0);
+ sqlite3TreeViewExprList(pView, pFarg, 0, 0);
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pWin ){
- sqlite3TreeViewWindow(pView, pWin, 0);
- }
-#endif
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
@@ -31723,52 +31286,52 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 1 */ "AutoCommit" OpHelp(""),
/* 2 */ "Transaction" OpHelp(""),
/* 3 */ "SorterNext" OpHelp(""),
- /* 4 */ "Prev" OpHelp(""),
- /* 5 */ "Next" OpHelp(""),
- /* 6 */ "Checkpoint" OpHelp(""),
- /* 7 */ "JournalMode" OpHelp(""),
- /* 8 */ "Vacuum" OpHelp(""),
- /* 9 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
- /* 10 */ "VUpdate" OpHelp("data=r[P3@P2]"),
- /* 11 */ "Goto" OpHelp(""),
- /* 12 */ "Gosub" OpHelp(""),
- /* 13 */ "InitCoroutine" OpHelp(""),
- /* 14 */ "Yield" OpHelp(""),
- /* 15 */ "MustBeInt" OpHelp(""),
- /* 16 */ "Jump" OpHelp(""),
- /* 17 */ "Once" OpHelp(""),
- /* 18 */ "If" OpHelp(""),
+ /* 4 */ "PrevIfOpen" OpHelp(""),
+ /* 5 */ "NextIfOpen" OpHelp(""),
+ /* 6 */ "Prev" OpHelp(""),
+ /* 7 */ "Next" OpHelp(""),
+ /* 8 */ "Checkpoint" OpHelp(""),
+ /* 9 */ "JournalMode" OpHelp(""),
+ /* 10 */ "Vacuum" OpHelp(""),
+ /* 11 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
+ /* 12 */ "VUpdate" OpHelp("data=r[P3@P2]"),
+ /* 13 */ "Goto" OpHelp(""),
+ /* 14 */ "Gosub" OpHelp(""),
+ /* 15 */ "InitCoroutine" OpHelp(""),
+ /* 16 */ "Yield" OpHelp(""),
+ /* 17 */ "MustBeInt" OpHelp(""),
+ /* 18 */ "Jump" OpHelp(""),
/* 19 */ "Not" OpHelp("r[P2]= !r[P1]"),
- /* 20 */ "IfNot" OpHelp(""),
- /* 21 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"),
- /* 22 */ "SeekLT" OpHelp("key=r[P3@P4]"),
- /* 23 */ "SeekLE" OpHelp("key=r[P3@P4]"),
- /* 24 */ "SeekGE" OpHelp("key=r[P3@P4]"),
- /* 25 */ "SeekGT" OpHelp("key=r[P3@P4]"),
- /* 26 */ "IfNoHope" OpHelp("key=r[P3@P4]"),
- /* 27 */ "NoConflict" OpHelp("key=r[P3@P4]"),
- /* 28 */ "NotFound" OpHelp("key=r[P3@P4]"),
- /* 29 */ "Found" OpHelp("key=r[P3@P4]"),
- /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"),
- /* 31 */ "NotExists" OpHelp("intkey=r[P3]"),
- /* 32 */ "Last" OpHelp(""),
- /* 33 */ "IfSmaller" OpHelp(""),
- /* 34 */ "SorterSort" OpHelp(""),
- /* 35 */ "Sort" OpHelp(""),
- /* 36 */ "Rewind" OpHelp(""),
- /* 37 */ "IdxLE" OpHelp("key=r[P3@P4]"),
- /* 38 */ "IdxGT" OpHelp("key=r[P3@P4]"),
- /* 39 */ "IdxLT" OpHelp("key=r[P3@P4]"),
- /* 40 */ "IdxGE" OpHelp("key=r[P3@P4]"),
- /* 41 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
- /* 42 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 20 */ "Once" OpHelp(""),
+ /* 21 */ "If" OpHelp(""),
+ /* 22 */ "IfNot" OpHelp(""),
+ /* 23 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"),
+ /* 24 */ "SeekLT" OpHelp("key=r[P3@P4]"),
+ /* 25 */ "SeekLE" OpHelp("key=r[P3@P4]"),
+ /* 26 */ "SeekGE" OpHelp("key=r[P3@P4]"),
+ /* 27 */ "SeekGT" OpHelp("key=r[P3@P4]"),
+ /* 28 */ "NoConflict" OpHelp("key=r[P3@P4]"),
+ /* 29 */ "NotFound" OpHelp("key=r[P3@P4]"),
+ /* 30 */ "Found" OpHelp("key=r[P3@P4]"),
+ /* 31 */ "SeekRowid" OpHelp("intkey=r[P3]"),
+ /* 32 */ "NotExists" OpHelp("intkey=r[P3]"),
+ /* 33 */ "Last" OpHelp(""),
+ /* 34 */ "IfSmaller" OpHelp(""),
+ /* 35 */ "SorterSort" OpHelp(""),
+ /* 36 */ "Sort" OpHelp(""),
+ /* 37 */ "Rewind" OpHelp(""),
+ /* 38 */ "IdxLE" OpHelp("key=r[P3@P4]"),
+ /* 39 */ "IdxGT" OpHelp("key=r[P3@P4]"),
+ /* 40 */ "IdxLT" OpHelp("key=r[P3@P4]"),
+ /* 41 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 42 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
/* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"),
/* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"),
- /* 45 */ "Program" OpHelp(""),
- /* 46 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
- /* 47 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
- /* 48 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
- /* 49 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
+ /* 45 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 46 */ "Program" OpHelp(""),
+ /* 47 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
+ /* 48 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+ /* 49 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
/* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
/* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
/* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"),
@@ -31778,121 +31341,119 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
   /* 56 */ "Lt"              OpHelp("IF r[P3]<r[P1]"),
   /* 57 */ "Ge"              OpHelp("IF r[P3]>=r[P1]"),
/* 58 */ "ElseNotEq" OpHelp(""),
- /* 59 */ "IncrVacuum" OpHelp(""),
- /* 60 */ "VNext" OpHelp(""),
- /* 61 */ "Init" OpHelp("Start at P2"),
- /* 62 */ "PureFunc0" OpHelp(""),
- /* 63 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 64 */ "PureFunc" OpHelp(""),
- /* 65 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 66 */ "Return" OpHelp(""),
- /* 67 */ "EndCoroutine" OpHelp(""),
- /* 68 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
- /* 69 */ "Halt" OpHelp(""),
- /* 70 */ "Integer" OpHelp("r[P2]=P1"),
- /* 71 */ "Int64" OpHelp("r[P2]=P4"),
- /* 72 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
- /* 73 */ "Null" OpHelp("r[P2..P3]=NULL"),
- /* 74 */ "SoftNull" OpHelp("r[P1]=NULL"),
- /* 75 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
- /* 76 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
- /* 77 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
- /* 78 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
- /* 79 */ "SCopy" OpHelp("r[P2]=r[P1]"),
- /* 80 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
- /* 81 */ "ResultRow" OpHelp("output=r[P1@P2]"),
- /* 82 */ "CollSeq" OpHelp(""),
- /* 83 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
- /* 84 */ "RealAffinity" OpHelp(""),
- /* 85 */ "Cast" OpHelp("affinity(r[P1])"),
- /* 86 */ "Permutation" OpHelp(""),
- /* 87 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
- /* 88 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"),
- /* 89 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"),
- /* 90 */ "Column" OpHelp("r[P3]=PX"),
- /* 91 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
- /* 92 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
- /* 93 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
-  /* 94 */ "ShiftLeft"       OpHelp("r[P3]=r[P2]<<r[P1]"),
-  /* 95 */ "ShiftRight"      OpHelp("r[P3]=r[P2]>>r[P1]"),
- /* 96 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"),
- /* 97 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"),
- /* 98 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"),
- /* 99 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
- /* 100 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
- /* 101 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
- /* 102 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
- /* 103 */ "BitNot" OpHelp("r[P2]= ~r[P1]"),
- /* 104 */ "Count" OpHelp("r[P2]=count()"),
- /* 105 */ "ReadCookie" OpHelp(""),
- /* 106 */ "String8" OpHelp("r[P2]='P4'"),
- /* 107 */ "SetCookie" OpHelp(""),
- /* 108 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
- /* 109 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
- /* 110 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
- /* 111 */ "OpenDup" OpHelp(""),
- /* 112 */ "OpenAutoindex" OpHelp("nColumn=P2"),
- /* 113 */ "OpenEphemeral" OpHelp("nColumn=P2"),
- /* 114 */ "SorterOpen" OpHelp(""),
- /* 115 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
- /* 116 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
- /* 117 */ "Close" OpHelp(""),
- /* 118 */ "ColumnsUsed" OpHelp(""),
- /* 119 */ "SeekHit" OpHelp("seekHit=P2"),
- /* 120 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
- /* 121 */ "NewRowid" OpHelp("r[P2]=rowid"),
- /* 122 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
- /* 123 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
- /* 124 */ "Delete" OpHelp(""),
- /* 125 */ "ResetCount" OpHelp(""),
- /* 126 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
- /* 127 */ "SorterData" OpHelp("r[P2]=data"),
- /* 128 */ "RowData" OpHelp("r[P2]=data"),
- /* 129 */ "Rowid" OpHelp("r[P2]=rowid"),
- /* 130 */ "NullRow" OpHelp(""),
- /* 131 */ "SeekEnd" OpHelp(""),
- /* 132 */ "SorterInsert" OpHelp("key=r[P2]"),
- /* 133 */ "IdxInsert" OpHelp("key=r[P2]"),
- /* 134 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
- /* 135 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"),
- /* 136 */ "IdxRowid" OpHelp("r[P2]=rowid"),
- /* 137 */ "Destroy" OpHelp(""),
- /* 138 */ "Clear" OpHelp(""),
- /* 139 */ "ResetSorter" OpHelp(""),
- /* 140 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"),
- /* 141 */ "Real" OpHelp("r[P2]=P4"),
- /* 142 */ "SqlExec" OpHelp(""),
- /* 143 */ "ParseSchema" OpHelp(""),
- /* 144 */ "LoadAnalysis" OpHelp(""),
- /* 145 */ "DropTable" OpHelp(""),
- /* 146 */ "DropIndex" OpHelp(""),
- /* 147 */ "DropTrigger" OpHelp(""),
- /* 148 */ "IntegrityCk" OpHelp(""),
- /* 149 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
- /* 150 */ "Param" OpHelp(""),
- /* 151 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
- /* 152 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
- /* 153 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
- /* 154 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"),
- /* 155 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 156 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 157 */ "AggValue" OpHelp("r[P3]=value N=P2"),
- /* 158 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
- /* 159 */ "Expire" OpHelp(""),
- /* 160 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
- /* 161 */ "VBegin" OpHelp(""),
- /* 162 */ "VCreate" OpHelp(""),
- /* 163 */ "VDestroy" OpHelp(""),
- /* 164 */ "VOpen" OpHelp(""),
- /* 165 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 166 */ "VRename" OpHelp(""),
- /* 167 */ "Pagecount" OpHelp(""),
- /* 168 */ "MaxPgcnt" OpHelp(""),
- /* 169 */ "Trace" OpHelp(""),
- /* 170 */ "CursorHint" OpHelp(""),
- /* 171 */ "Noop" OpHelp(""),
- /* 172 */ "Explain" OpHelp(""),
- /* 173 */ "Abortable" OpHelp(""),
+ /* 59 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
+ /* 60 */ "IncrVacuum" OpHelp(""),
+ /* 61 */ "VNext" OpHelp(""),
+ /* 62 */ "Init" OpHelp("Start at P2"),
+ /* 63 */ "Return" OpHelp(""),
+ /* 64 */ "EndCoroutine" OpHelp(""),
+ /* 65 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
+ /* 66 */ "Halt" OpHelp(""),
+ /* 67 */ "Integer" OpHelp("r[P2]=P1"),
+ /* 68 */ "Int64" OpHelp("r[P2]=P4"),
+ /* 69 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
+ /* 70 */ "Null" OpHelp("r[P2..P3]=NULL"),
+ /* 71 */ "SoftNull" OpHelp("r[P1]=NULL"),
+ /* 72 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
+ /* 73 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
+ /* 74 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 75 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
+ /* 76 */ "SCopy" OpHelp("r[P2]=r[P1]"),
+ /* 77 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
+ /* 78 */ "ResultRow" OpHelp("output=r[P1@P2]"),
+ /* 79 */ "CollSeq" OpHelp(""),
+ /* 80 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
+ /* 81 */ "RealAffinity" OpHelp(""),
+ /* 82 */ "Cast" OpHelp("affinity(r[P1])"),
+ /* 83 */ "Permutation" OpHelp(""),
+ /* 84 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
+ /* 85 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
+ /* 86 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
+  /* 87 */ "ShiftLeft"       OpHelp("r[P3]=r[P2]<<r[P1]"),
+  /* 88 */ "ShiftRight"      OpHelp("r[P3]=r[P2]>>r[P1]"),
+ /* 89 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"),
+ /* 90 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"),
+ /* 91 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"),
+ /* 92 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
+ /* 93 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
+ /* 94 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
+ /* 95 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"),
+ /* 96 */ "BitNot" OpHelp("r[P1]= ~r[P1]"),
+ /* 97 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"),
+ /* 98 */ "Column" OpHelp("r[P3]=PX"),
+ /* 99 */ "String8" OpHelp("r[P2]='P4'"),
+ /* 100 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 101 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
+ /* 102 */ "Count" OpHelp("r[P2]=count()"),
+ /* 103 */ "ReadCookie" OpHelp(""),
+ /* 104 */ "SetCookie" OpHelp(""),
+ /* 105 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
+ /* 106 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
+ /* 107 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
+ /* 108 */ "OpenDup" OpHelp(""),
+ /* 109 */ "OpenAutoindex" OpHelp("nColumn=P2"),
+ /* 110 */ "OpenEphemeral" OpHelp("nColumn=P2"),
+ /* 111 */ "SorterOpen" OpHelp(""),
+ /* 112 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
+ /* 113 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
+ /* 114 */ "Close" OpHelp(""),
+ /* 115 */ "ColumnsUsed" OpHelp(""),
+ /* 116 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
+ /* 117 */ "NewRowid" OpHelp("r[P2]=rowid"),
+ /* 118 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
+ /* 119 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
+ /* 120 */ "Delete" OpHelp(""),
+ /* 121 */ "ResetCount" OpHelp(""),
+ /* 122 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
+ /* 123 */ "SorterData" OpHelp("r[P2]=data"),
+ /* 124 */ "RowData" OpHelp("r[P2]=data"),
+ /* 125 */ "Rowid" OpHelp("r[P2]=rowid"),
+ /* 126 */ "NullRow" OpHelp(""),
+ /* 127 */ "SeekEnd" OpHelp(""),
+ /* 128 */ "SorterInsert" OpHelp("key=r[P2]"),
+ /* 129 */ "IdxInsert" OpHelp("key=r[P2]"),
+ /* 130 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
+ /* 131 */ "DeferredSeek" OpHelp("Move P3 to P1.rowid if needed"),
+ /* 132 */ "IdxRowid" OpHelp("r[P2]=rowid"),
+ /* 133 */ "Destroy" OpHelp(""),
+ /* 134 */ "Real" OpHelp("r[P2]=P4"),
+ /* 135 */ "Clear" OpHelp(""),
+ /* 136 */ "ResetSorter" OpHelp(""),
+ /* 137 */ "CreateBtree" OpHelp("r[P2]=root iDb=P1 flags=P3"),
+ /* 138 */ "SqlExec" OpHelp(""),
+ /* 139 */ "ParseSchema" OpHelp(""),
+ /* 140 */ "LoadAnalysis" OpHelp(""),
+ /* 141 */ "DropTable" OpHelp(""),
+ /* 142 */ "DropIndex" OpHelp(""),
+ /* 143 */ "DropTrigger" OpHelp(""),
+ /* 144 */ "IntegrityCk" OpHelp(""),
+ /* 145 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 146 */ "Param" OpHelp(""),
+ /* 147 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 148 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 149 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
+ /* 150 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 151 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 152 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 153 */ "Expire" OpHelp(""),
+ /* 154 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 155 */ "VBegin" OpHelp(""),
+ /* 156 */ "VCreate" OpHelp(""),
+ /* 157 */ "VDestroy" OpHelp(""),
+ /* 158 */ "VOpen" OpHelp(""),
+ /* 159 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 160 */ "VRename" OpHelp(""),
+ /* 161 */ "Pagecount" OpHelp(""),
+ /* 162 */ "MaxPgcnt" OpHelp(""),
+ /* 163 */ "PureFunc0" OpHelp(""),
+ /* 164 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"),
+ /* 165 */ "PureFunc" OpHelp(""),
+ /* 166 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
+ /* 167 */ "Trace" OpHelp(""),
+ /* 168 */ "CursorHint" OpHelp(""),
+ /* 169 */ "Noop" OpHelp(""),
+ /* 170 */ "Explain" OpHelp(""),
+ /* 171 */ "Abortable" OpHelp(""),
};
return azName[i];
}
@@ -32621,11 +32182,7 @@ static struct unix_syscall {
#define osLstat ((int(*)(const char*,struct stat*))aSyscall[27].pCurrent)
#if defined(__linux__) && defined(SQLITE_ENABLE_BATCH_ATOMIC_WRITE)
-# ifdef __ANDROID__
- { "ioctl", (sqlite3_syscall_ptr)(int(*)(int, int, ...))ioctl, 0 },
-# else
{ "ioctl", (sqlite3_syscall_ptr)ioctl, 0 },
-# endif
#else
{ "ioctl", (sqlite3_syscall_ptr)0, 0 },
#endif
@@ -32806,25 +32363,12 @@ static int robust_open(const char *z, int f, mode_t m){
** unixEnterMutex()
** assert( unixMutexHeld() );
** unixEnterLeave()
-**
-** To prevent deadlock, the global unixBigLock must must be acquired
-** before the unixInodeInfo.pLockMutex mutex, if both are held. It is
-** OK to get the pLockMutex without holding unixBigLock first, but if
-** that happens, the unixBigLock mutex must not be acquired until after
-** pLockMutex is released.
-**
-** OK: enter(unixBigLock), enter(pLockInfo)
-** OK: enter(unixBigLock)
-** OK: enter(pLockInfo)
-** ERROR: enter(pLockInfo), enter(unixBigLock)
*/
static sqlite3_mutex *unixBigLock = 0;
static void unixEnterMutex(void){
- assert( sqlite3_mutex_notheld(unixBigLock) ); /* Not a recursive mutex */
sqlite3_mutex_enter(unixBigLock);
}
static void unixLeaveMutex(void){
- assert( sqlite3_mutex_held(unixBigLock) );
sqlite3_mutex_leave(unixBigLock);
}
#ifdef SQLITE_DEBUG
@@ -33225,34 +32769,16 @@ struct unixFileId {
** A single inode can have multiple file descriptors, so each unixFile
** structure contains a pointer to an instance of this object and this
** object keeps a count of the number of unixFile pointing to it.
-**
-** Mutex rules:
-**
-** (1) Only the pLockMutex mutex must be held in order to read or write
-** any of the locking fields:
-** nShared, nLock, eFileLock, bProcessLock, pUnused
-**
-** (2) When nRef>0, then the following fields are unchanging and can
-** be read (but not written) without holding any mutex:
-** fileId, pLockMutex
-**
-** (3) With the exceptions above, all the fields may only be read
-** or written while holding the global unixBigLock mutex.
-**
-** Deadlock prevention: The global unixBigLock mutex may not
-** be acquired while holding the pLockMutex mutex. If both unixBigLock
-** and pLockMutex are needed, then unixBigLock must be acquired first.
*/
struct unixInodeInfo {
struct unixFileId fileId; /* The lookup key */
- sqlite3_mutex *pLockMutex; /* Hold this mutex for... */
- int nShared; /* Number of SHARED locks held */
- int nLock; /* Number of outstanding file locks */
- unsigned char eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */
- unsigned char bProcessLock; /* An exclusive process lock is held */
- UnixUnusedFd *pUnused; /* Unused file descriptors to close */
+ int nShared; /* Number of SHARED locks held */
+ unsigned char eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */
+ unsigned char bProcessLock; /* An exclusive process lock is held */
int nRef; /* Number of pointers to this structure */
unixShmNode *pShmNode; /* Shared memory associated with this inode */
+ int nLock; /* Number of outstanding file locks */
+ UnixUnusedFd *pUnused; /* Unused file descriptors to close */
unixInodeInfo *pNext; /* List of all unixInodeInfo objects */
unixInodeInfo *pPrev; /* .... doubly linked */
#if SQLITE_ENABLE_LOCKING_STYLE
@@ -33268,21 +32794,7 @@ struct unixInodeInfo {
** A lists of all unixInodeInfo objects.
*/
static unixInodeInfo *inodeList = 0; /* All unixInodeInfo objects */
-
-#ifdef SQLITE_DEBUG
-/*
-** True if the inode mutex is held, or not. Used only within assert()
-** to help verify correct mutex usage.
-*/
-int unixFileMutexHeld(unixFile *pFile){
- assert( pFile->pInode );
- return sqlite3_mutex_held(pFile->pInode->pLockMutex);
-}
-int unixFileMutexNotheld(unixFile *pFile){
- assert( pFile->pInode );
- return sqlite3_mutex_notheld(pFile->pInode->pLockMutex);
-}
-#endif
+static unsigned int nUnusedFd = 0; /* Total unused file descriptors */
/*
**
@@ -33388,11 +32900,11 @@ static void closePendingFds(unixFile *pFile){
unixInodeInfo *pInode = pFile->pInode;
UnixUnusedFd *p;
UnixUnusedFd *pNext;
- assert( unixFileMutexHeld(pFile) );
for(p=pInode->pUnused; p; p=pNext){
pNext = p->pNext;
robust_close(pFile, p->fd, __LINE__);
sqlite3_free(p);
+ nUnusedFd--;
}
pInode->pUnused = 0;
}
@@ -33406,14 +32918,11 @@ static void closePendingFds(unixFile *pFile){
static void releaseInodeInfo(unixFile *pFile){
unixInodeInfo *pInode = pFile->pInode;
assert( unixMutexHeld() );
- assert( unixFileMutexNotheld(pFile) );
if( ALWAYS(pInode) ){
pInode->nRef--;
if( pInode->nRef==0 ){
assert( pInode->pShmNode==0 );
- sqlite3_mutex_enter(pInode->pLockMutex);
closePendingFds(pFile);
- sqlite3_mutex_leave(pInode->pLockMutex);
if( pInode->pPrev ){
assert( pInode->pPrev->pNext==pInode );
pInode->pPrev->pNext = pInode->pNext;
@@ -33425,10 +32934,10 @@ static void releaseInodeInfo(unixFile *pFile){
assert( pInode->pNext->pPrev==pInode );
pInode->pNext->pPrev = pInode->pPrev;
}
- sqlite3_mutex_free(pInode->pLockMutex);
sqlite3_free(pInode);
}
}
+ assert( inodeList!=0 || nUnusedFd==0 );
}
/*
@@ -33498,6 +33007,7 @@ static int findInodeInfo(
#else
fileId.ino = (u64)statbuf.st_ino;
#endif
+ assert( inodeList!=0 || nUnusedFd==0 );
pInode = inodeList;
while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){
pInode = pInode->pNext;
@@ -33509,13 +33019,6 @@ static int findInodeInfo(
}
memset(pInode, 0, sizeof(*pInode));
memcpy(&pInode->fileId, &fileId, sizeof(fileId));
- if( sqlite3GlobalConfig.bCoreMutex ){
- pInode->pLockMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
- if( pInode->pLockMutex==0 ){
- sqlite3_free(pInode);
- return SQLITE_NOMEM_BKPT;
- }
- }
pInode->nRef = 1;
pInode->pNext = inodeList;
pInode->pPrev = 0;
@@ -33594,7 +33097,7 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){
assert( pFile );
assert( pFile->eFileLock<=SHARED_LOCK );
- sqlite3_mutex_enter(pFile->pInode->pLockMutex);
+ unixEnterMutex(); /* Because pFile->pInode is shared across threads */
/* Check if a thread in this process holds such a lock */
if( pFile->pInode->eFileLock>SHARED_LOCK ){
@@ -33619,7 +33122,7 @@ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){
}
#endif
- sqlite3_mutex_leave(pFile->pInode->pLockMutex);
+ unixLeaveMutex();
OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved));
*pResOut = reserved;
@@ -33685,8 +33188,8 @@ static int osSetPosixAdvisoryLock(
static int unixFileLock(unixFile *pFile, struct flock *pLock){
int rc;
unixInodeInfo *pInode = pFile->pInode;
+ assert( unixMutexHeld() );
assert( pInode!=0 );
- assert( sqlite3_mutex_held(pInode->pLockMutex) );
if( (pFile->ctrlFlags & (UNIXFILE_EXCL|UNIXFILE_RDONLY))==UNIXFILE_EXCL ){
if( pInode->bProcessLock==0 ){
struct flock lock;
@@ -33805,8 +33308,8 @@ static int unixLock(sqlite3_file *id, int eFileLock){
/* This mutex is needed because pFile->pInode is shared across threads
*/
+ unixEnterMutex();
pInode = pFile->pInode;
- sqlite3_mutex_enter(pInode->pLockMutex);
/* If some thread using this PID has a lock via a different unixFile*
** handle that precludes the requested lock, return BUSY.
@@ -33949,7 +33452,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
}
end_lock:
- sqlite3_mutex_leave(pInode->pLockMutex);
+ unixLeaveMutex();
OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock),
rc==SQLITE_OK ? "ok" : "failed"));
return rc;
@@ -33962,11 +33465,11 @@ end_lock:
static void setPendingFd(unixFile *pFile){
unixInodeInfo *pInode = pFile->pInode;
UnixUnusedFd *p = pFile->pPreallocatedUnused;
- assert( unixFileMutexHeld(pFile) );
p->pNext = pInode->pUnused;
pInode->pUnused = p;
pFile->h = -1;
pFile->pPreallocatedUnused = 0;
+ nUnusedFd++;
}
/*
@@ -33997,8 +33500,8 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
if( pFile->eFileLock<=eFileLock ){
return SQLITE_OK;
}
+ unixEnterMutex();
pInode = pFile->pInode;
- sqlite3_mutex_enter(pInode->pLockMutex);
assert( pInode->nShared!=0 );
if( pFile->eFileLock>SHARED_LOCK ){
assert( pInode->eFileLock==pFile->eFileLock );
@@ -34124,14 +33627,14 @@ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){
*/
pInode->nLock--;
assert( pInode->nLock>=0 );
- if( pInode->nLock==0 ) closePendingFds(pFile);
+ if( pInode->nLock==0 ){
+ closePendingFds(pFile);
+ }
}
end_unlock:
- sqlite3_mutex_leave(pInode->pLockMutex);
- if( rc==SQLITE_OK ){
- pFile->eFileLock = eFileLock;
- }
+ unixLeaveMutex();
+ if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock;
return rc;
}
@@ -34202,20 +33705,15 @@ static int closeUnixFile(sqlite3_file *id){
static int unixClose(sqlite3_file *id){
int rc = SQLITE_OK;
unixFile *pFile = (unixFile *)id;
- unixInodeInfo *pInode = pFile->pInode;
-
- assert( pInode!=0 );
verifyDbFile(pFile);
unixUnlock(id, NO_LOCK);
- assert( unixFileMutexNotheld(pFile) );
unixEnterMutex();
/* unixFile.pInode is always valid here. Otherwise, a different close
** routine (e.g. nolockClose()) would be called instead.
*/
assert( pFile->pInode->nLock>0 || pFile->pInode->bProcessLock==0 );
- sqlite3_mutex_enter(pInode->pLockMutex);
- if( pInode->nLock ){
+ if( ALWAYS(pFile->pInode) && pFile->pInode->nLock ){
/* If there are outstanding locks, do not actually close the file just
** yet because that would clear those locks. Instead, add the file
** descriptor to pInode->pUnused list. It will be automatically closed
@@ -34223,7 +33721,6 @@ static int unixClose(sqlite3_file *id){
*/
setPendingFd(pFile);
}
- sqlite3_mutex_leave(pInode->pLockMutex);
releaseInodeInfo(pFile);
rc = closeUnixFile(id);
unixLeaveMutex();
@@ -34821,7 +34318,6 @@ static int semXClose(sqlite3_file *id) {
unixFile *pFile = (unixFile*)id;
semXUnlock(id, NO_LOCK);
assert( pFile );
- assert( unixFileMutexNotheld(pFile) );
unixEnterMutex();
releaseInodeInfo(pFile);
unixLeaveMutex();
@@ -34936,7 +34432,8 @@ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){
*pResOut = 1;
return SQLITE_OK;
}
- sqlite3_mutex_enter(pFile->pInode->pLockMutex);
+ unixEnterMutex(); /* Because pFile->pInode is shared across threads */
+
/* Check if a thread in this process holds such a lock */
if( pFile->pInode->eFileLock>SHARED_LOCK ){
reserved = 1;
@@ -34960,7 +34457,7 @@ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){
}
}
- sqlite3_mutex_leave(pFile->pInode->pLockMutex);
+ unixLeaveMutex();
OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved));
*pResOut = reserved;
@@ -35023,8 +34520,8 @@ static int afpLock(sqlite3_file *id, int eFileLock){
/* This mutex is needed because pFile->pInode is shared across threads
*/
+ unixEnterMutex();
pInode = pFile->pInode;
- sqlite3_mutex_enter(pInode->pLockMutex);
/* If some thread using this PID has a lock via a different unixFile*
** handle that precludes the requested lock, return BUSY.
@@ -35160,7 +34657,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){
}
afp_end_lock:
- sqlite3_mutex_leave(pInode->pLockMutex);
+ unixLeaveMutex();
OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock),
rc==SQLITE_OK ? "ok" : "failed"));
return rc;
@@ -35192,8 +34689,8 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
if( pFile->eFileLock<=eFileLock ){
return SQLITE_OK;
}
+ unixEnterMutex();
pInode = pFile->pInode;
- sqlite3_mutex_enter(pInode->pLockMutex);
assert( pInode->nShared!=0 );
if( pFile->eFileLock>SHARED_LOCK ){
assert( pInode->eFileLock==pFile->eFileLock );
@@ -35262,14 +34759,14 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) {
if( rc==SQLITE_OK ){
pInode->nLock--;
assert( pInode->nLock>=0 );
- if( pInode->nLock==0 ) closePendingFds(pFile);
+ if( pInode->nLock==0 ){
+ closePendingFds(pFile);
+ }
}
}
- sqlite3_mutex_leave(pInode->pLockMutex);
- if( rc==SQLITE_OK ){
- pFile->eFileLock = eFileLock;
- }
+ unixLeaveMutex();
+ if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock;
return rc;
}
@@ -35281,20 +34778,14 @@ static int afpClose(sqlite3_file *id) {
unixFile *pFile = (unixFile*)id;
assert( id!=0 );
afpUnlock(id, NO_LOCK);
- assert( unixFileMutexNotheld(pFile) );
unixEnterMutex();
- if( pFile->pInode ){
- unixInodeInfo *pInode = pFile->pInode;
- sqlite3_mutex_enter(pInode->pLockMutex);
- if( pInode->nLock ){
- /* If there are outstanding locks, do not actually close the file just
- ** yet because that would clear those locks. Instead, add the file
- ** descriptor to pInode->aPending. It will be automatically closed when
- ** the last lock is cleared.
- */
- setPendingFd(pFile);
- }
- sqlite3_mutex_leave(pInode->pLockMutex);
+ if( pFile->pInode && pFile->pInode->nLock ){
+ /* If there are outstanding locks, do not actually close the file just
+ ** yet because that would clear those locks. Instead, add the file
+ ** descriptor to pInode->aPending. It will be automatically closed when
+ ** the last lock is cleared.
+ */
+ setPendingFd(pFile);
}
releaseInodeInfo(pFile);
sqlite3_free(pFile->lockingContext);
@@ -36600,7 +36091,6 @@ static int unixOpenSharedMemory(unixFile *pDbFd){
/* Check to see if a unixShmNode object already exists. Reuse an existing
** one if present. Create a new one if necessary.
*/
- assert( unixFileMutexNotheld(pDbFd) );
unixEnterMutex();
pInode = pDbFd->pInode;
pShmNode = pInode->pShmNode;
@@ -36983,9 +36473,6 @@ static void unixShmBarrier(
){
UNUSED_PARAMETER(fd);
sqlite3MemoryBarrier(); /* compiler-defined memory barrier */
- assert( fd->pMethods->xLock==nolockLock
- || unixFileMutexNotheld((unixFile*)fd)
- );
unixEnterMutex(); /* Also mutex, for redundancy */
unixLeaveMutex();
}
@@ -37027,7 +36514,6 @@ static int unixShmUnmap(
/* If pShmNode->nRef has reached 0, then close the underlying
** shared-memory file, too */
- assert( unixFileMutexNotheld(pDbFd) );
unixEnterMutex();
assert( pShmNode->nRef>0 );
pShmNode->nRef--;
@@ -37354,7 +36840,7 @@ IOMETHODS(
IOMETHODS(
nolockIoFinder, /* Finder function name */
nolockIoMethods, /* sqlite3_io_methods object name */
- 3, /* shared memory and mmap are enabled */
+ 3, /* shared memory is disabled */
nolockClose, /* xClose method */
nolockLock, /* xLock method */
nolockUnlock, /* xUnlock method */
@@ -37850,7 +37336,7 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
**
** Even if a subsequent open() call does succeed, the consequences of
** not searching for a reusable file descriptor are not dire. */
- if( inodeList!=0 && 0==osStat(zPath, &sStat) ){
+ if( nUnusedFd>0 && 0==osStat(zPath, &sStat) ){
unixInodeInfo *pInode;
pInode = inodeList;
@@ -37860,14 +37346,12 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
}
if( pInode ){
UnixUnusedFd **pp;
- assert( sqlite3_mutex_notheld(pInode->pLockMutex) );
- sqlite3_mutex_enter(pInode->pLockMutex);
for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext));
pUnused = *pp;
if( pUnused ){
+ nUnusedFd--;
*pp = pUnused->pNext;
}
- sqlite3_mutex_leave(pInode->pLockMutex);
}
}
unixLeaveMutex();
@@ -43070,9 +42554,6 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
winFile *pFile = (winFile*)id; /* File handle object */
int rc = SQLITE_OK; /* Return code for this function */
DWORD lastErrno;
-#if SQLITE_MAX_MMAP_SIZE>0
- sqlite3_int64 oldMmapSize;
-#endif
assert( pFile );
SimulateIOError(return SQLITE_IOERR_TRUNCATE);
@@ -43088,15 +42569,6 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk;
}
-#if SQLITE_MAX_MMAP_SIZE>0
- if( pFile->pMapRegion ){
- oldMmapSize = pFile->mmapSize;
- }else{
- oldMmapSize = 0;
- }
- winUnmapfile(pFile);
-#endif
-
/* SetEndOfFile() returns non-zero when successful, or zero when it fails. */
if( winSeekFile(pFile, nByte) ){
rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno,
@@ -43109,12 +42581,12 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){
}
#if SQLITE_MAX_MMAP_SIZE>0
- if( rc==SQLITE_OK && oldMmapSize>0 ){
- if( oldMmapSize>nByte ){
- winMapfile(pFile, -1);
- }else{
- winMapfile(pFile, oldMmapSize);
- }
+ /* If the file was truncated to a size smaller than the currently
+ ** mapped region, reduce the effective mapping size as well. SQLite will
+ ** use read() and write() to access data beyond this point from now on.
+ */
+  if( pFile->pMapRegion && nByte<pFile->mmapSize ){
+ pFile->mmapSize = nByte;
}
#endif
@@ -46302,8 +45774,8 @@ SQLITE_API int sqlite3_os_end(void){
** This file also implements interface sqlite3_serialize() and
** sqlite3_deserialize().
*/
-/* #include "sqliteInt.h" */
#ifdef SQLITE_ENABLE_DESERIALIZE
+/* #include "sqliteInt.h" */
/*
** Forward declaration of objects used by this utility
@@ -49551,23 +49023,30 @@ struct RowSet {
#define ROWSET_NEXT 0x02 /* True if sqlite3RowSetNext() has been called */
/*
-** Allocate a RowSet object. Return NULL if a memory allocation
-** error occurs.
+** Turn bulk memory into a RowSet object. N bytes of memory
+** are available at pSpace. The db pointer is used as a memory context
+** for any subsequent allocations that need to occur.
+** Return a pointer to the new RowSet object.
+**
+** It must be the case that N is sufficient to make a Rowset. If not
+** an assertion fault occurs.
+**
+** If N is larger than the minimum, use the surplus as an initial
+** allocation of entries available to be filled.
*/
-SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db){
- RowSet *p = sqlite3DbMallocRawNN(db, sizeof(*p));
- if( p ){
- int N = sqlite3DbMallocSize(db, p);
- p->pChunk = 0;
- p->db = db;
- p->pEntry = 0;
- p->pLast = 0;
- p->pForest = 0;
- p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p);
- p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry));
- p->rsFlags = ROWSET_SORTED;
- p->iBatch = 0;
- }
+SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db, void *pSpace, unsigned int N){
+ RowSet *p;
+ assert( N >= ROUND8(sizeof(*p)) );
+ p = pSpace;
+ p->pChunk = 0;
+ p->db = db;
+ p->pEntry = 0;
+ p->pLast = 0;
+ p->pForest = 0;
+ p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p);
+ p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry));
+ p->rsFlags = ROWSET_SORTED;
+ p->iBatch = 0;
return p;
}
@@ -49576,8 +49055,7 @@ SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db){
** the RowSet has allocated over its lifetime. This routine is
** the destructor for the RowSet.
*/
-SQLITE_PRIVATE void sqlite3RowSetClear(void *pArg){
- RowSet *p = (RowSet*)pArg;
+SQLITE_PRIVATE void sqlite3RowSetClear(RowSet *p){
struct RowSetChunk *pChunk, *pNextChunk;
for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){
pNextChunk = pChunk->pNextChunk;
@@ -49591,16 +49069,6 @@ SQLITE_PRIVATE void sqlite3RowSetClear(void *pArg){
p->rsFlags = ROWSET_SORTED;
}
-/*
-** Deallocate all chunks from a RowSet. This frees all memory that
-** the RowSet has allocated over its lifetime. This routine is
-** the destructor for the RowSet.
-*/
-SQLITE_PRIVATE void sqlite3RowSetDelete(void *pArg){
- sqlite3RowSetClear(pArg);
- sqlite3DbFree(((RowSet*)pArg)->db, pArg);
-}
-
/*
** Allocate a new RowSetEntry object that is associated with the
** given RowSet. Return a pointer to the new and completely uninitialized
@@ -50088,8 +49556,6 @@ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal);
SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot);
SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot);
SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal);
-SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot);
-SQLITE_PRIVATE void sqlite3WalSnapshotUnlock(Wal *pWal);
#endif
#ifdef SQLITE_ENABLE_ZIPVFS
@@ -51083,12 +50549,8 @@ static int assert_pager_state(Pager *p){
** to "print *pPager" in gdb:
**
** (gdb) printf "%s", print_pager_state(pPager)
-**
-** This routine has external linkage in order to suppress compiler warnings
-** about an unused function. It is enclosed within SQLITE_DEBUG and so does
-** not appear in normal builds.
*/
-char *print_pager_state(Pager *p){
+static char *print_pager_state(Pager *p){
static char zRet[1024];
sqlite3_snprintf(1024, zRet,
@@ -51854,6 +51316,7 @@ static void pager_reset(Pager *pPager){
** Return the pPager->iDataVersion value
*/
SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager *pPager){
+ assert( pPager->eState>PAGER_OPEN );
return pPager->iDataVersion;
}
@@ -56471,10 +55934,9 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
** backup in progress needs to be restarted. */
sqlite3BackupRestart(pPager->pBackup);
}else{
- PgHdr *pList;
if( pagerUseWal(pPager) ){
+ PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
PgHdr *pPageOne = 0;
- pList = sqlite3PcacheDirtyList(pPager->pPCache);
if( pList==0 ){
/* Must have at least one page for the WAL commit flag.
** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */
@@ -56495,14 +55957,14 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
** should be used. No rollback journal is created if batch-atomic-write
** is enabled.
*/
-#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
sqlite3_file *fd = pPager->fd;
- int bBatch = zMaster==0 /* An SQLITE_IOCAP_BATCH_ATOMIC commit */
+#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
+ const int bBatch = zMaster==0 /* An SQLITE_IOCAP_BATCH_ATOMIC commit */
&& (sqlite3OsDeviceCharacteristics(fd) & SQLITE_IOCAP_BATCH_ATOMIC)
&& !pPager->noSync
&& sqlite3JournalIsInMemory(pPager->jfd);
#else
-# define bBatch 0
+# define bBatch 0
#endif
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
@@ -56554,16 +56016,15 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
}
}
}
-#else /* SQLITE_ENABLE_ATOMIC_WRITE */
+#else
#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
if( zMaster ){
rc = sqlite3JournalCreate(pPager->jfd);
if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
- assert( bBatch==0 );
}
#endif
rc = pager_incr_changecounter(pPager, 0);
-#endif /* !SQLITE_ENABLE_ATOMIC_WRITE */
+#endif
if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
/* Write the master journal name into the journal file. If a master
@@ -56587,36 +56048,24 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
rc = syncJournal(pPager, 0);
if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
- pList = sqlite3PcacheDirtyList(pPager->pPCache);
-#ifdef SQLITE_ENABLE_BATCH_ATOMIC_WRITE
if( bBatch ){
+ /* The pager is now in DBMOD state. But regardless of what happens
+ ** next, attempting to play the journal back into the database would
+ ** be unsafe. Close it now to make sure that does not happen. */
+ sqlite3OsClose(pPager->jfd);
rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0);
+ if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
+ }
+ rc = pager_write_pagelist(pPager,sqlite3PcacheDirtyList(pPager->pPCache));
+ if( bBatch ){
if( rc==SQLITE_OK ){
- rc = pager_write_pagelist(pPager, pList);
- if( rc==SQLITE_OK ){
- rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0);
- }
- if( rc!=SQLITE_OK ){
- sqlite3OsFileControlHint(fd, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, 0);
- }
+ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0);
}
-
- if( (rc&0xFF)==SQLITE_IOERR && rc!=SQLITE_IOERR_NOMEM ){
- rc = sqlite3JournalCreate(pPager->jfd);
- if( rc!=SQLITE_OK ){
- sqlite3OsClose(pPager->jfd);
- goto commit_phase_one_exit;
- }
- bBatch = 0;
- }else{
- sqlite3OsClose(pPager->jfd);
+ if( rc!=SQLITE_OK ){
+ sqlite3OsFileControlHint(fd, SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE, 0);
}
}
-#endif /* SQLITE_ENABLE_BATCH_ATOMIC_WRITE */
- if( bBatch==0 ){
- rc = pager_write_pagelist(pPager, pList);
- }
if( rc!=SQLITE_OK ){
assert( rc!=SQLITE_IOERR_BLOCKED );
goto commit_phase_one_exit;
@@ -57368,6 +56817,13 @@ SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *pPager, int eMode){
SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
u8 eOld = pPager->journalMode; /* Prior journalmode */
+#ifdef SQLITE_DEBUG
+ /* The print_pager_state() routine is intended to be used by the debugger
+ ** only. We invoke it once here to suppress a compiler warning. */
+ print_pager_state(pPager);
+#endif
+
+
/* The eMode parameter is always valid */
assert( eMode==PAGER_JOURNALMODE_DELETE
|| eMode==PAGER_JOURNALMODE_TRUNCATE
@@ -57736,38 +57192,6 @@ SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager){
}
return rc;
}
-
-/*
-** The caller currently has a read transaction open on the database.
-** If this is not a WAL database, SQLITE_ERROR is returned. Otherwise,
-** this function takes a SHARED lock on the CHECKPOINTER slot and then
-** checks if the snapshot passed as the second argument is still
-** available. If so, SQLITE_OK is returned.
-**
-** If the snapshot is not available, SQLITE_ERROR is returned. Or, if
-** the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any error
-** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER
-** lock is released before returning.
-*/
-SQLITE_PRIVATE int sqlite3PagerSnapshotCheck(Pager *pPager, sqlite3_snapshot *pSnapshot){
- int rc;
- if( pPager->pWal ){
- rc = sqlite3WalSnapshotCheck(pPager->pWal, pSnapshot);
- }else{
- rc = SQLITE_ERROR;
- }
- return rc;
-}
-
-/*
-** Release a lock obtained by an earlier successful call to
-** sqlite3PagerSnapshotCheck().
-*/
-SQLITE_PRIVATE void sqlite3PagerSnapshotUnlock(Pager *pPager){
- assert( pPager->pWal );
- return sqlite3WalSnapshotUnlock(pPager->pWal);
-}
-
#endif /* SQLITE_ENABLE_SNAPSHOT */
#endif /* !SQLITE_OMIT_WAL */
@@ -58049,18 +57473,6 @@ SQLITE_PRIVATE int sqlite3WalTrace = 0;
# define WALTRACE(X)
#endif
-/*
-** WAL mode depends on atomic aligned 32-bit loads and stores in a few
-** places. The following macros try to make this explicit.
-*/
-#if GCC_VESRION>=5004000
-# define AtomicLoad(PTR) __atomic_load_n((PTR),__ATOMIC_RELAXED)
-# define AtomicStore(PTR,VAL) __atomic_store_n((PTR),(VAL),__ATOMIC_RELAXED)
-#else
-# define AtomicLoad(PTR) (*(PTR))
-# define AtomicStore(PTR,VAL) (*(PTR) = (VAL))
-#endif
-
/*
** The maximum (and only) versions of the wal and wal-index formats
** that may be interpreted by this version of SQLite.
@@ -58683,51 +58095,48 @@ static int walNextHash(int iPriorHash){
return (iPriorHash+1)&(HASHTABLE_NSLOT-1);
}
-/*
-** An instance of the WalHashLoc object is used to describe the location
-** of a page hash table in the wal-index. This becomes the return value
-** from walHashGet().
-*/
-typedef struct WalHashLoc WalHashLoc;
-struct WalHashLoc {
- volatile ht_slot *aHash; /* Start of the wal-index hash table */
- volatile u32 *aPgno; /* aPgno[1] is the page of first frame indexed */
- u32 iZero; /* One less than the frame number of first indexed*/
-};
-
/*
** Return pointers to the hash table and page number array stored on
** page iHash of the wal-index. The wal-index is broken into 32KB pages
** numbered starting from 0.
**
-** Set output variable pLoc->aHash to point to the start of the hash table
-** in the wal-index file. Set pLoc->iZero to one less than the frame
+** Set output variable *paHash to point to the start of the hash table
+** in the wal-index file. Set *piZero to one less than the frame
** number of the first frame indexed by this hash table. If a
** slot in the hash table is set to N, it refers to frame number
-** (pLoc->iZero+N) in the log.
+** (*piZero+N) in the log.
**
-** Finally, set pLoc->aPgno so that pLoc->aPgno[1] is the page number of the
-** first frame indexed by the hash table, frame (pLoc->iZero+1).
+** Finally, set *paPgno so that *paPgno[1] is the page number of the
+** first frame indexed by the hash table, frame (*piZero+1).
*/
static int walHashGet(
Wal *pWal, /* WAL handle */
int iHash, /* Find the iHash'th table */
- WalHashLoc *pLoc /* OUT: Hash table location */
+ volatile ht_slot **paHash, /* OUT: Pointer to hash index */
+ volatile u32 **paPgno, /* OUT: Pointer to page number array */
+ u32 *piZero /* OUT: Frame associated with *paPgno[0] */
){
int rc; /* Return code */
+ volatile u32 *aPgno;
- rc = walIndexPage(pWal, iHash, &pLoc->aPgno);
+ rc = walIndexPage(pWal, iHash, &aPgno);
assert( rc==SQLITE_OK || iHash>0 );
if( rc==SQLITE_OK ){
- pLoc->aHash = (volatile ht_slot *)&pLoc->aPgno[HASHTABLE_NPAGE];
+ u32 iZero;
+ volatile ht_slot *aHash;
+
+ aHash = (volatile ht_slot *)&aPgno[HASHTABLE_NPAGE];
if( iHash==0 ){
- pLoc->aPgno = &pLoc->aPgno[WALINDEX_HDR_SIZE/sizeof(u32)];
- pLoc->iZero = 0;
+ aPgno = &aPgno[WALINDEX_HDR_SIZE/sizeof(u32)];
+ iZero = 0;
}else{
- pLoc->iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE;
+ iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE;
}
- pLoc->aPgno = &pLoc->aPgno[-1];
+
+ *paPgno = &aPgno[-1];
+ *paHash = aHash;
+ *piZero = iZero;
}
return rc;
}
@@ -58773,7 +58182,9 @@ static u32 walFramePgno(Wal *pWal, u32 iFrame){
** actually needed.
*/
static void walCleanupHash(Wal *pWal){
- WalHashLoc sLoc; /* Hash table location */
+ volatile ht_slot *aHash = 0; /* Pointer to hash table to clear */
+ volatile u32 *aPgno = 0; /* Page number array for hash table */
+ u32 iZero = 0; /* frame == (aHash[x]+iZero) */
int iLimit = 0; /* Zero values greater than this */
int nByte; /* Number of bytes to zero in aPgno[] */
int i; /* Used to iterate through aHash[] */
@@ -58791,24 +58202,24 @@ static void walCleanupHash(Wal *pWal){
*/
assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) );
assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] );
- walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &sLoc);
+ walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &aHash, &aPgno, &iZero);
/* Zero all hash-table entries that correspond to frame numbers greater
** than pWal->hdr.mxFrame.
*/
- iLimit = pWal->hdr.mxFrame - sLoc.iZero;
+ iLimit = pWal->hdr.mxFrame - iZero;
assert( iLimit>0 );
   for(i=0; i<HASHTABLE_NSLOT; i++){
-    if( sLoc.aHash[i]>iLimit ){
- sLoc.aHash[i] = 0;
+ if( aHash[i]>iLimit ){
+ aHash[i] = 0;
}
}
/* Zero the entries in the aPgno array that correspond to frames with
** frame numbers greater than pWal->hdr.mxFrame.
*/
- nByte = (int)((char *)sLoc.aHash - (char *)&sLoc.aPgno[iLimit+1]);
- memset((void *)&sLoc.aPgno[iLimit+1], 0, nByte);
+ nByte = (int)((char *)aHash - (char *)&aPgno[iLimit+1]);
+ memset((void *)&aPgno[iLimit+1], 0, nByte);
#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
/* Verify that the every entry in the mapping region is still reachable
@@ -58818,10 +58229,10 @@ static void walCleanupHash(Wal *pWal){
int j; /* Loop counter */
int iKey; /* Hash key */
for(j=1; j<=iLimit; j++){
- for(iKey=walHash(sLoc.aPgno[j]);sLoc.aHash[iKey];iKey=walNextHash(iKey)){
- if( sLoc.aHash[iKey]==j ) break;
+ for(iKey=walHash(aPgno[j]); aHash[iKey]; iKey=walNextHash(iKey)){
+ if( aHash[iKey]==j ) break;
}
- assert( sLoc.aHash[iKey]==j );
+ assert( aHash[iKey]==j );
}
}
#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */
@@ -58834,9 +58245,11 @@ static void walCleanupHash(Wal *pWal){
*/
static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
int rc; /* Return code */
- WalHashLoc sLoc; /* Wal-index hash table location */
+ u32 iZero = 0; /* One less than frame number of aPgno[1] */
+ volatile u32 *aPgno = 0; /* Page number array */
+ volatile ht_slot *aHash = 0; /* Hash table */
- rc = walHashGet(pWal, walFramePage(iFrame), &sLoc);
+ rc = walHashGet(pWal, walFramePage(iFrame), &aHash, &aPgno, &iZero);
/* Assuming the wal-index file was successfully mapped, populate the
** page number array and hash table entry.
@@ -58846,16 +58259,15 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
int idx; /* Value to write to hash-table slot */
int nCollide; /* Number of hash collisions */
- idx = iFrame - sLoc.iZero;
+ idx = iFrame - iZero;
assert( idx <= HASHTABLE_NSLOT/2 + 1 );
/* If this is the first entry to be added to this hash-table, zero the
** entire hash table and aPgno[] array before proceeding.
*/
if( idx==1 ){
- int nByte = (int)((u8 *)&sLoc.aHash[HASHTABLE_NSLOT]
- - (u8 *)&sLoc.aPgno[1]);
- memset((void*)&sLoc.aPgno[1], 0, nByte);
+ int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]);
+ memset((void*)&aPgno[1], 0, nByte);
}
/* If the entry in aPgno[] is already set, then the previous writer
@@ -58864,18 +58276,18 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
** Remove the remnants of that writers uncommitted transaction from
** the hash-table before writing any new entries.
*/
- if( sLoc.aPgno[idx] ){
+ if( aPgno[idx] ){
walCleanupHash(pWal);
- assert( !sLoc.aPgno[idx] );
+ assert( !aPgno[idx] );
}
/* Write the aPgno[] array entry and the hash-table slot. */
nCollide = idx;
- for(iKey=walHash(iPage); sLoc.aHash[iKey]; iKey=walNextHash(iKey)){
+ for(iKey=walHash(iPage); aHash[iKey]; iKey=walNextHash(iKey)){
if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT;
}
- sLoc.aPgno[idx] = iPage;
- sLoc.aHash[iKey] = (ht_slot)idx;
+ aPgno[idx] = iPage;
+ aHash[iKey] = (ht_slot)idx;
#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
/* Verify that the number of entries in the hash table exactly equals
@@ -58884,7 +58296,7 @@ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
{
int i; /* Loop counter */
int nEntry = 0; /* Number of entries in the hash table */
-      for(i=0; i<HASHTABLE_NSLOT; i++){ if( sLoc.aHash[i] ) nEntry++; }
-      aIndex = &((ht_slot *)&p->aSegment[p->nSegment])[sLoc.iZero];
- sLoc.iZero++;
+ aIndex = &((ht_slot *)&p->aSegment[p->nSegment])[iZero];
+ iZero++;
       for(j=0; j<nEntry; j++){
         aIndex[j] = (ht_slot)j;
       }
-      walMergesort((u32 *)sLoc.aPgno, aTmp, aIndex, &nEntry);
-      p->aSegment[i].iZero = sLoc.iZero;
+ walMergesort((u32 *)aPgno, aTmp, aIndex, &nEntry);
+ p->aSegment[i].iZero = iZero;
p->aSegment[i].nEntry = nEntry;
p->aSegment[i].aIndex = aIndex;
- p->aSegment[i].aPgno = (u32 *)sLoc.aPgno;
+ p->aSegment[i].aPgno = (u32 *)aPgno;
}
}
sqlite3_free(aTmp);
@@ -59638,6 +59050,7 @@ static int walCheckpoint(
if( pIter
&& (rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(0),1))==SQLITE_OK
){
+ i64 nSize; /* Current size of database file */
u32 nBackfill = pInfo->nBackfill;
pInfo->nBackfillAttempted = mxSafeFrame;
@@ -59650,7 +59063,6 @@ static int walCheckpoint(
*/
if( rc==SQLITE_OK ){
i64 nReq = ((i64)mxPage * szPage);
- i64 nSize; /* Current size of database file */
rc = sqlite3OsFileSize(pWal->pDbFd, &nSize);
if( rc==SQLITE_OK && nSizepDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq);
@@ -60358,7 +59770,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
}
#endif
   for(i=1; i<WAL_NREADER; i++){
-    u32 thisMark = AtomicLoad(pInfo->aReadMark+i);
+ u32 thisMark = pInfo->aReadMark[i];
if( mxReadMark<=thisMark && thisMark<=mxFrame ){
assert( thisMark!=READMARK_NOT_USED );
mxReadMark = thisMark;
@@ -60371,7 +59783,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
     for(i=1; i<WAL_NREADER; i++){
       rc = walLockExclusive(pWal, WAL_READ_LOCK(i), 1);
       if( rc==SQLITE_OK ){
-        mxReadMark = AtomicStore(pInfo->aReadMark+i,mxFrame);
+ mxReadMark = pInfo->aReadMark[i] = mxFrame;
mxI = i;
walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1);
break;
@@ -60423,9 +59835,9 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
** we can guarantee that the checkpointer that set nBackfill could not
** see any pages past pWal->hdr.mxFrame, this problem does not come up.
*/
- pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1;
+ pWal->minFrame = pInfo->nBackfill+1;
walShmBarrier(pWal);
- if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark
+ if( pInfo->aReadMark[mxI]!=mxReadMark
|| memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))
){
walUnlockShared(pWal, WAL_READ_LOCK(mxI));
@@ -60476,14 +59888,16 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
}else{
u32 i = pInfo->nBackfillAttempted;
for(i=pInfo->nBackfillAttempted; i>pInfo->nBackfill; i--){
- WalHashLoc sLoc; /* Hash table location */
+ volatile ht_slot *dummy;
+ volatile u32 *aPgno; /* Array of page numbers */
+ u32 iZero; /* Frame corresponding to aPgno[0] */
u32 pgno; /* Page number in db file */
i64 iDbOff; /* Offset of db file entry */
i64 iWalOff; /* Offset of wal file entry */
- rc = walHashGet(pWal, walFramePage(i), &sLoc);
+ rc = walHashGet(pWal, walFramePage(i), &dummy, &aPgno, &iZero);
if( rc!=SQLITE_OK ) break;
- pgno = sLoc.aPgno[i-sLoc.iZero];
+ pgno = aPgno[i-iZero];
iDbOff = (i64)(pgno-1) * szPage;
if( iDbOff+szPage<=szDb ){
@@ -60524,7 +59938,7 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
**
** If the database contents have changes since the previous read
** transaction, then *pChanged is set to 1 before returning. The
-** Pager layer will use this to know that its cache is stale and
+** Pager layer will use this to know that is cache is stale and
** needs to be flushed.
*/
SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
@@ -60586,7 +60000,7 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
/* Check that the wal file has not been wrapped. Assuming that it has
** not, also check that no checkpointer has attempted to checkpoint any
** frames beyond pSnapshot->mxFrame. If either of these conditions are
- ** true, return SQLITE_ERROR_SNAPSHOT. Otherwise, overwrite pWal->hdr
+ ** true, return SQLITE_BUSY_SNAPSHOT. Otherwise, overwrite pWal->hdr
** with *pSnapshot and set *pChanged as appropriate for opening the
** snapshot. */
if( !memcmp(pSnapshot->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
@@ -60596,12 +60010,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
memcpy(&pWal->hdr, pSnapshot, sizeof(WalIndexHdr));
*pChanged = bChanged;
}else{
- rc = SQLITE_ERROR_SNAPSHOT;
+ rc = SQLITE_BUSY_SNAPSHOT;
}
/* Release the shared CKPT lock obtained above. */
walUnlockShared(pWal, WAL_CKPT_LOCK);
- pWal->minFrame = 1;
}
@@ -60685,20 +60098,21 @@ SQLITE_PRIVATE int sqlite3WalFindFrame(
*/
iMinHash = walFramePage(pWal->minFrame);
for(iHash=walFramePage(iLast); iHash>=iMinHash; iHash--){
- WalHashLoc sLoc; /* Hash table location */
+ volatile ht_slot *aHash; /* Pointer to hash table */
+ volatile u32 *aPgno; /* Pointer to array of page numbers */
+ u32 iZero; /* Frame number corresponding to aPgno[0] */
int iKey; /* Hash slot index */
int nCollide; /* Number of hash collisions remaining */
int rc; /* Error code */
- rc = walHashGet(pWal, iHash, &sLoc);
+ rc = walHashGet(pWal, iHash, &aHash, &aPgno, &iZero);
if( rc!=SQLITE_OK ){
return rc;
}
nCollide = HASHTABLE_NSLOT;
- for(iKey=walHash(pgno); sLoc.aHash[iKey]; iKey=walNextHash(iKey)){
- u32 iFrame = sLoc.aHash[iKey] + sLoc.iZero;
- if( iFrame<=iLast && iFrame>=pWal->minFrame
- && sLoc.aPgno[sLoc.aHash[iKey]]==pgno ){
+ for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){
+ u32 iFrame = aHash[iKey] + iZero;
+ if( iFrame<=iLast && iFrame>=pWal->minFrame && aPgno[aHash[iKey]]==pgno ){
assert( iFrame>iRead || CORRUPT_DB );
iRead = iFrame;
}
@@ -61573,43 +60987,6 @@ SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
if( pHdr1->mxFrame>pHdr2->mxFrame ) return +1;
return 0;
}
-
-/*
-** The caller currently has a read transaction open on the database.
-** This function takes a SHARED lock on the CHECKPOINTER slot and then
-** checks if the snapshot passed as the second argument is still
-** available. If so, SQLITE_OK is returned.
-**
-** If the snapshot is not available, SQLITE_ERROR is returned. Or, if
-** the CHECKPOINTER lock cannot be obtained, SQLITE_BUSY. If any error
-** occurs (any value other than SQLITE_OK is returned), the CHECKPOINTER
-** lock is released before returning.
-*/
-SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){
- int rc;
- rc = walLockShared(pWal, WAL_CKPT_LOCK);
- if( rc==SQLITE_OK ){
- WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot;
- if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt))
- || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted
- ){
- rc = SQLITE_ERROR_SNAPSHOT;
- walUnlockShared(pWal, WAL_CKPT_LOCK);
- }
- }
- return rc;
-}
-
-/*
-** Release a lock obtained by an earlier successful call to
-** sqlite3WalSnapshotCheck().
-*/
-SQLITE_PRIVATE void sqlite3WalSnapshotUnlock(Wal *pWal){
- assert( pWal );
- walUnlockShared(pWal, WAL_CKPT_LOCK);
-}
-
-
#endif /* SQLITE_ENABLE_SNAPSHOT */
#ifdef SQLITE_ENABLE_ZIPVFS
@@ -65955,7 +65332,7 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){
** when A already has a read lock, we encourage A to give up and let B
** proceed.
*/
-SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){
+SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){
BtShared *pBt = p->pBt;
int rc = SQLITE_OK;
@@ -65971,12 +65348,6 @@ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVers
}
assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 );
- if( (p->db->flags & SQLITE_ResetDatabase)
- && sqlite3PagerIsreadonly(pBt->pPager)==0
- ){
- pBt->btsFlags &= ~BTS_READ_ONLY;
- }
-
/* Write transactions are not possible on a read-only database */
if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){
rc = SQLITE_READONLY;
@@ -66036,11 +65407,6 @@ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVers
rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db));
if( rc==SQLITE_OK ){
rc = newDatabase(pBt);
- }else if( rc==SQLITE_BUSY_SNAPSHOT && pBt->inTransaction==TRANS_NONE ){
- /* if there was no transaction opened when this function was
- ** called and SQLITE_BUSY_SNAPSHOT is returned, change the error
- ** code to SQLITE_BUSY. */
- rc = SQLITE_BUSY;
}
}
}
@@ -66092,18 +65458,14 @@ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVers
}
}
+
trans_begun:
- if( rc==SQLITE_OK ){
- if( pSchemaVersion ){
- *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]);
- }
- if( wrflag ){
- /* This call makes sure that the pager has the correct number of
- ** open savepoints. If the second parameter is greater than 0 and
- ** the sub-journal is not already open, then it will be opened here.
- */
- rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
- }
+ if( rc==SQLITE_OK && wrflag ){
+ /* This call makes sure that the pager has the correct number of
+ ** open savepoints. If the second parameter is greater than 0 and
+ ** the sub-journal is not already open, then it will be opened here.
+ */
+ rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
}
btreeIntegrity(p);
@@ -67850,23 +67212,6 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
return rc;
}
-/*
-** This function is a no-op if cursor pCur does not point to a valid row.
-** Otherwise, if pCur is valid, configure it so that the next call to
-** sqlite3BtreeNext() is a no-op.
-*/
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE void sqlite3BtreeSkipNext(BtCursor *pCur){
- /* We believe that the cursor must always be in the valid state when
- ** this routine is called, but the proof is difficult, so we add an
- ** ALWaYS() test just in case we are wrong. */
- if( ALWAYS(pCur->eState==CURSOR_VALID) ){
- pCur->eState = CURSOR_SKIPNEXT;
- pCur->skipNext = 1;
- }
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
/* Move the cursor to the last entry in the table. Return SQLITE_OK
** on success. Set *pRes to 0 if the cursor actually points to something
** or set *pRes to 1 if the table is empty.
@@ -68271,16 +67616,7 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
pPage = pCur->pPage;
idx = ++pCur->ix;
- if( !pPage->isInit ){
- /* The only known way for this to happen is for there to be a
- ** recursive SQL function that does a DELETE operation as part of a
- ** SELECT which deletes content out from under an active cursor
- ** in a corrupt database file where the table being DELETE-ed from
- ** has pages in common with the table being queried. See TH3
- ** module cov1/btree78.test testcase 220 (2018-06-08) for an
- ** example. */
- return SQLITE_CORRUPT_BKPT;
- }
+ assert( pPage->isInit );
/* If the database file is corrupt, it is possible for the value of idx
** to be invalid here. This can only occur if a second cursor modifies
@@ -71992,7 +71328,8 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
** Also check that the page number is in bounds.
*/
static int checkRef(IntegrityCk *pCheck, Pgno iPage){
- if( iPage>pCheck->nPage || iPage==0 ){
+ if( iPage==0 ) return 1;
+ if( iPage>pCheck->nPage ){
checkAppendMsg(pCheck, "invalid page number %d", iPage);
return 1;
}
@@ -72047,12 +71384,17 @@ static void checkList(
){
int i;
int expected = N;
- int nErrAtStart = pCheck->nErr;
- while( iPage!=0 && pCheck->mxErr ){
+ int iFirst = iPage;
+ while( N-- > 0 && pCheck->mxErr ){
DbPage *pOvflPage;
unsigned char *pOvflData;
+ if( iPage<1 ){
+ checkAppendMsg(pCheck,
+ "%d of %d pages missing from overflow list starting at %d",
+ N+1, expected, iFirst);
+ break;
+ }
if( checkRef(pCheck, iPage) ) break;
- N--;
if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){
checkAppendMsg(pCheck, "failed to get page %d", iPage);
break;
@@ -72096,12 +71438,10 @@ static void checkList(
#endif
iPage = get4byte(pOvflData);
sqlite3PagerUnref(pOvflPage);
- }
- if( N && nErrAtStart==pCheck->nErr ){
- checkAppendMsg(pCheck,
- "%s is %d but should be %d",
- isFreeList ? "size" : "overflow list length",
- expected-N, expected);
+
+ if( isFreeList && N<(iPage!=0) ){
+ checkAppendMsg(pCheck, "free-page count in header is too small");
+ }
}
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
@@ -72495,24 +71835,6 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
/* Check all the tables.
*/
-#ifndef SQLITE_OMIT_AUTOVACUUM
- if( pBt->autoVacuum ){
- int mx = 0;
- int mxInHdr;
- for(i=0; (int)i<nRoot; i++) if( mx<aRoot[i] ) mx = aRoot[i];
- mxInHdr = get4byte(&pBt->pPage1->aData[52]);
- if( mx!=mxInHdr ){
- checkAppendMsg(&sCheck,
- "max rootpage (%d) disagrees with header (%d)",
- mx, mxInHdr
- );
- }
- }else if( get4byte(&pBt->pPage1->aData[64])!=0 ){
- checkAppendMsg(&sCheck,
- "incremental_vacuum enabled with a max rootpage of zero"
- );
- }
-#endif
testcase( pBt->db->flags & SQLITE_CellSizeCk );
pBt->db->flags &= ~SQLITE_CellSizeCk;
for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
pBt->btsFlags &= ~BTS_NO_WAL;
if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL;
- rc = sqlite3BtreeBeginTrans(pBtree, 0, 0);
+ rc = sqlite3BtreeBeginTrans(pBtree, 0);
if( rc==SQLITE_OK ){
u8 *aData = pBt->pPage1->aData;
if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){
- rc = sqlite3BtreeBeginTrans(pBtree, 2, 0);
+ rc = sqlite3BtreeBeginTrans(pBtree, 2);
if( rc==SQLITE_OK ){
rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
if( rc==SQLITE_OK ){
@@ -73238,7 +72560,7 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
** before this function exits.
*/
if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){
- rc = sqlite3BtreeBeginTrans(p->pSrc, 0, 0);
+ rc = sqlite3BtreeBeginTrans(p->pSrc, 0);
bCloseTrans = 1;
}
@@ -73254,10 +72576,10 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
/* Lock the destination database, if it is not locked already. */
if( SQLITE_OK==rc && p->bDestLocked==0
- && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2,
- (int*)&p->iDestSchema))
+ && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2))
){
p->bDestLocked = 1;
+ sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema);
}
/* Do not allow backup if the destination database is in WAL mode
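The hunks above carry the sqlite3BtreeBeginTrans() signature change that runs through this file: the removed three-argument form returns the destination schema cookie through an out-parameter, while the restored two-argument form makes the caller fetch it afterwards with sqlite3BtreeGetMeta(). A minimal sketch of the two calling patterns follows; the function and variable names are illustrative, not part of either version of the file.

/* Sketch only: obtaining the schema cookie on each side of the
** sqlite3BtreeBeginTrans() change shown above. */
static int backupLockDestSketch(Btree *pDest, u32 *piSchema){
  int rc;
#if 0   /* removed (newer) interface: cookie returned via the 3rd argument */
  rc = sqlite3BtreeBeginTrans(pDest, 2, (int*)piSchema);
#else   /* restored (older) interface: begin, then read meta entry 1 */
  rc = sqlite3BtreeBeginTrans(pDest, 2);
  if( rc==SQLITE_OK ){
    sqlite3BtreeGetMeta(pDest, BTREE_SCHEMA_VERSION, piSchema);
  }
#endif
  return rc;
}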
@@ -73701,7 +73023,8 @@ SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){
if( p->flags & MEM_Null ){
/* Cannot be both MEM_Null and some other type */
- assert( (p->flags & (MEM_Int|MEM_Real|MEM_Str|MEM_Blob|MEM_Agg))==0 );
+ assert( (p->flags & (MEM_Int|MEM_Real|MEM_Str|MEM_Blob
+ |MEM_RowSet|MEM_Frame|MEM_Agg))==0 );
/* If MEM_Null is set, then either the value is a pure NULL (the usual
** case) or it is a pointer set using sqlite3_bind_pointer() or
@@ -73814,7 +73137,7 @@ SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
#ifndef SQLITE_OMIT_UTF16
int rc;
#endif
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE
|| desiredEnc==SQLITE_UTF16BE );
if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){
@@ -73847,7 +73170,7 @@ SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
*/
SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){
assert( sqlite3VdbeCheckMemInvariants(pMem) );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
testcase( pMem->db==0 );
/* If the bPreserve flag is set to true, then the memory cell must already
@@ -73935,7 +73258,7 @@ static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
*/
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){
if( ExpandBlob(pMem) ) return SQLITE_NOMEM;
if( pMem->szMalloc==0 || pMem->z!=pMem->zMalloc ){
@@ -73960,7 +73283,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
int nByte;
assert( pMem->flags & MEM_Zero );
assert( pMem->flags&MEM_Blob );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
/* Set nByte to the number of bytes required to store the expanded blob. */
@@ -74015,7 +73338,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
assert( !(fg&MEM_Zero) );
assert( !(fg&(MEM_Str|MEM_Blob)) );
assert( fg&(MEM_Int|MEM_Real) );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags&MEM_RowSet)==0 );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
@@ -74073,35 +73396,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
return ctx.isError;
}
-/*
-** Memory cell pAccum contains the context of an aggregate function.
-** This routine calls the xValue method for that function and stores
-** the results in memory cell pMem.
-**
-** SQLITE_ERROR is returned if xValue() reports an error. SQLITE_OK
-** otherwise.
-*/
-#ifndef SQLITE_OMIT_WINDOWFUNC
-SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem *pAccum, Mem *pOut, FuncDef *pFunc){
- sqlite3_context ctx;
- Mem t;
- assert( pFunc!=0 );
- assert( pFunc->xValue!=0 );
- assert( (pAccum->flags & MEM_Null)!=0 || pFunc==pAccum->u.pDef );
- assert( pAccum->db==0 || sqlite3_mutex_held(pAccum->db->mutex) );
- memset(&ctx, 0, sizeof(ctx));
- memset(&t, 0, sizeof(t));
- t.flags = MEM_Null;
- t.db = pAccum->db;
- sqlite3VdbeMemSetNull(pOut);
- ctx.pOut = pOut;
- ctx.pMem = pAccum;
- ctx.pFunc = pFunc;
- pFunc->xValue(&ctx);
- return ctx.isError;
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
/*
** If the memory cell contains a value that must be freed by
** invoking the external callback in Mem.xDel, then this routine
@@ -74120,8 +73414,15 @@ static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){
testcase( p->flags & MEM_Dyn );
}
if( p->flags&MEM_Dyn ){
+ assert( (p->flags&MEM_RowSet)==0 );
assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 );
p->xDel((void *)p->z);
+ }else if( p->flags&MEM_RowSet ){
+ sqlite3RowSetClear(p->u.pRowSet);
+ }else if( p->flags&MEM_Frame ){
+ VdbeFrame *pFrame = p->u.pFrame;
+ pFrame->pParent = pFrame->v->pDelFrame;
+ pFrame->v->pDelFrame = pFrame;
}
p->flags = MEM_Null;
}
@@ -74269,7 +73570,7 @@ SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){
SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
i64 ix;
assert( pMem->flags & MEM_Real );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
@@ -74296,7 +73597,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
*/
SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
pMem->u.i = sqlite3VdbeIntValue(pMem);
@@ -74514,36 +73815,26 @@ SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){
}
#endif
-#ifdef SQLITE_DEBUG
-/*
-** Return true if the Mem holds a RowSet object. This routine is intended
-** for use inside of assert() statements.
-*/
-SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem *pMem){
- return (pMem->flags&(MEM_Blob|MEM_Dyn))==(MEM_Blob|MEM_Dyn)
- && pMem->xDel==sqlite3RowSetDelete;
-}
-#endif
-
/*
** Delete any previous value and set the value of pMem to be an
** empty boolean index.
-**
-** Return SQLITE_OK on success and SQLITE_NOMEM if a memory allocation
-** error occurs.
*/
-SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem *pMem){
+SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){
sqlite3 *db = pMem->db;
- RowSet *p;
assert( db!=0 );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
sqlite3VdbeMemRelease(pMem);
- p = sqlite3RowSetInit(db);
- if( p==0 ) return SQLITE_NOMEM;
- pMem->z = (char*)p;
- pMem->flags = MEM_Blob|MEM_Dyn;
- pMem->xDel = sqlite3RowSetDelete;
- return SQLITE_OK;
+ pMem->zMalloc = sqlite3DbMallocRawNN(db, 64);
+ if( db->mallocFailed ){
+ pMem->flags = MEM_Null;
+ pMem->szMalloc = 0;
+ }else{
+ assert( pMem->zMalloc );
+ pMem->szMalloc = sqlite3DbMallocSize(db, pMem->zMalloc);
+ pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, pMem->szMalloc);
+ assert( pMem->u.pRowSet!=0 );
+ pMem->flags = MEM_RowSet;
+ }
}
/*
@@ -74576,21 +73867,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
Mem *pX;
for(i=0, pX=pVdbe->aMem; i<pVdbe->nMem; i++, pX++){
if( pX->pScopyFrom==pMem ){
- /* If pX is marked as a shallow copy of pMem, then verify that
- ** no significant changes have been made to pX since the OP_SCopy.
- ** A significant change would indicated a missed call to this
- ** function for pX. Minor changes, such as adding or removing a
- ** dual type, are allowed, as long as the underlying value is the
- ** same. */
- u16 mFlags = pMem->flags & pX->flags & pX->mScopyFlags;
- assert( (mFlags&MEM_Int)==0 || pMem->u.i==pX->u.i );
- assert( (mFlags&MEM_Real)==0 || pMem->u.r==pX->u.r );
- assert( (mFlags&MEM_Str)==0 || (pMem->n==pX->n && pMem->z==pX->z) );
- assert( (mFlags&MEM_Blob)==0 || sqlite3BlobCompare(pMem,pX)==0 );
-
- /* pMem is the register that is changing. But also mark pX as
- ** undefined so that we can quickly detect the shallow-copy error */
- pX->flags = MEM_Undefined;
+ pX->flags |= MEM_Undefined;
pX->pScopyFrom = 0;
}
}
@@ -74611,7 +73888,7 @@ static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){
sqlite3VdbeMemShallowCopy(pTo, pFrom, eType);
}
SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){
- assert( !sqlite3VdbeMemIsRowSet(pFrom) );
+ assert( (pFrom->flags & MEM_RowSet)==0 );
assert( pTo->db==pFrom->db );
if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; }
memcpy(pTo, pFrom, MEMCELLSIZE);
@@ -74629,7 +73906,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int sr
SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){
int rc = SQLITE_OK;
- assert( !sqlite3VdbeMemIsRowSet(pFrom) );
+ assert( (pFrom->flags & MEM_RowSet)==0 );
if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo);
memcpy(pTo, pFrom, MEMCELLSIZE);
pTo->flags &= ~MEM_Dyn;
@@ -74687,7 +73964,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
u16 flags = 0; /* New value for pMem->flags */
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
/* If z is a NULL pointer, set pMem to contain an SQL NULL. */
if( !z ){
@@ -74809,7 +74086,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
/* Note: the calls to BtreeKeyFetch() and DataFetch() below assert()
** that both the BtShared and database handle mutexes are held. */
- assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( (pMem->flags & MEM_RowSet)==0 );
zData = (char *)sqlite3BtreePayloadFetch(pCur, &available);
assert( zData!=0 );
@@ -74833,7 +74110,7 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
assert( pVal!=0 );
assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
- assert( !sqlite3VdbeMemIsRowSet(pVal) );
+ assert( (pVal->flags & MEM_RowSet)==0 );
assert( (pVal->flags & (MEM_Null))==0 );
if( pVal->flags & (MEM_Blob|MEM_Str) ){
if( ExpandBlob(pVal) ) return 0;
@@ -74876,7 +74153,7 @@ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
if( !pVal ) return 0;
assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
- assert( !sqlite3VdbeMemIsRowSet(pVal) );
+ assert( (pVal->flags & MEM_RowSet)==0 );
if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){
assert( sqlite3VdbeMemConsistentDualRep(pVal) );
return pVal->z;
@@ -75443,11 +74720,11 @@ SQLITE_PRIVATE int sqlite3Stat4Column(
int iCol, /* Column to extract */
sqlite3_value **ppVal /* OUT: Extracted value */
){
- u32 t = 0; /* a column type code */
+ u32 t; /* a column type code */
int nHdr; /* Size of the header in the record */
int iHdr; /* Next unread header byte */
int iField; /* Next unread data byte */
- int szField = 0; /* Size of the current data field */
+ int szField; /* Size of the current data field */
int i; /* Column index */
u8 *a = (u8*)pRec; /* Typecast byte array */
Mem *pMem = *ppVal; /* Write result into this Mem object */
@@ -75740,6 +75017,14 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
#endif
#ifdef SQLITE_DEBUG
if( p->db->flags & SQLITE_VdbeAddopTrace ){
+ int jj, kk;
+ Parse *pParse = p->pParse;
+ for(jj=kk=0; jj<pParse->nColCache; jj++){
+ struct yColCache *x = pParse->aColCache + jj;
+ printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn);
+ kk++;
+ }
+ if( kk ) printf("\n");
sqlite3VdbePrintOp(0, i, &p->aOp[i]);
test_addop_breakpoint();
}
@@ -75863,7 +75148,7 @@ SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse *pParse){
SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){
if( pParse->explain==2 ){
char *zMsg;
- Vdbe *v;
+ Vdbe *v = pParse->pVdbe;
va_list ap;
int iThis;
va_start(ap, zFmt);
@@ -75984,6 +75269,19 @@ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
}
}
+#ifdef SQLITE_COVERAGE_TEST
+/*
+** Return TRUE if and only if the label x has already been resolved.
+** Return FALSE (zero) if label x is still unresolved.
+**
+** This routine is only used inside of testcase() macros, and so it
+** only exists when measuring test coverage.
+*/
+SQLITE_PRIVATE int sqlite3VdbeLabelHasBeenResolved(Vdbe *v, int x){
+ return v->pParse->aLabel && v->pParse->aLabel[ADDR(x)]>=0;
+}
+#endif /* SQLITE_COVERAGE_TEST */
+
/*
** Mark the VDBE as one that can only be run one time.
*/
@@ -76215,6 +75513,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
break;
}
case OP_Next:
+ case OP_NextIfOpen:
case OP_SorterNext: {
pOp->p4.xAdvance = sqlite3BtreeNext;
pOp->p4type = P4_ADVANCE;
@@ -76224,7 +75523,8 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
assert( pOp->p2>=0 );
break;
}
- case OP_Prev: {
+ case OP_Prev:
+ case OP_PrevIfOpen: {
pOp->p4.xAdvance = sqlite3BtreePrevious;
pOp->p4type = P4_ADVANCE;
/* The code generator never codes any of these opcodes as a jump
@@ -77139,7 +76439,7 @@ SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){
/*
** Print a single opcode. This routine is used for debugging only.
*/
-SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, VdbeOp *pOp){
+SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){
char *zP4;
char zPtr[50];
char zCom[100];
@@ -77208,8 +76508,9 @@ static void releaseMemArray(Mem *p, int N){
*/
testcase( p->flags & MEM_Agg );
testcase( p->flags & MEM_Dyn );
- testcase( p->xDel==sqlite3VdbeFrameMemDel );
- if( p->flags&(MEM_Agg|MEM_Dyn) ){
+ testcase( p->flags & MEM_Frame );
+ testcase( p->flags & MEM_RowSet );
+ if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){
sqlite3VdbeMemRelease(p);
}else if( p->szMalloc ){
sqlite3DbFreeNN(db, p->zMalloc);
@@ -77221,35 +76522,6 @@ static void releaseMemArray(Mem *p, int N){
}
}
-#ifdef SQLITE_DEBUG
-/*
-** Verify that pFrame is a valid VdbeFrame pointer. Return true if it is
-** and false if something is wrong.
-**
-** This routine is intended for use inside of assert() statements only.
-*/
-SQLITE_PRIVATE int sqlite3VdbeFrameIsValid(VdbeFrame *pFrame){
- if( pFrame->iFrameMagic!=SQLITE_FRAME_MAGIC ) return 0;
- return 1;
-}
-#endif
-
-
-/*
-** This is a destructor on a Mem object (which is really an sqlite3_value)
-** that deletes the Frame object that is attached to it as a blob.
-**
-** This routine does not delete the Frame right away. It merely adds the
-** frame to a list of frames to be deleted when the Vdbe halts.
-*/
-SQLITE_PRIVATE void sqlite3VdbeFrameMemDel(void *pArg){
- VdbeFrame *pFrame = (VdbeFrame*)pArg;
- assert( sqlite3VdbeFrameIsValid(pFrame) );
- pFrame->pParent = pFrame->v->pDelFrame;
- pFrame->v->pDelFrame = pFrame;
-}
-
-
/*
** Delete a VdbeFrame object and its contents. VdbeFrame objects are
** allocated by the OP_Program opcode in sqlite3VdbeExec().
@@ -77258,7 +76530,6 @@ SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame *p){
int i;
Mem *aMem = VdbeFrameMem(p);
VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem];
- assert( sqlite3VdbeFrameIsValid(p) );
for(i=0; i<p->nChildCsr; i++){
sqlite3VdbeFreeCursor(p->v, apCsr[i]);
}
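The frame hunks above make the same kind of representation change that the RowSet hunks later in this diff make: the removed code hangs a VdbeFrame off a register as a dynamic blob whose destructor is sqlite3VdbeFrameMemDel(), while the restored code tags the register with MEM_Frame and keeps the pointer in Mem.u.pFrame. The sketch below is derived only from the hunks in this file; each helper assumes the corresponding build, so the two could not coexist in one translation unit.

/* Newer (removed) scheme: frame attached as a MEM_Blob|MEM_Dyn value whose
** destructor queues the frame for deletion when the VDBE halts. */
static int memHoldsFrameNewer(const Mem *pMem){
  return (pMem->flags & MEM_Dyn)!=0 && pMem->xDel==sqlite3VdbeFrameMemDel;
}

/* Older (restored) scheme: frame tagged with MEM_Frame, stored in u.pFrame. */
static int memHoldsFrameOlder(const Mem *pMem){
  return (pMem->flags & MEM_Frame)!=0;
}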
@@ -78555,7 +77826,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
*/
sqlite3VdbeHalt(p);
- /* If the VDBE has been run even partially, then transfer the error code
+ /* If the VDBE has be run even partially, then transfer the error code
** and error message from the VDBE into the main database structure. But
** if the VDBE has just been set to run but has not actually executed any
** instructions yet, leave the main database error information unchanged.
@@ -79467,7 +78738,7 @@ static int isAllZero(const char *z, int n){
** is less than, equal to, or greater than the second, respectively.
** If one blob is a prefix of the other, then the shorter is the lessor.
*/
-SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
+static SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
int c;
int n1 = pB1->n;
int n2 = pB2->n;
@@ -79537,7 +78808,7 @@ SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const C
f1 = pMem1->flags;
f2 = pMem2->flags;
combined_flags = f1|f2;
- assert( !sqlite3VdbeMemIsRowSet(pMem1) && !sqlite3VdbeMemIsRowSet(pMem2) );
+ assert( (combined_flags & MEM_RowSet)==0 );
/* If one value is NULL, it is less than the other. If both values
** are NULL, return 0.
@@ -79682,7 +78953,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
u32 idx1; /* Offset of first type in header */
int rc = 0; /* Return value */
Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */
- KeyInfo *pKeyInfo;
+ KeyInfo *pKeyInfo = pPKey2->pKeyInfo;
const unsigned char *aKey1 = (const unsigned char *)pKey1;
Mem mem1;
@@ -79777,7 +79048,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
if( (d1+mem1.n) > (unsigned)nKey1 ){
pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
return 0; /* Corruption */
- }else if( (pKeyInfo = pPKey2->pKeyInfo)->aColl[i] ){
+ }else if( pKeyInfo->aColl[i] ){
mem1.enc = pKeyInfo->enc;
mem1.db = pKeyInfo->db;
mem1.flags = MEM_Str;
@@ -79828,7 +79099,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
}
if( rc!=0 ){
- if( pPKey2->pKeyInfo->aSortOrder[i] ){
+ if( pKeyInfo->aSortOrder[i] ){
rc = -rc;
}
assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) );
@@ -79837,11 +79108,10 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
}
i++;
- if( i==pPKey2->nField ) break;
pRhs++;
d1 += sqlite3VdbeSerialTypeLen(serial_type);
idx1 += sqlite3VarintLen(serial_type);
- }while( idx1<(unsigned)szHdr1 && d1<=(unsigned)nKey1 );
+ }while( idx1<(unsigned)szHdr1 && i<pPKey2->nField && d1<=(unsigned)nKey1 );
/* No memory allocation is ever used on mem1. Prove this using
** the following assert(). If the assert() fails, it indicates a
@@ -79853,7 +79123,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
** value. */
assert( CORRUPT_DB
|| vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc)
- || pPKey2->pKeyInfo->db->mallocFailed
+ || pKeyInfo->db->mallocFailed
);
pPKey2->eqSeen = 1;
return pPKey2->default_rc;
@@ -80179,7 +79449,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
if( rc ){
return rc;
}
- *res = sqlite3VdbeRecordCompareWithSkip(m.n, m.z, pUnpacked, 0);
+ *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked);
sqlite3VdbeMemRelease(&m);
return SQLITE_OK;
}
@@ -80211,19 +79481,11 @@ SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){
** programs obsolete. Removing user-defined functions or collating
** sequences, or changing an authorization function are the types of
** things that make prepared statements obsolete.
-**
-** If iCode is 1, then expiration is advisory. The statement should
-** be reprepared before being restarted, but if it is already running
-** it is allowed to run to completion.
-**
-** Internally, this function just sets the Vdbe.expired flag on all
-** prepared statements. The flag is set to 1 for an immediate expiration
-** and set to 2 for an advisory expiration.
*/
-SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db, int iCode){
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db){
Vdbe *p;
for(p = db->pVdbe; p; p=p->pNext){
- p->expired = iCode+1;
+ p->expired = 1;
}
}
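The removed comment above describes an advisory expiration mode that the restored one-argument sqlite3ExpirePreparedStatements() no longer distinguishes. A small sketch of the two interfaces, using only the behavior documented in the hunk above:

#if 0  /* removed (newer) interface: Vdbe.expired becomes iCode+1 */
  sqlite3ExpirePreparedStatements(db, 0);  /* immediate expiration            */
  sqlite3ExpirePreparedStatements(db, 1);  /* advisory: running statements may
                                           ** still run to completion          */
#else  /* restored (older) interface: every statement is simply marked expired */
  sqlite3ExpirePreparedStatements(db);
#endif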
@@ -81383,7 +80645,7 @@ static const Mem *columnNullValue(void){
/* .xDel = */ (void(*)(void*))0,
#ifdef SQLITE_DEBUG
/* .pScopyFrom = */ (Mem*)0,
- /* .mScopyFlags= */ 0,
+ /* .pFiller = */ (void*)0,
#endif
};
return &nullMem;
@@ -82693,56 +81955,32 @@ SQLITE_API int sqlite3_found_count = 0;
** feature is used for test suite validation only and does not appear an
** production builds.
**
-** M is an integer between 2 and 4. 2 indicates a ordinary two-way
-** branch (I=0 means fall through and I=1 means taken). 3 indicates
-** a 3-way branch where the third way is when one of the operands is
-** NULL. 4 indicates the OP_Jump instruction which has three destinations
-** depending on whether the first operand is less than, equal to, or greater
-** than the second.
+** M is an integer, 2 or 3, that indices how many different ways the
+** branch can go. It is usually 2. "I" is the direction the branch
+** goes. 0 means falls through. 1 means branch is taken. 2 means the
+** second alternative branch is taken.
**
** iSrcLine is the source code line (from the __LINE__ macro) that
-** generated the VDBE instruction combined with flag bits. The source
-** code line number is in the lower 24 bits of iSrcLine and the upper
-** 8 bytes are flags. The lower three bits of the flags indicate
-** values for I that should never occur. For example, if the branch is
-** always taken, the flags should be 0x05 since the fall-through and
-** alternate branch are never taken. If a branch is never taken then
-** flags should be 0x06 since only the fall-through approach is allowed.
-**
-** Bit 0x04 of the flags indicates an OP_Jump opcode that is only
-** interested in equal or not-equal. In other words, I==0 and I==2
-** should be treated the same.
-**
-** Since only a line number is retained, not the filename, this macro
-** only works for amalgamation builds. But that is ok, since these macros
-** should be no-ops except for special builds used to measure test coverage.
+** generated the VDBE instruction. This instrumentation assumes that all
+** source code is in a single file (the amalgamation). Special values 1
+** and 2 for the iSrcLine parameter mean that this particular branch is
+** always taken or never taken, respectively.
*/
#if !defined(SQLITE_VDBE_COVERAGE)
# define VdbeBranchTaken(I,M)
#else
# define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M)
- static void vdbeTakeBranch(u32 iSrcLine, u8 I, u8 M){
- u8 mNever;
- assert( I<=2 ); /* 0: fall through, 1: taken, 2: alternate taken */
- assert( M<=4 ); /* 2: two-way branch, 3: three-way branch, 4: OP_Jump */
- assert( I<M );
- I = 1<<I;
- mNever = iSrcLine >> 24;
- assert( (I & mNever)==0 );
- if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/
- I |= mNever;
- if( M==2 ) I |= 0x04;
- if( M==4 ){
- I |= 0x08;
- if( (mNever&0x08)!=0 && (I&0x05)!=0) I |= 0x05; /*NO_TEST*/
+ static void vdbeTakeBranch(int iSrcLine, u8 I, u8 M){
+ if( iSrcLine<=2 && ALWAYS(iSrcLine>0) ){
+ M = iSrcLine;
+ /* Assert the truth of VdbeCoverageAlwaysTaken() and
+ ** VdbeCoverageNeverTaken() */
+ assert( (M & I)==I );
+ }else{
+ if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/
+ sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg,
+ iSrcLine,I,M);
}
- sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg,
- iSrcLine&0xffffff, I, M);
}
#endif
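The comment rewritten in the hunk above changes how iSrcLine is interpreted by the branch-coverage hook. A sketch of the two encodings, using only what the surrounding comments state (the line number 1234 is illustrative):

/* Newer (removed): low 24 bits carry __LINE__, the upper 8 bits are flags
** naming directions the branch can never go; 0x06 marks a never-taken branch. */
u32 iSrcLineNew = (0x06u<<24) | 1234u;

/* Older (restored): the whole value is the line number, except that the
** special values 1 and 2 mean "always taken" and "never taken". */
int iSrcLineOld = 2;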
@@ -83073,7 +82311,7 @@ static void memTracePrint(Mem *p){
}else if( p->flags & MEM_Real ){
printf(" r:%g", p->u.r);
#endif
- }else if( sqlite3VdbeMemIsRowSet(p) ){
+ }else if( p->flags & MEM_RowSet ){
printf(" (rowset)");
}else{
char zBuf[200];
@@ -83827,9 +83065,6 @@ case OP_Null: { /* out2 */
assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null;
pOut->n = 0;
-#ifdef SQLITE_DEBUG
- pOut->uTemp = 0;
-#endif
while( cnt>0 ){
pOut++;
memAboutToChange(p, pOut);
@@ -83951,7 +83186,6 @@ case OP_Copy: {
pOut = &aMem[pOp->p2];
assert( pOut!=pIn1 );
while( 1 ){
- memAboutToChange(p, pOut);
sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
Deephemeralize(pOut);
#ifdef SQLITE_DEBUG
@@ -83984,8 +83218,7 @@ case OP_SCopy: { /* out2 */
assert( pOut!=pIn1 );
sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
#ifdef SQLITE_DEBUG
- pOut->pScopyFrom = pIn1;
- pOut->mScopyFlags = pIn1->flags;
+ if( pOut->pScopyFrom==0 ) pOut->pScopyFrom = pIn1;
#endif
break;
}
@@ -84619,12 +83852,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
if( (flags1 | flags3)&MEM_Str ){
if( (flags1 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn1,0);
- assert( flags3==pIn3->flags );
- /* testcase( flags3!=pIn3->flags );
- ** this used to be possible with pIn1==pIn3, but not since
- ** the column cache was removed. The following assignment
- ** is essentially a no-op. But, it provides defense-in-depth
- ** in case our analysis is incorrect, so it is left in. */
+ testcase( flags3!=pIn3->flags ); /* Possible if pIn1==pIn3 */
flags3 = pIn3->flags;
}
if( (flags3 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
@@ -84838,11 +84066,11 @@ case OP_Compare: {
*/
case OP_Jump: { /* jump */
if( iCompare<0 ){
- VdbeBranchTaken(0,4); pOp = &aOp[pOp->p1 - 1];
+ VdbeBranchTaken(0,3); pOp = &aOp[pOp->p1 - 1];
}else if( iCompare==0 ){
- VdbeBranchTaken(1,4); pOp = &aOp[pOp->p2 - 1];
+ VdbeBranchTaken(1,3); pOp = &aOp[pOp->p2 - 1];
}else{
- VdbeBranchTaken(2,4); pOp = &aOp[pOp->p3 - 1];
+ VdbeBranchTaken(2,3); pOp = &aOp[pOp->p3 - 1];
}
break;
}
@@ -84939,7 +84167,7 @@ case OP_Not: { /* same as TK_NOT, in1, out2 */
}
/* Opcode: BitNot P1 P2 * * *
-** Synopsis: r[P2]= ~r[P1]
+** Synopsis: r[P1]= ~r[P1]
**
** Interpret the content of register P1 as an integer. Store the
** ones-complement of the P1 value into register P2. If P1 holds
@@ -85754,7 +84982,7 @@ case OP_Savepoint: {
}
}
if( isSchemaChange ){
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
sqlite3ResetAllSchemasOfConnection(db);
db->mDbFlags |= DBFLAG_SchemaChange;
}
@@ -85896,7 +85124,8 @@ case OP_AutoCommit: {
*/
case OP_Transaction: {
Btree *pBt;
- int iMeta = 0;
+ int iMeta;
+ int iGen;
assert( p->bIsReader );
assert( p->readOnly==0 || pOp->p2==0 );
@@ -85909,7 +85138,7 @@ case OP_Transaction: {
pBt = db->aDb[pOp->p1].pBt;
if( pBt ){
- rc = sqlite3BtreeBeginTrans(pBt, pOp->p2, &iMeta);
+ rc = sqlite3BtreeBeginTrans(pBt, pOp->p2);
testcase( rc==SQLITE_BUSY_SNAPSHOT );
testcase( rc==SQLITE_BUSY_RECOVERY );
if( rc!=SQLITE_OK ){
@@ -85942,17 +85171,19 @@ case OP_Transaction: {
p->nStmtDefCons = db->nDeferredCons;
p->nStmtDefImmCons = db->nDeferredImmCons;
}
- }
- assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
- if( pOp->p5
- && (iMeta!=pOp->p3
- || db->aDb[pOp->p1].pSchema->iGeneration!=pOp->p4.i)
- ){
- /*
+
+ /* Gather the schema version number for checking:
** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
** version is checked to ensure that the schema has not changed since the
** SQL statement was prepared.
*/
+ sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
+ iGen = db->aDb[pOp->p1].pSchema->iGeneration;
+ }else{
+ iGen = iMeta = 0;
+ }
+ assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
+ if( pOp->p5 && (iMeta!=pOp->p3 || iGen!=pOp->p4.i) ){
sqlite3DbFree(db, p->zErrMsg);
p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
/* If the schema-cookie from the database file matches the cookie
@@ -86043,7 +85274,7 @@ case OP_SetCookie: {
if( pOp->p1==1 ){
/* Invalidate all prepared statements whenever the TEMP database
** schema is changed. Ticket #1644 */
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
p->expired = 0;
}
if( rc ) goto abort_due_to_error;
@@ -86061,78 +85292,59 @@ case OP_SetCookie: {
** values need not be contiguous but all P1 values should be small integers.
** It is an error for P1 to be negative.
**
-** Allowed P5 bits:
-** <ul>
-** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
-** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
-** of OP_SeekLE/OP_IdxGT)
-** </ul>
-**
+** If P5!=0 then use the content of register P2 as the root page, not
+** the value of P2 itself.
+**
+** There will be a read lock on the database whenever there is an
+** open cursor. If the database was unlocked prior to this instruction
+** then a read lock is acquired as part of this instruction. A read
+** lock allows other processes to read the database but prohibits
+** any other process from modifying the database. The read lock is
+** released when all cursors are closed. If this instruction attempts
+** to get a read lock but fails, the script terminates with an
+** SQLITE_BUSY error code.
**
** The P4 value may be either an integer (P4_INT32) or a pointer to
** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
-** object, then table being opened must be an [index b-tree] where the
-** KeyInfo object defines the content and collating
-** sequence of that index b-tree. Otherwise, if P4 is an integer
-** value, then the table being opened must be a [table b-tree] with a
-** number of columns no less than the value of P4.
+** structure, then said structure defines the content and collating
+** sequence of the index being opened. Otherwise, if P4 is an integer
+** value, it is set to the number of columns in the table.
**
** See also: OpenWrite, ReopenIdx
*/
/* Opcode: ReopenIdx P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
-** The ReopenIdx opcode works like OP_OpenRead except that it first
-** checks to see if the cursor on P1 is already open on the same
-** b-tree and if it is this opcode becomes a no-op. In other words,
+** The ReopenIdx opcode works exactly like ReadOpen except that it first
+** checks to see if the cursor on P1 is already open with a root page
+** number of P2 and if it is this opcode becomes a no-op. In other words,
** if the cursor is already open, do not reopen it.
**
-** The ReopenIdx opcode may only be used with P5==0 or P5==OPFLAG_SEEKEQ
-** and with P4 being a P4_KEYINFO object. Furthermore, the P3 value must
-** be the same as every other ReopenIdx or OpenRead for the same cursor
-** number.
+** The ReopenIdx opcode may only be used with P5==0 and with P4 being
+** a P4_KEYINFO object. Furthermore, the P3 value must be the same as
+** every other ReopenIdx or OpenRead for the same cursor number.
**
-** Allowed P5 bits:
-** <ul>
-** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
-** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
-** of OP_SeekLE/OP_IdxGT)
-** </ul>
-**
-** See also: OP_OpenRead, OP_OpenWrite
+** See the OpenRead opcode documentation for additional information.
*/
/* Opcode: OpenWrite P1 P2 P3 P4 P5
** Synopsis: root=P2 iDb=P3
**
** Open a read/write cursor named P1 on the table or index whose root
-** page is P2 (or whose root page is held in register P2 if the
-** OPFLAG_P2ISREG bit is set in P5 - see below).
+** page is P2. Or if P5!=0 use the content of register P2 to find the
+** root page.
**
** The P4 value may be either an integer (P4_INT32) or a pointer to
** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
-** object, then table being opened must be an [index b-tree] where the
-** KeyInfo object defines the content and collating
-** sequence of that index b-tree. Otherwise, if P4 is an integer
-** value, then the table being opened must be a [table b-tree] with a
-** number of columns no less than the value of P4.
+** structure, then said structure defines the content and collating
+** sequence of the index being opened. Otherwise, if P4 is an integer
+** value, it is set to the number of columns in the table, or to the
+** largest index of any column of the table that is actually used.
**
-** Allowed P5 bits:
-** <ul>
-** <li> <b>0x02 OPFLAG_SEEKEQ</b>: This cursor will only be used for
-** equality lookups (implemented as a pair of opcodes OP_SeekGE/OP_IdxGT
-** of OP_SeekLE/OP_IdxGT)
-** <li> <b>0x08 OPFLAG_FORDELETE</b>: This cursor is used only to seek
-** and subsequently delete entries in an index btree. This is a
-** hint to the storage engine that the storage engine is allowed to
-** ignore. The hint is not used by the official SQLite b*tree storage
-** engine, but is used by COMDB2.
-** <li> <b>0x10 OPFLAG_P2ISREG</b>: Use the content of register P2
-** as the root page, not the value of P2 itself.
-** </ul>
-**
+** This instruction works just like OpenRead except that it opens the cursor
+** in read/write mode. For a given table, there can be one or more read-only
+** cursors or a single read/write cursor but not both.
**
-** This instruction works like OpenRead except that it opens the cursor
-** in read/write mode.
-**
-** See also: OP_OpenRead, OP_ReopenIdx
+** See also OpenRead.
*/
case OP_ReopenIdx: {
int nField;
@@ -86161,7 +85373,7 @@ case OP_OpenWrite:
assert( pOp->opcode==OP_OpenRead || pOp->opcode==OP_ReopenIdx
|| p->readOnly==0 );
- if( p->expired==1 ){
+ if( p->expired ){
rc = SQLITE_ABORT_ROLLBACK;
goto abort_due_to_error;
}
@@ -86188,7 +85400,6 @@ case OP_OpenWrite:
if( pOp->p5 & OPFLAG_P2ISREG ){
assert( p2>0 );
assert( p2<=(p->nMem+1 - p->nCursor) );
- assert( pOp->opcode==OP_OpenWrite );
pIn2 = &aMem[p2];
assert( memIsValid(pIn2) );
assert( (pIn2->flags & MEM_Int)!=0 );
@@ -86317,7 +85528,7 @@ case OP_OpenEphemeral: {
rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx,
BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags);
if( rc==SQLITE_OK ){
- rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1, 0);
+ rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1);
}
if( rc==SQLITE_OK ){
/* If a transient index is required, create it by calling
@@ -86544,10 +85755,10 @@ case OP_ColumnsUsed: {
**
** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
*/
-case OP_SeekLT: /* jump, in3, group */
-case OP_SeekLE: /* jump, in3, group */
-case OP_SeekGE: /* jump, in3, group */
-case OP_SeekGT: { /* jump, in3, group */
+case OP_SeekLT: /* jump, in3 */
+case OP_SeekLE: /* jump, in3 */
+case OP_SeekGE: /* jump, in3 */
+case OP_SeekGT: { /* jump, in3 */
int res; /* Comparison result */
int oc; /* Opcode */
VdbeCursor *pC; /* The cursor to seek */
@@ -86725,25 +85936,6 @@ seek_not_found:
break;
}
-/* Opcode: SeekHit P1 P2 * * *
-** Synopsis: seekHit=P2
-**
-** Set the seekHit flag on cursor P1 to the value in P2.
-** The seekHit flag is used by the IfNoHope opcode.
-**
-** P1 must be a valid b-tree cursor. P2 must be a boolean value,
-** either 0 or 1.
-*/
-case OP_SeekHit: {
- VdbeCursor *pC;
- assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- assert( pOp->p2==0 || pOp->p2==1 );
- pC->seekHit = pOp->p2 & 1;
- break;
-}
-
/* Opcode: Found P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
**
@@ -86778,34 +85970,7 @@ case OP_SeekHit: {
** advanced in either direction. In other words, the Next and Prev
** opcodes do not work after this operation.
**
-** See also: Found, NotExists, NoConflict, IfNoHope
-*/
-/* Opcode: IfNoHope P1 P2 P3 P4 *
-** Synopsis: key=r[P3@P4]
-**
-** Register P3 is the first of P4 registers that form an unpacked
-** record.
-**
-** Cursor P1 is on an index btree. If the seekHit flag is set on P1, then
-** this opcode is a no-op. But if the seekHit flag of P1 is clear, then
-** check to see if there is any entry in P1 that matches the
-** prefix identified by P3 and P4. If no entry matches the prefix,
-** jump to P2. Otherwise fall through.
-**
-** This opcode behaves like OP_NotFound if the seekHit
-** flag is clear and it behaves like OP_Noop if the seekHit flag is set.
-**
-** This opcode is used in IN clause processing for a multi-column key.
-** If an IN clause is attached to an element of the key other than the
-** left-most element, and if there are no matches on the most recent
-** seek over the whole key, then it might be that one of the key element
-** to the left is prohibiting a match, and hence there is "no hope" of
-** any match regardless of how many IN clause elements are checked.
-** In such a case, we abandon the IN clause search early, using this
-** opcode. The opcode name comes from the fact that the
-** jump is taken if there is "no hope" of achieving a match.
-**
-** See also: NotFound, SeekHit
+** See also: Found, NotExists, NoConflict
*/
/* Opcode: NoConflict P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
@@ -86830,14 +85995,6 @@ case OP_SeekHit: {
**
** See also: NotFound, Found, NotExists
*/
-case OP_IfNoHope: { /* jump, in3 */
- VdbeCursor *pC;
- assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
- if( pC->seekHit ) break;
- /* Fall through into OP_NotFound */
-}
case OP_NoConflict: /* jump, in3 */
case OP_NotFound: /* jump, in3 */
case OP_Found: { /* jump, in3 */
@@ -86975,26 +86132,18 @@ case OP_SeekRowid: { /* jump, in3 */
pIn3 = &aMem[pOp->p3];
if( (pIn3->flags & MEM_Int)==0 ){
- /* Make sure pIn3->u.i contains a valid integer representation of
- ** the key value, but do not change the datatype of the register, as
- ** other parts of the perpared statement might be depending on the
- ** current datatype. */
- u16 origFlags = pIn3->flags;
- int isNotInt;
applyAffinity(pIn3, SQLITE_AFF_NUMERIC, encoding);
- isNotInt = (pIn3->flags & MEM_Int)==0;
- pIn3->flags = origFlags;
- if( isNotInt ) goto jump_to_p2;
+ if( (pIn3->flags & MEM_Int)==0 ) goto jump_to_p2;
}
/* Fall through into OP_NotExists */
case OP_NotExists: /* jump, in3 */
pIn3 = &aMem[pOp->p3];
- assert( (pIn3->flags & MEM_Int)!=0 || pOp->opcode==OP_SeekRowid );
+ assert( pIn3->flags & MEM_Int );
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
assert( pC!=0 );
#ifdef SQLITE_DEBUG
- pC->seekOp = OP_SeekRowid;
+ pC->seekOp = 0;
#endif
assert( pC->isTable );
assert( pC->eCurType==CURTYPE_BTREE );
@@ -87648,9 +86797,6 @@ case OP_NullRow: {
assert( pC->uc.pCursor!=0 );
sqlite3BtreeClearCursor(pC->uc.pCursor);
}
-#ifdef SQLITE_DEBUG
- if( pC->seekOp==0 ) pC->seekOp = OP_NullRow;
-#endif
break;
}
@@ -87769,7 +86915,7 @@ case OP_Sort: { /* jump */
p->aCounter[SQLITE_STMTSTATUS_SORT]++;
/* Fall through into OP_Rewind */
}
-/* Opcode: Rewind P1 P2 * * P5
+/* Opcode: Rewind P1 P2 * * *
**
** The next use of the Rowid or Column or Next instruction for P1
** will refer to the first entry in the database table or index.
@@ -87777,10 +86923,6 @@ case OP_Sort: { /* jump */
** If the table or index is not empty, fall through to the following
** instruction.
**
-** If P5 is non-zero and the table is not empty, then the "skip-next"
-** flag is set on the cursor so that the next OP_Next instruction
-** executed on it is a no-op.
-**
** This opcode leaves the cursor configured to move in forward order,
** from the beginning toward the end. In other words, the cursor is
** configured to use Next, not Prev.
@@ -87805,9 +86947,6 @@ case OP_Rewind: { /* jump */
pCrsr = pC->uc.pCursor;
assert( pCrsr );
rc = sqlite3BtreeFirst(pCrsr, &res);
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pOp->p5 ) sqlite3BtreeSkipNext(pCrsr);
-#endif
pC->deferredMoveto = 0;
pC->cacheStatus = CACHE_STALE;
}
@@ -87844,7 +86983,12 @@ case OP_Rewind: { /* jump */
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
**
-** See also: Prev
+** See also: Prev, NextIfOpen
+*/
+/* Opcode: NextIfOpen P1 P2 P3 P4 P5
+**
+** This opcode works just like Next except that if cursor P1 is not
+** open it behaves a no-op.
*/
/* Opcode: Prev P1 P2 P3 P4 P5
**
@@ -87872,6 +87016,11 @@ case OP_Rewind: { /* jump */
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
*/
+/* Opcode: PrevIfOpen P1 P2 P3 P4 P5
+**
+** This opcode works just like Prev except that if cursor P1 is not
+** open it behaves a no-op.
+*/
/* Opcode: SorterNext P1 P2 * * P5
**
** This opcode works just like OP_Next except that P1 must be a
@@ -87886,6 +87035,10 @@ case OP_SorterNext: { /* jump */
assert( isSorter(pC) );
rc = sqlite3VdbeSorterNext(db, pC);
goto next_tail;
+case OP_PrevIfOpen: /* jump */
+case OP_NextIfOpen: /* jump */
+ if( p->apCsr[pOp->p1]==0 ) break;
+ /* Fall through */
case OP_Prev: /* jump */
case OP_Next: /* jump */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
@@ -87896,17 +87049,17 @@ case OP_Next: /* jump */
assert( pC->eCurType==CURTYPE_BTREE );
assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext );
assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious );
+ assert( pOp->opcode!=OP_NextIfOpen || pOp->p4.xAdvance==sqlite3BtreeNext );
+ assert( pOp->opcode!=OP_PrevIfOpen || pOp->p4.xAdvance==sqlite3BtreePrevious);
- /* The Next opcode is only used after SeekGT, SeekGE, Rewind, and Found.
+ /* The Next opcode is only used after SeekGT, SeekGE, and Rewind.
** The Prev opcode is only used after SeekLT, SeekLE, and Last. */
- assert( pOp->opcode!=OP_Next
+ assert( pOp->opcode!=OP_Next || pOp->opcode!=OP_NextIfOpen
|| pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE
- || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found
- || pC->seekOp==OP_NullRow);
- assert( pOp->opcode!=OP_Prev
+ || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found);
+ assert( pOp->opcode!=OP_Prev || pOp->opcode!=OP_PrevIfOpen
|| pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE
- || pC->seekOp==OP_Last
- || pC->seekOp==OP_NullRow);
+ || pC->seekOp==OP_Last );
rc = pOp->p4.xAdvance(pC->uc.pCursor, pOp->p3);
next_tail:
@@ -88189,13 +87342,7 @@ case OP_IdxGE: { /* jump */
}
r.aMem = &aMem[pOp->p3];
#ifdef SQLITE_DEBUG
- {
- int i;
- for(i=0; i<r.nField; i++){
- assert( memIsValid(&r.aMem[i]) );
- REGISTER_TRACE(pOp->p3+i, &aMem[pOp->p3+i]);
- }
- }
+ { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
iDb = pOp->p1;
assert( iDb>=0 && iDb<db->nDb );
assert( DbHasProperty(db, iDb, DB_SchemaLoaded) );
-
-#ifndef SQLITE_OMIT_ALTERTABLE
- if( pOp->p4.z==0 ){
- sqlite3SchemaClear(db->aDb[iDb].pSchema);
- db->mDbFlags &= ~DBFLAG_SchemaKnownOk;
- rc = sqlite3InitOne(db, iDb, &p->zErrMsg, INITFLAG_AlterTable);
- db->mDbFlags |= DBFLAG_SchemaChange;
- p->expired = 0;
- }else
-#endif
- {
+ /* Used to be a conditional */ {
zMaster = MASTER_NAME;
initData.db = db;
initData.iDb = pOp->p1;
initData.pzErrMsg = &p->zErrMsg;
- initData.mInitFlags = 0;
zSql = sqlite3MPrintf(db,
"SELECT name, rootpage, sql FROM '%q'.%s WHERE %s ORDER BY rowid",
db->aDb[iDb].zDbSName, zMaster, pOp->p4.z);
@@ -88575,11 +87710,11 @@ case OP_RowSetAdd: { /* in1, in2 */
pIn1 = &aMem[pOp->p1];
pIn2 = &aMem[pOp->p2];
assert( (pIn2->flags & MEM_Int)!=0 );
- if( (pIn1->flags & MEM_Blob)==0 ){
- if( sqlite3VdbeMemSetRowSet(pIn1) ) goto no_mem;
+ if( (pIn1->flags & MEM_RowSet)==0 ){
+ sqlite3VdbeMemSetRowSet(pIn1);
+ if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem;
}
- assert( sqlite3VdbeMemIsRowSet(pIn1) );
- sqlite3RowSetInsert((RowSet*)pIn1->z, pIn2->u.i);
+ sqlite3RowSetInsert(pIn1->u.pRowSet, pIn2->u.i);
break;
}
@@ -88595,9 +87730,8 @@ case OP_RowSetRead: { /* jump, in1, out3 */
i64 val;
pIn1 = &aMem[pOp->p1];
- assert( (pIn1->flags & MEM_Blob)==0 || sqlite3VdbeMemIsRowSet(pIn1) );
- if( (pIn1->flags & MEM_Blob)==0
- || sqlite3RowSetNext((RowSet*)pIn1->z, &val)==0
+ if( (pIn1->flags & MEM_RowSet)==0
+ || sqlite3RowSetNext(pIn1->u.pRowSet, &val)==0
){
/* The boolean index is empty */
sqlite3VdbeMemSetNull(pIn1);
@@ -88646,19 +87780,20 @@ case OP_RowSetTest: { /* jump, in1, in3 */
/* If there is anything other than a rowset object in memory cell P1,
** delete it now and initialize P1 with an empty rowset
*/
- if( (pIn1->flags & MEM_Blob)==0 ){
- if( sqlite3VdbeMemSetRowSet(pIn1) ) goto no_mem;
+ if( (pIn1->flags & MEM_RowSet)==0 ){
+ sqlite3VdbeMemSetRowSet(pIn1);
+ if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem;
}
- assert( sqlite3VdbeMemIsRowSet(pIn1) );
+
assert( pOp->p4type==P4_INT32 );
assert( iSet==-1 || iSet>=0 );
if( iSet ){
- exists = sqlite3RowSetTest((RowSet*)pIn1->z, iSet, pIn3->u.i);
+ exists = sqlite3RowSetTest(pIn1->u.pRowSet, iSet, pIn3->u.i);
VdbeBranchTaken(exists!=0,2);
if( exists ) goto jump_to_p2;
}
if( iSet>=0 ){
- sqlite3RowSetInsert((RowSet*)pIn1->z, pIn3->u.i);
+ sqlite3RowSetInsert(pIn1->u.pRowSet, pIn3->u.i);
}
break;
}
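The RowSet hunks above swap how a register holding a boolean index is tagged. A sketch of the two checks, taken directly from the code being removed and added in this file rather than from any new helper; as with the frame sketch earlier, each side assumes the corresponding build.

/* Newer (removed): a RowSet lives in Mem.z as a dynamic blob and is
** recognized by its destructor. */
static int memIsRowSetNewer(const Mem *pMem){
  return (pMem->flags & (MEM_Blob|MEM_Dyn))==(MEM_Blob|MEM_Dyn)
      && pMem->xDel==sqlite3RowSetDelete;
}

/* Older (restored): a dedicated MEM_RowSet flag is set and the pointer is
** kept in Mem.u.pRowSet. */
static int memIsRowSetOlder(const Mem *pMem){
  return (pMem->flags & MEM_RowSet)!=0;
}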
@@ -88722,7 +87857,7 @@ case OP_Program: { /* jump */
** of the current program, and the memory required at runtime to execute
** the trigger program. If this trigger has been fired before, then pRt
** is already allocated. Otherwise, it must be initialized. */
- if( (pRt->flags&MEM_Blob)==0 ){
+ if( (pRt->flags&MEM_Frame)==0 ){
/* SubProgram.nMem is set to the number of memory cells used by the
** program stored in SubProgram.aOp. As well as these, one memory
** cell is required for each cursor used by the program. Set local
@@ -88740,10 +87875,8 @@ case OP_Program: { /* jump */
goto no_mem;
}
sqlite3VdbeMemRelease(pRt);
- pRt->flags = MEM_Blob|MEM_Dyn;
- pRt->z = (char*)pFrame;
- pRt->n = nByte;
- pRt->xDel = sqlite3VdbeFrameMemDel;
+ pRt->flags = MEM_Frame;
+ pRt->u.pFrame = pFrame;
pFrame->v = p;
pFrame->nChildMem = nMem;
@@ -88759,9 +87892,6 @@ case OP_Program: { /* jump */
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
pFrame->anExec = p->anExec;
#endif
-#ifdef SQLITE_DEBUG
- pFrame->iFrameMagic = SQLITE_FRAME_MAGIC;
-#endif
pEnd = &VdbeFrameMem(pFrame)[pFrame->nChildMem];
for(pMem=VdbeFrameMem(pFrame); pMem!=pEnd; pMem++){
@@ -88769,8 +87899,7 @@ case OP_Program: { /* jump */
pMem->db = db;
}
}else{
- pFrame = (VdbeFrame*)pRt->z;
- assert( pRt->xDel==sqlite3VdbeFrameMemDel );
+ pFrame = pRt->u.pFrame;
assert( pProgram->nMem+pProgram->nCsr==pFrame->nChildMem
|| (pProgram->nCsr==0 && pProgram->nMem+1==pFrame->nChildMem) );
assert( pProgram->nCsr==pFrame->nChildCsr );
@@ -88999,35 +88128,24 @@ case OP_DecrJumpZero: { /* jump, in1 */
}
+/* Opcode: AggStep0 * P2 P3 P4 P5
+** Synopsis: accum=r[P3] step(r[P2@P5])
+**
+** Execute the step function for an aggregate. The
+** function has P5 arguments. P4 is a pointer to the FuncDef
+** structure that specifies the function. Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+*/
/* Opcode: AggStep * P2 P3 P4 P5
** Synopsis: accum=r[P3] step(r[P2@P5])
**
-** Execute the xStep function for an aggregate.
-** The function has P5 arguments. P4 is a pointer to the
-** FuncDef structure that specifies the function. Register P3 is the
-** accumulator.
-**
-** The P5 arguments are taken from register P2 and its
-** successors.
-*/
-/* Opcode: AggInverse * P2 P3 P4 P5
-** Synopsis: accum=r[P3] inverse(r[P2@P5])
-**
-** Execute the xInverse function for an aggregate.
-** The function has P5 arguments. P4 is a pointer to the
-** FuncDef structure that specifies the function. Register P3 is the
-** accumulator.
-**
-** The P5 arguments are taken from register P2 and its
-** successors.
-*/
-/* Opcode: AggStep1 P1 P2 P3 P4 P5
-** Synopsis: accum=r[P3] step(r[P2@P5])
-**
-** Execute the xStep (if P1==0) or xInverse (if P1!=0) function for an
-** aggregate. The function has P5 arguments. P4 is a pointer to the
-** FuncDef structure that specifies the function. Register P3 is the
-** accumulator.
+** Execute the step function for an aggregate. The
+** function has P5 arguments. P4 is a pointer to an sqlite3_context
+** object that is used to run the function. Register P3 is
+** as the accumulator.
**
** The P5 arguments are taken from register P2 and its
** successors.
@@ -89038,8 +88156,7 @@ case OP_DecrJumpZero: { /* jump, in1 */
** sqlite3_context only happens once, instead of on each call to the
** step function.
*/
-case OP_AggInverse:
-case OP_AggStep: {
+case OP_AggStep0: {
int n;
sqlite3_context *pCtx;
@@ -89062,14 +88179,10 @@ case OP_AggStep: {
pCtx->argc = n;
pOp->p4type = P4_FUNCCTX;
pOp->p4.pCtx = pCtx;
-
- /* OP_AggInverse must have P1==1 and OP_AggStep must have P1==0 */
- assert( pOp->p1==(pOp->opcode==OP_AggInverse) );
-
- pOp->opcode = OP_AggStep1;
+ pOp->opcode = OP_AggStep;
/* Fall through into OP_AggStep */
}
-case OP_AggStep1: {
+case OP_AggStep: {
int i;
sqlite3_context *pCtx;
Mem *pMem;
@@ -89078,17 +88191,6 @@ case OP_AggStep1: {
pCtx = pOp->p4.pCtx;
pMem = &aMem[pOp->p3];
-#ifdef SQLITE_DEBUG
- if( pOp->p1 ){
- /* This is an OP_AggInverse call. Verify that xStep has always
- ** been called at least once prior to any xInverse call. */
- assert( pMem->uTemp==0x1122e0e3 );
- }else{
- /* This is an OP_AggStep call. Mark it as such. */
- pMem->uTemp = 0x1122e0e3;
- }
-#endif
-
/* If this function is inside of a trigger, the register array in aMem[]
** might change from one evaluation to the next. The next block of code
** checks to see if the register array has changed, and if so it
@@ -89109,13 +88211,7 @@ case OP_AggStep1: {
assert( pCtx->pOut->flags==MEM_Null );
assert( pCtx->isError==0 );
assert( pCtx->skipFlag==0 );
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pOp->p1 ){
- (pCtx->pFunc->xInverse)(pCtx,pCtx->argc,pCtx->argv);
- }else
-#endif
(pCtx->pFunc->xSFunc)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */
-
if( pCtx->isError ){
if( pCtx->isError>0 ){
sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut));
@@ -89140,46 +88236,22 @@ case OP_AggStep1: {
/* Opcode: AggFinal P1 P2 * P4 *
** Synopsis: accum=r[P1] N=P2
**
-** P1 is the memory location that is the accumulator for an aggregate
-** or window function. Execute the finalizer function
-** for an aggregate and store the result in P1.
+** Execute the finalizer function for an aggregate. P1 is
+** the memory location that is the accumulator for the aggregate.
**
** P2 is the number of arguments that the step function takes and
** P4 is a pointer to the FuncDef for this function. The P2
** argument is not used by this opcode. It is only there to disambiguate
** functions that can take varying numbers of arguments. The
-** P4 argument is only needed for the case where
+** P4 argument is only needed for the degenerate case where
** the step function was not previously called.
*/
-/* Opcode: AggValue * P2 P3 P4 *
-** Synopsis: r[P3]=value N=P2
-**
-** Invoke the xValue() function and store the result in register P3.
-**
-** P2 is the number of arguments that the step function takes and
-** P4 is a pointer to the FuncDef for this function. The P2
-** argument is not used by this opcode. It is only there to disambiguate
-** functions that can take varying numbers of arguments. The
-** P4 argument is only needed for the case where
-** the step function was not previously called.
-*/
-case OP_AggValue:
case OP_AggFinal: {
Mem *pMem;
assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
- assert( pOp->p3==0 || pOp->opcode==OP_AggValue );
pMem = &aMem[pOp->p1];
assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 );
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pOp->p3 ){
- rc = sqlite3VdbeMemAggValue(pMem, &aMem[pOp->p3], pOp->p4.pFunc);
- pMem = &aMem[pOp->p3];
- }else
-#endif
- {
- rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc);
- }
-
+ rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc);
if( rc ){
sqlite3VdbeError(p, "%s", sqlite3_value_text(pMem));
goto abort_due_to_error;
@@ -89374,7 +88446,7 @@ case OP_IncrVacuum: { /* jump */
}
#endif
-/* Opcode: Expire P1 P2 * * *
+/* Opcode: Expire P1 * * * *
**
** Cause precompiled statements to expire. When an expired statement
** is executed using sqlite3_step() it will either automatically
@@ -89383,19 +88455,12 @@ case OP_IncrVacuum: { /* jump */
**
** If P1 is 0, then all SQL statements become expired. If P1 is non-zero,
** then only the currently executing statement is expired.
-**
-** If P2 is 0, then SQL statements are expired immediately. If P2 is 1,
-** then running SQL statements are allowed to continue to run to completion.
-** The P2==1 case occurs when a CREATE INDEX or similar schema change happens
-** that might help the statement run faster but which does not affect the
-** correctness of operation.
*/
case OP_Expire: {
- assert( pOp->p2==0 || pOp->p2==1 );
if( !pOp->p1 ){
- sqlite3ExpirePreparedStatements(db, pOp->p2);
+ sqlite3ExpirePreparedStatements(db);
}else{
- p->expired = pOp->p2+1;
+ p->expired = 1;
}
break;
}
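A minimal public-API sketch (not part of the vendored diff) of how the behaviour documented for this opcode surfaces to applications; it assumes only what the comment above states, namely that a statement prepared with sqlite3_prepare_v2() transparently re-prepares itself after being expired by a schema change.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a); INSERT INTO t1 VALUES(1);", 0, 0, 0);

  /* Prepared with sqlite3_prepare_v2(), so OP_Expire only marks it stale. */
  sqlite3_prepare_v2(db, "SELECT a FROM t1", -1, &pStmt, 0);

  /* A schema change expires every prepared statement on this connection. */
  sqlite3_exec(db, "CREATE INDEX i1 ON t1(a)", 0, 0, 0);

  /* This step still succeeds: the statement is recompiled internally. */
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a = %d\n", sqlite3_column_int(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}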
@@ -89719,10 +88784,7 @@ case OP_VNext: { /* jump */
case OP_VRename: {
sqlite3_vtab *pVtab;
Mem *pName;
- int isLegacy;
-
- isLegacy = (db->flags & SQLITE_LegacyAlter);
- db->flags |= SQLITE_LegacyAlter;
+
pVtab = pOp->p4.pVtab->pVtab;
pName = &aMem[pOp->p1];
assert( pVtab->pModule->xRename );
@@ -89736,7 +88798,6 @@ case OP_VRename: {
rc = sqlite3VdbeChangeEncoding(pName, SQLITE_UTF8);
if( rc ) goto abort_due_to_error;
rc = pVtab->pModule->xRename(pVtab, pName->z);
- if( isLegacy==0 ) db->flags &= ~SQLITE_LegacyAlter;
sqlite3VtabImportErrmsg(p, pVtab);
p->expired = 0;
if( rc ) goto abort_due_to_error;
@@ -89785,7 +88846,6 @@ case OP_VUpdate: {
|| pOp->p5==OE_Abort || pOp->p5==OE_Ignore || pOp->p5==OE_Replace
);
assert( p->readOnly==0 );
- if( db->mallocFailed ) goto no_mem;
sqlite3VdbeIncrWriteCounter(p, 0);
pVtab = pOp->p4.pVtab->pVtab;
if( pVtab==0 || NEVER(pVtab->pModule==0) ){
@@ -89907,8 +88967,8 @@ case OP_MaxPgcnt: { /* out2 */
**
** See also: Function0, AggStep, AggFinal
*/
-case OP_PureFunc0: /* group */
-case OP_Function0: { /* group */
+case OP_PureFunc0:
+case OP_Function0: {
int n;
sqlite3_context *pCtx;
@@ -89932,8 +88992,8 @@ case OP_Function0: { /* group */
pOp->opcode += 2;
/* Fall through into OP_Function */
}
-case OP_PureFunc: /* group */
-case OP_Function: { /* group */
+case OP_PureFunc:
+case OP_Function: {
int i;
sqlite3_context *pCtx;
@@ -92858,11 +91918,7 @@ static int vdbeMergeEngineInit(
){
int rc = SQLITE_OK; /* Return code */
int i; /* For looping over PmaReader objects */
- int nTree; /* Number of subtrees to merge */
-
- /* Failure to allocate the merge would have been detected prior to
- ** invoking this routine */
- assert( pMerger!=0 );
+ int nTree = pMerger->nTree;
/* eMode is always INCRINIT_NORMAL in single-threaded mode */
assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
@@ -92871,7 +91927,6 @@ static int vdbeMergeEngineInit(
assert( pMerger->pTask==0 );
pMerger->pTask = pTask;
- nTree = pMerger->nTree;
for(i=0; i<nTree; i++){
if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){
/* PmaReaders should be normally initialized in order, as if they are
@@ -94000,14 +93055,6 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
}else if( pExpr->x.pList ){
if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort;
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( !ExprHasProperty(pExpr, EP_Reduced) && pExpr->pWin ){
- Window *pWin = pExpr->pWin;
- if( sqlite3WalkExprList(pWalker, pWin->pPartition) ) return WRC_Abort;
- if( sqlite3WalkExprList(pWalker, pWin->pOrderBy) ) return WRC_Abort;
- if( sqlite3WalkExpr(pWalker, pWin->pFilter) ) return WRC_Abort;
- }
-#endif
}
break;
}
@@ -94380,9 +93427,6 @@ static int lookupName(
if( sqlite3StrICmp(zTabName, zTab)!=0 ){
continue;
}
- if( IN_RENAME_OBJECT && pItem->zAlias ){
- sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->pTab);
- }
}
if( 0==(cntTab++) ){
pMatch = pItem;
@@ -94468,15 +93512,9 @@ static int lookupName(
#ifndef SQLITE_OMIT_UPSERT
if( pExpr->iTable==2 ){
testcase( iCol==(-1) );
- if( IN_RENAME_OBJECT ){
- pExpr->iColumn = iCol;
- pExpr->pTab = pTab;
- eNewExprOp = TK_COLUMN;
- }else{
- pExpr->iTable = pNC->uNC.pUpsert->regData + iCol;
- eNewExprOp = TK_REGISTER;
- ExprSetProperty(pExpr, EP_Alias);
- }
+ pExpr->iTable = pNC->uNC.pUpsert->regData + iCol;
+ eNewExprOp = TK_REGISTER;
+ ExprSetProperty(pExpr, EP_Alias);
}else
#endif /* SQLITE_OMIT_UPSERT */
{
@@ -94561,9 +93599,6 @@ static int lookupName(
cnt = 1;
pMatch = 0;
assert( zTab==0 && zDb==0 );
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr);
- }
goto lookupname_end;
}
}
@@ -94791,24 +93826,17 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
zTable = 0;
zColumn = pExpr->u.zToken;
}else{
- Expr *pLeft = pExpr->pLeft;
notValid(pParse, pNC, "the \".\" operator", NC_IdxExpr);
pRight = pExpr->pRight;
if( pRight->op==TK_ID ){
zDb = 0;
+ zTable = pExpr->pLeft->u.zToken;
+ zColumn = pRight->u.zToken;
}else{
assert( pRight->op==TK_DOT );
- zDb = pLeft->u.zToken;
- pLeft = pRight->pLeft;
- pRight = pRight->pRight;
- }
- zTable = pLeft->u.zToken;
- zColumn = pRight->u.zToken;
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight);
- }
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, (void*)&pExpr->pTab, (void*)pLeft);
+ zDb = pExpr->pLeft->u.zToken;
+ zTable = pRight->pLeft->u.zToken;
+ zColumn = pRight->pRight->u.zToken;
}
}
return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr);
@@ -94891,95 +93919,40 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
NC_IdxExpr|NC_PartIdx);
}
}
-
- if( 0==IN_RENAME_OBJECT ){
-#ifndef SQLITE_OMIT_WINDOWFUNC
- assert( is_agg==0 || (pDef->funcFlags & SQLITE_FUNC_MINMAX)
- || (pDef->xValue==0 && pDef->xInverse==0)
- || (pDef->xValue && pDef->xInverse && pDef->xSFunc && pDef->xFinalize)
- );
- if( pDef && pDef->xValue==0 && pExpr->pWin ){
- sqlite3ErrorMsg(pParse,
- "%.*s() may not be used as a window function", nId, zId
- );
- pNC->nErr++;
- }else if(
- (is_agg && (pNC->ncFlags & NC_AllowAgg)==0)
- || (is_agg && (pDef->funcFlags & SQLITE_FUNC_WINDOW) && !pExpr->pWin)
- || (is_agg && pExpr->pWin && (pNC->ncFlags & NC_AllowWin)==0)
- ){
- const char *zType;
- if( (pDef->funcFlags & SQLITE_FUNC_WINDOW) || pExpr->pWin ){
- zType = "window";
- }else{
- zType = "aggregate";
- }
- sqlite3ErrorMsg(pParse, "misuse of %s function %.*s()",zType,nId,zId);
- pNC->nErr++;
- is_agg = 0;
- }
-#else
- if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0) ){
- sqlite3ErrorMsg(pParse,"misuse of aggregate function %.*s()",nId,zId);
- pNC->nErr++;
- is_agg = 0;
- }
-#endif
- else if( no_such_func && pParse->db->init.busy==0
+ if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){
+ sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId);
+ pNC->nErr++;
+ is_agg = 0;
+ }else if( no_such_func && pParse->db->init.busy==0
#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
- && pParse->explain==0
+ && pParse->explain==0
#endif
- ){
- sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId);
- pNC->nErr++;
- }else if( wrong_num_args ){
- sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()",
- nId, zId);
- pNC->nErr++;
- }
- if( is_agg ){
-#ifndef SQLITE_OMIT_WINDOWFUNC
- pNC->ncFlags &= ~(pExpr->pWin ? NC_AllowWin : NC_AllowAgg);
-#else
- pNC->ncFlags &= ~NC_AllowAgg;
-#endif
- }
+ ){
+ sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId);
+ pNC->nErr++;
+ }else if( wrong_num_args ){
+ sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()",
+ nId, zId);
+ pNC->nErr++;
}
+ if( is_agg ) pNC->ncFlags &= ~NC_AllowAgg;
sqlite3WalkExprList(pWalker, pList);
if( is_agg ){
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pExpr->pWin ){
- Select *pSel = pNC->pWinSelect;
- sqlite3WalkExprList(pWalker, pExpr->pWin->pPartition);
- sqlite3WalkExprList(pWalker, pExpr->pWin->pOrderBy);
- sqlite3WalkExpr(pWalker, pExpr->pWin->pFilter);
- sqlite3WindowUpdate(pParse, pSel->pWinDefn, pExpr->pWin, pDef);
- if( 0==pSel->pWin
- || 0==sqlite3WindowCompare(pParse, pSel->pWin, pExpr->pWin)
- ){
- pExpr->pWin->pNextWin = pSel->pWin;
- pSel->pWin = pExpr->pWin;
- }
- pNC->ncFlags |= NC_AllowWin;
- }else
-#endif /* SQLITE_OMIT_WINDOWFUNC */
- {
- NameContext *pNC2 = pNC;
- pExpr->op = TK_AGG_FUNCTION;
- pExpr->op2 = 0;
- while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){
- pExpr->op2++;
- pNC2 = pNC2->pNext;
- }
- assert( pDef!=0 );
- if( pNC2 ){
- assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
- testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
- pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX);
-
- }
- pNC->ncFlags |= NC_AllowAgg;
+ NameContext *pNC2 = pNC;
+ pExpr->op = TK_AGG_FUNCTION;
+ pExpr->op2 = 0;
+ while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){
+ pExpr->op2++;
+ pNC2 = pNC2->pNext;
}
+ assert( pDef!=0 );
+ if( pNC2 ){
+ assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
+ testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
+ pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX);
+
+ }
+ pNC->ncFlags |= NC_AllowAgg;
}
/* FIX ME: Compute pExpr->affinity based on the expected return
** type of the function
@@ -95380,19 +94353,6 @@ static int resolveOrderGroupBy(
}
for(j=0; j<pSelect->pEList->nExpr; j++){
if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pE->pWin ){
- /* Since this window function is being changed into a reference
- ** to the same window function the result set, remove the instance
- ** of this window function from the Select.pWin list. */
- Window **pp;
- for(pp=&pSelect->pWin; *pp; pp=&(*pp)->pNextWin){
- if( *pp==pE->pWin ){
- *pp = (*pp)->pNextWin;
- }
- }
- }
-#endif
pItem->u.x.iOrderByCol = j+1;
}
}
@@ -95449,7 +94409,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
*/
memset(&sNC, 0, sizeof(sNC));
sNC.pParse = pParse;
- sNC.pWinSelect = p;
if( sqlite3ResolveExprNames(&sNC, p->pLimit) ){
return WRC_Abort;
}
@@ -95498,13 +94457,12 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
/* Set up the local name-context to pass to sqlite3ResolveExprNames() to
** resolve the result-set expression list.
*/
- sNC.ncFlags = NC_AllowAgg|NC_AllowWin;
+ sNC.ncFlags = NC_AllowAgg;
sNC.pSrcList = p->pSrc;
sNC.pNext = pOuterNC;
/* Resolve names in the result set. */
if( sqlite3ResolveExprListNames(&sNC, p->pEList) ) return WRC_Abort;
- sNC.ncFlags &= ~NC_AllowWin;
/* If there are no aggregate functions in the result-set, and no GROUP BY
** expression, do not allow aggregates in any of the other expressions.
@@ -95553,7 +94511,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
** outer queries
*/
sNC.pNext = 0;
- sNC.ncFlags |= NC_AllowAgg|NC_AllowWin;
+ sNC.ncFlags |= NC_AllowAgg;
/* If this is a converted compound query, move the ORDER BY clause from
** the sub-query back to the parent query. At this point each term
@@ -95584,7 +94542,6 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
if( db->mallocFailed ){
return WRC_Abort;
}
- sNC.ncFlags &= ~NC_AllowWin;
/* Resolve the GROUP BY clause. At the same time, make sure
** the GROUP BY clause does not contain aggregate functions.
@@ -95933,6 +94890,14 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
while( p ){
int op = p->op;
if( p->flags & EP_Generic ) break;
+ if( op==TK_CAST || op==TK_UPLUS ){
+ p = p->pLeft;
+ continue;
+ }
+ if( op==TK_COLLATE || (op==TK_REGISTER && p->op2==TK_COLLATE) ){
+ pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
+ break;
+ }
if( (op==TK_AGG_COLUMN || op==TK_COLUMN
|| op==TK_REGISTER || op==TK_TRIGGER)
&& p->pTab!=0
@@ -95946,14 +94911,6 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){
}
break;
}
- if( op==TK_CAST || op==TK_UPLUS ){
- p = p->pLeft;
- continue;
- }
- if( op==TK_COLLATE || (op==TK_REGISTER && p->op2==TK_COLLATE) ){
- pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken);
- break;
- }
if( p->flags & EP_Collate ){
if( p->pLeft && (p->pLeft->flags & EP_Collate)!=0 ){
p = p->pLeft;
@@ -96373,6 +95330,7 @@ static void codeVectorCompare(
Expr *pL, *pR;
int r1, r2;
assert( i>=0 && i<nLeft );
+ if( i>0 ) sqlite3ExprCachePush(pParse);
r1 = exprVectorRegister(pParse, pLeft, i, regLeft, &pL, ®Free1);
r2 = exprVectorRegister(pParse, pRight, i, regRight, &pR, ®Free2);
codeCompare(pParse, pL, pR, opx, r1, r2, dest, p5);
@@ -96384,6 +95342,7 @@ static void codeVectorCompare(
testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
sqlite3ReleaseTempReg(pParse, regFree1);
sqlite3ReleaseTempReg(pParse, regFree2);
+ if( i>0 ) sqlite3ExprCachePop(pParse);
if( i==nLeft-1 ){
break;
}
@@ -96731,12 +95690,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){
** Construct a new expression node for a function with multiple
** arguments.
*/
-SQLITE_PRIVATE Expr *sqlite3ExprFunction(
- Parse *pParse, /* Parsing context */
- ExprList *pList, /* Argument list */
- Token *pToken, /* Name of the function */
- int eDistinct /* SF_Distinct or SF_ALL or 0 */
-){
+SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){
Expr *pNew;
sqlite3 *db = pParse->db;
assert( pToken );
@@ -96745,14 +95699,10 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(
sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */
return 0;
}
- if( pList && pList->nExpr > pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
- sqlite3ErrorMsg(pParse, "too many arguments on function %T", pToken);
- }
pNew->x.pList = pList;
ExprSetProperty(pNew, EP_HasFunc);
assert( !ExprHasProperty(pNew, EP_xIsSelect) );
sqlite3ExprSetHeightAndFlags(pParse, pNew);
- if( eDistinct==SF_Distinct ) ExprSetProperty(pNew, EP_Distinct);
return pNew;
}
@@ -96862,9 +95812,6 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
}else{
sqlite3ExprListDelete(db, p->x.pList);
}
- if( !ExprHasProperty(p, EP_Reduced) ){
- sqlite3WindowDelete(db, p->pWin);
- }
}
if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken);
if( !ExprHasProperty(p, EP_Static) ){
@@ -96913,7 +95860,7 @@ static int exprStructSize(Expr *p){
** Note that with flags==EXPRDUP_REDUCE, this routine works on full-size
** (unreduced) Expr objects as they are originally constructed by the parser.
** During expression analysis, extra information is computed and moved into
-** later parts of the Expr object and that extra information might get chopped
+** later parts of teh Expr object and that extra information might get chopped
** off if the expression is reduced. Note also that it does not work to
** make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal
** to reduce a pristine expression tree from the parser. The implementation
@@ -96925,11 +95872,7 @@ static int dupedExprStructSize(Expr *p, int flags){
assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */
assert( EXPR_FULLSIZE<=0xfff );
assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 );
- if( 0==flags || p->op==TK_SELECT_COLUMN
-#ifndef SQLITE_OMIT_WINDOWFUNC
- || p->pWin
-#endif
- ){
+ if( 0==flags || p->op==TK_SELECT_COLUMN ){
nSize = EXPR_FULLSIZE;
}else{
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
@@ -97057,22 +96000,18 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
}
/* Fill in pNew->pLeft and pNew->pRight. */
- zAlloc += dupedExprNodeSize(p, dupFlags);
if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){
+ zAlloc += dupedExprNodeSize(p, dupFlags);
if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
pNew->pLeft = p->pLeft ?
exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
pNew->pRight = p->pRight ?
exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0;
}
- }else{
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( ExprHasProperty(p, EP_Reduced|EP_TokenOnly) ){
- pNew->pWin = 0;
- }else{
- pNew->pWin = sqlite3WindowDup(db, pNew, p->pWin);
+ if( pzBuffer ){
+ *pzBuffer = zAlloc;
}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
+ }else{
if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
if( pNew->op==TK_SELECT_COLUMN ){
pNew->pLeft = p->pLeft;
@@ -97084,9 +96023,6 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
pNew->pRight = sqlite3ExprDup(db, p->pRight, 0);
}
}
- if( pzBuffer ){
- *pzBuffer = zAlloc;
- }
}
return pNew;
}
@@ -97282,11 +96218,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){
pNew->addrOpenEphm[1] = -1;
pNew->nSelectRow = p->nSelectRow;
pNew->pWith = withDup(db, p->pWith);
-#ifndef SQLITE_OMIT_WINDOWFUNC
- pNew->pWin = 0;
- pNew->pWinDefn = sqlite3WindowListDup(db, p->pWinDefn);
-#endif
- pNew->selId = p->selId;
+ sqlite3SelectSetName(pNew, p->zSelName);
*pp = pNew;
pp = &pNew->pPrior;
pNext = pNew;
@@ -97458,9 +96390,6 @@ SQLITE_PRIVATE void sqlite3ExprListSetName(
assert( pItem->zName==0 );
pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n);
if( dequote ) sqlite3Dequote(pItem->zName);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, (void*)pItem->zName, pName);
- }
}
}
@@ -97641,9 +96570,6 @@ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
testcase( pExpr->op==TK_AGG_COLUMN );
- if( ExprHasProperty(pExpr, EP_FixedCol) && pWalker->eCode!=2 ){
- return WRC_Continue;
- }
if( pWalker->eCode==3 && pExpr->iTable==pWalker->u.iCur ){
return WRC_Continue;
}
@@ -97699,17 +96625,10 @@ SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){
}
/*
-** Walk an expression tree. Return non-zero if
-**
-** (1) the expression is constant, and
-** (2) the expression does originate in the ON or USING clause
-** of a LEFT JOIN, and
-** (3) the expression does not contain any EP_FixedCol TK_COLUMN
-** operands created by the constant propagation optimization.
-**
-** When this routine returns true, it indicates that the expression
-** can be added to the pParse->pConstExpr list and evaluated once when
-** the prepared statement starts up. See sqlite3ExprCodeAtInit().
+** Walk an expression tree. Return non-zero if the expression is constant
+** and does not originate from the ON or USING clauses of a join.
+** Return 0 if it involves variables or function calls or terms from
+** an ON or USING clause.
*/
SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){
return exprIsConst(p, 2, 0);
@@ -97739,7 +96658,7 @@ static int exprNodeIsConstantOrGroupBy(Walker *pWalker, Expr *pExpr){
Expr *p = pGroupBy->a[i].pExpr;
if( sqlite3ExprCompare(0, pExpr, p, -1)<2 ){
CollSeq *pColl = sqlite3ExprNNCollSeq(pWalker->pParse, p);
- if( sqlite3IsBinary(pColl) ){
+ if( sqlite3_stricmp("BINARY", pColl->zName)==0 ){
return WRC_Prune;
}
}
@@ -98161,8 +97080,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex(
sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
eType = IN_INDEX_ROWID;
- ExplainQueryPlan((pParse, 0,
- "USING ROWID SEARCH ON TABLE %s FOR IN-OPERATOR",pTab->zName));
+
sqlite3VdbeJumpHere(v, iAddr);
}else{
Index *pIdx; /* Iterator variable */
@@ -98421,6 +97339,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
int rReg = 0; /* Register storing resulting */
Vdbe *v = sqlite3GetVdbe(pParse);
if( NEVER(v==0) ) return 0;
+ sqlite3ExprCachePush(pParse);
/* The evaluation of the IN/EXISTS/SELECT must be repeated every time it
** is encountered if any of the following is true:
@@ -98556,6 +97475,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3);
}else{
sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1);
+ sqlite3ExprCacheAffinityChange(pParse, r3, 1);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pExpr->iTable, r2, r3, 1);
}
}
@@ -98636,6 +97556,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
if( jmpIfDynamic>=0 ){
sqlite3VdbeJumpHere(v, jmpIfDynamic);
}
+ sqlite3ExprCachePop(pParse);
return rReg;
}
@@ -98754,6 +97675,7 @@ static void sqlite3ExprCodeIN(
** aiMap[] array contains a mapping from the original LHS field order to
** the field order that matches the RHS index.
*/
+ sqlite3ExprCachePush(pParse);
rLhsOrig = exprCodeVector(pParse, pLeft, &iDummy);
for(i=0; idb, aiMap);
@@ -98979,6 +97902,145 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
}
}
+/*
+** Erase column-cache entry number i
+*/
+static void cacheEntryClear(Parse *pParse, int i){
+ if( pParse->aColCache[i].tempReg ){
+ if( pParse->nTempReg<ArraySize(pParse->aTempReg) ){
+ pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg;
+ }
+ }
+ pParse->nColCache--;
+ if( i<pParse->nColCache ){
+ pParse->aColCache[i] = pParse->aColCache[pParse->nColCache];
+ }
+}
+
+
+/*
+** Record in the column cache that a particular column from a
+** particular table is stored in a particular register.
+*/
+SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int iReg){
+ int i;
+ int minLru;
+ int idxLru;
+ struct yColCache *p;
+
+ /* Unless an error has occurred, register numbers are always positive. */
+ assert( iReg>0 || pParse->nErr || pParse->db->mallocFailed );
+ assert( iCol>=-1 && iCol<32768 ); /* Finite column numbers */
+
+ /* The SQLITE_ColumnCache flag disables the column cache. This is used
+ ** for testing only - to verify that SQLite always gets the same answer
+ ** with and without the column cache.
+ */
+ if( OptimizationDisabled(pParse->db, SQLITE_ColumnCache) ) return;
+
+ /* First replace any existing entry.
+ **
+ ** Actually, the way the column cache is currently used, we are guaranteed
+ ** that the object will never already be in cache. Verify this guarantee.
+ */
+#ifndef NDEBUG
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ assert( p->iTable!=iTab || p->iColumn!=iCol );
+ }
+#endif
+
+ /* If the cache is already full, delete the least recently used entry */
+ if( pParse->nColCache>=SQLITE_N_COLCACHE ){
+ minLru = 0x7fffffff;
+ idxLru = -1;
+ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ if( p->lru<minLru ){
+ idxLru = i;
+ minLru = p->lru;
+ }
+ }
+ p = &pParse->aColCache[idxLru];
+ }else{
+ p = &pParse->aColCache[pParse->nColCache++];
+ }
+
+ /* Add the new entry to the end of the cache */
+ p->iLevel = pParse->iCacheLevel;
+ p->iTable = iTab;
+ p->iColumn = iCol;
+ p->iReg = iReg;
+ p->tempReg = 0;
+ p->lru = pParse->iCacheCnt++;
+}
+
+/*
+** Indicate that registers between iReg..iReg+nReg-1 are being overwritten.
+** Purge the range of registers from the column cache.
+*/
+SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){
+ int i = 0;
+ while( i<pParse->nColCache ){
+ struct yColCache *p = &pParse->aColCache[i];
+ if( p->iReg >= iReg && p->iReg < iReg+nReg ){
+ cacheEntryClear(pParse, i);
+ }else{
+ i++;
+ }
+ }
+}
+
+/*
+** Remember the current column cache context. Any new entries
+** added to the column cache after this call are removed when the
+** corresponding pop occurs.
+*/
+SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){
+ pParse->iCacheLevel++;
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("PUSH to %d\n", pParse->iCacheLevel);
+ }
+#endif
+}
+
+/*
+** Remove from the column cache any entries that were added since
+** the previous sqlite3ExprCachePush operation. In other words, restore
+** the cache to the state it was in prior to the most recent Push.
+*/
+SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
+ int i = 0;
+ assert( pParse->iCacheLevel>=1 );
+ pParse->iCacheLevel--;
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("POP to %d\n", pParse->iCacheLevel);
+ }
+#endif
+ while( i<pParse->nColCache ){
+ if( pParse->aColCache[i].iLevel>pParse->iCacheLevel ){
+ cacheEntryClear(pParse, i);
+ }else{
+ i++;
+ }
+ }
+}
+
+/*
+** When a cached column is reused, make sure that its register is
+** no longer available as a temp register. ticket #3879: that same
+** register might be in the cache in multiple places, so be sure to
+** get them all.
+*/
+static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){
+ int i;
+ struct yColCache *p;
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ if( p->iReg==iReg ){
+ p->tempReg = 0;
+ }
+ }
+}
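A hedged sketch (not part of this diff) of the calling discipline these helpers assume: any column loads emitted on a code path that the VDBE may jump over are bracketed by Push/Pop, so the cache never advertises a register value that the skipped branch would have produced. The function name and the cursor/column/register parameters are illustrative placeholders, and the sketch only compiles inside the amalgamation where Parse and the helpers above are in scope.

static void exprCacheBranchSketch(Parse *pParse, int iCur, int iCol, int iReg){
  sqlite3ExprCachePush(pParse);                    /* entering conditionally-run code */
  sqlite3ExprCacheStore(pParse, iCur, iCol, iReg); /* column value now cached in iReg */
  /* ... emit VDBE instructions that a jump may bypass ... */
  sqlite3ExprCachePop(pParse);                     /* forget entries made on the branch */
}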
/* Generate code that will load into register regOut a value that is
** appropriate for the iIdxCol-th column of index pIdx.
@@ -99034,7 +98096,12 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(
/*
** Generate code that will extract the iColumn-th column from
-** table pTab and store the column value in register iReg.
+** table pTab and store the column value in a register.
+**
+** An effort is made to store the column value in register iReg. This
+** is not guaranteed for GetColumn() - the result can be stored in
+** any register. But the result is guaranteed to land in register iReg
+** for GetColumnToReg().
**
** There must be an open cursor to pTab in iTable when this routine
** is called. If iColumn<0 then code is generated that extracts the rowid.
@@ -99048,23 +98115,96 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
u8 p5 /* P5 value for OP_Column + FLAGS */
){
Vdbe *v = pParse->pVdbe;
+ int i;
+ struct yColCache *p;
+
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ if( p->iTable==iTable && p->iColumn==iColumn ){
+ p->lru = pParse->iCacheCnt++;
+ sqlite3ExprCachePinRegister(pParse, p->iReg);
+ return p->iReg;
+ }
+ }
assert( v!=0 );
sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg);
if( p5 ){
sqlite3VdbeChangeP5(v, p5);
+ }else{
+ sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg);
}
return iReg;
}
+SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg(
+ Parse *pParse, /* Parsing and code generating context */
+ Table *pTab, /* Description of the table we are reading from */
+ int iColumn, /* Index of the table column */
+ int iTable, /* The cursor pointing to the table */
+ int iReg /* Store results here */
+){
+ int r1 = sqlite3ExprCodeGetColumn(pParse, pTab, iColumn, iTable, iReg, 0);
+ if( r1!=iReg ) sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, r1, iReg);
+}
+
+
+/*
+** Clear all column cache entries.
+*/
+SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){
+ int i;
+
+#ifdef SQLITE_DEBUG
+ if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
+ printf("CLEAR\n");
+ }
+#endif
+ for(i=0; i<pParse->nColCache; i++){
+ if( pParse->aColCache[i].tempReg
+ && pParse->nTempReg<ArraySize(pParse->aTempReg)
+ ){
+ pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg;
+ }
+ }
+ pParse->nColCache = 0;
+}
+
+/*
+** Record the fact that an affinity change has occurred on iCount
+** registers starting with iStart.
+*/
+SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, int iCount){
+ sqlite3ExprCacheRemove(pParse, iStart, iCount);
+}
/*
** Generate code to move content from registers iFrom...iFrom+nReg-1
-** over to iTo..iTo+nReg-1.
+** over to iTo..iTo+nReg-1. Keep the column cache up-to-date.
*/
SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){
assert( iFrom>=iTo+nReg || iFrom+nReg<=iTo );
sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg);
+ sqlite3ExprCacheRemove(pParse, iFrom, nReg);
}
+#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
+/*
+** Return true if any register in the range iFrom..iTo (inclusive)
+** is used as part of the column cache.
+**
+** This routine is used within assert() and testcase() macros only
+** and does not appear in a normal build.
+*/
+static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){
+ int i;
+ struct yColCache *p;
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ int r = p->iReg;
+ if( r>=iFrom && r<=iTo ) return 1; /*NO_TEST*/
+ }
+ return 0;
+}
+#endif /* SQLITE_DEBUG || SQLITE_COVERAGE_TEST */
+
+
/*
** Convert a scalar expression node to a TK_REGISTER referencing
** register iReg. The caller must ensure that iReg already contains
@@ -99162,28 +98302,6 @@ expr_code_doover:
}
case TK_COLUMN: {
int iTab = pExpr->iTable;
- if( ExprHasProperty(pExpr, EP_FixedCol) ){
- /* This COLUMN expression is really a constant due to WHERE clause
- ** constraints, and that constant is coded by the pExpr->pLeft
- ** expresssion. However, make sure the constant has the correct
- ** datatype by applying the Affinity of the table column to the
- ** constant.
- */
- int iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target);
- int aff = sqlite3TableColumnAffinity(pExpr->pTab, pExpr->iColumn);
- if( aff!=SQLITE_AFF_BLOB ){
- static const char zAff[] = "B\000C\000D\000E";
- assert( SQLITE_AFF_BLOB=='A' );
- assert( SQLITE_AFF_TEXT=='B' );
- if( iReg!=target ){
- sqlite3VdbeAddOp2(v, OP_SCopy, iReg, target);
- iReg = target;
- }
- sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0,
- &zAff[(aff-'B')*2], P4_STATIC);
- }
- return iReg;
- }
if( iTab<0 ){
if( pParse->iSelfTab<0 ){
/* Generating CHECK constraints or inserting into partial index */
@@ -99264,6 +98382,8 @@ expr_code_doover:
}
sqlite3VdbeAddOp2(v, OP_Cast, target,
sqlite3AffinityType(pExpr->u.zToken, 0));
+ testcase( usedAsColumnCache(pParse, inReg, inReg) );
+ sqlite3ExprCacheAffinityChange(pParse, inReg, 1);
return inReg;
}
#endif /* SQLITE_OMIT_CAST */
@@ -99407,12 +98527,6 @@ expr_code_doover:
u8 enc = ENC(db); /* The text encoding used by this database */
CollSeq *pColl = 0; /* A collating sequence */
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) && pExpr->pWin ){
- return pExpr->pWin->regResult;
- }
-#endif
-
if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){
/* SQL functions can be expensive. So try to move constant functions
** out of the inner loop, even if that means an extra OP_Copy. */
@@ -99449,7 +98563,10 @@ expr_code_doover:
for(i=1; ia[i].pExpr, target);
+ sqlite3ExprCachePop(pParse);
}
sqlite3VdbeResolveLabel(v, endCoalesce);
break;
@@ -99515,8 +98632,10 @@ expr_code_doover:
}
}
+ sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */
sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
+ sqlite3ExprCachePop(pParse); /* Ticket 2ea2425d34be */
}else{
r1 = 0;
}
@@ -99533,7 +98652,7 @@ expr_code_doover:
** "glob(B,A). We want to use the A in "A glob B" to test
** for function overloading. But we use the B term in "glob(B,A)".
*/
- if( nFarg>=2 && ExprHasProperty(pExpr, EP_InfixFunc) ){
+ if( nFarg>=2 && (pExpr->flags & EP_InfixFunc) ){
pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr);
}else if( nFarg>0 ){
pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr);
@@ -99689,7 +98808,9 @@ expr_code_doover:
case TK_IF_NULL_ROW: {
int addrINR;
addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable);
+ sqlite3ExprCachePush(pParse);
inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ sqlite3ExprCachePop(pParse);
sqlite3VdbeJumpHere(v, addrINR);
sqlite3VdbeChangeP3(v, addrINR, inReg);
break;
@@ -99726,6 +98847,7 @@ expr_code_doover:
Expr opCompare; /* The X==Ei expression */
Expr *pX; /* The X expression */
Expr *pTest = 0; /* X==Ei (form A) or just Ei (form B) */
+ VVA_ONLY( int iCacheLevel = pParse->iCacheLevel; )
assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList );
assert(pExpr->x.pList->nExpr > 0);
@@ -99749,6 +98871,7 @@ expr_code_doover:
regFree1 = 0;
}
for(i=0; i<nExpr-1; i=i+2){
+ sqlite3ExprCachePush(pParse);
if( pX ){
assert( pTest!=0 );
opCompare.pRight = aListelem[i].pExpr;
testcase( aListelem[i+1].pExpr->op==TK_COLUMN );
sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target);
sqlite3VdbeGoto(v, endLabel);
+ sqlite3ExprCachePop(pParse);
sqlite3VdbeResolveLabel(v, nextCase);
}
if( (nExpr&1)!=0 ){
+ sqlite3ExprCachePush(pParse);
sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target);
+ sqlite3ExprCachePop(pParse);
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
}
+ assert( pParse->db->mallocFailed || pParse->nErr>0
+ || pParse->iCacheLevel==iCacheLevel );
sqlite3VdbeResolveLabel(v, endLabel);
break;
}
@@ -99917,7 +99045,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){
** might choose to code the expression at initialization time.
*/
SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){
- if( pParse->okConstFactor && sqlite3ExprIsConstantNotJoin(pExpr) ){
+ if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){
sqlite3ExprCodeAtInit(pParse, pExpr, target);
}else{
sqlite3ExprCode(pParse, pExpr, target);
@@ -99999,9 +99127,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
}else{
sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
}
- }else if( (flags & SQLITE_ECEL_FACTOR)!=0
- && sqlite3ExprIsConstantNotJoin(pExpr)
- ){
+ }else if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){
sqlite3ExprCodeAtInit(pParse, pExpr, target+i);
}else{
int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i);
@@ -100127,14 +99253,18 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
int d2 = sqlite3VdbeMakeLabel(v);
testcase( jumpIfNull==0 );
sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL);
+ sqlite3ExprCachePush(pParse);
sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
sqlite3VdbeResolveLabel(v, d2);
+ sqlite3ExprCachePop(pParse);
break;
}
case TK_OR: {
testcase( jumpIfNull==0 );
sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqlite3ExprCachePush(pParse);
sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqlite3ExprCachePop(pParse);
break;
}
case TK_NOT: {
@@ -100293,15 +99423,19 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
case TK_AND: {
testcase( jumpIfNull==0 );
sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+ sqlite3ExprCachePush(pParse);
sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+ sqlite3ExprCachePop(pParse);
break;
}
case TK_OR: {
int d2 = sqlite3VdbeMakeLabel(v);
testcase( jumpIfNull==0 );
sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL);
+ sqlite3ExprCachePush(pParse);
sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
sqlite3VdbeResolveLabel(v, d2);
+ sqlite3ExprCachePop(pParse);
break;
}
case TK_NOT: {
@@ -100523,8 +99657,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTa
if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 2;
if( ALWAYS((combinedFlags & EP_TokenOnly)==0) ){
if( combinedFlags & EP_xIsSelect ) return 2;
- if( (combinedFlags & EP_FixedCol)==0
- && sqlite3ExprCompare(pParse, pA->pLeft, pB->pLeft, iTab) ) return 2;
+ if( sqlite3ExprCompare(pParse, pA->pLeft, pB->pLeft, iTab) ) return 2;
if( sqlite3ExprCompare(pParse, pA->pRight, pB->pRight, iTab) ) return 2;
if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2;
assert( (combinedFlags & EP_Reduced)==0 );
@@ -100533,21 +99666,6 @@ SQLITE_PRIVATE int sqlite3ExprCompare(Parse *pParse, Expr *pA, Expr *pB, int iTa
if( pA->iTable!=pB->iTable
&& (pA->iTable!=iTab || NEVER(pB->iTable>=0)) ) return 2;
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- /* Justification for the assert():
- ** window functions have p->op==TK_FUNCTION but aggregate functions
- ** have p->op==TK_AGG_FUNCTION. So any comparison between an aggregate
- ** function and a window function should have failed before reaching
- ** this point. And, it is not possible to have a window function and
- ** a scalar function with the same name and number of arguments. So
- ** if we reach this point, either A and B both window functions or
- ** neither are a window functions. */
- assert( (pA->pWin==0)==(pB->pWin==0) );
-
- if( pA->pWin!=0 ){
- if( sqlite3WindowCompare(pParse,pA->pWin,pB->pWin)!=0 ) return 2;
- }
-#endif
}
return 0;
}
@@ -100638,15 +99756,18 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Parse *pParse, Expr *pE1, Expr *pE2, i
/*
** This is the Expr node callback for sqlite3ExprImpliesNotNullRow().
** If the expression node requires that the table at pWalker->iCur
-** have one or more non-NULL column, then set pWalker->eCode to 1 and abort.
-**
-** This routine controls an optimization. False positives (setting
-** pWalker->eCode to 1 when it should not be) are deadly, but false-negatives
-** (never setting pWalker->eCode) is a harmless missed optimization.
+** have a non-NULL column, then set pWalker->eCode to 1 and abort.
*/
static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
- testcase( pExpr->op==TK_AGG_COLUMN );
+ /* This routine is only called for WHERE clause expressions and so it
+ ** cannot have any TK_AGG_COLUMN entries because those are only found
+ ** in HAVING clauses. We can get a TK_AGG_FUNCTION in a WHERE clause,
+ ** but that is an illegal construct and the query will be rejected at
+ ** a later stage of processing, so the TK_AGG_FUNCTION case does not
+ ** need to be considered here. */
+ assert( pExpr->op!=TK_AGG_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
+
if( ExprHasProperty(pExpr, EP_FromJoin) ) return WRC_Prune;
switch( pExpr->op ){
case TK_ISNOT:
@@ -101066,9 +100187,21 @@ SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){
/*
** Deallocate a register, making available for reuse for some other
** purpose.
+**
+** If a register is currently being used by the column cache, then
+** the deallocation is deferred until the column cache line that uses
+** the register becomes stale.
*/
SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
if( iReg && pParse->nTempReg<ArraySize(pParse->aTempReg) ){
+ int i;
+ struct yColCache *p;
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ if( p->iReg==iReg ){
+ p->tempReg = 1;
+ return;
+ }
+ }
pParse->aTempReg[pParse->nTempReg++] = iReg;
}
}
@@ -101082,6 +100215,7 @@ SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){
i = pParse->iRangeReg;
n = pParse->nRangeReg;
if( nReg<=n ){
+ assert( !usedAsColumnCache(pParse, i, i+n-1) );
pParse->iRangeReg += nReg;
pParse->nRangeReg -= nReg;
}else{
@@ -101095,6 +100229,7 @@ SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){
sqlite3ReleaseTempReg(pParse, iReg);
return;
}
+ sqlite3ExprCacheRemove(pParse, iReg, nReg);
if( nReg>pParse->nRangeReg ){
pParse->nRangeReg = nReg;
pParse->iRangeReg = iReg;
@@ -101156,6 +100291,352 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){
*/
#ifndef SQLITE_OMIT_ALTERTABLE
+
+/*
+** This function is used by SQL generated to implement the
+** ALTER TABLE command. The first argument is the text of a CREATE TABLE or
+** CREATE INDEX command. The second is a table name. The table name in
+** the CREATE TABLE or CREATE INDEX statement is replaced with the third
+** argument and the result returned. Examples:
+**
+** sqlite_rename_table('CREATE TABLE abc(a, b, c)', 'def')
+** -> 'CREATE TABLE def(a, b, c)'
+**
+** sqlite_rename_table('CREATE INDEX i ON abc(a)', 'def')
+** -> 'CREATE INDEX i ON def(a)'
+*/
+static void renameTableFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ unsigned char const *zSql = sqlite3_value_text(argv[0]);
+ unsigned char const *zTableName = sqlite3_value_text(argv[1]);
+
+ int token;
+ Token tname;
+ unsigned char const *zCsr = zSql;
+ int len = 0;
+ char *zRet;
+
+ sqlite3 *db = sqlite3_context_db_handle(context);
+
+ UNUSED_PARAMETER(NotUsed);
+
+ /* The principle used to locate the table name in the CREATE TABLE
+ ** statement is that the table name is the first non-space token that
+ ** is immediately followed by a TK_LP or TK_USING token.
+ */
+ if( zSql ){
+ do {
+ if( !*zCsr ){
+ /* Ran out of input before finding an opening bracket. Return NULL. */
+ return;
+ }
+
+ /* Store the token that zCsr points to in tname. */
+ tname.z = (char*)zCsr;
+ tname.n = len;
+
+ /* Advance zCsr to the next token. Store that token type in 'token',
+ ** and its length in 'len' (to be used next iteration of this loop).
+ */
+ do {
+ zCsr += len;
+ len = sqlite3GetToken(zCsr, &token);
+ } while( token==TK_SPACE );
+ assert( len>0 );
+ } while( token!=TK_LP && token!=TK_USING );
+
+ zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
+ zSql, zTableName, tname.z+tname.n);
+ sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
+ }
+}
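A small public-API demonstration (assumed, not part of the diff) of what the function above is used for: after ALTER TABLE ... RENAME TO, the CREATE statement stored in sqlite_master has had its table name rewritten. The exact quoting of the new name may vary between releases.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE abc(a, b, c)", 0, 0, 0);
  sqlite3_exec(db, "ALTER TABLE abc RENAME TO def", 0, 0, 0);

  /* The stored schema text now names def instead of abc. */
  sqlite3_prepare_v2(db, "SELECT sql FROM sqlite_master WHERE name='def'",
                     -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}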
+
+/*
+** This C function implements an SQL user function that is used by SQL code
+** generated by the ALTER TABLE ... RENAME command to modify the definition
+** of any foreign key constraints that use the table being renamed as the
+** parent table. It is passed three arguments:
+**
+** 1) The complete text of the CREATE TABLE statement being modified,
+** 2) The old name of the table being renamed, and
+** 3) The new name of the table being renamed.
+**
+** It returns the new CREATE TABLE statement. For example:
+**
+** sqlite_rename_parent('CREATE TABLE t1(a REFERENCES t2)', 't2', 't3')
+** -> 'CREATE TABLE t1(a REFERENCES t3)'
+*/
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+static void renameParentFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ sqlite3 *db = sqlite3_context_db_handle(context);
+ char *zOutput = 0;
+ char *zResult;
+ unsigned char const *zInput = sqlite3_value_text(argv[0]);
+ unsigned char const *zOld = sqlite3_value_text(argv[1]);
+ unsigned char const *zNew = sqlite3_value_text(argv[2]);
+
+ unsigned const char *z; /* Pointer to token */
+ int n; /* Length of token z */
+ int token; /* Type of token */
+
+ UNUSED_PARAMETER(NotUsed);
+ if( zInput==0 || zOld==0 ) return;
+ for(z=zInput; *z; z=z+n){
+ n = sqlite3GetToken(z, &token);
+ if( token==TK_REFERENCES ){
+ char *zParent;
+ do {
+ z += n;
+ n = sqlite3GetToken(z, &token);
+ }while( token==TK_SPACE );
+
+ if( token==TK_ILLEGAL ) break;
+ zParent = sqlite3DbStrNDup(db, (const char *)z, n);
+ if( zParent==0 ) break;
+ sqlite3Dequote(zParent);
+ if( 0==sqlite3StrICmp((const char *)zOld, zParent) ){
+ char *zOut = sqlite3MPrintf(db, "%s%.*s\"%w\"",
+ (zOutput?zOutput:""), (int)(z-zInput), zInput, (const char *)zNew
+ );
+ sqlite3DbFree(db, zOutput);
+ zOutput = zOut;
+ zInput = &z[n];
+ }
+ sqlite3DbFree(db, zParent);
+ }
+ }
+
+ zResult = sqlite3MPrintf(db, "%s%s", (zOutput?zOutput:""), zInput),
+ sqlite3_result_text(context, zResult, -1, SQLITE_DYNAMIC);
+ sqlite3DbFree(db, zOutput);
+}
+#endif
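A companion public-API sketch (assumed, not part of the diff): with foreign-key support enabled, renaming a parent table also rewrites the REFERENCES clauses of its child tables, which is the rewrite sqlite_rename_parent() performs in the code restored above.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "PRAGMA foreign_keys=ON", 0, 0, 0);
  sqlite3_exec(db, "CREATE TABLE t2(x PRIMARY KEY);"
                   "CREATE TABLE t1(a REFERENCES t2);", 0, 0, 0);
  sqlite3_exec(db, "ALTER TABLE t2 RENAME TO t3", 0, 0, 0);

  /* The child table's stored schema should now reference t3. */
  sqlite3_prepare_v2(db, "SELECT sql FROM sqlite_master WHERE name='t1'",
                     -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}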
+
+#ifndef SQLITE_OMIT_TRIGGER
+/* This function is used by SQL generated to implement the
+** ALTER TABLE command. The first argument is the text of a CREATE TRIGGER
+** statement. The second is a table name. The table name in the CREATE
+** TRIGGER statement is replaced with the third argument and the result
+** returned. This is analogous to renameTableFunc() above, except for CREATE
+** TRIGGER, not CREATE INDEX and CREATE TABLE.
+*/
+static void renameTriggerFunc(
+ sqlite3_context *context,
+ int NotUsed,
+ sqlite3_value **argv
+){
+ unsigned char const *zSql = sqlite3_value_text(argv[0]);
+ unsigned char const *zTableName = sqlite3_value_text(argv[1]);
+
+ int token;
+ Token tname;
+ int dist = 3;
+ unsigned char const *zCsr = zSql;
+ int len = 0;
+ char *zRet;
+ sqlite3 *db = sqlite3_context_db_handle(context);
+
+ UNUSED_PARAMETER(NotUsed);
+
+ /* The principle used to locate the table name in the CREATE TRIGGER
+ ** statement is that the table name is the first token that is immediately
+ ** preceded by either TK_ON or TK_DOT and immediately followed by one
+ ** of TK_WHEN, TK_BEGIN or TK_FOR.
+ */
+ if( zSql ){
+ do {
+
+ if( !*zCsr ){
+ /* Ran out of input before finding the table name. Return NULL. */
+ return;
+ }
+
+ /* Store the token that zCsr points to in tname. */
+ tname.z = (char*)zCsr;
+ tname.n = len;
+
+ /* Advance zCsr to the next token. Store that token type in 'token',
+ ** and its length in 'len' (to be used next iteration of this loop).
+ */
+ do {
+ zCsr += len;
+ len = sqlite3GetToken(zCsr, &token);
+ }while( token==TK_SPACE );
+ assert( len>0 );
+
+ /* Variable 'dist' stores the number of tokens read since the most
+ ** recent TK_DOT or TK_ON. This means that when a WHEN, FOR or BEGIN
+ ** token is read and 'dist' equals 2, the condition stated above
+ ** is met.
+ **
+ ** Note that ON cannot be a database, table or column name, so
+ ** there is no need to worry about syntax like
+ ** "CREATE TRIGGER ... ON ON.ON BEGIN ..." etc.
+ */
+ dist++;
+ if( token==TK_DOT || token==TK_ON ){
+ dist = 0;
+ }
+ } while( dist!=2 || (token!=TK_WHEN && token!=TK_FOR && token!=TK_BEGIN) );
+
+ /* Variable tname now contains the token that is the old table-name
+ ** in the CREATE TRIGGER statement.
+ */
+ zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
+ zSql, zTableName, tname.z+tname.n);
+ sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
+ }
+}
+#endif /* !SQLITE_OMIT_TRIGGER */
+
+/*
+** Register built-in functions used to help implement ALTER TABLE
+*/
+SQLITE_PRIVATE void sqlite3AlterFunctions(void){
+ static FuncDef aAlterTableFuncs[] = {
+ FUNCTION(sqlite_rename_table, 2, 0, 0, renameTableFunc),
+#ifndef SQLITE_OMIT_TRIGGER
+ FUNCTION(sqlite_rename_trigger, 2, 0, 0, renameTriggerFunc),
+#endif
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+ FUNCTION(sqlite_rename_parent, 3, 0, 0, renameParentFunc),
+#endif
+ };
+ sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs));
+}
+
+/*
+** This function is used to create the text of expressions of the form:
+**
+** name=<constant1> OR name=<constant2> OR ...
+**
+** If argument zWhere is NULL, then a pointer string containing the text
+** "name=" is returned, where is the quoted version
+** of the string passed as argument zConstant. The returned buffer is
+** allocated using sqlite3DbMalloc(). It is the responsibility of the
+** caller to ensure that it is eventually freed.
+**
+** If argument zWhere is not NULL, then the string returned is
+** " OR name=", where is the contents of zWhere.
+** In this case zWhere is passed to sqlite3DbFree() before returning.
+**
+*/
+static char *whereOrName(sqlite3 *db, char *zWhere, char *zConstant){
+ char *zNew;
+ if( !zWhere ){
+ zNew = sqlite3MPrintf(db, "name=%Q", zConstant);
+ }else{
+ zNew = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, zConstant);
+ sqlite3DbFree(db, zWhere);
+ }
+ return zNew;
+}
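The same %Q quoting is available through the public sqlite3_mprintf(), so the WHERE text that whereOrName() accumulates can be reproduced outside the amalgamation. A minimal sketch, assuming two trigger names as inputs:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  /* First call: no prior clause, so just name='trig1'. */
  char *zWhere = sqlite3_mprintf("name=%Q", "trig1");
  /* Second call: append another OR term, freeing the old buffer. */
  char *zNew = sqlite3_mprintf("%s OR name=%Q", zWhere, "trig2");
  sqlite3_free(zWhere);
  printf("%s\n", zNew);   /* name='trig1' OR name='trig2' */
  sqlite3_free(zNew);
  return 0;
}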
+
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+/*
+** Generate the text of a WHERE expression which can be used to select all
+** tables that have foreign key constraints that refer to table pTab (i.e.
+** constraints for which pTab is the parent table) from the sqlite_master
+** table.
+*/
+static char *whereForeignKeys(Parse *pParse, Table *pTab){
+ FKey *p;
+ char *zWhere = 0;
+ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+ zWhere = whereOrName(pParse->db, zWhere, p->pFrom->zName);
+ }
+ return zWhere;
+}
+#endif
+
+/*
+** Generate the text of a WHERE expression which can be used to select all
+** temporary triggers on table pTab from the sqlite_temp_master table. If
+** table pTab has no temporary triggers, or is itself stored in the
+** temporary database, NULL is returned.
+*/
+static char *whereTempTriggers(Parse *pParse, Table *pTab){
+ Trigger *pTrig;
+ char *zWhere = 0;
+ const Schema *pTempSchema = pParse->db->aDb[1].pSchema; /* Temp db schema */
+
+ /* If the table is not located in the temp-db (in which case NULL is
+ ** returned), loop through the table's list of triggers. For each trigger
+ ** that is not part of the temp-db schema, add a clause to the WHERE
+ ** expression being built up in zWhere.
+ */
+ if( pTab->pSchema!=pTempSchema ){
+ sqlite3 *db = pParse->db;
+ for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){
+ if( pTrig->pSchema==pTempSchema ){
+ zWhere = whereOrName(db, zWhere, pTrig->zName);
+ }
+ }
+ }
+ if( zWhere ){
+ char *zNew = sqlite3MPrintf(pParse->db, "type='trigger' AND (%s)", zWhere);
+ sqlite3DbFree(pParse->db, zWhere);
+ zWhere = zNew;
+ }
+ return zWhere;
+}
+
+/*
+** Generate code to drop and reload the internal representation of table
+** pTab from the database, including triggers and temporary triggers.
+** Argument zName is the name of the table in the database schema at
+** the time the generated code is executed. This can be different from
+** pTab->zName if this function is being called to code part of an
+** "ALTER TABLE RENAME TO" statement.
+*/
+static void reloadTableSchema(Parse *pParse, Table *pTab, const char *zName){
+ Vdbe *v;
+ char *zWhere;
+ int iDb; /* Index of database containing pTab */
+#ifndef SQLITE_OMIT_TRIGGER
+ Trigger *pTrig;
+#endif
+
+ v = sqlite3GetVdbe(pParse);
+ if( NEVER(v==0) ) return;
+ assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
+ iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+ assert( iDb>=0 );
+
+#ifndef SQLITE_OMIT_TRIGGER
+ /* Drop any table triggers from the internal schema. */
+ for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){
+ int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema);
+ assert( iTrigDb==iDb || iTrigDb==1 );
+ sqlite3VdbeAddOp4(v, OP_DropTrigger, iTrigDb, 0, 0, pTrig->zName, 0);
+ }
+#endif
+
+ /* Drop the table and index from the internal schema. */
+ sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0);
+
+ /* Reload the table, index and permanent trigger schemas. */
+ zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName);
+ if( !zWhere ) return;
+ sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere);
+
+#ifndef SQLITE_OMIT_TRIGGER
+ /* Now, if the table is not stored in the temp database, reload any temp
+ ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined.
+ */
+ if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){
+ sqlite3VdbeAddParseSchemaOp(v, 1, zWhere);
+ }
+#endif
+}
+
/*
** Parameter zName is the name of a table that is about to be altered
** (either with ALTER TABLE ... RENAME TO or ALTER TABLE ... ADD COLUMN).
@@ -101172,49 +100653,6 @@ static int isSystemTable(Parse *pParse, const char *zName){
return 0;
}
-/*
-** Generate code to verify that the schemas of database zDb and, if
-** bTemp is not true, database "temp", can still be parsed. This is
-** called at the end of the generation of an ALTER TABLE ... RENAME ...
-** statement to ensure that the operation has not rendered any schema
-** objects unusable.
-*/
-static void renameTestSchema(Parse *pParse, const char *zDb, int bTemp){
- sqlite3NestedParse(pParse,
- "SELECT 1 "
- "FROM \"%w\".%s "
- "WHERE name NOT LIKE 'sqlite_%%'"
- " AND sql NOT LIKE 'create virtual%%'"
- " AND sqlite_rename_test(%Q, sql, type, name, %d)=NULL ",
- zDb, MASTER_NAME,
- zDb, bTemp
- );
-
- if( bTemp==0 ){
- sqlite3NestedParse(pParse,
- "SELECT 1 "
- "FROM temp.%s "
- "WHERE name NOT LIKE 'sqlite_%%'"
- " AND sql NOT LIKE 'create virtual%%'"
- " AND sqlite_rename_test(%Q, sql, type, name, 1)=NULL ",
- MASTER_NAME, zDb
- );
- }
-}
-
-/*
-** Generate code to reload the schema for database iDb. And, if iDb!=1, for
-** the temp database as well.
-*/
-static void renameReloadSchema(Parse *pParse, int iDb){
- Vdbe *v = pParse->pVdbe;
- if( v ){
- sqlite3ChangeCookie(pParse, iDb);
- sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, iDb, 0);
- if( iDb!=1 ) sqlite3VdbeAddParseSchemaOp(pParse->pVdbe, 1, 0);
- }
-}
-
/*
** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy"
** command.
@@ -101232,6 +100670,9 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
int nTabName; /* Number of UTF-8 characters in zTabName */
const char *zTabName; /* Original name of the table */
Vdbe *v;
+#ifndef SQLITE_OMIT_TRIGGER
+ char *zWhere = 0; /* Where clause to locate temp triggers */
+#endif
VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */
u32 savedDbFlags; /* Saved value of db->mDbFlags */
@@ -101304,63 +100745,8 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
if( v==0 ){
goto exit_rename_table;
}
-
- /* figure out how many UTF-8 characters are in zName */
- zTabName = pTab->zName;
- nTabName = sqlite3Utf8CharLen(zTabName, -1);
-
- /* Rewrite all CREATE TABLE, INDEX, TRIGGER or VIEW statements in
- ** the schema to use the new table name. */
- sqlite3NestedParse(pParse,
- "UPDATE \"%w\".%s SET "
- "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, %d) "
- "WHERE (type!='index' OR tbl_name=%Q COLLATE nocase)"
- "AND name NOT LIKE 'sqlite_%%'"
- , zDb, MASTER_NAME, zDb, zTabName, zName, (iDb==1), zTabName
- );
-
- /* Update the tbl_name and name columns of the sqlite_master table
- ** as required. */
- sqlite3NestedParse(pParse,
- "UPDATE %Q.%s SET "
- "tbl_name = %Q, "
- "name = CASE "
- "WHEN type='table' THEN %Q "
- "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN "
- "'sqlite_autoindex_' || %Q || substr(name,%d+18) "
- "ELSE name END "
- "WHERE tbl_name=%Q COLLATE nocase AND "
- "(type='table' OR type='index' OR type='trigger');",
- zDb, MASTER_NAME,
- zName, zName, zName,
- nTabName, zTabName
- );
-
-#ifndef SQLITE_OMIT_AUTOINCREMENT
- /* If the sqlite_sequence table exists in this database, then update
- ** it with the new table name.
- */
- if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){
- sqlite3NestedParse(pParse,
- "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q",
- zDb, zName, pTab->zName);
- }
-#endif
-
- /* If the table being renamed is not itself part of the temp database,
- ** edit view and trigger definitions within the temp database
- ** as required. */
- if( iDb!=1 ){
- sqlite3NestedParse(pParse,
- "UPDATE sqlite_temp_master SET "
- "sql = sqlite_rename_table(%Q, type, name, sql, %Q, %Q, 1), "
- "tbl_name = "
- "CASE WHEN tbl_name=%Q COLLATE nocase AND "
- " sqlite_rename_test(%Q, sql, type, name, 1) "
- "THEN %Q ELSE tbl_name END "
- "WHERE type IN ('view', 'trigger')"
- , zDb, zTabName, zName, zTabName, zDb, zName);
- }
+ sqlite3BeginWriteOperation(pParse, pVTab!=0, iDb);
+ sqlite3ChangeCookie(pParse, iDb);
/* If this is a virtual table, invoke the xRename() function if
** one is defined. The xRename() callback will modify the names
@@ -101376,8 +100762,90 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
}
#endif
- renameReloadSchema(pParse, iDb);
- renameTestSchema(pParse, zDb, iDb==1);
+ /* figure out how many UTF-8 characters are in zName */
+ zTabName = pTab->zName;
+ nTabName = sqlite3Utf8CharLen(zTabName, -1);
+
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+ if( db->flags&SQLITE_ForeignKeys ){
+ /* If foreign-key support is enabled, rewrite the CREATE TABLE
+ ** statements corresponding to all child tables of foreign key constraints
+ ** for which the renamed table is the parent table. */
+ if( (zWhere=whereForeignKeys(pParse, pTab))!=0 ){
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\".%s SET "
+ "sql = sqlite_rename_parent(sql, %Q, %Q) "
+ "WHERE %s;", zDb, MASTER_NAME, zTabName, zName, zWhere);
+ sqlite3DbFree(db, zWhere);
+ }
+ }
+#endif
+
+ /* Modify the sqlite_master table to use the new table name. */
+ sqlite3NestedParse(pParse,
+ "UPDATE %Q.%s SET "
+#ifdef SQLITE_OMIT_TRIGGER
+ "sql = sqlite_rename_table(sql, %Q), "
+#else
+ "sql = CASE "
+ "WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, %Q)"
+ "ELSE sqlite_rename_table(sql, %Q) END, "
+#endif
+ "tbl_name = %Q, "
+ "name = CASE "
+ "WHEN type='table' THEN %Q "
+ "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN "
+ "'sqlite_autoindex_' || %Q || substr(name,%d+18) "
+ "ELSE name END "
+ "WHERE tbl_name=%Q COLLATE nocase AND "
+ "(type='table' OR type='index' OR type='trigger');",
+ zDb, MASTER_NAME, zName, zName, zName,
+#ifndef SQLITE_OMIT_TRIGGER
+ zName,
+#endif
+ zName, nTabName, zTabName
+ );
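/* Worked expansion of the template above (illustrative, assuming the table
** lives in "main", the old name is "t1" with nTabName==2 UTF-8 characters,
** the new name is "t2", and triggers are compiled in):
**
**   UPDATE 'main'.sqlite_master SET
**     sql = CASE WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, 't2')
**                ELSE sqlite_rename_table(sql, 't2') END,
**     tbl_name = 't2',
**     name = CASE
**       WHEN type='table' THEN 't2'
**       WHEN name LIKE 'sqlite_autoindex%' AND type='index'
**         THEN 'sqlite_autoindex_' || 't2' || substr(name,2+18)
**       ELSE name END
**   WHERE tbl_name='t1' COLLATE nocase AND
**         (type='table' OR type='index' OR type='trigger');
*/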
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+ /* If the sqlite_sequence table exists in this database, then update
+ ** it with the new table name.
+ */
+ if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){
+ sqlite3NestedParse(pParse,
+ "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q",
+ zDb, zName, pTab->zName);
+ }
+#endif
+
+#ifndef SQLITE_OMIT_TRIGGER
+ /* If there are TEMP triggers on this table, modify the sqlite_temp_master
+ ** table. Don't do this if the table being ALTERed is itself located in
+ ** the temp database.
+ */
+ if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){
+ sqlite3NestedParse(pParse,
+ "UPDATE sqlite_temp_master SET "
+ "sql = sqlite_rename_trigger(sql, %Q), "
+ "tbl_name = %Q "
+ "WHERE %s;", zName, zName, zWhere);
+ sqlite3DbFree(db, zWhere);
+ }
+#endif
+
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+ if( db->flags&SQLITE_ForeignKeys ){
+ FKey *p;
+ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+ Table *pFrom = p->pFrom;
+ if( pFrom!=pTab ){
+ reloadTableSchema(pParse, p->pFrom, pFrom->zName);
+ }
+ }
+ }
+#endif
+
+ /* Drop and reload the internal table schema. */
+ reloadTableSchema(pParse, pTab, zName);
exit_rename_table:
sqlite3SrcListDelete(db, pSrc);
@@ -101403,11 +100871,12 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
Column *pCol; /* The new column */
Expr *pDflt; /* Default value for the new column */
sqlite3 *db; /* The database connection; */
- Vdbe *v; /* The prepared statement under construction */
+ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
int r1; /* Temporary registers */
db = pParse->db;
if( pParse->nErr || db->mallocFailed ) return;
+ assert( v!=0 );
pNew = pParse->pNewTable;
assert( pNew );
@@ -101502,20 +100971,17 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
** from less than 3 to 4, as that will corrupt any preexisting DESC
** index.
*/
- v = sqlite3GetVdbe(pParse);
- if( v ){
- r1 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT);
- sqlite3VdbeUsesBtree(v, iDb);
- sqlite3VdbeAddOp2(v, OP_AddImm, r1, -2);
- sqlite3VdbeAddOp2(v, OP_IfPos, r1, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3);
- sqlite3ReleaseTempReg(pParse, r1);
- }
+ r1 = sqlite3GetTempReg(pParse);
+ sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT);
+ sqlite3VdbeUsesBtree(v, iDb);
+ sqlite3VdbeAddOp2(v, OP_AddImm, r1, -2);
+ sqlite3VdbeAddOp2(v, OP_IfPos, r1, sqlite3VdbeCurrentAddr(v)+2);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3);
+ sqlite3ReleaseTempReg(pParse, r1);
- /* Reload the table definition */
- renameReloadSchema(pParse, iDb);
+ /* Reload the schema of the modified table. */
+ reloadTableSchema(pParse, pTab, pTab->zName);
}
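/* The OP_ReadCookie/OP_AddImm/OP_IfPos/OP_SetCookie sequence coded above
** amounts to the following host-language logic (a sketch only; the function
** name is invented for illustration): raise a file-format cookie of 1 or 2
** to exactly 3, and leave a value of 3 or more untouched, so a format-4
** database is never changed and an old database is never pushed straight
** to 4. */
static int alterMinFileFormat(int iCur){
  if( iCur - 2 > 0 ) return iCur;  /* OP_AddImm -2 then OP_IfPos: already >=3 */
  return 3;                        /* OP_SetCookie ..., BTREE_FILE_FORMAT, 3 */
}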
/*
@@ -101536,6 +101002,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
Table *pNew;
Table *pTab;
+ Vdbe *v;
int iDb;
int i;
int nAlloc;
@@ -101599,1142 +101066,16 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
pNew->addColOffset = pTab->addColOffset;
pNew->nTabRef = 1;
+ /* Begin a transaction and increment the schema cookie. */
+ sqlite3BeginWriteOperation(pParse, 0, iDb);
+ v = sqlite3GetVdbe(pParse);
+ if( !v ) goto exit_begin_add_column;
+ sqlite3ChangeCookie(pParse, iDb);
+
exit_begin_add_column:
sqlite3SrcListDelete(db, pSrc);
return;
}
-
-/*
-** Parameter pTab is the subject of an ALTER TABLE ... RENAME COLUMN
-** command. This function checks if the table is a view or virtual
-** table (columns of views or virtual tables may not be renamed). If so,
-** it loads an error message into pParse and returns non-zero.
-**
-** Or, if pTab is not a view or virtual table, zero is returned.
-*/
-#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
-static int isRealTable(Parse *pParse, Table *pTab){
- const char *zType = 0;
-#ifndef SQLITE_OMIT_VIEW
- if( pTab->pSelect ){
- zType = "view";
- }
-#endif
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- if( IsVirtual(pTab) ){
- zType = "virtual table";
- }
-#endif
- if( zType ){
- sqlite3ErrorMsg(
- pParse, "cannot rename columns of %s \"%s\"", zType, pTab->zName
- );
- return 1;
- }
- return 0;
-}
-#else /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */
-# define isRealTable(x,y) (0)
-#endif
-
-/*
-** Handles the following parser reduction:
-**
-** cmd ::= ALTER TABLE pSrc RENAME COLUMN pOld TO pNew
-*/
-SQLITE_PRIVATE void sqlite3AlterRenameColumn(
- Parse *pParse, /* Parsing context */
- SrcList *pSrc, /* Table being altered. pSrc->nSrc==1 */
- Token *pOld, /* Name of column being changed */
- Token *pNew /* New column name */
-){
- sqlite3 *db = pParse->db; /* Database connection */
- Table *pTab; /* Table being updated */
- int iCol; /* Index of column being renamed */
- char *zOld = 0; /* Old column name */
- char *zNew = 0; /* New column name */
- const char *zDb; /* Name of schema containing the table */
- int iSchema; /* Index of the schema */
- int bQuote; /* True to quote the new name */
-
- /* Locate the table to be altered */
- pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
- if( !pTab ) goto exit_rename_column;
-
- /* Cannot alter a system table */
- if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ) goto exit_rename_column;
- if( SQLITE_OK!=isRealTable(pParse, pTab) ) goto exit_rename_column;
-
- /* Which schema holds the table to be altered */
- iSchema = sqlite3SchemaToIndex(db, pTab->pSchema);
- assert( iSchema>=0 );
- zDb = db->aDb[iSchema].zDbSName;
-
-#ifndef SQLITE_OMIT_AUTHORIZATION
- /* Invoke the authorization callback. */
- if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){
- goto exit_rename_column;
- }
-#endif
-
- /* Make sure the old name really is a column name in the table to be
- ** altered. Set iCol to be the index of the column being renamed */
- zOld = sqlite3NameFromToken(db, pOld);
- if( !zOld ) goto exit_rename_column;
-  for(iCol=0; iCol<pTab->nCol; iCol++){
- if( 0==sqlite3StrICmp(pTab->aCol[iCol].zName, zOld) ) break;
- }
- if( iCol==pTab->nCol ){
- sqlite3ErrorMsg(pParse, "no such column: \"%s\"", zOld);
- goto exit_rename_column;
- }
-
- /* Do the rename operation using a recursive UPDATE statement that
- ** uses the sqlite_rename_column() SQL function to compute the new
- ** CREATE statement text for the sqlite_master table.
- */
- zNew = sqlite3NameFromToken(db, pNew);
- if( !zNew ) goto exit_rename_column;
- assert( pNew->n>0 );
- bQuote = sqlite3Isquote(pNew->z[0]);
- sqlite3NestedParse(pParse,
- "UPDATE \"%w\".%s SET "
- "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, %d) "
- "WHERE name NOT LIKE 'sqlite_%%' AND (type != 'index' OR tbl_name = %Q)"
- " AND sql NOT LIKE 'create virtual%%'",
- zDb, MASTER_NAME,
- zDb, pTab->zName, iCol, zNew, bQuote, iSchema==1,
- pTab->zName
- );
-
- sqlite3NestedParse(pParse,
- "UPDATE temp.%s SET "
- "sql = sqlite_rename_column(sql, type, name, %Q, %Q, %d, %Q, %d, 1) "
- "WHERE type IN ('trigger', 'view')",
- MASTER_NAME,
- zDb, pTab->zName, iCol, zNew, bQuote
- );
-
- /* Drop and reload the database schema. */
- renameReloadSchema(pParse, iSchema);
- renameTestSchema(pParse, zDb, iSchema==1);
-
- exit_rename_column:
- sqlite3SrcListDelete(db, pSrc);
- sqlite3DbFree(db, zOld);
- sqlite3DbFree(db, zNew);
- return;
-}
-
-/*
-** Each RenameToken object maps an element of the parse tree into
-** the token that generated that element. The parse tree element
-** might be one of:
-**
-** * A pointer to an Expr that represents an ID
-** * The name of a table column in Column.zName
-**
-** A list of RenameToken objects can be constructed during parsing.
-** Each new object is created by sqlite3RenameTokenMap().
-** As the parse tree is transformed, the sqlite3RenameTokenRemap()
-** routine is used to keep the mapping current.
-**
-** After the parse finishes, renameTokenFind() routine can be used
-** to look up the actual token value that created some element in
-** the parse tree.
-*/
-struct RenameToken {
- void *p; /* Parse tree element created by token t */
- Token t; /* The token that created parse tree element p */
- RenameToken *pNext; /* Next is a list of all RenameToken objects */
-};
-
-/*
-** The context of an ALTER TABLE RENAME COLUMN operation that gets passed
-** down into the Walker.
-*/
-typedef struct RenameCtx RenameCtx;
-struct RenameCtx {
- RenameToken *pList; /* List of tokens to overwrite */
- int nList; /* Number of tokens in pList */
- int iCol; /* Index of column being renamed */
- Table *pTab; /* Table being ALTERed */
- const char *zOld; /* Old column name */
-};
-
-#ifdef SQLITE_DEBUG
-/*
-** This function is only for debugging. It performs two tasks:
-**
-** 1. Checks that pointer pPtr does not already appear in the
-** rename-token list.
-**
-** 2. Dereferences each pointer in the rename-token list.
-**
-** The second is most effective when debugging under valgrind or
-** address-sanitizer or similar. If any of these pointers no longer
-** point to valid objects, an exception is raised by the memory-checking
-** tool.
-**
-** The point of this is to prevent comparisons of invalid pointer values.
-** Even though this always seems to work, it is undefined according to the
-** C standard. Example of undefined comparison:
-**
-** sqlite3_free(x);
-** if( x==y ) ...
-**
-** Technically, as x no longer points into a valid object or to the byte
-** following a valid object, it may not be used in comparison operations.
-*/
-static void renameTokenCheckAll(Parse *pParse, void *pPtr){
- if( pParse->nErr==0 && pParse->db->mallocFailed==0 ){
- RenameToken *p;
- u8 i = 0;
- for(p=pParse->pRename; p; p=p->pNext){
- if( p->p ){
- assert( p->p!=pPtr );
- i += *(u8*)(p->p);
- }
- }
- }
-}
-#else
-# define renameTokenCheckAll(x,y)
-#endif
-
-/*
-** Add a new RenameToken object mapping parse tree element pPtr into
-** token *pToken to the Parse object currently under construction.
-**
-** Return a copy of pPtr.
-*/
-SQLITE_PRIVATE void *sqlite3RenameTokenMap(Parse *pParse, void *pPtr, Token *pToken){
- RenameToken *pNew;
- assert( pPtr || pParse->db->mallocFailed );
- renameTokenCheckAll(pParse, pPtr);
- pNew = sqlite3DbMallocZero(pParse->db, sizeof(RenameToken));
- if( pNew ){
- pNew->p = pPtr;
- pNew->t = *pToken;
- pNew->pNext = pParse->pRename;
- pParse->pRename = pNew;
- }
-
- return pPtr;
-}
-
-/*
-** It is assumed that there is already a RenameToken object associated
-** with parse tree element pFrom. This function remaps the associated token
-** to parse tree element pTo.
-*/
-SQLITE_PRIVATE void sqlite3RenameTokenRemap(Parse *pParse, void *pTo, void *pFrom){
- RenameToken *p;
- renameTokenCheckAll(pParse, pTo);
- for(p=pParse->pRename; p; p=p->pNext){
- if( p->p==pFrom ){
- p->p = pTo;
- break;
- }
- }
-}
-
-/*
-** Walker callback used by sqlite3RenameExprUnmap().
-*/
-static int renameUnmapExprCb(Walker *pWalker, Expr *pExpr){
- Parse *pParse = pWalker->pParse;
- sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr);
- return WRC_Continue;
-}
-
-/*
-** Remove all nodes that are part of expression pExpr from the rename list.
-*/
-SQLITE_PRIVATE void sqlite3RenameExprUnmap(Parse *pParse, Expr *pExpr){
- Walker sWalker;
- memset(&sWalker, 0, sizeof(Walker));
- sWalker.pParse = pParse;
- sWalker.xExprCallback = renameUnmapExprCb;
- sqlite3WalkExpr(&sWalker, pExpr);
-}
-
-/*
-** Remove all nodes that are part of expression-list pEList from the
-** rename list.
-*/
-SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse *pParse, ExprList *pEList){
- if( pEList ){
- int i;
- Walker sWalker;
- memset(&sWalker, 0, sizeof(Walker));
- sWalker.pParse = pParse;
- sWalker.xExprCallback = renameUnmapExprCb;
- sqlite3WalkExprList(&sWalker, pEList);
-    for(i=0; i<pEList->nExpr; i++){
- sqlite3RenameTokenRemap(pParse, 0, (void*)pEList->a[i].zName);
- }
- }
-}
-
-/*
-** Free the list of RenameToken objects given in the second argument
-*/
-static void renameTokenFree(sqlite3 *db, RenameToken *pToken){
- RenameToken *pNext;
- RenameToken *p;
- for(p=pToken; p; p=pNext){
- pNext = p->pNext;
- sqlite3DbFree(db, p);
- }
-}
-
-/*
-** Search the Parse object passed as the first argument for a RenameToken
-** object associated with parse tree element pPtr. If found, remove it
-** from the Parse object and add it to the list maintained by the
-** RenameCtx object passed as the second argument.
-*/
-static void renameTokenFind(Parse *pParse, struct RenameCtx *pCtx, void *pPtr){
- RenameToken **pp;
- assert( pPtr!=0 );
- for(pp=&pParse->pRename; (*pp); pp=&(*pp)->pNext){
- if( (*pp)->p==pPtr ){
- RenameToken *pToken = *pp;
- *pp = pToken->pNext;
- pToken->pNext = pCtx->pList;
- pCtx->pList = pToken;
- pCtx->nList++;
- break;
- }
- }
-}
-
-/*
-** This is a Walker select callback. It does nothing. It is only required
-** because without a dummy callback, sqlite3WalkExpr() and similar do not
-** descend into sub-select statements.
-*/
-static int renameColumnSelectCb(Walker *pWalker, Select *p){
- UNUSED_PARAMETER(pWalker);
- UNUSED_PARAMETER(p);
- return WRC_Continue;
-}
-
-/*
-** This is a Walker expression callback.
-**
-** For every TK_COLUMN node in the expression tree, search to see
-** if the column being references is the column being renamed by an
-** ALTER TABLE statement. If it is, then attach its associated
-** RenameToken object to the list of RenameToken objects being
-** constructed in RenameCtx object at pWalker->u.pRename.
-*/
-static int renameColumnExprCb(Walker *pWalker, Expr *pExpr){
- RenameCtx *p = pWalker->u.pRename;
- if( pExpr->op==TK_TRIGGER
- && pExpr->iColumn==p->iCol
- && pWalker->pParse->pTriggerTab==p->pTab
- ){
- renameTokenFind(pWalker->pParse, p, (void*)pExpr);
- }else if( pExpr->op==TK_COLUMN
- && pExpr->iColumn==p->iCol
- && p->pTab==pExpr->pTab
- ){
- renameTokenFind(pWalker->pParse, p, (void*)pExpr);
- }
- return WRC_Continue;
-}
-
-/*
-** The RenameCtx contains a list of tokens that reference a column that
-** is being renamed by an ALTER TABLE statement. Return the "last"
-** RenameToken in the RenameCtx and remove that RenameToken from the
-** RenameContext. "Last" means the last RenameToken encountered when
-** the input SQL is parsed from left to right. Repeated calls to this routine
-** return all column name tokens in the order that they are encountered
-** in the SQL statement.
-*/
-static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){
- RenameToken *pBest = pCtx->pList;
- RenameToken *pToken;
- RenameToken **pp;
-
- for(pToken=pBest->pNext; pToken; pToken=pToken->pNext){
- if( pToken->t.z>pBest->t.z ) pBest = pToken;
- }
- for(pp=&pCtx->pList; *pp!=pBest; pp=&(*pp)->pNext);
- *pp = pBest->pNext;
-
- return pBest;
-}
-
-/*
-** An error occured while parsing or otherwise processing a database
-** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an
-** ALTER TABLE RENAME COLUMN program. The error message emitted by the
-** sub-routine is currently stored in pParse->zErrMsg. This function
-** adds context to the error message and then stores it in pCtx.
-*/
-static void renameColumnParseError(
- sqlite3_context *pCtx,
- int bPost,
- sqlite3_value *pType,
- sqlite3_value *pObject,
- Parse *pParse
-){
- const char *zT = (const char*)sqlite3_value_text(pType);
- const char *zN = (const char*)sqlite3_value_text(pObject);
- char *zErr;
-
- zErr = sqlite3_mprintf("error in %s %s%s: %s",
- zT, zN, (bPost ? " after rename" : ""),
- pParse->zErrMsg
- );
- sqlite3_result_error(pCtx, zErr, -1);
- sqlite3_free(zErr);
-}
-
-/*
-** For each name in the the expression-list pEList (i.e. each
-** pEList->a[i].zName) that matches the string in zOld, extract the
-** corresponding rename-token from Parse object pParse and add it
-** to the RenameCtx pCtx.
-*/
-static void renameColumnElistNames(
- Parse *pParse,
- RenameCtx *pCtx,
- ExprList *pEList,
- const char *zOld
-){
- if( pEList ){
- int i;
-    for(i=0; i<pEList->nExpr; i++){
- char *zName = pEList->a[i].zName;
- if( 0==sqlite3_stricmp(zName, zOld) ){
- renameTokenFind(pParse, pCtx, (void*)zName);
- }
- }
- }
-}
-
-/*
-** For each name in the the id-list pIdList (i.e. each pIdList->a[i].zName)
-** that matches the string in zOld, extract the corresponding rename-token
-** from Parse object pParse and add it to the RenameCtx pCtx.
-*/
-static void renameColumnIdlistNames(
- Parse *pParse,
- RenameCtx *pCtx,
- IdList *pIdList,
- const char *zOld
-){
- if( pIdList ){
- int i;
-    for(i=0; i<pIdList->nId; i++){
- char *zName = pIdList->a[i].zName;
- if( 0==sqlite3_stricmp(zName, zOld) ){
- renameTokenFind(pParse, pCtx, (void*)zName);
- }
- }
- }
-}
-
-/*
-** Parse the SQL statement zSql using Parse object (*p). The Parse object
-** is initialized by this function before it is used.
-*/
-static int renameParseSql(
- Parse *p, /* Memory to use for Parse object */
- const char *zDb, /* Name of schema SQL belongs to */
- int bTable, /* 1 -> RENAME TABLE, 0 -> RENAME COLUMN */
- sqlite3 *db, /* Database handle */
- const char *zSql, /* SQL to parse */
- int bTemp /* True if SQL is from temp schema */
-){
- int rc;
- char *zErr = 0;
-
- db->init.iDb = bTemp ? 1 : sqlite3FindDbName(db, zDb);
-
- /* Parse the SQL statement passed as the first argument. If no error
- ** occurs and the parse does not result in a new table, index or
- ** trigger object, the database must be corrupt. */
- memset(p, 0, sizeof(Parse));
- p->eParseMode = (bTable ? PARSE_MODE_RENAME_TABLE : PARSE_MODE_RENAME_COLUMN);
- p->db = db;
- p->nQueryLoop = 1;
- rc = sqlite3RunParser(p, zSql, &zErr);
- assert( p->zErrMsg==0 );
- assert( rc!=SQLITE_OK || zErr==0 );
- assert( (0!=p->pNewTable) + (0!=p->pNewIndex) + (0!=p->pNewTrigger)<2 );
- p->zErrMsg = zErr;
- if( db->mallocFailed ) rc = SQLITE_NOMEM;
- if( rc==SQLITE_OK
- && p->pNewTable==0 && p->pNewIndex==0 && p->pNewTrigger==0
- ){
- rc = SQLITE_CORRUPT_BKPT;
- }
-
-#ifdef SQLITE_DEBUG
- /* Ensure that all mappings in the Parse.pRename list really do map to
- ** a part of the input string. */
- if( rc==SQLITE_OK ){
- int nSql = sqlite3Strlen30(zSql);
- RenameToken *pToken;
- for(pToken=p->pRename; pToken; pToken=pToken->pNext){
- assert( pToken->t.z>=zSql && &pToken->t.z[pToken->t.n]<=&zSql[nSql] );
- }
- }
-#endif
-
- db->init.iDb = 0;
- return rc;
-}
-
-/*
-** This function edits SQL statement zSql, replacing each token identified
-** by the linked list pRename with the text of zNew. If argument bQuote is
-** true, then zNew is always quoted first. If no error occurs, the result
-** is loaded into context object pCtx as the result.
-**
-** Or, if an error occurs (i.e. an OOM condition), an error is left in
-** pCtx and an SQLite error code returned.
-*/
-static int renameEditSql(
- sqlite3_context *pCtx, /* Return result here */
- RenameCtx *pRename, /* Rename context */
- const char *zSql, /* SQL statement to edit */
- const char *zNew, /* New token text */
- int bQuote /* True to always quote token */
-){
- int nNew = sqlite3Strlen30(zNew);
- int nSql = sqlite3Strlen30(zSql);
- sqlite3 *db = sqlite3_context_db_handle(pCtx);
- int rc = SQLITE_OK;
- char *zQuot;
- char *zOut;
- int nQuot;
-
- /* Set zQuot to point to a buffer containing a quoted copy of the
- ** identifier zNew. If the corresponding identifier in the original
- ** ALTER TABLE statement was quoted (bQuote==1), then set zNew to
- ** point to zQuot so that all substitutions are made using the
- ** quoted version of the new column name. */
- zQuot = sqlite3MPrintf(db, "\"%w\"", zNew);
- if( zQuot==0 ){
- return SQLITE_NOMEM;
- }else{
- nQuot = sqlite3Strlen30(zQuot);
- }
- if( bQuote ){
- zNew = zQuot;
- nNew = nQuot;
- }
-
- /* At this point pRename->pList contains a list of RenameToken objects
- ** corresponding to all tokens in the input SQL that must be replaced
- ** with the new column name. All that remains is to construct and
- ** return the edited SQL string. */
- assert( nQuot>=nNew );
- zOut = sqlite3DbMallocZero(db, nSql + pRename->nList*nQuot + 1);
- if( zOut ){
- int nOut = nSql;
- memcpy(zOut, zSql, nSql);
- while( pRename->pList ){
- int iOff; /* Offset of token to replace in zOut */
- RenameToken *pBest = renameColumnTokenNext(pRename);
-
- u32 nReplace;
- const char *zReplace;
- if( sqlite3IsIdChar(*pBest->t.z) ){
- nReplace = nNew;
- zReplace = zNew;
- }else{
- nReplace = nQuot;
- zReplace = zQuot;
- }
-
- iOff = pBest->t.z - zSql;
- if( pBest->t.n!=nReplace ){
- memmove(&zOut[iOff + nReplace], &zOut[iOff + pBest->t.n],
- nOut - (iOff + pBest->t.n)
- );
- nOut += nReplace - pBest->t.n;
- zOut[nOut] = '\0';
- }
- memcpy(&zOut[iOff], zReplace, nReplace);
- sqlite3DbFree(db, pBest);
- }
-
- sqlite3_result_text(pCtx, zOut, -1, SQLITE_TRANSIENT);
- sqlite3DbFree(db, zOut);
- }else{
- rc = SQLITE_NOMEM;
- }
-
- sqlite3_free(zQuot);
- return rc;
-}
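/* A self-contained sketch of the splice step used by the routine above
** (names invented for illustration): shift the tail of the buffer with
** memmove to make the hole the right size, then memcpy the replacement in.
** The buffer is assumed to have been allocated with enough spare room, as
** renameEditSql() does with nSql + nList*nQuot + 1 bytes. Because tokens are
** consumed rightmost-first, offsets computed against the original SQL stay
** valid for the tokens that have not been edited yet. */
#include <string.h>

static int spliceToken(
  char *zBuf,        /* Text being edited; has spare capacity */
  int nBuf,          /* Current length of the text in zBuf */
  int iOff,          /* Offset of the token to replace */
  int nOld,          /* Length of the token being replaced */
  const char *zNew,  /* Replacement text */
  int nNew           /* Length of zNew */
){
  if( nOld!=nNew ){
    memmove(&zBuf[iOff+nNew], &zBuf[iOff+nOld], nBuf-(iOff+nOld));
    nBuf += nNew - nOld;
    zBuf[nBuf] = '\0';
  }
  memcpy(&zBuf[iOff], zNew, nNew);
  return nBuf;       /* New length of the text in zBuf */
}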
-
-/*
-** Resolve all symbols in the trigger at pParse->pNewTrigger, assuming
-** it was read from the schema of database zDb. Return SQLITE_OK if
-** successful. Otherwise, return an SQLite error code and leave an error
-** message in the Parse object.
-*/
-static int renameResolveTrigger(Parse *pParse, const char *zDb){
- sqlite3 *db = pParse->db;
- Trigger *pNew = pParse->pNewTrigger;
- TriggerStep *pStep;
- NameContext sNC;
- int rc = SQLITE_OK;
-
- memset(&sNC, 0, sizeof(sNC));
- sNC.pParse = pParse;
- assert( pNew->pTabSchema );
- pParse->pTriggerTab = sqlite3FindTable(db, pNew->table,
- db->aDb[sqlite3SchemaToIndex(db, pNew->pTabSchema)].zDbSName
- );
- pParse->eTriggerOp = pNew->op;
-
- /* Resolve symbols in WHEN clause */
- if( pNew->pWhen ){
- rc = sqlite3ResolveExprNames(&sNC, pNew->pWhen);
- }
-
- for(pStep=pNew->step_list; rc==SQLITE_OK && pStep; pStep=pStep->pNext){
- if( pStep->pSelect ){
- sqlite3SelectPrep(pParse, pStep->pSelect, &sNC);
- if( pParse->nErr ) rc = pParse->rc;
- }
- if( rc==SQLITE_OK && pStep->zTarget ){
- Table *pTarget = sqlite3LocateTable(pParse, 0, pStep->zTarget, zDb);
- if( pTarget==0 ){
- rc = SQLITE_ERROR;
- }else if( SQLITE_OK==(rc = sqlite3ViewGetColumnNames(pParse, pTarget)) ){
- SrcList sSrc;
- memset(&sSrc, 0, sizeof(sSrc));
- sSrc.nSrc = 1;
- sSrc.a[0].zName = pStep->zTarget;
- sSrc.a[0].pTab = pTarget;
- sNC.pSrcList = &sSrc;
- if( pStep->pWhere ){
- rc = sqlite3ResolveExprNames(&sNC, pStep->pWhere);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3ResolveExprListNames(&sNC, pStep->pExprList);
- }
- assert( !pStep->pUpsert || (!pStep->pWhere && !pStep->pExprList) );
- if( pStep->pUpsert ){
- Upsert *pUpsert = pStep->pUpsert;
- assert( rc==SQLITE_OK );
- pUpsert->pUpsertSrc = &sSrc;
- sNC.uNC.pUpsert = pUpsert;
- sNC.ncFlags = NC_UUpsert;
- rc = sqlite3ResolveExprListNames(&sNC, pUpsert->pUpsertTarget);
- if( rc==SQLITE_OK ){
- ExprList *pUpsertSet = pUpsert->pUpsertSet;
- rc = sqlite3ResolveExprListNames(&sNC, pUpsertSet);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertWhere);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3ResolveExprNames(&sNC, pUpsert->pUpsertTargetWhere);
- }
- sNC.ncFlags = 0;
- }
- }
- }
- }
- return rc;
-}
-
-/*
-** Invoke sqlite3WalkExpr() or sqlite3WalkSelect() on all Select or Expr
-** objects that are part of the trigger passed as the second argument.
-*/
-static void renameWalkTrigger(Walker *pWalker, Trigger *pTrigger){
- TriggerStep *pStep;
-
- /* Find tokens to edit in WHEN clause */
- sqlite3WalkExpr(pWalker, pTrigger->pWhen);
-
- /* Find tokens to edit in trigger steps */
- for(pStep=pTrigger->step_list; pStep; pStep=pStep->pNext){
- sqlite3WalkSelect(pWalker, pStep->pSelect);
- sqlite3WalkExpr(pWalker, pStep->pWhere);
- sqlite3WalkExprList(pWalker, pStep->pExprList);
- if( pStep->pUpsert ){
- Upsert *pUpsert = pStep->pUpsert;
- sqlite3WalkExprList(pWalker, pUpsert->pUpsertTarget);
- sqlite3WalkExprList(pWalker, pUpsert->pUpsertSet);
- sqlite3WalkExpr(pWalker, pUpsert->pUpsertWhere);
- sqlite3WalkExpr(pWalker, pUpsert->pUpsertTargetWhere);
- }
- }
-}
-
-/*
-** Free the contents of Parse object (*pParse). Do not free the memory
-** occupied by the Parse object itself.
-*/
-static void renameParseCleanup(Parse *pParse){
- sqlite3 *db = pParse->db;
- if( pParse->pVdbe ){
- sqlite3VdbeFinalize(pParse->pVdbe);
- }
- sqlite3DeleteTable(db, pParse->pNewTable);
- if( pParse->pNewIndex ) sqlite3FreeIndex(db, pParse->pNewIndex);
- sqlite3DeleteTrigger(db, pParse->pNewTrigger);
- sqlite3DbFree(db, pParse->zErrMsg);
- renameTokenFree(db, pParse->pRename);
- sqlite3ParserReset(pParse);
-}
-
-/*
-** SQL function:
-**
-** sqlite_rename_column(zSql, iCol, bQuote, zNew, zTable, zOld)
-**
-** 0. zSql: SQL statement to rewrite
-** 1. type: Type of object ("table", "view" etc.)
-** 2. object: Name of object
-** 3. Database: Database name (e.g. "main")
-** 4. Table: Table name
-** 5. iCol: Index of column to rename
-** 6. zNew: New column name
-** 7. bQuote: Non-zero if the new column name should be quoted.
-** 8. bTemp: True if zSql comes from temp schema
-**
-** Do a column rename operation on the CREATE statement given in zSql.
-** The iCol-th column (left-most is 0) of table zTable is renamed from zCol
-** into zNew. The name should be quoted if bQuote is true.
-**
-** This function is used internally by the ALTER TABLE RENAME COLUMN command.
-** Though accessible to application code, it is not intended for use by
-** applications. The existance of this function, and the way it works,
-** is subject to change without notice.
-**
-** If any of the parameters are out-of-bounds, then simply return NULL.
-** An out-of-bounds parameter can only occur when the application calls
-** this function directly. The parameters will always be well-formed when
-** this routine is invoked by the bytecode for a legitimate ALTER TABLE
-** statement.
-*/
-static void renameColumnFunc(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **argv
-){
- sqlite3 *db = sqlite3_context_db_handle(context);
- RenameCtx sCtx;
- const char *zSql = (const char*)sqlite3_value_text(argv[0]);
- const char *zDb = (const char*)sqlite3_value_text(argv[3]);
- const char *zTable = (const char*)sqlite3_value_text(argv[4]);
- int iCol = sqlite3_value_int(argv[5]);
- const char *zNew = (const char*)sqlite3_value_text(argv[6]);
- int bQuote = sqlite3_value_int(argv[7]);
- int bTemp = sqlite3_value_int(argv[8]);
- const char *zOld;
- int rc;
- Parse sParse;
- Walker sWalker;
- Index *pIdx;
- int i;
- Table *pTab;
-#ifndef SQLITE_OMIT_AUTHORIZATION
- sqlite3_xauth xAuth = db->xAuth;
-#endif
-
- UNUSED_PARAMETER(NotUsed);
- if( zSql==0 ) return;
- if( zTable==0 ) return;
- if( zNew==0 ) return;
- if( iCol<0 ) return;
- sqlite3BtreeEnterAll(db);
- pTab = sqlite3FindTable(db, zTable, zDb);
- if( pTab==0 || iCol>=pTab->nCol ){
- sqlite3BtreeLeaveAll(db);
- return;
- }
- zOld = pTab->aCol[iCol].zName;
- memset(&sCtx, 0, sizeof(sCtx));
- sCtx.iCol = ((iCol==pTab->iPKey) ? -1 : iCol);
-
-#ifndef SQLITE_OMIT_AUTHORIZATION
- db->xAuth = 0;
-#endif
- rc = renameParseSql(&sParse, zDb, 0, db, zSql, bTemp);
-
- /* Find tokens that need to be replaced. */
- memset(&sWalker, 0, sizeof(Walker));
- sWalker.pParse = &sParse;
- sWalker.xExprCallback = renameColumnExprCb;
- sWalker.xSelectCallback = renameColumnSelectCb;
- sWalker.u.pRename = &sCtx;
-
- sCtx.pTab = pTab;
- if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
- if( sParse.pNewTable ){
- Select *pSelect = sParse.pNewTable->pSelect;
- if( pSelect ){
- sParse.rc = SQLITE_OK;
- sqlite3SelectPrep(&sParse, sParse.pNewTable->pSelect, 0);
- rc = (db->mallocFailed ? SQLITE_NOMEM : sParse.rc);
- if( rc==SQLITE_OK ){
- sqlite3WalkSelect(&sWalker, pSelect);
- }
- if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
- }else{
- /* A regular table */
- int bFKOnly = sqlite3_stricmp(zTable, sParse.pNewTable->zName);
- FKey *pFKey;
- assert( sParse.pNewTable->pSelect==0 );
- sCtx.pTab = sParse.pNewTable;
- if( bFKOnly==0 ){
- renameTokenFind(
- &sParse, &sCtx, (void*)sParse.pNewTable->aCol[iCol].zName
- );
- if( sCtx.iCol<0 ){
- renameTokenFind(&sParse, &sCtx, (void*)&sParse.pNewTable->iPKey);
- }
- sqlite3WalkExprList(&sWalker, sParse.pNewTable->pCheck);
- for(pIdx=sParse.pNewTable->pIndex; pIdx; pIdx=pIdx->pNext){
- sqlite3WalkExprList(&sWalker, pIdx->aColExpr);
- }
- }
-
- for(pFKey=sParse.pNewTable->pFKey; pFKey; pFKey=pFKey->pNextFrom){
-        for(i=0; i<pFKey->nCol; i++){
- if( bFKOnly==0 && pFKey->aCol[i].iFrom==iCol ){
- renameTokenFind(&sParse, &sCtx, (void*)&pFKey->aCol[i]);
- }
- if( 0==sqlite3_stricmp(pFKey->zTo, zTable)
- && 0==sqlite3_stricmp(pFKey->aCol[i].zCol, zOld)
- ){
- renameTokenFind(&sParse, &sCtx, (void*)pFKey->aCol[i].zCol);
- }
- }
- }
- }
- }else if( sParse.pNewIndex ){
- sqlite3WalkExprList(&sWalker, sParse.pNewIndex->aColExpr);
- sqlite3WalkExpr(&sWalker, sParse.pNewIndex->pPartIdxWhere);
- }else{
- /* A trigger */
- TriggerStep *pStep;
- rc = renameResolveTrigger(&sParse, (bTemp ? 0 : zDb));
- if( rc!=SQLITE_OK ) goto renameColumnFunc_done;
-
- for(pStep=sParse.pNewTrigger->step_list; pStep; pStep=pStep->pNext){
- if( pStep->zTarget ){
- Table *pTarget = sqlite3LocateTable(&sParse, 0, pStep->zTarget, zDb);
- if( pTarget==pTab ){
- if( pStep->pUpsert ){
- ExprList *pUpsertSet = pStep->pUpsert->pUpsertSet;
- renameColumnElistNames(&sParse, &sCtx, pUpsertSet, zOld);
- }
- renameColumnIdlistNames(&sParse, &sCtx, pStep->pIdList, zOld);
- renameColumnElistNames(&sParse, &sCtx, pStep->pExprList, zOld);
- }
- }
- }
-
-
- /* Find tokens to edit in UPDATE OF clause */
- if( sParse.pTriggerTab==pTab ){
- renameColumnIdlistNames(&sParse, &sCtx,sParse.pNewTrigger->pColumns,zOld);
- }
-
- /* Find tokens to edit in various expressions and selects */
- renameWalkTrigger(&sWalker, sParse.pNewTrigger);
- }
-
- assert( rc==SQLITE_OK );
- rc = renameEditSql(context, &sCtx, zSql, zNew, bQuote);
-
-renameColumnFunc_done:
- if( rc!=SQLITE_OK ){
- if( sParse.zErrMsg ){
- renameColumnParseError(context, 0, argv[1], argv[2], &sParse);
- }else{
- sqlite3_result_error_code(context, rc);
- }
- }
-
- renameParseCleanup(&sParse);
- renameTokenFree(db, sCtx.pList);
-#ifndef SQLITE_OMIT_AUTHORIZATION
- db->xAuth = xAuth;
-#endif
- sqlite3BtreeLeaveAll(db);
-}
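/* Worked example of the rewrite performed by the function above
** (illustrative): given zSql = 'CREATE TABLE t1(a INTEGER, b TEXT)' for
** table "t1" in "main", iCol==1 and zNew=="c", the result with bQuote==0 is
**
**   CREATE TABLE t1(a INTEGER, c TEXT)
**
** and with bQuote!=0 the new name is substituted in its quoted form, "c".
*/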
-
-/*
-** Walker expression callback used by "RENAME TABLE".
-*/
-static int renameTableExprCb(Walker *pWalker, Expr *pExpr){
- RenameCtx *p = pWalker->u.pRename;
- if( pExpr->op==TK_COLUMN && p->pTab==pExpr->pTab ){
- renameTokenFind(pWalker->pParse, p, (void*)&pExpr->pTab);
- }
- return WRC_Continue;
-}
-
-/*
-** Walker select callback used by "RENAME TABLE".
-*/
-static int renameTableSelectCb(Walker *pWalker, Select *pSelect){
- int i;
- RenameCtx *p = pWalker->u.pRename;
- SrcList *pSrc = pSelect->pSrc;
-  for(i=0; i<pSrc->nSrc; i++){
- struct SrcList_item *pItem = &pSrc->a[i];
- if( pItem->pTab==p->pTab ){
- renameTokenFind(pWalker->pParse, p, pItem->zName);
- }
- }
-
- return WRC_Continue;
-}
-
-
-/*
-** This C function implements an SQL user function that is used by SQL code
-** generated by the ALTER TABLE ... RENAME command to modify the definition
-** of any foreign key constraints that use the table being renamed as the
-** parent table. It is passed three arguments:
-**
-** 0: The database containing the table being renamed.
-** 1. type: Type of object ("table", "view" etc.)
-** 2. object: Name of object
-** 3: The complete text of the schema statement being modified,
-** 4: The old name of the table being renamed, and
-** 5: The new name of the table being renamed.
-** 6: True if the schema statement comes from the temp db.
-**
-** It returns the new schema statement. For example:
-**
-** sqlite_rename_table('main', 'CREATE TABLE t1(a REFERENCES t2)','t2','t3',0)
-** -> 'CREATE TABLE t1(a REFERENCES t3)'
-*/
-static void renameTableFunc(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **argv
-){
- sqlite3 *db = sqlite3_context_db_handle(context);
- const char *zDb = (const char*)sqlite3_value_text(argv[0]);
- const char *zInput = (const char*)sqlite3_value_text(argv[3]);
- const char *zOld = (const char*)sqlite3_value_text(argv[4]);
- const char *zNew = (const char*)sqlite3_value_text(argv[5]);
- int bTemp = sqlite3_value_int(argv[6]);
- UNUSED_PARAMETER(NotUsed);
-
- if( zInput && zOld && zNew ){
- Parse sParse;
- int rc;
- int bQuote = 1;
- RenameCtx sCtx;
- Walker sWalker;
-
-#ifndef SQLITE_OMIT_AUTHORIZATION
- sqlite3_xauth xAuth = db->xAuth;
- db->xAuth = 0;
-#endif
-
- sqlite3BtreeEnterAll(db);
-
- memset(&sCtx, 0, sizeof(RenameCtx));
- sCtx.pTab = sqlite3FindTable(db, zOld, zDb);
- memset(&sWalker, 0, sizeof(Walker));
- sWalker.pParse = &sParse;
- sWalker.xExprCallback = renameTableExprCb;
- sWalker.xSelectCallback = renameTableSelectCb;
- sWalker.u.pRename = &sCtx;
-
- rc = renameParseSql(&sParse, zDb, 1, db, zInput, bTemp);
-
- if( rc==SQLITE_OK ){
- int isLegacy = (db->flags & SQLITE_LegacyAlter);
- if( sParse.pNewTable ){
- Table *pTab = sParse.pNewTable;
-
- if( pTab->pSelect ){
- if( isLegacy==0 ){
- NameContext sNC;
- memset(&sNC, 0, sizeof(sNC));
- sNC.pParse = &sParse;
-
- sqlite3SelectPrep(&sParse, pTab->pSelect, &sNC);
- if( sParse.nErr ) rc = sParse.rc;
- sqlite3WalkSelect(&sWalker, pTab->pSelect);
- }
- }else{
- /* Modify any FK definitions to point to the new table. */
-#ifndef SQLITE_OMIT_FOREIGN_KEY
- if( db->flags & SQLITE_ForeignKeys ){
- FKey *pFKey;
- for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
- if( sqlite3_stricmp(pFKey->zTo, zOld)==0 ){
- renameTokenFind(&sParse, &sCtx, (void*)pFKey->zTo);
- }
- }
- }
-#endif
-
- /* If this is the table being altered, fix any table refs in CHECK
- ** expressions. Also update the name that appears right after the
- ** "CREATE [VIRTUAL] TABLE" bit. */
- if( sqlite3_stricmp(zOld, pTab->zName)==0 ){
- sCtx.pTab = pTab;
- if( isLegacy==0 ){
- sqlite3WalkExprList(&sWalker, pTab->pCheck);
- }
- renameTokenFind(&sParse, &sCtx, pTab->zName);
- }
- }
- }
-
- else if( sParse.pNewIndex ){
- renameTokenFind(&sParse, &sCtx, sParse.pNewIndex->zName);
- if( isLegacy==0 ){
- sqlite3WalkExpr(&sWalker, sParse.pNewIndex->pPartIdxWhere);
- }
- }
-
-#ifndef SQLITE_OMIT_TRIGGER
- else{
- Trigger *pTrigger = sParse.pNewTrigger;
- TriggerStep *pStep;
- if( 0==sqlite3_stricmp(sParse.pNewTrigger->table, zOld)
- && sCtx.pTab->pSchema==pTrigger->pTabSchema
- ){
- renameTokenFind(&sParse, &sCtx, sParse.pNewTrigger->table);
- }
-
- if( isLegacy==0 ){
- rc = renameResolveTrigger(&sParse, bTemp ? 0 : zDb);
- if( rc==SQLITE_OK ){
- renameWalkTrigger(&sWalker, pTrigger);
- for(pStep=pTrigger->step_list; pStep; pStep=pStep->pNext){
- if( pStep->zTarget && 0==sqlite3_stricmp(pStep->zTarget, zOld) ){
- renameTokenFind(&sParse, &sCtx, pStep->zTarget);
- }
- }
- }
- }
- }
-#endif
- }
-
- if( rc==SQLITE_OK ){
- rc = renameEditSql(context, &sCtx, zInput, zNew, bQuote);
- }
- if( rc!=SQLITE_OK ){
- if( sParse.zErrMsg ){
- renameColumnParseError(context, 0, argv[1], argv[2], &sParse);
- }else{
- sqlite3_result_error_code(context, rc);
- }
- }
-
- renameParseCleanup(&sParse);
- renameTokenFree(db, sCtx.pList);
- sqlite3BtreeLeaveAll(db);
-#ifndef SQLITE_OMIT_AUTHORIZATION
- db->xAuth = xAuth;
-#endif
- }
-
- return;
-}
-
-/*
-** An SQL user function that checks that there are no parse or symbol
-** resolution problems in a CREATE TRIGGER|TABLE|VIEW|INDEX statement.
-** After an ALTER TABLE .. RENAME operation is performed and the schema
-** reloaded, this function is called on each SQL statement in the schema
-** to ensure that it is still usable.
-**
-** 0: Database name ("main", "temp" etc.).
-** 1: SQL statement.
-** 2: Object type ("view", "table", "trigger" or "index").
-** 3: Object name.
-** 4: True if object is from temp schema.
-**
-** Unless it finds an error, this function normally returns NULL. However, it
-** returns integer value 1 if:
-**
-** * the SQL argument creates a trigger, and
-** * the table that the trigger is attached to is in database zDb.
-*/
-static void renameTableTest(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **argv
-){
- sqlite3 *db = sqlite3_context_db_handle(context);
- char const *zDb = (const char*)sqlite3_value_text(argv[0]);
- char const *zInput = (const char*)sqlite3_value_text(argv[1]);
- int bTemp = sqlite3_value_int(argv[4]);
- int isLegacy = (db->flags & SQLITE_LegacyAlter);
-
-#ifndef SQLITE_OMIT_AUTHORIZATION
- sqlite3_xauth xAuth = db->xAuth;
- db->xAuth = 0;
-#endif
-
- UNUSED_PARAMETER(NotUsed);
- if( zDb && zInput ){
- int rc;
- Parse sParse;
- rc = renameParseSql(&sParse, zDb, 1, db, zInput, bTemp);
- if( rc==SQLITE_OK ){
- if( isLegacy==0 && sParse.pNewTable && sParse.pNewTable->pSelect ){
- NameContext sNC;
- memset(&sNC, 0, sizeof(sNC));
- sNC.pParse = &sParse;
- sqlite3SelectPrep(&sParse, sParse.pNewTable->pSelect, &sNC);
- if( sParse.nErr ) rc = sParse.rc;
- }
-
- else if( sParse.pNewTrigger ){
- if( isLegacy==0 ){
- rc = renameResolveTrigger(&sParse, bTemp ? 0 : zDb);
- }
- if( rc==SQLITE_OK ){
- int i1 = sqlite3SchemaToIndex(db, sParse.pNewTrigger->pTabSchema);
- int i2 = sqlite3FindDbName(db, zDb);
- if( i1==i2 ) sqlite3_result_int(context, 1);
- }
- }
- }
-
- if( rc!=SQLITE_OK ){
- renameColumnParseError(context, 1, argv[2], argv[3], &sParse);
- }
- renameParseCleanup(&sParse);
- }
-
-#ifndef SQLITE_OMIT_AUTHORIZATION
- db->xAuth = xAuth;
-#endif
-}
-
-/*
-** Register built-in functions used to help implement ALTER TABLE
-*/
-SQLITE_PRIVATE void sqlite3AlterFunctions(void){
- static FuncDef aAlterTableFuncs[] = {
- FUNCTION(sqlite_rename_column, 9, 0, 0, renameColumnFunc),
- FUNCTION(sqlite_rename_table, 7, 0, 0, renameTableFunc),
- FUNCTION(sqlite_rename_test, 5, 0, 0, renameTableTest),
- };
- sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs));
-}
#endif /* SQLITE_ALTER_TABLE */
/************** End of alter.c ***********************************************/
@@ -103226,7 +101567,6 @@ static const FuncDef statInitFuncdef = {
0, /* pNext */
statInit, /* xSFunc */
0, /* xFinalize */
- 0, 0, /* xValue, xInverse */
"stat_init", /* zName */
{0}
};
@@ -103543,7 +101883,6 @@ static const FuncDef statPushFuncdef = {
0, /* pNext */
statPush, /* xSFunc */
0, /* xFinalize */
- 0, 0, /* xValue, xInverse */
"stat_push", /* zName */
{0}
};
@@ -103695,7 +102034,6 @@ static const FuncDef statGetFuncdef = {
0, /* pNext */
statGet, /* xSFunc */
0, /* xFinalize */
- 0, 0, /* xValue, xInverse */
"stat_get", /* zName */
{0}
};
@@ -104015,7 +102353,10 @@ static void analyzeOneTable(
callStatGet(v, regStat4, STAT_GET_NLT, regLt);
callStatGet(v, regStat4, STAT_GET_NDLT, regDLt);
sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0);
- VdbeCoverage(v);
+ /* We know that the regSampleRowid row exists because it was read by
+ ** the previous loop. Thus the not-found jump of seekOp will never
+ ** be taken */
+ VdbeCoverageNeverTaken(v);
#ifdef SQLITE_ENABLE_STAT3
sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample);
#else
@@ -104655,7 +102996,7 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
/* Load the statistics from the sqlite_stat4 table. */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- if( rc==SQLITE_OK ){
+ if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){
db->lookaside.bDisable++;
rc = loadStat4(db, sInfo.zDatabase);
db->lookaside.bDisable--;
@@ -105094,7 +103435,6 @@ SQLITE_PRIVATE void sqlite3Detach(Parse *pParse, Expr *pDbname){
0, /* pNext */
detachFunc, /* xSFunc */
0, /* xFinalize */
- 0, 0, /* xValue, xInverse */
"sqlite_detach", /* zName */
{0}
};
@@ -105114,7 +103454,6 @@ SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *p
0, /* pNext */
attachFunc, /* xSFunc */
0, /* xFinalize */
- 0, 0, /* xValue, xInverse */
"sqlite_attach", /* zName */
{0}
};
@@ -105387,7 +103726,7 @@ SQLITE_API int sqlite3_set_authorizer(
sqlite3_mutex_enter(db->mutex);
db->xAuth = (sqlite3_xauth)xAuth;
db->pAuthArg = pArg;
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
sqlite3_mutex_leave(db->mutex);
return SQLITE_OK;
}
@@ -105516,7 +103855,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck(
/* Don't do any authorization checks if the database is initialising
** or if the parser is being invoked from within sqlite3_declare_vtab.
*/
- if( db->init.busy || IN_SPECIAL_PARSE ){
+ if( db->init.busy || IN_DECLARE_VTAB ){
return SQLITE_OK;
}
@@ -105808,6 +104147,7 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
/* Get the VDBE program ready for execution
*/
if( v && pParse->nErr==0 && !db->mallocFailed ){
+ assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */
/* A minimum of one cursor is required if autoincrement is used
* See ticket [a696379c1f08866] */
if( pParse->pAinc!=0 && pParse->nTab==0 ) pParse->nTab = 1;
@@ -106022,7 +104362,7 @@ SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const cha
/*
** Reclaim the memory used by an index
*/
-SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3 *db, Index *p){
+static void freeIndex(sqlite3 *db, Index *p){
#ifndef SQLITE_OMIT_ANALYZE
sqlite3DeleteIndexSamples(db, p);
#endif
@@ -106062,7 +104402,7 @@ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char
p->pNext = pIndex->pNext;
}
}
- sqlite3FreeIndex(db, pIndex);
+ freeIndex(db, pIndex);
}
db->mDbFlags |= DBFLAG_SchemaChange;
}
@@ -106208,7 +104548,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
assert( pOld==pIndex || pOld==0 );
}
- sqlite3FreeIndex(db, pIndex);
+ freeIndex(db, pIndex);
}
/* Delete any foreign keys attached to this table. */
@@ -106366,7 +104706,7 @@ SQLITE_PRIVATE int sqlite3TwoPartName(
return -1;
}
}else{
- assert( db->init.iDb==0 || db->init.busy || IN_RENAME_OBJECT
+ assert( db->init.iDb==0 || db->init.busy
|| (db->mDbFlags & DBFLAG_Vacuum)!=0);
iDb = db->init.iDb;
*pUnqual = pName1;
@@ -106461,9 +104801,6 @@ SQLITE_PRIVATE void sqlite3StartTable(
}
if( !OMIT_TEMPDB && isTemp ) iDb = 1;
zName = sqlite3NameFromToken(db, pName);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, (void*)zName, pName);
- }
}
pParse->sNameToken = *pName;
if( zName==0 ) return;
@@ -106499,7 +104836,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
** and types will be used, so there is no need to test for namespace
** collisions.
*/
- if( !IN_SPECIAL_PARSE ){
+ if( !IN_DECLARE_VTAB ){
char *zDb = db->aDb[iDb].zDbSName;
if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
goto begin_table_error;
@@ -106658,7 +104995,6 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){
}
z = sqlite3DbMallocRaw(db, pName->n + pType->n + 2);
if( z==0 ) return;
- if( IN_RENAME_OBJECT ) sqlite3RenameTokenMap(pParse, (void*)z, pName);
memcpy(z, pName->z, pName->n);
z[pName->n] = 0;
sqlite3Dequote(z);
@@ -106865,9 +105201,6 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(
sqlite3DbFree(db, x.u.zToken);
}
}
- if( IN_RENAME_OBJECT ){
- sqlite3RenameExprUnmap(pParse, pExpr);
- }
sqlite3ExprDelete(db, pExpr);
}
@@ -106959,9 +105292,6 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
&& sqlite3StrICmp(sqlite3ColumnType(pCol,""), "INTEGER")==0
&& sortOrder!=SQLITE_SO_DESC
){
- if( IN_RENAME_OBJECT && pList ){
- sqlite3RenameTokenRemap(pParse, &pTab->iPKey, pList->a[0].pExpr);
- }
pTab->iPKey = iCol;
pTab->keyConf = (u8)onError;
assert( autoInc==0 || autoInc==1 );
@@ -107287,31 +105617,6 @@ static int hasColumn(const i16 *aiCol, int nCol, int x){
return 0;
}
-/* Recompute the colNotIdxed field of the Index.
-**
-** colNotIdxed is a bitmask that has a 0 bit representing each indexed
-** columns that are within the first 63 columns of the table. The
-** high-order bit of colNotIdxed is always 1. All unindexed columns
-** of the table have a 1.
-**
-** The colNotIdxed mask is AND-ed with the SrcList.a[].colUsed mask
-** to determine if the index is covering index.
-*/
-static void recomputeColumnsNotIndexed(Index *pIdx){
- Bitmask m = 0;
- int j;
- for(j=pIdx->nColumn-1; j>=0; j--){
- int x = pIdx->aiColumn[j];
- if( x>=0 ){
- testcase( x==BMS-1 );
- testcase( x==BMS-2 );
-      if( x<BMS-1 ) m |= MASKBIT(x);
-    }
-  }
-  pIdx->colNotIdxed = ~m;
- assert( (pIdx->colNotIdxed>>63)==1 );
-}
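/* Sketch of how the mask computed above is consumed (illustrative): a check
** of the following shape treats the index as covering only when every column
** used by a table reference is one of the indexed columns. Because the high
** bit of colNotIdxed is always set, any use of column 63 or later makes the
** test fail, which is the conservative outcome.
**
**   if( (pSrcItem->colUsed & pIdx->colNotIdxed)==0 ){
**     ... pIdx can serve as a covering index for this table reference ...
**   }
*/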
-
/*
** This routine runs at the end of parsing a CREATE TABLE statement that
** has a WITHOUT ROWID clause. The job of this routine is to convert both
@@ -107354,6 +105659,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
}
}
+ /* The remaining transformations only apply to b-tree tables, not to
+ ** virtual tables */
+ if( IN_DECLARE_VTAB ) return;
+
/* Convert the P3 operand of the OP_CreateBtree opcode from BTREE_INTKEY
** into BTREE_BLOBKEY.
*/
@@ -107376,7 +105685,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
assert( pParse->pNewTable==pTab );
sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0,
SQLITE_IDXTYPE_PRIMARYKEY);
- if( db->mallocFailed || pParse->nErr ) return;
+ if( db->mallocFailed ) return;
pPk = sqlite3PrimaryKeyIndex(pTab);
pTab->iPKey = -1;
}else{
@@ -107456,7 +105765,6 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
}else{
pPk->nColumn = pTab->nCol;
}
- recomputeColumnsNotIndexed(pPk);
}
/*
@@ -107760,12 +106068,7 @@ SQLITE_PRIVATE void sqlite3CreateView(
** allocated rather than point to the input string - which means that
** they will persist after the current sqlite3_exec() call returns.
*/
- if( IN_RENAME_OBJECT ){
- p->pSelect = pSelect;
- pSelect = 0;
- }else{
- p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
- }
+ p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
p->pCheck = sqlite3ExprListDup(db, pCNames, EXPRDUP_REDUCE);
if( db->mallocFailed ) goto create_view_fail;
@@ -107790,9 +106093,6 @@ SQLITE_PRIVATE void sqlite3CreateView(
create_view_fail:
sqlite3SelectDelete(db, pSelect);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameExprlistUnmap(pParse, pCNames);
- }
sqlite3ExprListDelete(db, pCNames);
return;
}
@@ -107866,10 +106166,6 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
assert( pTable->pSelect );
pSel = sqlite3SelectDup(db, pTable->pSelect, 0);
if( pSel ){
-#ifndef SQLITE_OMIT_ALTERTABLE
- u8 eParseMode = pParse->eParseMode;
- pParse->eParseMode = PARSE_MODE_NORMAL;
-#endif
n = pParse->nTab;
sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
pTable->nCol = -1;
@@ -107915,18 +106211,10 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
sqlite3DeleteTable(db, pSelTab);
sqlite3SelectDelete(db, pSel);
db->lookaside.bDisable--;
-#ifndef SQLITE_OMIT_ALTERTABLE
- pParse->eParseMode = eParseMode;
-#endif
} else {
nErr++;
}
pTable->pSchema->schemaFlags |= DB_UnresetViews;
- if( db->mallocFailed ){
- sqlite3DeleteColumnNames(db, pTable);
- pTable->aCol = 0;
- pTable->nCol = 0;
- }
#endif /* SQLITE_OMIT_VIEW */
return nErr;
}
@@ -108265,10 +106553,8 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView,
v = sqlite3GetVdbe(pParse);
if( v ){
sqlite3BeginWriteOperation(pParse, 1, iDb);
- if( !isView ){
- sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName);
- sqlite3FkDropTable(pParse, pName, pTab);
- }
+ sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName);
+ sqlite3FkDropTable(pParse, pName, pTab);
sqlite3CodeDropTable(pParse, pTab, iDb, isView);
}
@@ -108343,9 +106629,6 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey(
pFKey->pNextFrom = p->pFKey;
z = (char*)&pFKey->aCol[nCol];
pFKey->zTo = z;
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, (void*)z, pTo);
- }
memcpy(z, pTo->z, pTo->n);
z[pTo->n] = 0;
sqlite3Dequote(z);
@@ -108368,18 +106651,12 @@ SQLITE_PRIVATE void sqlite3CreateForeignKey(
pFromCol->a[i].zName);
goto fk_end;
}
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, &pFKey->aCol[i], pFromCol->a[i].zName);
- }
}
}
if( pToCol ){
    for(i=0; i<nCol; i++){
      int n = sqlite3Strlen30(pToCol->a[i].zName);
pFKey->aCol[i].zCol = z;
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, z, pToCol->a[i].zName);
- }
memcpy(z, pToCol->a[i].zName, n);
z[n] = 0;
z += n+1;
@@ -108712,23 +106989,21 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){
goto exit_create_index;
}
- if( !IN_RENAME_OBJECT ){
- if( !db->init.busy ){
- if( sqlite3FindTable(db, zName, 0)!=0 ){
- sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
- goto exit_create_index;
- }
- }
- if( sqlite3FindIndex(db, zName, pDb->zDbSName)!=0 ){
- if( !ifNotExist ){
- sqlite3ErrorMsg(pParse, "index %s already exists", zName);
- }else{
- assert( !db->init.busy );
- sqlite3CodeVerifySchema(pParse, iDb);
- }
+ if( !db->init.busy ){
+ if( sqlite3FindTable(db, zName, 0)!=0 ){
+ sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
goto exit_create_index;
}
}
+ if( sqlite3FindIndex(db, zName, pDb->zDbSName)!=0 ){
+ if( !ifNotExist ){
+ sqlite3ErrorMsg(pParse, "index %s already exists", zName);
+ }else{
+ assert( !db->init.busy );
+ sqlite3CodeVerifySchema(pParse, iDb);
+ }
+ goto exit_create_index;
+ }
}else{
int n;
Index *pLoop;
@@ -108743,13 +107018,13 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
** The following statement converts "sqlite3_autoindex..." into
** "sqlite3_butoindex..." in order to make the names distinct.
** The "vtab_err.test" test demonstrates the need of this statement. */
- if( IN_SPECIAL_PARSE ) zName[7]++;
+ if( IN_DECLARE_VTAB ) zName[7]++;
}
/* Check for authorization to create an index.
*/
#ifndef SQLITE_OMIT_AUTHORIZATION
- if( !IN_RENAME_OBJECT ){
+ {
const char *zDb = pDb->zDbSName;
if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){
goto exit_create_index;
@@ -108836,12 +107111,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
** TODO: Issue a warning if the table primary key is used as part of the
** index key.
*/
- pListItem = pList->a;
- if( IN_RENAME_OBJECT ){
- pIndex->aColExpr = pList;
- pList = 0;
- }
- for(i=0; inKeyCol; i++, pListItem++){
+  for(i=0, pListItem=pList->a; i<pList->nExpr; i++, pListItem++){
Expr *pCExpr; /* The i-th index expression */
int requestedSortOrder; /* ASC or DESC on the i-th expression */
const char *zColl; /* Collation sequence name */
@@ -108857,8 +107127,12 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
goto exit_create_index;
}
if( pIndex->aColExpr==0 ){
- pIndex->aColExpr = pList;
- pList = 0;
+ ExprList *pCopy = sqlite3ExprListDup(db, pList, 0);
+ pIndex->aColExpr = pCopy;
+ if( !db->mallocFailed ){
+ assert( pCopy!=0 );
+ pListItem = &pCopy->a[i];
+ }
}
j = XN_EXPR;
pIndex->aiColumn[i] = XN_EXPR;
@@ -108924,7 +107198,6 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
** it as a covering index */
assert( HasRowid(pTab)
|| pTab->iPKey<0 || sqlite3ColumnOfIndex(pIndex, pTab->iPKey)>=0 );
- recomputeColumnsNotIndexed(pIndex);
if( pTblName!=0 && pIndex->nColumn>=pTab->nCol ){
pIndex->isCovering = 1;
    for(j=0; j<pTab->nCol; j++){
@@ -108997,101 +107270,98 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
}
}
- if( !IN_RENAME_OBJECT ){
+ /* Link the new Index structure to its table and to the other
+ ** in-memory database structures.
+ */
+ assert( pParse->nErr==0 );
+ if( db->init.busy ){
+ Index *p;
+ assert( !IN_DECLARE_VTAB );
+ assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
+ p = sqlite3HashInsert(&pIndex->pSchema->idxHash,
+ pIndex->zName, pIndex);
+ if( p ){
+ assert( p==pIndex ); /* Malloc must have failed */
+ sqlite3OomFault(db);
+ goto exit_create_index;
+ }
+ db->mDbFlags |= DBFLAG_SchemaChange;
+ if( pTblName!=0 ){
+ pIndex->tnum = db->init.newTnum;
+ }
+ }
- /* Link the new Index structure to its table and to the other
- ** in-memory database structures.
+ /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the
+ ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then
+ ** emit code to allocate the index rootpage on disk and make an entry for
+ ** the index in the sqlite_master table and populate the index with
+ ** content. But, do not do this if we are simply reading the sqlite_master
+ ** table to parse the schema, or if this index is the PRIMARY KEY index
+ ** of a WITHOUT ROWID table.
+ **
+ ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY
+ ** or UNIQUE index in a CREATE TABLE statement. Since the table
+ ** has just been created, it contains no data and the index initialization
+ ** step can be skipped.
+ */
+ else if( HasRowid(pTab) || pTblName!=0 ){
+ Vdbe *v;
+ char *zStmt;
+ int iMem = ++pParse->nMem;
+
+ v = sqlite3GetVdbe(pParse);
+ if( v==0 ) goto exit_create_index;
+
+ sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+ /* Create the rootpage for the index using CreateIndex. But before
+ ** doing so, code a Noop instruction and store its address in
+ ** Index.tnum. This is required in case this index is actually a
+ ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In
+ ** that case the convertToWithoutRowidTable() routine will replace
+ ** the Noop with a Goto to jump over the VDBE code generated below. */
+ pIndex->tnum = sqlite3VdbeAddOp0(v, OP_Noop);
+ sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, iMem, BTREE_BLOBKEY);
+
+ /* Gather the complete text of the CREATE INDEX statement into
+ ** the zStmt variable
*/
- assert( pParse->nErr==0 );
- if( db->init.busy ){
- Index *p;
- assert( !IN_SPECIAL_PARSE );
- assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
- p = sqlite3HashInsert(&pIndex->pSchema->idxHash,
- pIndex->zName, pIndex);
- if( p ){
- assert( p==pIndex ); /* Malloc must have failed */
- sqlite3OomFault(db);
- goto exit_create_index;
- }
- db->mDbFlags |= DBFLAG_SchemaChange;
- if( pTblName!=0 ){
- pIndex->tnum = db->init.newTnum;
- }
+ if( pStart ){
+ int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n;
+ if( pName->z[n-1]==';' ) n--;
+ /* A named index with an explicit CREATE INDEX statement */
+ zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s",
+ onError==OE_None ? "" : " UNIQUE", n, pName->z);
+ }else{
+ /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */
+ /* zStmt = sqlite3MPrintf(""); */
+ zStmt = 0;
}
- /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the
- ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then
- ** emit code to allocate the index rootpage on disk and make an entry for
- ** the index in the sqlite_master table and populate the index with
- ** content. But, do not do this if we are simply reading the sqlite_master
- ** table to parse the schema, or if this index is the PRIMARY KEY index
- ** of a WITHOUT ROWID table.
- **
- ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY
- ** or UNIQUE index in a CREATE TABLE statement. Since the table
- ** has just been created, it contains no data and the index initialization
- ** step can be skipped.
+ /* Add an entry in sqlite_master for this index
*/
- else if( HasRowid(pTab) || pTblName!=0 ){
- Vdbe *v;
- char *zStmt;
- int iMem = ++pParse->nMem;
+ sqlite3NestedParse(pParse,
+ "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);",
+ db->aDb[iDb].zDbSName, MASTER_NAME,
+ pIndex->zName,
+ pTab->zName,
+ iMem,
+ zStmt
+ );
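/* For "CREATE INDEX i1 ON t1(a)", the nested statement above adds a
** sqlite_master row of roughly this shape (illustrative; the #%d token is
** replaced at runtime by the root page number produced by OP_CreateBtree):
**
**   ('index', 'i1', 't1', <root page>, 'CREATE INDEX i1 ON t1(a)')
*/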
+ sqlite3DbFree(db, zStmt);
- v = sqlite3GetVdbe(pParse);
- if( v==0 ) goto exit_create_index;
-
- sqlite3BeginWriteOperation(pParse, 1, iDb);
-
- /* Create the rootpage for the index using CreateIndex. But before
- ** doing so, code a Noop instruction and store its address in
- ** Index.tnum. This is required in case this index is actually a
- ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In
- ** that case the convertToWithoutRowidTable() routine will replace
- ** the Noop with a Goto to jump over the VDBE code generated below. */
- pIndex->tnum = sqlite3VdbeAddOp0(v, OP_Noop);
- sqlite3VdbeAddOp3(v, OP_CreateBtree, iDb, iMem, BTREE_BLOBKEY);
-
- /* Gather the complete text of the CREATE INDEX statement into
- ** the zStmt variable
- */
- if( pStart ){
- int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n;
- if( pName->z[n-1]==';' ) n--;
- /* A named index with an explicit CREATE INDEX statement */
- zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s",
- onError==OE_None ? "" : " UNIQUE", n, pName->z);
- }else{
- /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */
- /* zStmt = sqlite3MPrintf(""); */
- zStmt = 0;
- }
-
- /* Add an entry in sqlite_master for this index
- */
- sqlite3NestedParse(pParse,
- "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);",
- db->aDb[iDb].zDbSName, MASTER_NAME,
- pIndex->zName,
- pTab->zName,
- iMem,
- zStmt
- );
- sqlite3DbFree(db, zStmt);
-
- /* Fill the index with data and reparse the schema. Code an OP_Expire
- ** to invalidate all pre-compiled statements.
- */
- if( pTblName ){
- sqlite3RefillIndex(pParse, pIndex, iMem);
- sqlite3ChangeCookie(pParse, iDb);
- sqlite3VdbeAddParseSchemaOp(v, iDb,
- sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName));
- sqlite3VdbeAddOp2(v, OP_Expire, 0, 1);
- }
-
- sqlite3VdbeJumpHere(v, pIndex->tnum);
+ /* Fill the index with data and reparse the schema. Code an OP_Expire
+ ** to invalidate all pre-compiled statements.
+ */
+ if( pTblName ){
+ sqlite3RefillIndex(pParse, pIndex, iMem);
+ sqlite3ChangeCookie(pParse, iDb);
+ sqlite3VdbeAddParseSchemaOp(v, iDb,
+ sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName));
+ sqlite3VdbeAddOp0(v, OP_Expire);
}
+
+ sqlite3VdbeJumpHere(v, pIndex->tnum);
}
/* When adding an index to the list of indices for a table, make
@@ -109115,15 +107385,10 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
}
pIndex = 0;
}
- else if( IN_RENAME_OBJECT ){
- assert( pParse->pNewIndex==0 );
- pParse->pNewIndex = pIndex;
- pIndex = 0;
- }
/* Clean up before exiting */
exit_create_index:
- if( pIndex ) sqlite3FreeIndex(db, pIndex);
+ if( pIndex ) freeIndex(db, pIndex);
sqlite3ExprDelete(db, pPIWhere);
sqlite3ExprListDelete(db, pList);
sqlite3SrcListDelete(db, pTblName);
@@ -109292,8 +107557,7 @@ SQLITE_PRIVATE void *sqlite3ArrayAllocate(
**
** A new IdList is returned, or NULL if malloc() fails.
*/
-SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *pToken){
- sqlite3 *db = pParse->db;
+SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){
int i;
if( pList==0 ){
pList = sqlite3DbMallocZero(db, sizeof(IdList) );
@@ -109311,9 +107575,6 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *
return 0;
}
pList->a[i].zName = sqlite3NameFromToken(db, pToken);
- if( IN_RENAME_OBJECT && pList->a[i].zName ){
- sqlite3RenameTokenMap(pParse, (void*)pList->a[i].zName, pToken);
- }
return pList;
}
@@ -109560,12 +107821,6 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(
}
assert( p->nSrc>0 );
pItem = &p->a[p->nSrc-1];
- assert( (pTable==0)==(pDatabase==0) );
- assert( pItem->zName==0 || pDatabase!=0 );
- if( IN_RENAME_OBJECT && pItem->zName ){
- Token *pToken = (ALWAYS(pDatabase) && pDatabase->z) ? pDatabase : pTable;
- sqlite3RenameTokenMap(pParse, pItem->zName, pToken);
- }
assert( pAlias!=0 );
if( pAlias->n ){
pItem->zAlias = sqlite3NameFromToken(db, pAlias);
@@ -111119,8 +109374,9 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}
iKey = iPk;
}else{
- iKey = ++pParse->nMem;
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, -1, iKey);
+ iKey = pParse->nMem + 1;
+ iKey = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iTabCur, iKey, 0);
+ if( iKey>pParse->nMem ) pParse->nMem = iKey;
}
if( eOnePass!=ONEPASS_OFF ){
@@ -111553,6 +109809,7 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(
if( pIdx->pPartIdxWhere ){
*piPartIdxLabel = sqlite3VdbeMakeLabel(v);
pParse->iSelfTab = iDataCur + 1;
+ sqlite3ExprCachePush(pParse);
sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel,
SQLITE_JUMPIFNULL);
pParse->iSelfTab = 0;
@@ -111599,6 +109856,7 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(
SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){
if( iLabel ){
sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel);
+ sqlite3ExprCachePop(pParse);
}
}
@@ -113111,7 +111369,7 @@ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
i64 v = sqlite3_value_int64(argv[0]);
p->rSum += v;
if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){
- p->approx = p->overflow = 1;
+ p->overflow = 1;
}
}else{
p->rSum += sqlite3_value_double(argv[0]);
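For reference, the behaviour this hunk falls back to is that sumStep() keeps both an exact integer total and a floating-point total, and once sqlite3AddInt64() reports a 64-bit overflow it flags it so the finalizers can either report the overflow or use the floating-point total. A minimal standalone sketch of that pattern, with hypothetical names and the GCC/Clang __builtin_add_overflow builtin standing in for sqlite3AddInt64(), looks like this (illustrative only, not part of the patch):

/* Illustrative stand-in for the iSum/rSum bookkeeping in sumStep() above.
** DemoSum and demo_sum_step are invented names. */
#include <stdio.h>
#include <stdint.h>

typedef struct DemoSum {
  int64_t iSum;      /* exact integer total, valid while overflow==0 */
  double  rSum;      /* floating-point total, always maintained */
  int     overflow;  /* set once the integer total has overflowed */
} DemoSum;

static void demo_sum_step(DemoSum *p, int64_t v){
  p->rSum += (double)v;
  if( !p->overflow ){
    int64_t r;
    if( __builtin_add_overflow(p->iSum, v, &r) ){
      p->overflow = 1;        /* only the approximate float total remains */
    }else{
      p->iSum = r;
    }
  }
}

int main(void){
  DemoSum s = {0, 0.0, 0};
  demo_sum_step(&s, INT64_MAX);
  demo_sum_step(&s, 10);      /* overflows the 64-bit integer total */
  printf("overflow=%d rSum=%.17g\n", s.overflow, s.rSum);
  return 0;
}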
@@ -113119,32 +111377,6 @@ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){
}
}
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){
- SumCtx *p;
- int type;
- assert( argc==1 );
- UNUSED_PARAMETER(argc);
- p = sqlite3_aggregate_context(context, sizeof(*p));
- type = sqlite3_value_numeric_type(argv[0]);
- /* p is always non-NULL because sumStep() will have been called first
- ** to initialize it */
- if( ALWAYS(p) && type!=SQLITE_NULL ){
- assert( p->cnt>0 );
- p->cnt--;
- assert( type==SQLITE_INTEGER || p->approx );
- if( type==SQLITE_INTEGER && p->approx==0 ){
- i64 v = sqlite3_value_int64(argv[0]);
- p->rSum -= v;
- p->iSum -= v;
- }else{
- p->rSum -= sqlite3_value_double(argv[0]);
- }
- }
-}
-#else
-# define sumInverse 0
-#endif /* SQLITE_OMIT_WINDOWFUNC */
static void sumFinalize(sqlite3_context *context){
SumCtx *p;
p = sqlite3_aggregate_context(context, 0);
@@ -113179,9 +111411,6 @@ static void totalFinalize(sqlite3_context *context){
typedef struct CountCtx CountCtx;
struct CountCtx {
i64 n;
-#ifdef SQLITE_DEBUG
- int bInverse; /* True if xInverse() ever called */
-#endif
};
/*
@@ -113199,7 +111428,7 @@ static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){
** sure it still operates correctly, verify that its count agrees with our
** internal count when using count(*) and when the total count can be
** expressed as a 32-bit integer. */
- assert( argc==1 || p==0 || p->n>0x7fffffff || p->bInverse
+ assert( argc==1 || p==0 || p->n>0x7fffffff
|| p->n==sqlite3_aggregate_count(context) );
#endif
}
@@ -113208,21 +111437,6 @@ static void countFinalize(sqlite3_context *context){
p = sqlite3_aggregate_context(context, 0);
sqlite3_result_int64(context, p ? p->n : 0);
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-static void countInverse(sqlite3_context *ctx, int argc, sqlite3_value **argv){
- CountCtx *p;
- p = sqlite3_aggregate_context(ctx, sizeof(*p));
- /* p is always non-NULL since countStep() will have been called first */
- if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && ALWAYS(p) ){
- p->n--;
-#ifdef SQLITE_DEBUG
- p->bInverse = 1;
-#endif
- }
-}
-#else
-# define countInverse 0
-#endif /* SQLITE_OMIT_WINDOWFUNC */
/*
** Routines to implement min() and max() aggregate functions.
@@ -113239,7 +111453,7 @@ static void minmaxStep(
pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest));
if( !pBest ) return;
- if( sqlite3_value_type(pArg)==SQLITE_NULL ){
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
if( pBest->flags ) sqlite3SkipAccumulatorLoad(context);
}else if( pBest->flags ){
int max;
@@ -113265,26 +111479,16 @@ static void minmaxStep(
sqlite3VdbeMemCopy(pBest, pArg);
}
}
-static void minMaxValueFinalize(sqlite3_context *context, int bValue){
+static void minMaxFinalize(sqlite3_context *context){
sqlite3_value *pRes;
pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0);
if( pRes ){
if( pRes->flags ){
sqlite3_result_value(context, pRes);
}
- if( bValue==0 ) sqlite3VdbeMemRelease(pRes);
+ sqlite3VdbeMemRelease(pRes);
}
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-static void minMaxValue(sqlite3_context *context){
- minMaxValueFinalize(context, 1);
-}
-#else
-# define minMaxValue 0
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-static void minMaxFinalize(sqlite3_context *context){
- minMaxValueFinalize(context, 0);
-}
/*
** group_concat(EXPR, ?SEPARATOR?)
@@ -113321,38 +111525,6 @@ static void groupConcatStep(
if( zVal ) sqlite3_str_append(pAccum, zVal, nVal);
}
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-static void groupConcatInverse(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- int n;
- StrAccum *pAccum;
- assert( argc==1 || argc==2 );
- if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
- pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum));
- /* pAccum is always non-NULL since groupConcatStep() will have always
- ** run frist to initialize it */
- if( ALWAYS(pAccum) ){
- n = sqlite3_value_bytes(argv[0]);
- if( argc==2 ){
- n += sqlite3_value_bytes(argv[1]);
- }else{
- n++;
- }
- if( n>=(int)pAccum->nChar ){
- pAccum->nChar = 0;
- }else{
- pAccum->nChar -= n;
- memmove(pAccum->zText, &pAccum->zText[n], pAccum->nChar);
- }
- if( pAccum->nChar==0 ) pAccum->mxAlloc = 0;
- }
-}
-#else
-# define groupConcatInverse 0
-#endif /* SQLITE_OMIT_WINDOWFUNC */
static void groupConcatFinalize(sqlite3_context *context){
StrAccum *pAccum;
pAccum = sqlite3_aggregate_context(context, 0);
@@ -113367,24 +111539,6 @@ static void groupConcatFinalize(sqlite3_context *context){
}
}
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
-static void groupConcatValue(sqlite3_context *context){
- sqlite3_str *pAccum;
- pAccum = (sqlite3_str*)sqlite3_aggregate_context(context, 0);
- if( pAccum ){
- if( pAccum->accError==SQLITE_TOOBIG ){
- sqlite3_result_error_toobig(context);
- }else if( pAccum->accError==SQLITE_NOMEM ){
- sqlite3_result_error_nomem(context);
- }else{
- const char *zText = sqlite3_str_value(pAccum);
- sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT);
- }
- }
-}
-#else
-# define groupConcatValue 0
-#endif /* SQLITE_OMIT_WINDOWFUNC */
/*
** This routine does per-connection function registration. Most
@@ -113422,10 +111576,10 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive)
}else{
pInfo = (struct compareInfo*)&likeInfoNorm;
}
- sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
- sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0);
+ sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0);
+ sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0);
sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8,
- (struct compareInfo*)&globInfo, likeFunc, 0, 0, 0, 0, 0);
+ (struct compareInfo*)&globInfo, likeFunc, 0, 0, 0);
setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE);
setLikeOptFlag(db, "like",
caseSensitive ? (SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE);
@@ -113534,11 +111688,11 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION(trim, 2, 3, 0, trimFunc ),
FUNCTION(min, -1, 0, 1, minmaxFunc ),
FUNCTION(min, 0, 0, 1, 0 ),
- WAGGREGATE(min, 1, 0, 1, minmaxStep, minMaxFinalize, minMaxValue, 0,
+ AGGREGATE2(min, 1, 0, 1, minmaxStep, minMaxFinalize,
SQLITE_FUNC_MINMAX ),
FUNCTION(max, -1, 1, 1, minmaxFunc ),
FUNCTION(max, 0, 1, 1, 0 ),
- WAGGREGATE(max, 1, 1, 1, minmaxStep, minMaxFinalize, minMaxValue, 0,
+ AGGREGATE2(max, 1, 1, 1, minmaxStep, minMaxFinalize,
SQLITE_FUNC_MINMAX ),
FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF),
FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH),
@@ -113569,17 +111723,14 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ),
FUNCTION(substr, 2, 0, 0, substrFunc ),
FUNCTION(substr, 3, 0, 0, substrFunc ),
- WAGGREGATE(sum, 1,0,0, sumStep, sumFinalize, sumFinalize, sumInverse, 0),
- WAGGREGATE(total, 1,0,0, sumStep,totalFinalize,totalFinalize,sumInverse, 0),
- WAGGREGATE(avg, 1,0,0, sumStep, avgFinalize, avgFinalize, sumInverse, 0),
- WAGGREGATE(count, 0,0,0, countStep,
- countFinalize, countFinalize, countInverse, SQLITE_FUNC_COUNT ),
- WAGGREGATE(count, 1,0,0, countStep,
- countFinalize, countFinalize, countInverse, 0 ),
- WAGGREGATE(group_concat, 1, 0, 0, groupConcatStep,
- groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
- WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep,
- groupConcatFinalize, groupConcatValue, groupConcatInverse, 0),
+ AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ),
+ AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ),
+ AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ),
+ AGGREGATE2(count, 0, 0, 0, countStep, countFinalize,
+ SQLITE_FUNC_COUNT ),
+ AGGREGATE(count, 1, 0, 0, countStep, countFinalize ),
+ AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize),
+ AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize),
LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
#ifdef SQLITE_CASE_SENSITIVE_LIKE
@@ -113599,7 +111750,6 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
#ifndef SQLITE_OMIT_ALTERTABLE
sqlite3AlterFunctions();
#endif
- sqlite3WindowFunctions();
#if defined(SQLITE_ENABLE_STAT3) || defined(SQLITE_ENABLE_STAT4)
sqlite3AnalyzeFunctions();
#endif
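The registration hunks above swap the WAGGREGATE entries (step/finalize/value/inverse) back to AGGREGATE/AGGREGATE2 step/finalize pairs and drop sqlite3WindowFunctions(). The same step/finalize shape is what extensions provide through the public sqlite3_create_function() entry point; the standalone sketch below registers a hypothetical sumsq() aggregate that way (illustrative only, not part of the patch):

/* A user-defined sum-of-squares aggregate registered through the public
** API, mirroring the xStep/xFinal pairs in the built-in table above.
** "sumsq" and SumSqCtx are made-up names. */
#include <stdio.h>
#include <sqlite3.h>

typedef struct SumSqCtx { double total; } SumSqCtx;

static void sumsqStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  SumSqCtx *p = sqlite3_aggregate_context(ctx, sizeof(*p));
  (void)argc;
  if( p && sqlite3_value_type(argv[0])!=SQLITE_NULL ){
    double v = sqlite3_value_double(argv[0]);
    p->total += v*v;
  }
}

static void sumsqFinal(sqlite3_context *ctx){
  SumSqCtx *p = sqlite3_aggregate_context(ctx, 0);
  sqlite3_result_double(ctx, p ? p->total : 0.0);
}

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_create_function(db, "sumsq", 1, SQLITE_UTF8, 0,
                          0 /* xFunc */, sumsqStep, sumsqFinal);
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1),(2),(3);", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT sumsq(x) FROM t", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("sumsq = %g\n", sqlite3_column_double(pStmt, 0));  /* prints 14 */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}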
@@ -114337,12 +112487,11 @@ static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){
*/
SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){
sqlite3 *db = pParse->db;
- if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) ){
+ if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) && !pTab->pSelect ){
int iSkip = 0;
Vdbe *v = sqlite3GetVdbe(pParse);
assert( v ); /* VDBE has already been allocated */
- assert( pTab->pSelect==0 ); /* Not a view */
if( sqlite3FkReferences(pTab)==0 ){
/* Search for a deferred foreign key constraint for which this table
** is the child table. If one cannot be found, return without
@@ -116237,6 +114386,44 @@ static int checkConstraintUnchanged(Expr *pExpr, int *aiChng, int chngRowid){
return !w.eCode;
}
+/*
+** An instance of the ConstraintAddr object remembers the byte-code addresses
+** for sections of the constraint checks that deal with uniqueness constraints
+** on the rowid and on the upsert constraint.
+**
+** This information is passed into checkReorderConstraintChecks() to insert
+** some OP_Goto operations so that the rowid and upsert constraints occur
+** in the correct order relative to other constraints.
+*/
+typedef struct ConstraintAddr ConstraintAddr;
+struct ConstraintAddr {
+ int ipkTop; /* Subroutine for rowid constraint check */
+ int upsertTop; /* Label for upsert constraint check subroutine */
+ int upsertTop2; /* Copy of upsertTop not cleared by the call */
+ int upsertBtm; /* upsert constraint returns to this label */
+ int ipkBtm; /* Return opcode rowid constraint check */
+};
+
+/*
+** Generate any OP_Goto operations needed to cause constraints to be
+** run that haven't already been run.
+*/
+static void reorderConstraintChecks(Vdbe *v, ConstraintAddr *p){
+ if( p->upsertTop ){
+ testcase( sqlite3VdbeLabelHasBeenResolved(v, p->upsertTop) );
+ sqlite3VdbeGoto(v, p->upsertTop);
+ VdbeComment((v, "call upsert subroutine"));
+ sqlite3VdbeResolveLabel(v, p->upsertBtm);
+ p->upsertTop = 0;
+ }
+ if( p->ipkTop ){
+ sqlite3VdbeGoto(v, p->ipkTop);
+ VdbeComment((v, "call rowid unique-check subroutine"));
+ sqlite3VdbeJumpHere(v, p->ipkBtm);
+ p->ipkTop = 0;
+ }
+}
+
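reorderConstraintChecks() relies on the forward-reference trick visible throughout these hunks: emit an OP_Goto whose destination is not known yet, remember its address or label, and patch it later with sqlite3VdbeJumpHere() or by resolving the label. As a rough standalone analogy (a toy instruction array, nothing SQLite-specific), the backpatching step looks like this:

/* Toy analogy of OP_Goto backpatching: emit a jump before its target is
** known, record its address, then patch it once the destination exists.
** ToyOp, emit() and jump_here() are invented for illustration. */
#include <stdio.h>

typedef struct ToyOp { const char *zName; int p2; } ToyOp;

static ToyOp aOp[16];
static int nOp = 0;

static int emit(const char *zName, int p2){   /* returns address of new op */
  aOp[nOp].zName = zName;
  aOp[nOp].p2 = p2;
  return nOp++;
}

static void jump_here(int addr){              /* like sqlite3VdbeJumpHere() */
  aOp[addr].p2 = nOp;
}

int main(void){
  int skip = emit("Goto", 0);      /* destination unknown: defer this check */
  emit("RowidCheck", 0);           /* the deferred subroutine body */
  emit("Return", 0);
  jump_here(skip);                 /* the Goto now lands past the subroutine */
  emit("UniqueCheck", 0);
  for(int i=0; i<nOp; i++){
    printf("%2d: %-12s p2=%d\n", i, aOp[i].zName, aOp[i].p2);
  }
  return 0;
}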
/*
** Generate code to do constraint checks prior to an INSERT or an UPDATE
** on table pTab.
@@ -116346,13 +114533,11 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int addr1; /* Address of jump instruction */
int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */
int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */
+ ConstraintAddr sAddr;/* Address information for constraint reordering */
Index *pUpIdx = 0; /* Index to which to apply the upsert */
u8 isUpdate; /* True if this is an UPDATE operation */
u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */
int upsertBypass = 0; /* Address of Goto to bypass upsert subroutine */
- int upsertJump = 0; /* Address of Goto that jumps into upsert subroutine */
- int ipkTop = 0; /* Top of the IPK uniqueness check */
- int ipkBottom = 0; /* OP_Goto at the end of the IPK uniqueness check */
isUpdate = regOldData!=0;
db = pParse->db;
@@ -116360,6 +114545,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
assert( v!=0 );
assert( pTab->pSelect==0 ); /* This table is not a VIEW */
nCol = pTab->nCol;
+ memset(&sAddr, 0, sizeof(sAddr));
/* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for
** normal rowid tables. nPkField is the number of key fields in the
@@ -116463,8 +114649,8 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
/* UNIQUE and PRIMARY KEY constraints should be handled in the following
** order:
**
- ** (1) OE_Update
- ** (2) OE_Abort, OE_Fail, OE_Rollback, OE_Ignore
+ ** (1) OE_Abort, OE_Fail, OE_Rollback, OE_Ignore
+ ** (2) OE_Update
** (3) OE_Replace
**
** OE_Fail and OE_Ignore must happen before any changes are made.
@@ -116473,11 +114659,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
** could happen in any order, but they are grouped up front for
** convenience.
**
- ** 2018-08-14: Ticket https://www.sqlite.org/src/info/908f001483982c43
- ** The order of constraints used to have OE_Update as (2) and OE_Abort
- ** and so forth as (1). But apparently PostgreSQL checks the OE_Update
- ** constraint before any others, so it had to be moved.
- **
** Constraint checking code is generated in this order:
** (A) The rowid constraint
** (B) Unique index constraints that do not have OE_Replace as their
@@ -116497,10 +114678,11 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
overrideError = OE_Ignore;
pUpsert = 0;
}else if( (pUpIdx = pUpsert->pUpsertIdx)!=0 ){
- /* If the constraint-target uniqueness check must be run first.
- ** Jump to that uniqueness check now */
- upsertJump = sqlite3VdbeAddOp0(v, OP_Goto);
- VdbeComment((v, "UPSERT constraint goes first"));
+ /* If the constraint-target is on some column other than
+ ** then ROWID, then we might need to move the UPSERT around
+ ** so that it occurs in the correct order. */
+ sAddr.upsertTop = sAddr.upsertTop2 = sqlite3VdbeMakeLabel(v);
+ sAddr.upsertBtm = sqlite3VdbeMakeLabel(v);
}
}
@@ -116532,12 +114714,16 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
** to defer the running of the rowid conflict checking until after
** the UNIQUE constraints have run.
*/
- if( onError==OE_Replace /* IPK rule is REPLACE */
- && onError!=overrideError /* Rules for other contraints are different */
- && pTab->pIndex /* There exist other constraints */
+ assert( OE_Update>OE_Replace );
+ assert( OE_Ignore<OE_Replace );
+ assert( OE_Fail<OE_Replace );
+ assert( OE_Abort<OE_Replace );
+ assert( OE_Rollback<OE_Replace );
+ if( onError>=OE_Replace
+ && (pUpsert || onError!=overrideError)
+ && pTab->pIndex
){
- ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1;
- VdbeComment((v, "defer IPK REPLACE until last"));
+ sAddr.ipkTop = sqlite3VdbeAddOp0(v, OP_Goto)+1;
}
if( isUpdate ){
@@ -116632,9 +114818,9 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
}
sqlite3VdbeResolveLabel(v, addrRowidOk);
- if( ipkTop ){
- ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto);
- sqlite3VdbeJumpHere(v, ipkTop-1);
+ if( sAddr.ipkTop ){
+ sAddr.ipkBtm = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeJumpHere(v, sAddr.ipkTop-1);
}
}
@@ -116653,18 +114839,18 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */
if( pUpIdx==pIdx ){
- addrUniqueOk = upsertJump+1;
+ addrUniqueOk = sAddr.upsertBtm;
upsertBypass = sqlite3VdbeGoto(v, 0);
VdbeComment((v, "Skip upsert subroutine"));
- sqlite3VdbeJumpHere(v, upsertJump);
+ sqlite3VdbeResolveLabel(v, sAddr.upsertTop2);
}else{
addrUniqueOk = sqlite3VdbeMakeLabel(v);
}
- if( bAffinityDone==0 && (pUpIdx==0 || pUpIdx==pIdx) ){
+ VdbeNoopComment((v, "uniqueness check for %s", pIdx->zName));
+ if( bAffinityDone==0 ){
sqlite3TableAffinity(v, pTab, regNewData+1);
bAffinityDone = 1;
}
- VdbeNoopComment((v, "uniqueness check for %s", pIdx->zName));
iThisCur = iIdxCur+ix;
@@ -116735,6 +114921,15 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
}
+ /* Invoke subroutines to handle IPK replace and upsert prior to running
+ ** the first REPLACE constraint check. */
+ if( onError==OE_Replace ){
+ testcase( sAddr.ipkTop );
+ testcase( sAddr.upsertTop
+ && sqlite3VdbeLabelHasBeenResolved(v,sAddr.upsertTop) );
+ reorderConstraintChecks(v, &sAddr);
+ }
+
/* Collision detection may be omitted if all of the following are true:
** (1) The conflict resolution algorithm is REPLACE
** (2) The table is a WITHOUT ROWID table
@@ -116755,6 +114950,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
/* Check to see if the new index entry will be unique */
+ sqlite3ExprCachePush(pParse);
sqlite3VdbeVerifyAbortable(v, onError);
sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk,
regIdx, pIdx->nKeyCol); VdbeCoverage(v);
@@ -116856,21 +115052,19 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
}
if( pUpIdx==pIdx ){
- sqlite3VdbeGoto(v, upsertJump+1);
sqlite3VdbeJumpHere(v, upsertBypass);
}else{
sqlite3VdbeResolveLabel(v, addrUniqueOk);
}
+ sqlite3ExprCachePop(pParse);
if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField);
- }
- /* If the IPK constraint is a REPLACE, run it last */
- if( ipkTop ){
- sqlite3VdbeGoto(v, ipkTop+1);
- VdbeComment((v, "Do IPK REPLACE"));
- sqlite3VdbeJumpHere(v, ipkBottom);
}
-
+ testcase( sAddr.ipkTop!=0 );
+ testcase( sAddr.upsertTop
+ && sqlite3VdbeLabelHasBeenResolved(v,sAddr.upsertTop) );
+ reorderConstraintChecks(v, &sAddr);
+
*pbMayReplace = seenReplace;
VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace));
}
@@ -116966,6 +115160,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
sqlite3SetMakeRecordP5(v, pTab);
if( !bAffinityDone ){
sqlite3TableAffinity(v, pTab, 0);
+ sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol);
}
if( pParse->nested ){
pik_flags = 0;
@@ -117941,12 +116136,6 @@ struct sqlite3_api_routines {
int (*str_errcode)(sqlite3_str*);
int (*str_length)(sqlite3_str*);
char *(*str_value)(sqlite3_str*);
- int (*create_window_function)(sqlite3*,const char*,int,int,void*,
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
- void (*xFinal)(sqlite3_context*),
- void (*xValue)(sqlite3_context*),
- void (*xInv)(sqlite3_context*,int,sqlite3_value**),
- void(*xDestroy)(void*));
};
/*
@@ -118232,8 +116421,6 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_str_errcode sqlite3_api->str_errcode
#define sqlite3_str_length sqlite3_api->str_length
#define sqlite3_str_value sqlite3_api->str_value
-/* Version 3.25.0 and later */
-#define sqlite3_create_window_function sqlite3_api->create_window_function
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
@@ -118687,9 +116874,7 @@ static const sqlite3_api_routines sqlite3Apis = {
sqlite3_str_reset,
sqlite3_str_errcode,
sqlite3_str_length,
- sqlite3_str_value,
- /* Version 3.25.0 and later */
- sqlite3_create_window_function
+ sqlite3_str_value
};
/*
@@ -119484,11 +117669,6 @@ static const PragmaName aPragmaName[] = {
/* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- {/* zName: */ "legacy_alter_table",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
- /* ColNames: */ 0, 0,
- /* iArg: */ SQLITE_LegacyAlter },
{/* zName: */ "legacy_file_format",
/* ePragTyp: */ PragTyp_FLAG,
/* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
@@ -119742,7 +117922,7 @@ static const PragmaName aPragmaName[] = {
/* iArg: */ SQLITE_WriteSchema },
#endif
};
-/* Number of pragmas: 61 on by default, 78 total. */
+/* Number of pragmas: 60 on by default, 77 total. */
/************** End of pragma.h **********************************************/
/************** Continuing where we left off in pragma.c *********************/
@@ -121267,6 +119447,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( pTab->tnum<1 ) continue; /* Skip VIEWs or VIRTUAL TABLEs */
pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+ sqlite3ExprCacheClear(pParse);
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0,
1, 0, &iDataCur, &iIdxCur);
/* reg[7] counts the number of entries in the table.
@@ -121280,11 +119461,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( sqlite3NoTempsInRange(pParse,1,7+j) );
sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v);
loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1);
- if( !isQuick ){
- /* Sanity check on record header decoding */
- sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nCol-1, 3);
- sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
- }
/* Verify that all NOT NULL columns really are NOT NULL */
for(j=0; j<pTab->nCol; j++){
char *zErr;
@@ -121309,6 +119485,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
char *zErr;
int k;
pParse->iSelfTab = iDataCur + 1;
+ sqlite3ExprCachePush(pParse);
for(k=pCheck->nExpr-1; k>0; k--){
sqlite3ExprIfFalse(pParse, pCheck->a[k].pExpr, addrCkFault, 0);
}
@@ -121321,10 +119498,14 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
integrityCheckResultRow(v);
sqlite3VdbeResolveLabel(v, addrCkOk);
+ sqlite3ExprCachePop(pParse);
}
sqlite3ExprListDelete(db, pCheck);
}
if( !isQuick ){ /* Omit the remaining tests for quick_check */
+ /* Sanity check on record header decoding */
+ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nCol-1, 3);
+ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
/* Validate index entries for the current row */
for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
int jmp2, jmp3, jmp4, jmp5;
@@ -121939,6 +120120,7 @@ static int pragmaVtabConnect(
}
if( i==0 ){
sqlite3_str_appendf(&acc, "(\"%s\"", pPragma->zName);
+ cSep = ',';
i++;
}
j = 0;
@@ -122231,23 +120413,15 @@ static void corruptSchema(
const char *zExtra /* Error information */
){
sqlite3 *db = pData->db;
- if( db->mallocFailed ){
- pData->rc = SQLITE_NOMEM_BKPT;
- }else if( pData->pzErrMsg[0]!=0 ){
- /* A error message has already been generated. Do not overwrite it */
- }else if( pData->mInitFlags & INITFLAG_AlterTable ){
- *pData->pzErrMsg = sqlite3DbStrDup(db, zExtra);
- pData->rc = SQLITE_ERROR;
- }else if( db->flags & SQLITE_WriteSchema ){
- pData->rc = SQLITE_CORRUPT_BKPT;
- }else{
+ if( !db->mallocFailed && (db->flags & SQLITE_WriteSchema)==0 ){
char *z;
if( zObj==0 ) zObj = "?";
z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj);
if( zExtra && zExtra[0] ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra);
+ sqlite3DbFree(db, *pData->pzErrMsg);
*pData->pzErrMsg = z;
- pData->rc = SQLITE_CORRUPT_BKPT;
}
+ pData->rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_CORRUPT_BKPT;
}
/*
@@ -122299,7 +120473,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
rc = db->errCode;
assert( (rc&0xFF)==(rcp&0xFF) );
db->init.iDb = saved_iDb;
- /* assert( saved_iDb==0 || (db->mDbFlags & DBFLAG_Vacuum)!=0 ); */
+ assert( saved_iDb==0 || (db->mDbFlags & DBFLAG_Vacuum)!=0 );
if( SQLITE_OK!=rc ){
if( db->init.orphanTrigger ){
assert( iDb==1 );
@@ -122346,7 +120520,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
** auxiliary databases. Return one of the SQLITE_ error codes to
** indicate success or failure.
*/
-SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFlags){
+static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){
int rc;
int i;
#ifndef SQLITE_OMIT_DEPRECATED
@@ -122381,7 +120555,6 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl
initData.iDb = iDb;
initData.rc = SQLITE_OK;
initData.pzErrMsg = pzErrMsg;
- initData.mInitFlags = mFlags;
sqlite3InitCallback(&initData, 3, (char **)azArg, 0);
if( initData.rc ){
rc = initData.rc;
@@ -122403,7 +120576,7 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl
** will be closed before this function returns. */
sqlite3BtreeEnter(pDb->pBt);
if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){
- rc = sqlite3BtreeBeginTrans(pDb->pBt, 0, 0);
+ rc = sqlite3BtreeBeginTrans(pDb->pBt, 0);
if( rc!=SQLITE_OK ){
sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc));
goto initone_error_out;
@@ -122588,14 +120761,14 @@ SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){
assert( db->nDb>0 );
/* Do the main schema first */
if( !DbHasProperty(db, 0, DB_SchemaLoaded) ){
- rc = sqlite3InitOne(db, 0, pzErrMsg, 0);
+ rc = sqlite3InitOne(db, 0, pzErrMsg);
if( rc ) return rc;
}
/* All other schemas after the main schema. The "temp" schema must be last */
for(i=db->nDb-1; i>0; i--){
assert( i==1 || sqlite3BtreeHoldsMutex(db->aDb[i].pBt) );
if( !DbHasProperty(db, i, DB_SchemaLoaded) ){
- rc = sqlite3InitOne(db, i, pzErrMsg, 0);
+ rc = sqlite3InitOne(db, i, pzErrMsg);
if( rc ) return rc;
}
}
@@ -122648,7 +120821,7 @@ static void schemaIsValid(Parse *pParse){
** on the b-tree database, open one now. If a transaction is opened, it
** will be closed immediately after reading the meta-value. */
if( !sqlite3BtreeIsInReadTrans(pBt) ){
- rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){
sqlite3OomFault(db);
}
@@ -123145,7 +121318,7 @@ SQLITE_API int sqlite3_prepare16_v3(
/***/ int sqlite3SelectTrace = 0;
# define SELECTTRACE(K,P,S,X) \
if(sqlite3SelectTrace&(K)) \
- sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\
+ sqlite3DebugPrintf("%s/%d/%p: ",(S)->zSelName,(P)->addrExplain,(S)),\
sqlite3DebugPrintf X
#else
# define SELECTTRACE(K,P,S,X)
@@ -123192,8 +121365,8 @@ struct SortCtx {
int labelBkOut; /* Start label for the block-output subroutine */
int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */
int labelDone; /* Jump here when done, ex: LIMIT reached */
- int labelOBLopt; /* Jump here when sorter is full */
u8 sortFlags; /* Zero or more SORTFLAG_* bits */
+ u8 bOrderedInnerLoop; /* ORDER BY correctly sorts the inner loop */
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
u8 nDefer; /* Number of valid entries in aDefer[] */
struct DeferredCsr {
@@ -123220,11 +121393,6 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){
sqlite3ExprDelete(db, p->pHaving);
sqlite3ExprListDelete(db, p->pOrderBy);
sqlite3ExprDelete(db, p->pLimit);
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( OK_IF_ALWAYS_TRUE(p->pWinDefn) ){
- sqlite3WindowListDelete(db, p->pWinDefn);
- }
-#endif
if( OK_IF_ALWAYS_TRUE(p->pWith) ) sqlite3WithDelete(db, p->pWith);
if( bFree ) sqlite3DbFreeNN(db, p);
p = pPrior;
@@ -123275,7 +121443,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
pNew->selFlags = selFlags;
pNew->iLimit = 0;
pNew->iOffset = 0;
- pNew->selId = ++pParse->nSelect;
+#if SELECTTRACE_ENABLED
+ pNew->zSelName[0] = 0;
+#endif
pNew->addrOpenEphm[0] = -1;
pNew->addrOpenEphm[1] = -1;
pNew->nSelectRow = 0;
@@ -123289,10 +121459,6 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
pNew->pNext = 0;
pNew->pLimit = pLimit;
pNew->pWith = 0;
-#ifndef SQLITE_OMIT_WINDOWFUNC
- pNew->pWin = 0;
- pNew->pWinDefn = 0;
-#endif
if( pParse->db->mallocFailed ) {
clearSelect(pParse->db, pNew, pNew!=&standin);
pNew = 0;
@@ -123303,6 +121469,17 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(
return pNew;
}
+#if SELECTTRACE_ENABLED
+/*
+** Set the name of a Select object
+*/
+SQLITE_PRIVATE void sqlite3SelectSetName(Select *p, const char *zName){
+ if( p && zName ){
+ sqlite3_snprintf(sizeof(p->zSelName), p->zSelName, "%s", zName);
+ }
+}
+#endif
+
/*
** Delete the given Select structure and all of its substructures.
@@ -123649,6 +121826,14 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){
return 0;
}
+/* Forward reference */
+static KeyInfo *keyInfoFromExprList(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* Form the KeyInfo object from this ExprList */
+ int iStart, /* Begin with this column of pList */
+ int nExtra /* Add this many extra columns to the end */
+);
+
/*
** An instance of this object holds information (beyond pParse and pSelect)
** needed to load the next result row that is to be added to the sorter.
@@ -123790,7 +121975,7 @@ static void pushOntoSorter(
memset(pKI->aSortOrder, 0, pKI->nKeyField); /* Makes OP_Jump testable */
sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO);
testcase( pKI->nAllField > pKI->nKeyField+2 );
- pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat,
+ pOp->p4.pKeyInfo = keyInfoFromExprList(pParse, pSort->pOrderBy, nOBSat,
pKI->nAllField-pKI->nKeyField-1);
addrJmp = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v);
@@ -123817,10 +122002,10 @@ static void pushOntoSorter(
** than LIMIT+OFFSET items in the sorter.
**
** If the new record does not need to be inserted into the sorter,
- ** jump to the next iteration of the loop. If the pSort->labelOBLopt
- ** value is not zero, then it is a label of where to jump. Otherwise,
- ** just bypass the row insert logic. See the header comment on the
- ** sqlite3WhereOrderByLimitOptLabel() function for additional info.
+ ** jump to the next iteration of the loop. Or, if the
+ ** pSort->bOrderedInnerLoop flag is set to indicate that the inner
+ ** loop delivers items in sorted order, jump to the next iteration
+ ** of the outer loop.
*/
int iCsr = pSort->iECursor;
sqlite3VdbeAddOp2(v, OP_IfNotZero, iLimit, sqlite3VdbeCurrentAddr(v)+4);
@@ -123842,8 +122027,9 @@ static void pushOntoSorter(
sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord,
regBase+nOBSat, nBase-nOBSat);
if( iSkip ){
+ assert( pSort->bOrderedInnerLoop==0 || pSort->bOrderedInnerLoop==1 );
sqlite3VdbeChangeP2(v, iSkip,
- pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v));
+ sqlite3VdbeCurrentAddr(v) + pSort->bOrderedInnerLoop);
}
}
@@ -124272,6 +122458,7 @@ static void selectInnerLoop(
assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol );
sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol,
r1, pDest->zAffSdst, nResultCol);
+ sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
sqlite3ReleaseTempReg(pParse, r1);
}
@@ -124315,6 +122502,7 @@ static void selectInnerLoop(
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
}else{
sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol);
+ sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol);
}
break;
}
@@ -124457,7 +122645,7 @@ SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; }
** function is responsible for seeing that this structure is eventually
** freed.
*/
-SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(
+static KeyInfo *keyInfoFromExprList(
Parse *pParse, /* Parsing context */
ExprList *pList, /* Form the KeyInfo object from this ExprList */
int iStart, /* Begin with this column of pList */
@@ -124671,6 +122859,7 @@ static void generateSortTail(
assert( nColumn==sqlite3Strlen30(pDest->zAffSdst) );
sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid,
pDest->zAffSdst, nColumn);
+ sqlite3ExprCacheAffinityChange(pParse, regRow, nColumn);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn);
break;
}
@@ -124685,6 +122874,7 @@ static void generateSortTail(
testcase( eDest==SRT_Coroutine );
if( eDest==SRT_Output ){
sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn);
+ sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn);
}else{
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
}
@@ -125285,6 +123475,7 @@ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){
** The current implementation interprets "LIMIT 0" to mean
** no rows.
*/
+ sqlite3ExprCacheClear(pParse);
if( pLimit ){
assert( pLimit->op==TK_LIMIT );
assert( pLimit->pLeft!=0 );
@@ -126070,6 +124261,7 @@ static int generateOutputSubroutine(
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst,
r1, pDest->zAffSdst, pIn->nSdst);
+ sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1,
pIn->iSdst, pIn->nSdst);
sqlite3ReleaseTempReg(pParse, r1);
@@ -126112,6 +124304,7 @@ static int generateOutputSubroutine(
default: {
assert( pDest->eDest==SRT_Output );
sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst);
+ sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst);
break;
}
}
@@ -126566,7 +124759,7 @@ static Expr *substExpr(
Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr;
Expr ifNullRow;
assert( pSubst->pEList!=0 && pExpr->iColumn<pSubst->pEList->nExpr );
- assert( pExpr->pRight==0 );
+ assert( pExpr->pLeft==0 && pExpr->pRight==0 );
if( sqlite3ExprIsVector(pCopy) ){
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
@@ -126780,10 +124973,6 @@ static void substSelect(
** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily
** return the value X for which Y was maximal.)
**
-** (25) If either the subquery or the parent query contains a window
-** function in the select list or ORDER BY clause, flattening
-** is not attempted.
-**
**
** In this routine, the "p" parameter is a pointer to the outer query.
** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
@@ -126827,10 +125016,6 @@ static int flattenSubquery(
pSub = pSubitem->pSelect;
assert( pSub!=0 );
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( p->pWin || pSub->pWin ) return 0; /* Restriction (25) */
-#endif
-
pSubSrc = pSub->pSrc;
assert( pSubSrc );
/* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants,
@@ -126941,8 +125126,8 @@ static int flattenSubquery(
assert( (p->selFlags & SF_Recursive)==0 || pSub->pPrior==0 );
/***** If we reach this point, flattening is permitted. *****/
- SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n",
- pSub->selId, pSub, iFrom));
+ SELECTTRACE(1,pParse,p,("flatten %s.%p from term %d\n",
+ pSub->zSelName, pSub, iFrom));
/* Authorize the subquery */
pParse->zAuthContext = pSubitem->zName;
@@ -126993,6 +125178,7 @@ static int flattenSubquery(
p->pPrior = 0;
p->pLimit = 0;
pNew = sqlite3SelectDup(db, p, 0);
+ sqlite3SelectSetName(pNew, pSub->zSelName);
p->pLimit = pLimit;
p->pOrderBy = pOrderBy;
p->pSrc = pSrc;
@@ -127005,7 +125191,7 @@ static int flattenSubquery(
pNew->pNext = p;
p->pPrior = pNew;
SELECTTRACE(2,pParse,p,("compound-subquery flattener"
- " creates %u as peer\n",pNew->selId));
+ " creates %s.%p as peer\n",pNew->zSelName, pNew));
}
if( db->mallocFailed ) return 1;
}
@@ -127190,168 +125376,7 @@ static int flattenSubquery(
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
-/*
-** A structure to keep track of all of the column values that fixed to
-** a known value due to WHERE clause constraints of the form COLUMN=VALUE.
-*/
-typedef struct WhereConst WhereConst;
-struct WhereConst {
- Parse *pParse; /* Parsing context */
- int nConst; /* Number for COLUMN=CONSTANT terms */
- int nChng; /* Number of times a constant is propagated */
- Expr **apExpr; /* [i*2] is COLUMN and [i*2+1] is VALUE */
-};
-/*
-** Add a new entry to the pConst object
-*/
-static void constInsert(
- WhereConst *pConst,
- Expr *pColumn,
- Expr *pValue
-){
-
- pConst->nConst++;
- pConst->apExpr = sqlite3DbReallocOrFree(pConst->pParse->db, pConst->apExpr,
- pConst->nConst*2*sizeof(Expr*));
- if( pConst->apExpr==0 ){
- pConst->nConst = 0;
- }else{
- if( ExprHasProperty(pValue, EP_FixedCol) ) pValue = pValue->pLeft;
- pConst->apExpr[pConst->nConst*2-2] = pColumn;
- pConst->apExpr[pConst->nConst*2-1] = pValue;
- }
-}
-
-/*
-** Find all terms of COLUMN=VALUE or VALUE=COLUMN in pExpr where VALUE
-** is a constant expression and where the term must be true because it
-** is part of the AND-connected terms of the expression. For each term
-** found, add it to the pConst structure.
-*/
-static void findConstInWhere(WhereConst *pConst, Expr *pExpr){
- Expr *pRight, *pLeft;
- if( pExpr==0 ) return;
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return;
- if( pExpr->op==TK_AND ){
- findConstInWhere(pConst, pExpr->pRight);
- findConstInWhere(pConst, pExpr->pLeft);
- return;
- }
- if( pExpr->op!=TK_EQ ) return;
- pRight = pExpr->pRight;
- pLeft = pExpr->pLeft;
- assert( pRight!=0 );
- assert( pLeft!=0 );
- if( pRight->op==TK_COLUMN
- && !ExprHasProperty(pRight, EP_FixedCol)
- && sqlite3ExprIsConstant(pLeft)
- && sqlite3IsBinary(sqlite3BinaryCompareCollSeq(pConst->pParse,pLeft,pRight))
- ){
- constInsert(pConst, pRight, pLeft);
- }else
- if( pLeft->op==TK_COLUMN
- && !ExprHasProperty(pLeft, EP_FixedCol)
- && sqlite3ExprIsConstant(pRight)
- && sqlite3IsBinary(sqlite3BinaryCompareCollSeq(pConst->pParse,pLeft,pRight))
- ){
- constInsert(pConst, pLeft, pRight);
- }
-}
-
-/*
-** This is a Walker expression callback. pExpr is a candidate expression
-** to be replaced by a value. If pExpr is equivalent to one of the
-** columns named in pWalker->u.pConst, then overwrite it with its
-** corresponding value.
-*/
-static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){
- int i;
- WhereConst *pConst;
- if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
- if( ExprHasProperty(pExpr, EP_FixedCol) ) return WRC_Continue;
- pConst = pWalker->u.pConst;
- for(i=0; i<pConst->nConst; i++){
- Expr *pColumn = pConst->apExpr[i*2];
- if( pColumn==pExpr ) continue;
- if( pColumn->iTable!=pExpr->iTable ) continue;
- if( pColumn->iColumn!=pExpr->iColumn ) continue;
- /* A match is found. Add the EP_FixedCol property */
- pConst->nChng++;
- ExprClearProperty(pExpr, EP_Leaf);
- ExprSetProperty(pExpr, EP_FixedCol);
- assert( pExpr->pLeft==0 );
- pExpr->pLeft = sqlite3ExprDup(pConst->pParse->db, pConst->apExpr[i*2+1], 0);
- break;
- }
- return WRC_Prune;
-}
-
-/*
-** The WHERE-clause constant propagation optimization.
-**
-** If the WHERE clause contains terms of the form COLUMN=CONSTANT or
-** CONSTANT=COLUMN that must be tree (in other words, if the terms top-level
-** AND-connected terms that are not part of a ON clause from a LEFT JOIN)
-** then throughout the query replace all other occurrences of COLUMN
-** with CONSTANT within the WHERE clause.
-**
-** For example, the query:
-**
-** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=t1.a AND t3.c=t2.b
-**
-** Is transformed into
-**
-** SELECT * FROM t1, t2, t3 WHERE t1.a=39 AND t2.b=39 AND t3.c=39
-**
-** Return true if any transformations where made and false if not.
-**
-** Implementation note: Constant propagation is tricky due to affinity
-** and collating sequence interactions. Consider this example:
-**
-** CREATE TABLE t1(a INT,b TEXT);
-** INSERT INTO t1 VALUES(123,'0123');
-** SELECT * FROM t1 WHERE a=123 AND b=a;
-** SELECT * FROM t1 WHERE a=123 AND b=123;
-**
-** The two SELECT statements above should return different answers. b=a
-** is alway true because the comparison uses numeric affinity, but b=123
-** is false because it uses text affinity and '0123' is not the same as '123'.
-** To work around this, the expression tree is not actually changed from
-** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol
-** and the "123" value is hung off of the pLeft pointer. Code generator
-** routines know to generate the constant "123" instead of looking up the
-** column value. Also, to avoid collation problems, this optimization is
-** only attempted if the "a=123" term uses the default BINARY collation.
-*/
-static int propagateConstants(
- Parse *pParse, /* The parsing context */
- Select *p /* The query in which to propagate constants */
-){
- WhereConst x;
- Walker w;
- int nChng = 0;
- x.pParse = pParse;
- do{
- x.nConst = 0;
- x.nChng = 0;
- x.apExpr = 0;
- findConstInWhere(&x, p->pWhere);
- if( x.nConst ){
- memset(&w, 0, sizeof(w));
- w.pParse = pParse;
- w.xExprCallback = propagateConstantExprRewrite;
- w.xSelectCallback = sqlite3SelectWalkNoop;
- w.xSelectCallback2 = 0;
- w.walkerDepth = 0;
- w.u.pConst = &x;
- sqlite3WalkExpr(&w, p->pWhere);
- sqlite3DbFree(x.pParse->db, x.apExpr);
- nChng += x.nChng;
- }
- }while( x.nChng );
- return nChng;
-}
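The long comment in the block removed above ends with an affinity caveat: textually rewriting "b=a" into "b=123" would change query results, which is why the propagated constant is attached via EP_FixedCol instead. That caveat can be observed through the public API; the small program below (illustrative, not part of the patch) runs the two SELECT statements from that comment and prints their row counts:

/* Demonstration of the affinity point made in the removed propagateConstants()
** comment: b=a compares numerically, b=123 compares as text, so the two
** queries disagree on the row (123,'0123').  Standalone sketch only. */
#include <stdio.h>
#include <sqlite3.h>

static int countRows(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  int n = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ) return -1;
  while( sqlite3_step(pStmt)==SQLITE_ROW ) n++;
  sqlite3_finalize(pStmt);
  return n;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "CREATE TABLE t1(a INT, b TEXT);"
      "INSERT INTO t1 VALUES(123,'0123');", 0, 0, 0);
  printf("b=a   -> %d row(s)\n",
         countRows(db, "SELECT * FROM t1 WHERE a=123 AND b=a"));
  printf("b=123 -> %d row(s)\n",
         countRows(db, "SELECT * FROM t1 WHERE a=123 AND b=123"));
  /* Expected: 1 row for b=a (numeric comparison), 0 rows for b=123
  ** (text comparison: '0123' is not the text '123'). */
  sqlite3_close(db);
  return 0;
}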
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/*
@@ -127381,7 +125406,7 @@ static int propagateConstants(
** (2) The inner query is the recursive part of a common table expression.
**
** (3) The inner query has a LIMIT clause (since the changes to the WHERE
-** clause would change the meaning of the LIMIT).
+** close would change the meaning of the LIMIT).
**
** (4) The inner query is the right operand of a LEFT JOIN and the
** expression to be pushed down does not come from the ON clause
@@ -127400,10 +125425,6 @@ static int propagateConstants(
** But if the (b2=2) term were to be pushed down into the bb subquery,
** then the (1,1,NULL) row would be suppressed.
**
-** (6) The inner query features one or more window-functions (since
-** changes to the WHERE clause of the inner query could change the
-** window over which window functions are calculated).
-**
** Return 0 if no changes are made and non-zero if one or more WHERE clause
** terms are duplicated into the subquery.
*/
@@ -127419,10 +125440,6 @@ static int pushDownWhereTerms(
if( pWhere==0 ) return 0;
if( pSubq->selFlags & SF_Recursive ) return 0; /* restriction (2) */
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pSubq->pWin ) return 0; /* restriction (6) */
-#endif
-
#ifdef SQLITE_DEBUG
/* Only the first term of a compound can have a WITH clause. But make
** sure no other terms are marked SF_Recursive in case something changes
@@ -127868,35 +125885,6 @@ static void selectPopWith(Walker *pWalker, Select *p){
#define selectPopWith 0
#endif
-/*
-** The SrcList_item structure passed as the second argument represents a
-** sub-query in the FROM clause of a SELECT statement. This function
-** allocates and populates the SrcList_item.pTab object. If successful,
-** SQLITE_OK is returned. Otherwise, if an OOM error is encountered,
-** SQLITE_NOMEM.
-*/
-SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, struct SrcList_item *pFrom){
- Select *pSel = pFrom->pSelect;
- Table *pTab;
-
- assert( pSel );
- pFrom->pTab = pTab = sqlite3DbMallocZero(pParse->db, sizeof(Table));
- if( pTab==0 ) return SQLITE_NOMEM;
- pTab->nTabRef = 1;
- if( pFrom->zAlias ){
- pTab->zName = sqlite3DbStrDup(pParse->db, pFrom->zAlias);
- }else{
- pTab->zName = sqlite3MPrintf(pParse->db, "subquery_%u", pSel->selId);
- }
- while( pSel->pPrior ){ pSel = pSel->pPrior; }
- sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
- pTab->iPKey = -1;
- pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
- pTab->tabFlags |= TF_Ephemeral;
-
- return SQLITE_OK;
-}
-
/*
** This routine is a Walker callback for "expanding" a SELECT statement.
** "Expanding" means to do the following:
@@ -127969,7 +125957,19 @@ static int selectExpander(Walker *pWalker, Select *p){
assert( pSel!=0 );
assert( pFrom->pTab==0 );
if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort;
- if( sqlite3ExpandSubquery(pParse, pFrom) ) return WRC_Abort;
+ pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
+ if( pTab==0 ) return WRC_Abort;
+ pTab->nTabRef = 1;
+ if( pFrom->zAlias ){
+ pTab->zName = sqlite3DbStrDup(db, pFrom->zAlias);
+ }else{
+ pTab->zName = sqlite3MPrintf(db, "subquery_%p", (void*)pTab);
+ }
+ while( pSel->pPrior ){ pSel = pSel->pPrior; }
+ sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
+ pTab->iPKey = -1;
+ pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+ pTab->tabFlags |= TF_Ephemeral;
#endif
}else{
/* An ordinary table or view name in the FROM clause */
@@ -127992,6 +125992,7 @@ static int selectExpander(Walker *pWalker, Select *p){
if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort;
assert( pFrom->pSelect==0 );
pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0);
+ sqlite3SelectSetName(pFrom->pSelect, pTab->zName);
nCol = pTab->nCol;
pTab->nCol = -1;
sqlite3WalkSelect(pWalker, pFrom->pSelect);
@@ -128269,7 +126270,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
struct SrcList_item *pFrom;
assert( p->selFlags & SF_Resolved );
- if( p->selFlags & SF_HasTypeInfo ) return;
+ assert( (p->selFlags & SF_HasTypeInfo)==0 );
p->selFlags |= SF_HasTypeInfo;
pParse = pWalker->pParse;
pTabList = p->pSrc;
@@ -128372,7 +126373,7 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
"argument");
pFunc->iDistinct = -1;
}else{
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pE->x.pList,0,0);
+ KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList, 0, 0);
sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0,
(char*)pKeyInfo, P4_KEYINFO);
}
@@ -128396,17 +126397,11 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
}
}
-
/*
** Update the accumulator memory cells for an aggregate based on
** the current cursor position.
-**
-** If regAcc is non-zero and there are no min() or max() aggregates
-** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
-** registers i register regAcc contains 0. The caller will take care
-** of setting and clearing regAcc.
*/
-static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){
+static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
Vdbe *v = pParse->pVdbe;
int i;
int regHit = 0;
@@ -128449,24 +126444,36 @@ static void updateAccumulator(Parse *pParse, int regAcc, AggInfo *pAggInfo){
if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
}
- sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem);
+ sqlite3VdbeAddOp3(v, OP_AggStep0, 0, regAgg, pF->iMem);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg);
sqlite3ReleaseTempRange(pParse, regAgg, nArg);
if( addrNext ){
sqlite3VdbeResolveLabel(v, addrNext);
+ sqlite3ExprCacheClear(pParse);
}
}
- if( regHit==0 && pAggInfo->nAccumulator ){
- regHit = regAcc;
- }
+
+ /* Before populating the accumulator registers, clear the column cache.
+ ** Otherwise, if any of the required column values are already present
+ ** in registers, sqlite3ExprCode() may use OP_SCopy to copy the value
+ ** to pC->iMem. But by the time the value is used, the original register
+ ** may have been used, invalidating the underlying buffer holding the
+ ** text or blob value. See ticket [883034dcb5].
+ **
+ ** Another solution would be to change the OP_SCopy used to copy cached
+ ** values to an OP_Copy.
+ */
if( regHit ){
addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v);
}
+ sqlite3ExprCacheClear(pParse);
for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){
sqlite3ExprCode(pParse, pC->pExpr, pC->iMem);
}
pAggInfo->directMode = 0;
+ sqlite3ExprCacheClear(pParse);
if( addrHitTest ){
sqlite3VdbeJumpHere(v, addrHitTest);
}
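The comment restored above (ticket [883034dcb5]) describes a register-aliasing hazard: an OP_SCopy-style shallow copy still refers to the source register's buffer, so the value is lost if that register is reused before the copy is consumed, hence the cache clear (or, alternatively, an OP_Copy) around the accumulator code. The same hazard, stripped of anything SQLite-specific, can be shown with plain pointers:

/* Shallow vs. deep copy of a buffer: the shallow "copy" changes when the
** original buffer is reused, the deep copy does not.  Analogy only for the
** OP_SCopy vs OP_Copy discussion above; all names here are invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void){
  char reg[16];                          /* a "register" whose buffer is reused */
  snprintf(reg, sizeof(reg), "hello");

  const char *shallow = reg;             /* like OP_SCopy: another reference */
  char *deep = malloc(strlen(reg)+1);    /* like OP_Copy: private duplicate */
  strcpy(deep, reg);

  snprintf(reg, sizeof(reg), "goodbye"); /* register reused for a new value */

  printf("shallow: %s\n", shallow);      /* prints "goodbye": the copy is gone */
  printf("deep:    %s\n", deep);         /* still "hello" */
  free(deep);
  return 0;
}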
@@ -128596,7 +126603,6 @@ static struct SrcList_item *isSelfJoinView(
** The transformation only works if all of the following are true:
**
** * The subquery is a UNION ALL of two or more terms
-** * The subquery does not have a LIMIT clause
** * There is no WHERE or GROUP BY or HAVING clauses on the subqueries
** * The outer query is a simple count(*)
**
@@ -128620,7 +126626,6 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
do{
if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */
if( pSub->pWhere ) return 0; /* No WHERE clause */
- if( pSub->pLimit ) return 0; /* No LIMIT clause */
if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */
pSub = pSub->pPrior; /* Repeat over compound */
}while( pSub );
@@ -128733,10 +126738,14 @@ SQLITE_PRIVATE int sqlite3Select(
p->selFlags &= ~SF_Distinct;
}
sqlite3SelectPrep(pParse, p, 0);
+ memset(&sSort, 0, sizeof(sSort));
+ sSort.pOrderBy = p->pOrderBy;
+ pTabList = p->pSrc;
if( pParse->nErr || db->mallocFailed ){
goto select_end;
}
assert( p->pEList!=0 );
+ isAgg = (p->selFlags & SF_Aggregate)!=0;
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x104 ){
SELECTTRACE(0x104,pParse,p, ("after name resolution:\n"));
@@ -128748,22 +126757,6 @@ SQLITE_PRIVATE int sqlite3Select(
generateColumnNames(pParse, p);
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( sqlite3WindowRewrite(pParse, p) ){
- goto select_end;
- }
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x108 ){
- SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n"));
- sqlite3TreeViewSelect(0, p, 0);
- }
-#endif
-#endif /* SQLITE_OMIT_WINDOWFUNC */
- pTabList = p->pSrc;
- isAgg = (p->selFlags & SF_Aggregate)!=0;
- memset(&sSort, 0, sizeof(sSort));
- sSort.pOrderBy = p->pOrderBy;
-
/* Try to various optimizations (flattening subqueries, and strength
** reduction of join operators) in the FROM clause up into the main query
*/
@@ -128863,35 +126856,6 @@ SQLITE_PRIVATE int sqlite3Select(
}
#endif
- /* Do the WHERE-clause constant propagation optimization if this is
- ** a join. No need to speed time on this operation for non-join queries
- ** as the equivalent optimization will be handled by query planner in
- ** sqlite3WhereBegin().
- */
- if( pTabList->nSrc>1
- && OptimizationEnabled(db, SQLITE_PropagateConst)
- && propagateConstants(pParse, p)
- ){
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After constant propagation:\n"));
- sqlite3TreeViewSelect(0, p, 0);
- }
-#endif
- }else{
- SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n"));
- }
-
-#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
- if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView)
- && countOfViewOptimization(pParse, p)
- ){
- if( db->mallocFailed ) goto select_end;
- pEList = p->pEList;
- pTabList = p->pSrc;
- }
-#endif
-
/* For each term in the FROM clause, do two things:
** (1) Authorized unreferenced tables
** (2) Generate code for all sub-queries
@@ -128965,8 +126929,7 @@ SQLITE_PRIVATE int sqlite3Select(
){
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,
- ("After WHERE-clause push-down into subquery %d:\n", pSub->selId));
+ SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -129000,7 +126963,7 @@ SQLITE_PRIVATE int sqlite3Select(
VdbeComment((v, "%s", pItem->pTab->zName));
pItem->addrFillSub = addrTop;
sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn);
- ExplainQueryPlan((pParse, 1, "CO-ROUTINE %u", pSub->selId));
+ ExplainQueryPlan((pParse, 1, "CO-ROUTINE 0x%p", pSub));
sqlite3Select(pParse, pSub, &dest);
pItem->pTab->nRowLogEst = pSub->nSelectRow;
pItem->fg.viaCoroutine = 1;
@@ -129039,7 +127002,7 @@ SQLITE_PRIVATE int sqlite3Select(
pSub->nSelectRow = pPrior->pSelect->nSelectRow;
}else{
sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
- ExplainQueryPlan((pParse, 1, "MATERIALIZE %u", pSub->selId));
+ ExplainQueryPlan((pParse, 1, "MATERIALIZE 0x%p", pSub));
sqlite3Select(pParse, pSub, &dest);
}
pItem->pTab->nRowLogEst = pSub->nSelectRow;
@@ -129070,6 +127033,16 @@ SQLITE_PRIVATE int sqlite3Select(
}
#endif
+#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
+ if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView)
+ && countOfViewOptimization(pParse, p)
+ ){
+ if( db->mallocFailed ) goto select_end;
+ pEList = p->pEList;
+ pTabList = p->pSrc;
+ }
+#endif
+
/* If the query is DISTINCT with an ORDER BY but is not an aggregate, and
** if the select-list is the same as the ORDER BY list, then this query
** can be rewritten as a GROUP BY. In other words, this:
@@ -129113,8 +127086,7 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( sSort.pOrderBy ){
KeyInfo *pKeyInfo;
- pKeyInfo = sqlite3KeyInfoFromExprList(
- pParse, sSort.pOrderBy, 0, pEList->nExpr);
+ pKeyInfo = keyInfoFromExprList(pParse, sSort.pOrderBy, 0, pEList->nExpr);
sSort.iECursor = pParse->nTab++;
sSort.addrSortIndex =
sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
@@ -129148,9 +127120,9 @@ SQLITE_PRIVATE int sqlite3Select(
if( p->selFlags & SF_Distinct ){
sDistinct.tabTnct = pParse->nTab++;
sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral,
- sDistinct.tabTnct, 0, 0,
- (char*)sqlite3KeyInfoFromExprList(pParse, p->pEList,0,0),
- P4_KEYINFO);
+ sDistinct.tabTnct, 0, 0,
+ (char*)keyInfoFromExprList(pParse, p->pEList,0,0),
+ P4_KEYINFO);
sqlite3VdbeChangeP5(v, BTREE_UNORDERED);
sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED;
}else{
@@ -129159,16 +127131,9 @@ SQLITE_PRIVATE int sqlite3Select(
if( !isAgg && pGroupBy==0 ){
/* No aggregate functions and no GROUP BY clause */
- u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0)
- | (p->selFlags & SF_FixedLimit);
-#ifndef SQLITE_OMIT_WINDOWFUNC
- Window *pWin = p->pWin; /* Master window object (or NULL) */
- if( pWin ){
- sqlite3WindowCodeInit(pParse, pWin);
- }
-#endif
+ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0);
assert( WHERE_USE_LIMIT==SF_FixedLimit );
-
+ wctrlFlags |= p->selFlags & SF_FixedLimit;
/* Begin the database scan. */
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
@@ -129183,7 +127148,7 @@ SQLITE_PRIVATE int sqlite3Select(
}
if( sSort.pOrderBy ){
sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo);
- sSort.labelOBLopt = sqlite3WhereOrderByLimitOptLabel(pWInfo);
+ sSort.bOrderedInnerLoop = sqlite3WhereOrderedInnerLoop(pWInfo);
if( sSort.nOBSat==sSort.pOrderBy->nExpr ){
sSort.pOrderBy = 0;
}
@@ -129197,37 +127162,15 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
}
+ /* Use the standard inner loop. */
assert( p->pEList==pEList );
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( pWin ){
- int addrGosub = sqlite3VdbeMakeLabel(v);
- int iCont = sqlite3VdbeMakeLabel(v);
- int iBreak = sqlite3VdbeMakeLabel(v);
- int regGosub = ++pParse->nMem;
+ selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest,
+ sqlite3WhereContinueLabel(pWInfo),
+ sqlite3WhereBreakLabel(pWInfo));
- sqlite3WindowCodeStep(pParse, p, pWInfo, regGosub, addrGosub);
-
- sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak);
- sqlite3VdbeResolveLabel(v, addrGosub);
- VdbeNoopComment((v, "inner-loop subroutine"));
- sSort.labelOBLopt = 0;
- selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest, iCont, iBreak);
- sqlite3VdbeResolveLabel(v, iCont);
- sqlite3VdbeAddOp1(v, OP_Return, regGosub);
- VdbeComment((v, "end inner-loop subroutine"));
- sqlite3VdbeResolveLabel(v, iBreak);
- }else
-#endif /* SQLITE_OMIT_WINDOWFUNC */
- {
- /* Use the standard inner loop. */
- selectInnerLoop(pParse, p, -1, &sSort, &sDistinct, pDest,
- sqlite3WhereContinueLabel(pWInfo),
- sqlite3WhereBreakLabel(pWInfo));
-
- /* End the database scan loop.
- */
- sqlite3WhereEnd(pWInfo);
- }
+ /* End the database scan loop.
+ */
+ sqlite3WhereEnd(pWInfo);
}else{
/* This case when there exist aggregate functions or a GROUP BY clause
** or both */
@@ -129356,7 +127299,7 @@ SQLITE_PRIVATE int sqlite3Select(
** will be converted into a Noop.
*/
sAggInfo.sortingIdx = pParse->nTab++;
- pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pGroupBy,0,sAggInfo.nColumn);
+ pKeyInfo = keyInfoFromExprList(pParse, pGroupBy, 0, sAggInfo.nColumn);
addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen,
sAggInfo.sortingIdx, sAggInfo.nSortingColumn,
0, (char*)pKeyInfo, P4_KEYINFO);
@@ -129375,6 +127318,8 @@ SQLITE_PRIVATE int sqlite3Select(
pParse->nMem += pGroupBy->nExpr;
sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag);
VdbeComment((v, "clear abort flag"));
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag);
+ VdbeComment((v, "indicate accumulator empty"));
sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1);
/* Begin a loop that will extract all source rows in GROUP BY order.
@@ -129420,14 +127365,15 @@ SQLITE_PRIVATE int sqlite3Select(
}
}
regBase = sqlite3GetTempRange(pParse, nCol);
+ sqlite3ExprCacheClear(pParse);
sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
j = nGroupBy;
for(i=0; i<sAggInfo.nColumn; i++){
struct AggInfo_col *pCol = &sAggInfo.aCol[i];
if( pCol->iSorterColumn>=j ){
int r1 = j + regBase;
- sqlite3ExprCodeGetColumnOfTable(v,
- pCol->pTab, pCol->iTable, pCol->iColumn, r1);
+ sqlite3ExprCodeGetColumnToReg(pParse,
+ pCol->pTab, pCol->iColumn, pCol->iTable, r1);
j++;
}
}
@@ -129443,6 +127389,8 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd);
VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
sAggInfo.useSortingIdx = 1;
+ sqlite3ExprCacheClear(pParse);
+
}
/* If the index or temporary table used by the GROUP BY sort
@@ -129465,6 +127413,7 @@ SQLITE_PRIVATE int sqlite3Select(
** from the previous row currently stored in a0, a1, a2...
*/
addrTopOfLoop = sqlite3VdbeCurrentAddr(v);
+ sqlite3ExprCacheClear(pParse);
if( groupBySort ){
sqlite3VdbeAddOp3(v, OP_SorterData, sAggInfo.sortingIdx,
sortOut, sortPTab);
@@ -129503,7 +127452,7 @@ SQLITE_PRIVATE int sqlite3Select(
** the current row
*/
sqlite3VdbeJumpHere(v, addr1);
- updateAccumulator(pParse, iUseFlag, &sAggInfo);
+ updateAccumulator(pParse, &sAggInfo);
sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag);
VdbeComment((v, "indicate data in accumulator"));
@@ -129555,8 +127504,6 @@ SQLITE_PRIVATE int sqlite3Select(
*/
sqlite3VdbeResolveLabel(v, addrReset);
resetAccumulator(pParse, &sAggInfo);
- sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag);
- VdbeComment((v, "indicate accumulator empty"));
sqlite3VdbeAddOp1(v, OP_Return, regReset);
} /* endif pGroupBy. Begin aggregate queries without GROUP BY: */
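The hunks above shuffle the point at which the accumulator-empty flag (iUseFlag) is cleared, but the overall shape of the generated code is the familiar grouped-aggregation loop: reset the accumulator, fold rows in while the GROUP BY key is unchanged, and emit one output row whenever the key changes or the input ends. A minimal standalone sketch of that control flow, not SQLite code; the sorted input array and the sum() aggregate are illustrative assumptions:

  #include <stdio.h>

  typedef struct { int key; int val; } Row;

  int main(void){
    /* Input already sorted by the GROUP BY key, as the sorter guarantees. */
    Row in[] = {{1,10},{1,5},{2,7},{3,1},{3,2}};
    int n = (int)(sizeof(in)/sizeof(in[0]));
    int useFlag = 0;              /* mirrors iUseFlag: accumulator holds data */
    int curKey = 0, acc = 0;
    for(int i=0; i<n; i++){
      if( useFlag && in[i].key!=curKey ){
        printf("key=%d sum=%d\n", curKey, acc);  /* output one row per group */
        acc = 0;                                 /* resetAccumulator() analogue */
        useFlag = 0;
      }
      curKey = in[i].key;
      acc += in[i].val;                          /* updateAccumulator() analogue */
      useFlag = 1;                               /* "indicate data in accumulator" */
    }
    if( useFlag ) printf("key=%d sum=%d\n", curKey, acc);
    return 0;
  }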
@@ -129622,23 +127569,6 @@ SQLITE_PRIVATE int sqlite3Select(
}else
#endif /* SQLITE_OMIT_BTREECOUNT */
{
- int regAcc = 0; /* "populate accumulators" flag */
-
- /* If there are accumulator registers but no min() or max() functions,
- ** allocate register regAcc. Register regAcc will contain 0 the first
- ** time the inner loop runs, and 1 thereafter. The code generated
- ** by updateAccumulator() only updates the accumulator registers if
- ** regAcc contains 0. */
- if( sAggInfo.nAccumulator ){
- for(i=0; i<sAggInfo.nFunc; i++){
- if( sAggInfo.aFunc[i].pFunc->funcFlags&SQLITE_FUNC_NEEDCOLL ) break;
- }
- if( i==sAggInfo.nFunc ){
- regAcc = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Integer, 0, regAcc);
- }
- }
-
/* This case runs if the aggregate has no GROUP BY clause. The
** processing is much simpler since there is only a single row
** of output.
@@ -129660,8 +127590,7 @@ SQLITE_PRIVATE int sqlite3Select(
if( pWInfo==0 ){
goto select_end;
}
- updateAccumulator(pParse, regAcc, &sAggInfo);
- if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc);
+ updateAccumulator(pParse, &sAggInfo);
if( sqlite3WhereIsOrdered(pWInfo)>0 ){
sqlite3VdbeGoto(v, sqlite3WhereBreakLabel(pWInfo));
VdbeComment((v, "%s() by index",
@@ -130105,16 +128034,14 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
goto trigger_cleanup;
}
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
- if( !IN_RENAME_OBJECT ){
- if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){
- if( !noErr ){
- sqlite3ErrorMsg(pParse, "trigger %T already exists", pName);
- }else{
- assert( !db->init.busy );
- sqlite3CodeVerifySchema(pParse, iDb);
- }
- goto trigger_cleanup;
+ if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){
+ if( !noErr ){
+ sqlite3ErrorMsg(pParse, "trigger %T already exists", pName);
+ }else{
+ assert( !db->init.busy );
+ sqlite3CodeVerifySchema(pParse, iDb);
}
+ goto trigger_cleanup;
}
/* Do not create a trigger on a system table */
@@ -130138,7 +128065,7 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
}
#ifndef SQLITE_OMIT_AUTHORIZATION
- if( !IN_RENAME_OBJECT ){
+ {
int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
int code = SQLITE_CREATE_TRIGGER;
const char *zDb = db->aDb[iTabDb].zDbSName;
@@ -130172,15 +128099,8 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
pTrigger->pTabSchema = pTab->pSchema;
pTrigger->op = (u8)op;
pTrigger->tr_tm = tr_tm==TK_BEFORE ? TRIGGER_BEFORE : TRIGGER_AFTER;
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenRemap(pParse, pTrigger->table, pTableName->a[0].zName);
- pTrigger->pWhen = pWhen;
- pWhen = 0;
- }else{
- pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE);
- }
- pTrigger->pColumns = pColumns;
- pColumns = 0;
+ pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE);
+ pTrigger->pColumns = sqlite3IdListDup(db, pColumns);
assert( pParse->pNewTrigger==0 );
pParse->pNewTrigger = pTrigger;
@@ -130229,14 +128149,6 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
goto triggerfinish_cleanup;
}
-#ifndef SQLITE_OMIT_ALTERTABLE
- if( IN_RENAME_OBJECT ){
- assert( !db->init.busy );
- pParse->pNewTrigger = pTrig;
- pTrig = 0;
- }else
-#endif
-
/* if we are not initializing,
** build the sqlite_master entry
*/
@@ -130278,7 +128190,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
triggerfinish_cleanup:
sqlite3DeleteTrigger(db, pTrig);
- assert( IN_RENAME_OBJECT || !pParse->pNewTrigger );
+ assert( !pParse->pNewTrigger );
sqlite3DeleteTriggerStep(db, pStepList);
}
@@ -130325,13 +128237,12 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(
** If an OOM error occurs, NULL is returned and db->mallocFailed is set.
*/
static TriggerStep *triggerStepAllocate(
- Parse *pParse, /* Parser context */
+ sqlite3 *db, /* Database connection */
u8 op, /* Trigger opcode */
Token *pName, /* The target name */
const char *zStart, /* Start of SQL text */
const char *zEnd /* End of SQL text */
){
- sqlite3 *db = pParse->db;
TriggerStep *pTriggerStep;
pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1);
@@ -130342,9 +128253,6 @@ static TriggerStep *triggerStepAllocate(
pTriggerStep->zTarget = z;
pTriggerStep->op = op;
pTriggerStep->zSpan = triggerSpanDup(db, zStart, zEnd);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, pTriggerStep->zTarget, pName);
- }
}
return pTriggerStep;
}
@@ -130357,7 +128265,7 @@ static TriggerStep *triggerStepAllocate(
** body of a trigger.
*/
SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
- Parse *pParse, /* Parser */
+ sqlite3 *db, /* The database connection */
Token *pTableName, /* Name of the table into which we insert */
IdList *pColumn, /* List of columns in pTableName to insert into */
Select *pSelect, /* A SELECT statement that supplies values */
@@ -130366,19 +128274,13 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
const char *zStart, /* Start of SQL text */
const char *zEnd /* End of SQL text */
){
- sqlite3 *db = pParse->db;
TriggerStep *pTriggerStep;
assert(pSelect != 0 || db->mallocFailed);
- pTriggerStep = triggerStepAllocate(pParse, TK_INSERT, pTableName,zStart,zEnd);
+ pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName, zStart, zEnd);
if( pTriggerStep ){
- if( IN_RENAME_OBJECT ){
- pTriggerStep->pSelect = pSelect;
- pSelect = 0;
- }else{
- pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
- }
+ pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
pTriggerStep->pIdList = pColumn;
pTriggerStep->pUpsert = pUpsert;
pTriggerStep->orconf = orconf;
@@ -130399,7 +128301,7 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
** sees an UPDATE statement inside the body of a CREATE TRIGGER.
*/
SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(
- Parse *pParse, /* Parser */
+ sqlite3 *db, /* The database connection */
Token *pTableName, /* Name of the table to be updated */
ExprList *pEList, /* The SET clause: list of column and new values */
Expr *pWhere, /* The WHERE clause */
@@ -130407,20 +128309,12 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(
const char *zStart, /* Start of SQL text */
const char *zEnd /* End of SQL text */
){
- sqlite3 *db = pParse->db;
TriggerStep *pTriggerStep;
- pTriggerStep = triggerStepAllocate(pParse, TK_UPDATE, pTableName,zStart,zEnd);
+ pTriggerStep = triggerStepAllocate(db, TK_UPDATE, pTableName, zStart, zEnd);
if( pTriggerStep ){
- if( IN_RENAME_OBJECT ){
- pTriggerStep->pExprList = pEList;
- pTriggerStep->pWhere = pWhere;
- pEList = 0;
- pWhere = 0;
- }else{
- pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE);
- pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
- }
+ pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE);
+ pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
pTriggerStep->orconf = orconf;
}
sqlite3ExprListDelete(db, pEList);
@@ -130434,23 +128328,17 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(
** sees a DELETE statement inside the body of a CREATE TRIGGER.
*/
SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(
- Parse *pParse, /* Parser */
+ sqlite3 *db, /* Database connection */
Token *pTableName, /* The table from which rows are deleted */
Expr *pWhere, /* The WHERE clause */
const char *zStart, /* Start of SQL text */
const char *zEnd /* End of SQL text */
){
- sqlite3 *db = pParse->db;
TriggerStep *pTriggerStep;
- pTriggerStep = triggerStepAllocate(pParse, TK_DELETE, pTableName,zStart,zEnd);
+ pTriggerStep = triggerStepAllocate(db, TK_DELETE, pTableName, zStart, zEnd);
if( pTriggerStep ){
- if( IN_RENAME_OBJECT ){
- pTriggerStep->pWhere = pWhere;
- pWhere = 0;
- }else{
- pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
- }
+ pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
pTriggerStep->orconf = OE_Default;
}
sqlite3ExprDelete(db, pWhere);
@@ -131635,7 +129523,7 @@ SQLITE_PRIVATE void sqlite3Update(
if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){
assert( pPk );
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey,nKey);
- VdbeCoverage(v);
+ VdbeCoverageNeverTaken(v);
}
if( eOnePass!=ONEPASS_SINGLE ){
labelContinue = sqlite3VdbeMakeLabel(v);
@@ -131722,7 +129610,13 @@ SQLITE_PRIVATE void sqlite3Update(
*/
testcase( i==31 );
testcase( i==32 );
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i);
+ sqlite3ExprCodeGetColumnToReg(pParse, pTab, i, iDataCur, regNew+i);
+ if( tmask & TRIGGER_BEFORE ){
+ /* This value will be recomputed in After-BEFORE-trigger-reload-loop
+ ** below, so make sure that it is not cached and reused.
+ ** Ticket d85fffd6ffe856092ed8daefa811b1e399706b28. */
+ sqlite3ExprCacheRemove(pParse, regNew+i, 1);
+ }
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);
}
@@ -132259,12 +130153,10 @@ SQLITE_PRIVATE void sqlite3UpsertDoUpdate(
Vdbe *v = pParse->pVdbe;
sqlite3 *db = pParse->db;
SrcList *pSrc; /* FROM clause for the UPDATE */
- int iDataCur;
+ int iDataCur = pUpsert->iDataCur;
assert( v!=0 );
- assert( pUpsert!=0 );
VdbeNoopComment((v, "Begin DO UPDATE of UPSERT"));
- iDataCur = pUpsert->iDataCur;
if( pIdx && iCur!=iDataCur ){
if( HasRowid(pTab) ){
int regRowid = sqlite3GetTempReg(pParse);
@@ -132534,7 +130426,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db, int iDb){
*/
rc = execSql(db, pzErrMsg, "BEGIN");
if( rc!=SQLITE_OK ) goto end_of_vacuum;
- rc = sqlite3BtreeBeginTrans(pMain, 2, 0);
+ rc = sqlite3BtreeBeginTrans(pMain, 2);
if( rc!=SQLITE_OK ) goto end_of_vacuum;
/* Do not attempt to change the page size for a WAL database */
@@ -132952,7 +130844,7 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
assert( sqlite3_mutex_held(db->mutex) );
if( p ){
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
do {
VTable *pNext = p->pNext;
sqlite3VtabUnlock(p);
@@ -133448,7 +131340,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
assert( IsVirtual(pTab) );
memset(&sParse, 0, sizeof(sParse));
- sParse.eParseMode = PARSE_MODE_DECLARE_VTAB;
+ sParse.declareVtab = 1;
sParse.db = db;
sParse.nQueryLoop = 1;
if( SQLITE_OK==sqlite3RunParser(&sParse, zCreateTable, &zErr)
@@ -133489,7 +131381,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
sqlite3DbFree(db, zErr);
rc = SQLITE_ERROR;
}
- sParse.eParseMode = PARSE_MODE_NORMAL;
+ sParse.declareVtab = 0;
if( sParse.pVdbe ){
sqlite3VdbeFinalize(sParse.pVdbe);
@@ -134043,8 +131935,6 @@ struct WhereLevel {
struct InLoop {
int iCur; /* The VDBE cursor used by this IN operator */
int addrInTop; /* Top of the IN loop */
- int iBase; /* Base register of multi-key index record */
- int nPrefix; /* Number of prior entires in the key */
u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */
} *aInLoop; /* Information about each nested IN operator */
} in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */
@@ -134283,7 +132173,6 @@ struct WhereClause {
WhereInfo *pWInfo; /* WHERE clause processing context */
WhereClause *pOuter; /* Outer conjunction */
u8 op; /* Split operator. TK_AND or TK_OR */
- u8 hasOr; /* True if any a[].eOperator is WO_OR */
int nTerm; /* Number of terms */
int nSlot; /* Number of entries in a[] */
WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */
@@ -134457,7 +132346,6 @@ SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*);
SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*);
SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8);
SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*);
-SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet*, Expr*);
SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*);
SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*);
SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*);
@@ -134520,7 +132408,6 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC
#define WHERE_SKIPSCAN 0x00008000 /* Uses the skip-scan algorithm */
#define WHERE_UNQ_WANTED 0x00010000 /* WHERE_ONEROW would have been helpful*/
#define WHERE_PARTIALIDX 0x00020000 /* The automatic index is partial */
-#define WHERE_IN_EARLYOUT 0x00040000 /* Perhaps quit IN loops early */
/************** End of whereInt.h ********************************************/
/************** Continuing where we left off in wherecode.c ******************/
@@ -134655,7 +132542,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH);
sqlite3_str_appendall(&str, isSearch ? "SEARCH" : "SCAN");
if( pItem->pSelect ){
- sqlite3_str_appendf(&str, " SUBQUERY %u", pItem->pSelect->selId);
+ sqlite3_str_appendf(&str, " SUBQUERY 0x%p", pItem->pSelect);
}else{
sqlite3_str_appendf(&str, " TABLE %s", pItem->zName);
}
@@ -134852,6 +132739,7 @@ static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
/* Code the OP_Affinity opcode if there is anything left to do. */
if( n>0 ){
sqlite3VdbeAddOp4(v, OP_Affinity, base, n, 0, zAff, n);
+ sqlite3ExprCacheAffinityChange(pParse, base, n);
}
}
@@ -135095,14 +132983,7 @@ static int codeEqualityTerm(
sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
if( i==iEq ){
pIn->iCur = iTab;
- pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next;
- if( iEq>0 && (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ){
- pIn->iBase = iReg - i;
- pIn->nPrefix = i;
- pLoop->wsFlags |= WHERE_IN_EARLYOUT;
- }else{
- pIn->nPrefix = 0;
- }
+ pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
}else{
pIn->eEndLoopOp = OP_Noop;
}
@@ -135389,8 +133270,11 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){
struct CCurHint *pHint = pWalker->u.pCCurHint;
if( pExpr->op==TK_COLUMN ){
if( pExpr->iTable!=pHint->iTabCur ){
+ Vdbe *v = pWalker->pParse->pVdbe;
int reg = ++pWalker->pParse->nMem; /* Register for column value */
- sqlite3ExprCode(pWalker->pParse, pExpr, reg);
+ sqlite3ExprCodeGetColumnOfTable(
+ v, pExpr->pTab, pExpr->iTable, pExpr->iColumn, reg
+ );
pExpr->op = TK_REGISTER;
pExpr->iTable = reg;
}else if( pHint->pIdx!=0 ){
@@ -135743,7 +133627,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk);
VdbeCoverage(v);
- VdbeComment((v, "next row of %s", pTabItem->pTab->zName));
+ VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName));
pLevel->op = OP_Goto;
}else
@@ -135757,6 +133641,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
int nConstraint = pLoop->nLTerm;
int iIn; /* Counter for IN constraints */
+ sqlite3ExprCachePush(pParse);
iReg = sqlite3GetTempRange(pParse, nConstraint+2);
addrNotFound = pLevel->addrBrk;
for(j=0; j<nConstraint; j++){
addrNxt = pLevel->addrNxt;
sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg);
VdbeCoverage(v);
+ sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+ VdbeComment((v, "pk"));
pLevel->op = OP_Noop;
}else if( (pLoop->wsFlags & WHERE_IPK)!=0
&& (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
@@ -135921,6 +133810,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
VdbeCoverageIf(v, pX->op==TK_LE);
VdbeCoverageIf(v, pX->op==TK_LT);
VdbeCoverageIf(v, pX->op==TK_GE);
+ sqlite3ExprCacheAffinityChange(pParse, r1, 1);
sqlite3ReleaseTempReg(pParse, rTemp);
}else{
sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrHalt);
@@ -135955,6 +133845,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( testOp!=OP_Noop ){
iRowidReg = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg);
VdbeCoverageIf(v, testOp==OP_Le);
VdbeCoverageIf(v, testOp==OP_Lt);
@@ -136159,9 +134050,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
** above has already left the cursor sitting on the correct row,
** so no further seeking is needed */
}else{
- if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
- sqlite3VdbeAddOp1(v, OP_SeekHit, iIdxCur);
- }
op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
assert( op!=0 );
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
@@ -136180,6 +134068,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
nConstraint = nEq;
if( pRangeEnd ){
Expr *pRight = pRangeEnd->pExpr->pRight;
+ sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
codeExprOrVector(pParse, pRight, regBase+nEq, nTop);
whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
if( (pRangeEnd->wtFlags & TERM_VNULL)==0
@@ -136204,6 +134093,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
}else if( bStopAtNull ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+ sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
endEq = 0;
nConstraint++;
}
@@ -136223,10 +134113,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
}
- if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
- sqlite3VdbeAddOp2(v, OP_SeekHit, iIdxCur, 1);
- }
-
/* Seek the table cursor, if required */
if( omitTable ){
/* pIdx is a covering index. No need to access the main table. */
@@ -136237,6 +134123,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
)){
iRowidReg = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
+ sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg);
VdbeCoverage(v);
}else{
@@ -136471,23 +134358,23 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
** row will be skipped in subsequent sub-WHERE clauses.
*/
if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+ int r;
int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
if( HasRowid(pTab) ){
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, regRowid);
+ r = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, regRowid, 0);
jmp1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0,
- regRowid, iSet);
+ r,iSet);
VdbeCoverage(v);
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
int nPk = pPk->nKeyCol;
int iPk;
- int r;
/* Read the PK into an array of temp registers. */
r = sqlite3GetTempRange(pParse, nPk);
for(iPk=0; iPk<nPk; iPk++){
int iCol = pPk->aiColumn[iPk];
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, iCol, r+iPk);
+ sqlite3ExprCodeGetColumnToReg(pParse, pTab, iCol, iCur, r+iPk);
}
/* Check if the temp table already contains this key. If so,
@@ -136720,6 +134607,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
VdbeComment((v, "record LEFT JOIN hit"));
+ sqlite3ExprCacheClear(pParse);
for(pTerm=pWC->a, j=0; jnTerm; j++, pTerm++){
testcase( pTerm->wtFlags & TERM_VIRTUAL );
testcase( pTerm->wtFlags & TERM_CODED );
@@ -136935,18 +134823,18 @@ static int isLikeOrGlob(
int *pisComplete, /* True if the only wildcard is % in the last character */
int *pnoCase /* True if uppercase is equivalent to lowercase */
){
- const u8 *z = 0; /* String on RHS of LIKE operator */
+ const u8 *z = 0; /* String on RHS of LIKE operator */
Expr *pRight, *pLeft; /* Right and left size of LIKE operator */
ExprList *pList; /* List of operands to the LIKE operator */
- u8 c; /* One character in z[] */
+ int c; /* One character in z[] */
int cnt; /* Number of non-wildcard prefix characters */
- u8 wc[4]; /* Wildcard characters */
+ char wc[4]; /* Wildcard characters */
sqlite3 *db = pParse->db; /* Database connection */
sqlite3_value *pVal = 0;
int op; /* Opcode of pRight */
int rc; /* Result code to return */
- if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, (char*)wc) ){
+ if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, wc) ){
return 0;
}
#ifdef SQLITE_EBCDIC
@@ -136971,6 +134859,23 @@ static int isLikeOrGlob(
}
if( z ){
+ /* If the RHS begins with a digit or a minus sign, then the LHS must
+ ** be an ordinary column (not a virtual table column) with TEXT affinity.
+ ** Otherwise the LHS might be numeric and "lhs >= rhs" would be false
+ ** even though "lhs LIKE rhs" is true. But if the RHS does not start
+ ** with a digit or '-', then "lhs LIKE rhs" will always be false if
+ ** the LHS is numeric and so the optimization still works.
+ */
+ if( sqlite3Isdigit(z[0]) || z[0]=='-' ){
+ if( pLeft->op!=TK_COLUMN
+ || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
+ || IsVirtual(pLeft->pTab) /* Value might be numeric */
+ ){
+ sqlite3ValueFree(pVal);
+ return 0;
+ }
+ }
+
/* Count the number of prefix characters prior to the first wildcard */
cnt = 0;
while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){
@@ -136980,13 +134885,11 @@ static int isLikeOrGlob(
/* The optimization is possible only if (1) the pattern does not begin
** with a wildcard and if (2) the non-wildcard prefix does not end with
- ** an (illegal 0xff) character, or (3) the pattern does not consist of
- ** a single escape character. The second condition is necessary so
+ ** an (illegal 0xff) character. The second condition is necessary so
** that we can increment the prefix key to find an upper bound for the
- ** range search. The third is because the caller assumes that the pattern
- ** consists of at least one character after all escapes have been
- ** removed. */
- if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){
+ ** range search.
+ */
+ if( cnt!=0 && 255!=(u8)z[cnt-1] ){
Expr *pPrefix;
/* A "complete" match if the pattern ends with "*" or "%" */
@@ -137003,32 +134906,6 @@ static int isLikeOrGlob(
zNew[iTo++] = zNew[iFrom];
}
zNew[iTo] = 0;
-
- /* If the RHS begins with a digit or a minus sign, then the LHS must be
- ** an ordinary column (not a virtual table column) with TEXT affinity.
- ** Otherwise the LHS might be numeric and "lhs >= rhs" would be false
- ** even though "lhs LIKE rhs" is true. But if the RHS does not start
- ** with a digit or '-', then "lhs LIKE rhs" will always be false if
- ** the LHS is numeric and so the optimization still works.
- **
- ** 2018-09-10 ticket c94369cae9b561b1f996d0054bfab11389f9d033
- ** The RHS pattern must not be '/%' because the termination condition
- ** will then become "x<'0'" and if the affinity is numeric, will then
- ** be converted into "x<0", which is incorrect.
- */
- if( sqlite3Isdigit(zNew[0])
- || zNew[0]=='-'
- || (zNew[0]+1=='0' && iTo==1)
- ){
- if( pLeft->op!=TK_COLUMN
- || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
- || IsVirtual(pLeft->pTab) /* Value might be numeric */
- ){
- sqlite3ExprDelete(db, pPrefix);
- sqlite3ValueFree(pVal);
- return 0;
- }
- }
}
*ppPrefix = pPrefix;
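The comments in this hunk describe how a LIKE pattern with a literal prefix is turned into a range constraint, and why a prefix that begins with a digit or '-' is rejected when the left-hand side might be numeric. A minimal sketch of the prefix-to-range idea, independent of SQLite's internals (the helper name and the fixed-size buffers below are illustrative assumptions, not part of this diff):

  #include <stdio.h>
  #include <string.h>

  /* Illustrative only: derive bounds "x >= lo AND x < hi" for a LIKE pattern
  ** whose literal prefix is `prefix` (e.g. "abc" for 'abc%'). The upper bound
  ** is formed by incrementing the last prefix byte, which is why a prefix
  ** ending in 0xff cannot be handled this way. */
  static int like_prefix_range(const char *prefix, char *lo, char *hi, size_t n){
    size_t len = strlen(prefix);
    if( len==0 || len+1>n || (unsigned char)prefix[len-1]==0xff ) return 0;
    memcpy(lo, prefix, len+1);
    memcpy(hi, prefix, len+1);
    hi[len-1]++;                      /* "abc" -> "abd" */
    return 1;
  }

  int main(void){
    char lo[32], hi[32];
    if( like_prefix_range("abc", lo, hi, sizeof(lo)) ){
      printf("x LIKE 'abc%%'  =>  x >= '%s' AND x < '%s'\n", lo, hi);
    }
    return 0;
  }

The 0xff guard mirrors the condition quoted in the comment above: incrementing the last prefix byte only yields a valid upper bound when that byte has a successor.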
@@ -137090,7 +134967,6 @@ static int isLikeOrGlob(
** If the expression matches none of the patterns above, return 0.
*/
static int isAuxiliaryVtabOperator(
- sqlite3 *db, /* Parsing context */
Expr *pExpr, /* Test this expression */
unsigned char *peOp2, /* OUT: 0 for MATCH, or else an op2 value */
Expr **ppLeft, /* Column expression to left of MATCH/op2 */
@@ -137114,54 +134990,16 @@ static int isAuxiliaryVtabOperator(
if( pList==0 || pList->nExpr!=2 ){
return 0;
}
-
- /* Built-in operators MATCH, GLOB, LIKE, and REGEXP attach to a
- ** virtual table on their second argument, which is the same as
- ** the left-hand side operand in their in-fix form.
- **
- ** vtab_column MATCH expression
- ** MATCH(expression,vtab_column)
- */
pCol = pList->a[1].pExpr;
- if( pCol->op==TK_COLUMN && IsVirtual(pCol->pTab) ){
- for(i=0; i<ArraySize(aOp); i++){
- if( sqlite3StrICmp(pExpr->u.zToken, aOp[i].zOp)==0 ){
- *peOp2 = aOp[i].eOp2;
- *ppRight = pList->a[0].pExpr;
- *ppLeft = pCol;
- return 1;
- }
- }
+ if( pCol->op!=TK_COLUMN || !IsVirtual(pCol->pTab) ){
+ return 0;
}
-
- /* We can also match against the first column of overloaded
- ** functions where xFindFunction returns a value of at least
- ** SQLITE_INDEX_CONSTRAINT_FUNCTION.
- **
- ** OVERLOADED(vtab_column,expression)
- **
- ** Historically, xFindFunction expected to see lower-case function
- ** names. But for this use case, xFindFunction is expected to deal
- ** with function names in an arbitrary case.
- */
- pCol = pList->a[0].pExpr;
- if( pCol->op==TK_COLUMN && IsVirtual(pCol->pTab) ){
- sqlite3_vtab *pVtab;
- sqlite3_module *pMod;
- void (*xNotUsed)(sqlite3_context*,int,sqlite3_value**);
- void *pNotUsed;
- pVtab = sqlite3GetVTable(db, pCol->pTab)->pVtab;
- assert( pVtab!=0 );
- assert( pVtab->pModule!=0 );
- pMod = (sqlite3_module *)pVtab->pModule;
- if( pMod->xFindFunction!=0 ){
- i = pMod->xFindFunction(pVtab,2, pExpr->u.zToken, &xNotUsed, &pNotUsed);
- if( i>=SQLITE_INDEX_CONSTRAINT_FUNCTION ){
- *peOp2 = i;
- *ppRight = pList->a[1].pExpr;
- *ppLeft = pCol;
- return 1;
- }
+ for(i=0; i<ArraySize(aOp); i++){
+ if( sqlite3StrICmp(pExpr->u.zToken, aOp[i].zOp)==0 ){
+ *peOp2 = aOp[i].eOp2;
+ *ppRight = pList->a[0].pExpr;
+ *ppLeft = pCol;
+ return 1;
}
}
}else if( pExpr->op==TK_NE || pExpr->op==TK_ISNOT || pExpr->op==TK_NOTNULL ){
@@ -137463,12 +135301,7 @@ static void exprAnalyzeOrTerm(
** empty.
*/
pOrInfo->indexable = indexable;
- if( indexable ){
- pTerm->eOperator = WO_OR;
- pWC->hasOr = 1;
- }else{
- pTerm->eOperator = WO_OR;
- }
+ pTerm->eOperator = indexable==0 ? 0 : WO_OR;
/* For a two-way OR, attempt to implementation case 2.
*/
@@ -137609,7 +135442,7 @@ static void exprAnalyzeOrTerm(
idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew==0 );
exprAnalyze(pSrc, pWC, idxNew);
- /* pTerm = &pWC->a[idxTerm]; // would be needed if pTerm where used again */
+ pTerm = &pWC->a[idxTerm];
markTermAsChild(pWC, idxNew, idxTerm);
}else{
sqlite3ExprListDelete(db, pList);
@@ -137648,7 +135481,7 @@ static int termIsEquivalence(Parse *pParse, Expr *pExpr){
return 0;
}
pColl = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight);
- if( sqlite3IsBinary(pColl) ) return 1;
+ if( pColl==0 || sqlite3StrICmp(pColl->zName, "BINARY")==0 ) return 1;
return sqlite3ExprCollSeqMatch(pParse, pExpr->pLeft, pExpr->pRight);
}
@@ -137807,7 +135640,7 @@ static void exprAnalyze(
pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight);
}
pMaskSet->bVarSelect = 0;
- prereqAll = sqlite3WhereExprUsageNN(pMaskSet, pExpr);
+ prereqAll = sqlite3WhereExprUsage(pMaskSet, pExpr);
if( pMaskSet->bVarSelect ) pTerm->wtFlags |= TERM_VARSELECT;
if( ExprHasProperty(pExpr, EP_FromJoin) ){
Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable);
@@ -137989,7 +135822,7 @@ static void exprAnalyze(
}
*pC = c + 1;
}
- zCollSeqName = noCase ? "NOCASE" : sqlite3StrBINARY;
+ zCollSeqName = noCase ? "NOCASE" : "BINARY";
pNewExpr1 = sqlite3ExprDup(db, pLeft, 0);
pNewExpr1 = sqlite3PExpr(pParse, TK_GE,
sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName),
@@ -138026,7 +135859,7 @@ static void exprAnalyze(
*/
if( pWC->op==TK_AND ){
Expr *pRight = 0, *pLeft = 0;
- int res = isAuxiliaryVtabOperator(db, pExpr, &eOp2, &pLeft, &pRight);
+ int res = isAuxiliaryVtabOperator(pExpr, &eOp2, &pLeft, &pRight);
while( res-- > 0 ){
int idxNew;
WhereTerm *pNewTerm;
@@ -138200,7 +136033,6 @@ SQLITE_PRIVATE void sqlite3WhereClauseInit(
WhereInfo *pWInfo /* The WHERE processing context */
){
pWC->pWInfo = pWInfo;
- pWC->hasOr = 0;
pWC->pOuter = 0;
pWC->nTerm = 0;
pWC->nSlot = ArraySize(pWC->aStatic);
@@ -138237,18 +136069,17 @@ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
** a bitmask indicating which tables are used in that expression
** tree.
*/
-SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
Bitmask mask;
- if( p->op==TK_COLUMN && !ExprHasProperty(p, EP_FixedCol) ){
+ if( p==0 ) return 0;
+ if( p->op==TK_COLUMN ){
return sqlite3WhereGetMask(pMaskSet, p->iTable);
- }else if( ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
- assert( p->op!=TK_IF_NULL_ROW );
- return 0;
}
mask = (p->op==TK_IF_NULL_ROW) ? sqlite3WhereGetMask(pMaskSet, p->iTable) : 0;
- if( p->pLeft ) mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pLeft);
+ assert( !ExprHasProperty(p, EP_TokenOnly) );
+ if( p->pLeft ) mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft);
if( p->pRight ){
- mask |= sqlite3WhereExprUsageNN(pMaskSet, p->pRight);
+ mask |= sqlite3WhereExprUsage(pMaskSet, p->pRight);
assert( p->x.pList==0 );
}else if( ExprHasProperty(p, EP_xIsSelect) ){
if( ExprHasProperty(p, EP_VarSelect) ) pMaskSet->bVarSelect = 1;
@@ -138258,9 +136089,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsageNN(WhereMaskSet *pMaskSet, Expr *p){
}
return mask;
}
-SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
- return p ? sqlite3WhereExprUsageNN(pMaskSet,p) : 0;
-}
SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet *pMaskSet, ExprList *pList){
int i;
Bitmask mask = 0;
@@ -138402,38 +136230,15 @@ SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
}
/*
-** In the ORDER BY LIMIT optimization, if the inner-most loop is known
-** to emit rows in increasing order, and if the last row emitted by the
-** inner-most loop did not fit within the sorter, then we can skip all
-** subsequent rows for the current iteration of the inner loop (because they
-** will not fit in the sorter either) and continue with the second inner
-** loop - the loop immediately outside the inner-most.
+** Return TRUE if the innermost loop of the WHERE clause implementation
+** returns rows in ORDER BY order for complete run of the inner loop.
**
-** When a row does not fit in the sorter (because the sorter already
-** holds LIMIT+OFFSET rows that are smaller), then a jump is made to the
-** label returned by this function.
-**
-** If the ORDER BY LIMIT optimization applies, the jump destination should
-** be the continuation for the second-inner-most loop. If the ORDER BY
-** LIMIT optimization does not apply, then the jump destination should
-** be the continuation for the inner-most loop.
-**
-** It is always safe for this routine to return the continuation of the
-** inner-most loop, in the sense that a correct answer will result.
-** Returning the continuation the second inner loop is an optimization
-** that might make the code run a little faster, but should not change
-** the final answer.
+** Across multiple iterations of outer loops, the output rows need not be
+** sorted. As long as rows are sorted for just the innermost loop, this
+** routine can return TRUE.
*/
-SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo *pWInfo){
- WhereLevel *pInner;
- if( !pWInfo->bOrderedInnerLoop ){
- /* The ORDER BY LIMIT optimization does not apply. Jump to the
- ** continuation of the inner-most loop. */
- return pWInfo->iContinue;
- }
- pInner = &pWInfo->a[pWInfo->nLevel-1];
- assert( pInner->addrNxt!=0 );
- return pInner->addrNxt;
+SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo *pWInfo){
+ return pWInfo->bOrderedInnerLoop;
}
/*
@@ -139160,6 +136965,7 @@ static void constructAutomaticIndex(
VdbeComment((v, "for %s", pTable->zName));
/* Fill the automatic index with content */
+ sqlite3ExprCachePush(pParse);
pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom];
if( pTabItem->fg.viaCoroutine ){
int regYield = pTabItem->regReturn;
@@ -139167,7 +136973,7 @@ static void constructAutomaticIndex(
sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield);
VdbeCoverage(v);
- VdbeComment((v, "next row of %s", pTabItem->pTab->zName));
+ VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName));
}else{
addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v);
}
@@ -139196,6 +137002,7 @@ static void constructAutomaticIndex(
sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX);
sqlite3VdbeJumpHere(v, addrTop);
sqlite3ReleaseTempReg(pParse, regRecord);
+ sqlite3ExprCachePop(pParse);
/* Jump here when skipping the initialization */
sqlite3VdbeJumpHere(v, addrInit);
@@ -139301,20 +137108,6 @@ static sqlite3_index_info *allocateIndexInfo(
testcase( pTerm->eOperator & WO_ALL );
if( (pTerm->eOperator & ~(WO_EQUIV))==0 ) continue;
if( pTerm->wtFlags & TERM_VNULL ) continue;
- if( (pSrc->fg.jointype & JT_LEFT)!=0
- && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
- && (pTerm->eOperator & (WO_IS|WO_ISNULL))
- ){
- /* An "IS" term in the WHERE clause where the virtual table is the rhs
- ** of a LEFT JOIN. Do not pass this term to the virtual table
- ** implementation, as this can lead to incorrect results from SQL such
- ** as:
- **
- ** "LEFT JOIN vtab WHERE vtab.col IS NULL" */
- testcase( pTerm->eOperator & WO_ISNULL );
- testcase( pTerm->eOperator & WO_IS );
- continue;
- }
assert( pTerm->u.leftColumn>=(-1) );
pIdxCons[j].iColumn = pTerm->u.leftColumn;
pIdxCons[j].iTermOffset = i;
@@ -139806,9 +137599,7 @@ static int whereRangeScanEst(
Index *p = pLoop->u.btree.pIndex;
int nEq = pLoop->u.btree.nEq;
- if( p->nSample>0 && nEq<p->nSampleCol
- && OptimizationEnabled(pParse->db, SQLITE_Stat34)
- ){
+ if( p->nSample>0 && nEq<p->nSampleCol ){
if( nEq==pBuilder->nRecValid ){
UnpackedRecord *pRec = pBuilder->pRec;
tRowcnt a[2];
@@ -140823,6 +138614,7 @@ static int whereLoopAddBtreeIndex(
if( eOp & WO_IN ){
Expr *pExpr = pTerm->pExpr;
+ pNew->wsFlags |= WHERE_COLUMN_IN;
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
/* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */
int i;
@@ -140842,42 +138634,6 @@ static int whereLoopAddBtreeIndex(
assert( nIn>0 ); /* RHS always has 2 or more terms... The parser
** changes "x IN (?)" into "x=?". */
}
- if( pProbe->hasStat1 ){
- LogEst M, logK, safetyMargin;
- /* Let:
- ** N = the total number of rows in the table
- ** K = the number of entries on the RHS of the IN operator
- ** M = the number of rows in the table that match terms to the
- ** to the left in the same index. If the IN operator is on
- ** the left-most index column, M==N.
- **
- ** Given the definitions above, it is better to omit the IN operator
- ** from the index lookup and instead do a scan of the M elements,
- ** testing each scanned row against the IN operator separately, if:
- **
- ** M*log(K) < K*log(N)
- **
- ** Our estimates for M, K, and N might be inaccurate, so we build in
- ** a safety margin of 2 (LogEst: 10) that favors using the IN operator
- ** with the index, as using an index has better worst-case behavior.
- ** If we do not have real sqlite_stat1 data, always prefer to use
- ** the index.
- */
- M = pProbe->aiRowLogEst[saved_nEq];
- logK = estLog(nIn);
- safetyMargin = 10; /* TUNING: extra weight for indexed IN */
- if( M + logK + safetyMargin < nIn + rLogSize ){
- WHERETRACE(0x40,
- ("Scan preferred over IN operator on column %d of \"%s\" (%d<%d)\n",
- saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize));
- continue;
- }else{
- WHERETRACE(0x40,
- ("IN operator preferred on column %d of \"%s\" (%d>=%d)\n",
- saved_nEq, pProbe->zName, M+logK+10, nIn+rLogSize));
- }
- }
- pNew->wsFlags |= WHERE_COLUMN_IN;
}else if( eOp & (WO_EQ|WO_IS) ){
int iCol = pProbe->aiColumn[saved_nEq];
pNew->wsFlags |= WHERE_COLUMN_EQ;
@@ -140956,7 +138712,6 @@ static int whereLoopAddBtreeIndex(
&& pProbe->nSample
&& pNew->u.btree.nEq<=pProbe->nSampleCol
&& ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect))
- && OptimizationEnabled(db, SQLITE_Stat34)
){
Expr *pExpr = pTerm->pExpr;
if( (eOp & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){
@@ -141045,7 +138800,6 @@ static int whereLoopAddBtreeIndex(
if( saved_nEq==saved_nSkip
&& saved_nEq+1<pProbe->nKeyCol
&& pProbe->noSkipScan==0
- && OptimizationEnabled(db, SQLITE_SkipScan)
&& pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */
&& (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK
){
@@ -141109,6 +138863,24 @@ static int indexMightHelpWithOrderBy(
return 0;
}
+/*
+** Return a bitmask where 1s indicate that the corresponding column of
+** the table is used by an index. Only the first 63 columns are considered.
+*/
+static Bitmask columnsInIndex(Index *pIdx){
+ Bitmask m = 0;
+ int j;
+ for(j=pIdx->nColumn-1; j>=0; j--){
+ int x = pIdx->aiColumn[j];
+ if( x>=0 ){
+ testcase( x==BMS-1 );
+ testcase( x==BMS-2 );
+ if( x<BMS-1 ) m |= MASKBIT(x);
+ }
+ }
+ return m;
+}
pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
m = 0;
}else{
- m = pSrc->colUsed & pProbe->colNotIdxed;
+ m = pSrc->colUsed & ~columnsInIndex(pProbe);
pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED;
}
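columnsInIndex(), restored in the hunk above, packs the index's column numbers into a 63-bit mask so that pSrc->colUsed & ~columnsInIndex(pProbe) is zero exactly when every column the query reads is available from the index, i.e. the index is covering. A self-contained sketch of the same test with simplified stand-in types; the DemoIndex struct and its fields are assumptions for illustration, not SQLite's definitions:

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t Bitmask;
  #define BMS 64                           /* usable bits, as in SQLite */
  #define MASKBIT(n) (((Bitmask)1)<<(n))

  /* Simplified index description: column numbers covered by the index. */
  typedef struct DemoIndex { int nColumn; int aiColumn[8]; } DemoIndex;

  static Bitmask columns_in_index(const DemoIndex *pIdx){
    Bitmask m = 0;
    int j;
    for(j=pIdx->nColumn-1; j>=0; j--){
      int x = pIdx->aiColumn[j];
      if( x>=0 && x<BMS-1 ) m |= MASKBIT(x);  /* columns 63 and up are never marked */
    }
    return m;
  }

  int main(void){
    DemoIndex idx = { 2, {0, 2} };             /* index on columns 0 and 2 */
    Bitmask colUsed = MASKBIT(0)|MASKBIT(2);   /* query touches columns 0 and 2 */
    printf("covering: %s\n", (colUsed & ~columns_in_index(&idx))==0 ? "yes" : "no");
    return 0;
  }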
@@ -141575,7 +139347,7 @@ SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info *pIdxInfo, int
if( pX->pLeft ){
pC = sqlite3BinaryCompareCollSeq(pHidden->pParse, pX->pLeft, pX->pRight);
}
- zRet = (pC ? pC->zName : sqlite3StrBINARY);
+ zRet = (pC ? pC->zName : "BINARY");
}
return zRet;
}
@@ -141891,7 +139663,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
{
rc = whereLoopAddBtree(pBuilder, mPrereq);
}
- if( rc==SQLITE_OK && pBuilder->pWC->hasOr ){
+ if( rc==SQLITE_OK ){
rc = whereLoopAddOr(pBuilder, mPrereq, mUnusable);
}
mPrior |= pNew->maskSelf;
@@ -142426,11 +140198,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pWInfo, nRowEst, nOrderBy, isOrdered
);
}
- /* TUNING: Add a small extra penalty (5) to sorting as an
- ** extra encouragment to the query planner to select a plan
- ** where the rows emerge in the correct order without any sorting
- ** required. */
- rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 5;
+ rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]);
WHERETRACE(0x002,
("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n",
@@ -142620,7 +140388,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}
}
- pWInfo->bOrderedInnerLoop = 0;
if( pWInfo->pOrderBy ){
if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){
if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
@@ -142732,7 +140499,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
}
if( j!=pIdx->nKeyCol ) continue;
pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_ONEROW|WHERE_INDEXED;
- if( pIdx->isCovering || (pItem->colUsed & pIdx->colNotIdxed)==0 ){
+ if( pIdx->isCovering || (pItem->colUsed & ~columnsInIndex(pIdx))==0 ){
pLoop->wsFlags |= WHERE_IDX_ONLY;
}
pLoop->nLTerm = j;
@@ -143412,26 +141179,6 @@ whereBeginError:
return 0;
}
-/*
-** Part of sqlite3WhereEnd() will rewrite opcodes to reference the
-** index rather than the main table. In SQLITE_DEBUG mode, we want
-** to trace those changes if PRAGMA vdbe_addoptrace=on. This routine
-** does that.
-*/
-#ifndef SQLITE_DEBUG
-# define OpcodeRewriteTrace(D,K,P) /* no-op */
-#else
-# define OpcodeRewriteTrace(D,K,P) sqlite3WhereOpcodeRewriteTrace(D,K,P)
- static void sqlite3WhereOpcodeRewriteTrace(
- sqlite3 *db,
- int pc,
- VdbeOp *pOp
- ){
- if( (db->flags & SQLITE_VdbeAddopTrace)==0 ) return;
- sqlite3VdbePrintOp(0, pc, pOp);
- }
-#endif
-
/*
** Generate the end of the WHERE loop. See comments on
** sqlite3WhereBegin() for additional information.
@@ -143448,6 +141195,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
/* Generate loop termination code.
*/
VdbeModuleComment((v, "End WHERE-core"));
+ sqlite3ExprCacheClear(pParse);
for(i=pWInfo->nLevel-1; i>=0; i--){
int addr;
pLevel = &pWInfo->a[i];
@@ -143498,17 +141246,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){
sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
if( pIn->eEndLoopOp!=OP_Noop ){
- if( pIn->nPrefix ){
- assert( pLoop->wsFlags & WHERE_IN_EARLYOUT );
- sqlite3VdbeAddOp4Int(v, OP_IfNoHope, pLevel->iIdxCur,
- sqlite3VdbeCurrentAddr(v)+2,
- pIn->iBase, pIn->nPrefix);
- VdbeCoverage(v);
- }
sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
VdbeCoverage(v);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Prev);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_Next);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen);
}
sqlite3VdbeJumpHere(v, pIn->addrInTop-1);
}
@@ -143599,11 +141340,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
){
last = sqlite3VdbeCurrentAddr(v);
k = pLevel->addrBody;
-#ifdef SQLITE_DEBUG
- if( db->flags & SQLITE_VdbeAddopTrace ){
- printf("TRANSLATE opcodes in range %d..%d\n", k, last-1);
- }
-#endif
pOp = sqlite3VdbeGetOp(v, k);
for(; k<last; k++, pOp++){
if( pOp->p1!=pLevel->iTabCur ) continue;
@@ -143623,22 +141359,16 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
if( x>=0 ){
pOp->p2 = x;
pOp->p1 = pLevel->iIdxCur;
- OpcodeRewriteTrace(db, k, pOp);
}
assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0
|| pWInfo->eOnePass );
}else if( pOp->opcode==OP_Rowid ){
pOp->p1 = pLevel->iIdxCur;
pOp->opcode = OP_IdxRowid;
- OpcodeRewriteTrace(db, k, pOp);
}else if( pOp->opcode==OP_IfNullRow ){
pOp->p1 = pLevel->iIdxCur;
- OpcodeRewriteTrace(db, k, pOp);
}
}
-#ifdef SQLITE_DEBUG
- if( db->flags & SQLITE_VdbeAddopTrace ) printf("TRANSLATE complete\n");
-#endif
}
}
@@ -143650,2261 +141380,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}
/************** End of where.c ***********************************************/
-/************** Begin file window.c ******************************************/
-/*
-** 2018 May 08
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-*/
-/* #include "sqliteInt.h" */
-
-#ifndef SQLITE_OMIT_WINDOWFUNC
-
-/*
-** SELECT REWRITING
-**
-** Any SELECT statement that contains one or more window functions in
-** either the select list or ORDER BY clause (the only two places window
-** functions may be used) is transformed by function sqlite3WindowRewrite()
-** in order to support window function processing. For example, with the
-** schema:
-**
-** CREATE TABLE t1(a, b, c, d, e, f, g);
-**
-** the statement:
-**
-** SELECT a+1, max(b) OVER (PARTITION BY c ORDER BY d) FROM t1 ORDER BY e;
-**
-** is transformed to:
-**
-** SELECT a+1, max(b) OVER (PARTITION BY c ORDER BY d) FROM (
-** SELECT a, e, c, d, b FROM t1 ORDER BY c, d
-** ) ORDER BY e;
-**
-** The flattening optimization is disabled when processing this transformed
-** SELECT statement. This allows the implementation of the window function
-** (in this case max()) to process rows sorted in order of (c, d), which
-** makes things easier for obvious reasons. More generally:
-**
-** * FROM, WHERE, GROUP BY and HAVING clauses are all moved to
-** the sub-query.
-**
-** * ORDER BY, LIMIT and OFFSET remain part of the parent query.
-**
-** * Terminals from each of the expression trees that make up the
-** select-list and ORDER BY expressions in the parent query are
-** selected by the sub-query. For the purposes of the transformation,
-** terminals are column references and aggregate functions.
-**
-** If there is more than one window function in the SELECT that uses
-** the same window declaration (the OVER bit), then a single scan may
-** be used to process more than one window function. For example:
-**
-** SELECT max(b) OVER (PARTITION BY c ORDER BY d),
-** min(e) OVER (PARTITION BY c ORDER BY d)
-** FROM t1;
-**
-** is transformed in the same way as the example above. However:
-**
-** SELECT max(b) OVER (PARTITION BY c ORDER BY d),
-** min(e) OVER (PARTITION BY a ORDER BY b)
-** FROM t1;
-**
-** Must be transformed to:
-**
-** SELECT max(b) OVER (PARTITION BY c ORDER BY d) FROM (
-** SELECT e, min(e) OVER (PARTITION BY a ORDER BY b), c, d, b FROM
-** SELECT a, e, c, d, b FROM t1 ORDER BY a, b
-** ) ORDER BY c, d
-** ) ORDER BY e;
-**
-** so that both min() and max() may process rows in the order defined by
-** their respective window declarations.
-**
-** INTERFACE WITH SELECT.C
-**
-** When processing the rewritten SELECT statement, code in select.c calls
-** sqlite3WhereBegin() to begin iterating through the results of the
-** sub-query, which is always implemented as a co-routine. It then calls
-** sqlite3WindowCodeStep() to process rows and finish the scan by calling
-** sqlite3WhereEnd().
-**
-** sqlite3WindowCodeStep() generates VM code so that, for each row returned
-** by the sub-query a sub-routine (OP_Gosub) coded by select.c is invoked.
-** When the sub-routine is invoked:
-**
-** * The results of all window-functions for the row are stored
-** in the associated Window.regResult registers.
-**
-** * The required terminal values are stored in the current row of
-** temp table Window.iEphCsr.
-**
-** In some cases, depending on the window frame and the specific window
-** functions invoked, sqlite3WindowCodeStep() caches each entire partition
-** in a temp table before returning any rows. In other cases it does not.
-** This detail is encapsulated within this file, the code generated by
-** select.c is the same in either case.
-**
-** BUILT-IN WINDOW FUNCTIONS
-**
-** This implementation features the following built-in window functions:
-**
-** row_number()
-** rank()
-** dense_rank()
-** percent_rank()
-** cume_dist()
-** ntile(N)
-** lead(expr [, offset [, default]])
-** lag(expr [, offset [, default]])
-** first_value(expr)
-** last_value(expr)
-** nth_value(expr, N)
-**
-** These are the same built-in window functions supported by Postgres.
-** Although the behaviour of aggregate window functions (functions that
-** can be used as either aggregates or window funtions) allows them to
-** be implemented using an API, built-in window functions are much more
-** esoteric. Additionally, some window functions (e.g. nth_value())
-** may only be implemented by caching the entire partition in memory.
-** As such, some built-in window functions use the same API as aggregate
-** window functions and some are implemented directly using VDBE
-** instructions. Additionally, for those functions that use the API, the
-** window frame is sometimes modified before the SELECT statement is
-** rewritten. For example, regardless of the specified window frame, the
-** row_number() function always uses:
-**
-** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-**
-** See sqlite3WindowUpdate() for details.
-**
-** As well as some of the built-in window functions, aggregate window
-** functions min() and max() are implemented using VDBE instructions if
-** the start of the window frame is declared as anything other than
-** UNBOUNDED PRECEDING.
-*/
-
-/*
-** Implementation of built-in window function row_number(). Assumes that the
-** window frame has been coerced to:
-**
-** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void row_numberStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- i64 *p = (i64*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ) (*p)++;
- UNUSED_PARAMETER(nArg);
- UNUSED_PARAMETER(apArg);
-}
-static void row_numberValueFunc(sqlite3_context *pCtx){
- i64 *p = (i64*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- sqlite3_result_int64(pCtx, (p ? *p : 0));
-}
-
-/*
-** Context object type used by rank(), dense_rank(), percent_rank() and
-** cume_dist().
-*/
-struct CallCount {
- i64 nValue;
- i64 nStep;
- i64 nTotal;
-};
-
-/*
-** Implementation of built-in window function dense_rank(). Assumes that
-** the window frame has been set to:
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void dense_rankStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ) p->nStep = 1;
- UNUSED_PARAMETER(nArg);
- UNUSED_PARAMETER(apArg);
-}
-static void dense_rankValueFunc(sqlite3_context *pCtx){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- if( p->nStep ){
- p->nValue++;
- p->nStep = 0;
- }
- sqlite3_result_int64(pCtx, p->nValue);
- }
-}
-
-/*
-** Implementation of built-in window function rank(). Assumes that
-** the window frame has been set to:
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void rankStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- p->nStep++;
- if( p->nValue==0 ){
- p->nValue = p->nStep;
- }
- }
- UNUSED_PARAMETER(nArg);
- UNUSED_PARAMETER(apArg);
-}
-static void rankValueFunc(sqlite3_context *pCtx){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- sqlite3_result_int64(pCtx, p->nValue);
- p->nValue = 0;
- }
-}
-
-/*
-** Implementation of built-in window function percent_rank(). Assumes that
-** the window frame has been set to:
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void percent_rankStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct CallCount *p;
- UNUSED_PARAMETER(nArg); assert( nArg==1 );
-
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- if( p->nTotal==0 ){
- p->nTotal = sqlite3_value_int64(apArg[0]);
- }
- p->nStep++;
- if( p->nValue==0 ){
- p->nValue = p->nStep;
- }
- }
-}
-static void percent_rankValueFunc(sqlite3_context *pCtx){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- if( p->nTotal>1 ){
- double r = (double)(p->nValue-1) / (double)(p->nTotal-1);
- sqlite3_result_double(pCtx, r);
- }else{
- sqlite3_result_double(pCtx, 0.0);
- }
- p->nValue = 0;
- }
-}
-
-/*
-** Implementation of built-in window function cume_dist(). Assumes that
-** the window frame has been set to:
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void cume_distStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct CallCount *p;
- assert( nArg==1 ); UNUSED_PARAMETER(nArg);
-
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- if( p->nTotal==0 ){
- p->nTotal = sqlite3_value_int64(apArg[0]);
- }
- p->nStep++;
- }
-}
-static void cume_distValueFunc(sqlite3_context *pCtx){
- struct CallCount *p;
- p = (struct CallCount*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p && p->nTotal ){
- double r = (double)(p->nStep) / (double)(p->nTotal);
- sqlite3_result_double(pCtx, r);
- }
-}
-
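The CallCount-based implementations above boil down to two formulas once a partition has been scanned: percent_rank() is (rank-1)/(N-1), with 0.0 when N is less than 2, and cume_dist() is peers_seen/N. A short standalone illustration of those formulas, assuming a made-up partition with no ORDER BY ties so that rank() is simply the row number:

  #include <stdio.h>

  int main(void){
    int N = 5;                      /* rows in one partition, no ORDER BY ties */
    for(int i=0; i<N; i++){
      int rank = i+1;               /* with no ties, rank() is the row number */
      double pr = N>1 ? (double)(rank-1)/(double)(N-1) : 0.0;
      double cd = (double)(i+1)/(double)N;     /* peers seen so far / total rows */
      printf("row %d: percent_rank=%.2f cume_dist=%.2f\n", rank, pr, cd);
    }
    return 0;
  }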
-/*
-** Context object for ntile() window function.
-*/
-struct NtileCtx {
- i64 nTotal; /* Total rows in partition */
- i64 nParam; /* Parameter passed to ntile(N) */
- i64 iRow; /* Current row */
-};
-
-/*
-** Implementation of ntile(). This assumes that the window frame has
-** been coerced to:
-**
-** ROWS UNBOUNDED PRECEDING AND CURRENT ROW
-*/
-static void ntileStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct NtileCtx *p;
- assert( nArg==2 ); UNUSED_PARAMETER(nArg);
- p = (struct NtileCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- if( p->nTotal==0 ){
- p->nParam = sqlite3_value_int64(apArg[0]);
- p->nTotal = sqlite3_value_int64(apArg[1]);
- if( p->nParam<=0 ){
- sqlite3_result_error(
- pCtx, "argument of ntile must be a positive integer", -1
- );
- }
- }
- p->iRow++;
- }
-}
-static void ntileValueFunc(sqlite3_context *pCtx){
- struct NtileCtx *p;
- p = (struct NtileCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p && p->nParam>0 ){
- int nSize = (p->nTotal / p->nParam);
- if( nSize==0 ){
- sqlite3_result_int64(pCtx, p->iRow);
- }else{
- i64 nLarge = p->nTotal - p->nParam*nSize;
- i64 iSmall = nLarge*(nSize+1);
- i64 iRow = p->iRow-1;
-
- assert( (nLarge*(nSize+1) + (p->nParam-nLarge)*nSize)==p->nTotal );
-
- if( iRow<iSmall ){
- sqlite3_result_int64(pCtx, 1 + iRow/(nSize+1));
- }else{
- sqlite3_result_int64(pCtx, 1 + nLarge + (iRow-iSmall)/nSize);
- }
- }
- }
-}
-
-/*
-** Context object for last_value() window function.
-*/
-struct LastValueCtx {
- sqlite3_value *pVal;
- int nVal;
-};
-
-/*
-** Implementation of last_value().
-*/
-static void last_valueStepFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct LastValueCtx *p;
- UNUSED_PARAMETER(nArg);
- p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p ){
- sqlite3_value_free(p->pVal);
- p->pVal = sqlite3_value_dup(apArg[0]);
- if( p->pVal==0 ){
- sqlite3_result_error_nomem(pCtx);
- }else{
- p->nVal++;
- }
- }
-}
-static void last_valueInvFunc(
- sqlite3_context *pCtx,
- int nArg,
- sqlite3_value **apArg
-){
- struct LastValueCtx *p;
- UNUSED_PARAMETER(nArg);
- UNUSED_PARAMETER(apArg);
- p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( ALWAYS(p) ){
- p->nVal--;
- if( p->nVal==0 ){
- sqlite3_value_free(p->pVal);
- p->pVal = 0;
- }
- }
-}
-static void last_valueValueFunc(sqlite3_context *pCtx){
- struct LastValueCtx *p;
- p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p && p->pVal ){
- sqlite3_result_value(pCtx, p->pVal);
- }
-}
-static void last_valueFinalizeFunc(sqlite3_context *pCtx){
- struct LastValueCtx *p;
- p = (struct LastValueCtx*)sqlite3_aggregate_context(pCtx, sizeof(*p));
- if( p && p->pVal ){
- sqlite3_result_value(pCtx, p->pVal);
- sqlite3_value_free(p->pVal);
- p->pVal = 0;
- }
-}
-
-/*
-** Static names for the built-in window function names. These static
-** names are used, rather than string literals, so that FuncDef objects
-** can be associated with a particular window function by direct
-** comparison of the zName pointer. Example:
-**
-** if( pFuncDef->zName==row_valueName ){ ... }
-*/
-static const char row_numberName[] = "row_number";
-static const char dense_rankName[] = "dense_rank";
-static const char rankName[] = "rank";
-static const char percent_rankName[] = "percent_rank";
-static const char cume_distName[] = "cume_dist";
-static const char ntileName[] = "ntile";
-static const char last_valueName[] = "last_value";
-static const char nth_valueName[] = "nth_value";
-static const char first_valueName[] = "first_value";
-static const char leadName[] = "lead";
-static const char lagName[] = "lag";
-
-/*
-** No-op implementations of xStep() and xFinalize(). Used as place-holders
-** for built-in window functions that never call those interfaces.
-**
-** The noopValueFunc() is called but is expected to do nothing. The
-** noopStepFunc() is never called, and so it is marked with NO_TEST to
-** let the test coverage routine know not to expect this function to be
-** invoked.
-*/
-static void noopStepFunc( /*NO_TEST*/
- sqlite3_context *p, /*NO_TEST*/
- int n, /*NO_TEST*/
- sqlite3_value **a /*NO_TEST*/
-){ /*NO_TEST*/
- UNUSED_PARAMETER(p); /*NO_TEST*/
- UNUSED_PARAMETER(n); /*NO_TEST*/
- UNUSED_PARAMETER(a); /*NO_TEST*/
- assert(0); /*NO_TEST*/
-} /*NO_TEST*/
-static void noopValueFunc(sqlite3_context *p){ UNUSED_PARAMETER(p); /*no-op*/ }
-
-/* Window functions that use all window interfaces: xStep, xFinal,
-** xValue, and xInverse */
-#define WINDOWFUNCALL(name,nArg,extra) { \
- nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
- name ## StepFunc, name ## FinalizeFunc, name ## ValueFunc, \
- name ## InvFunc, name ## Name, {0} \
-}
-
-/* Window functions that are implemented using bytecode and thus have
-** no-op routines for their methods */
-#define WINDOWFUNCNOOP(name,nArg,extra) { \
- nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
- noopStepFunc, noopValueFunc, noopValueFunc, \
- noopStepFunc, name ## Name, {0} \
-}
-
-/* Window functions that use all window interfaces: xStep, the
-** same routine for xFinalize and xValue and which never call
-** xInverse. */
-#define WINDOWFUNCX(name,nArg,extra) { \
- nArg, (SQLITE_UTF8|SQLITE_FUNC_WINDOW|extra), 0, 0, \
- name ## StepFunc, name ## ValueFunc, name ## ValueFunc, \
- noopStepFunc, name ## Name, {0} \
-}
-
-
-/*
-** Register those built-in window functions that are not also aggregates.
-*/
-SQLITE_PRIVATE void sqlite3WindowFunctions(void){
- static FuncDef aWindowFuncs[] = {
- WINDOWFUNCX(row_number, 0, 0),
- WINDOWFUNCX(dense_rank, 0, 0),
- WINDOWFUNCX(rank, 0, 0),
- WINDOWFUNCX(percent_rank, 0, SQLITE_FUNC_WINDOW_SIZE),
- WINDOWFUNCX(cume_dist, 0, SQLITE_FUNC_WINDOW_SIZE),
- WINDOWFUNCX(ntile, 1, SQLITE_FUNC_WINDOW_SIZE),
- WINDOWFUNCALL(last_value, 1, 0),
- WINDOWFUNCNOOP(nth_value, 2, 0),
- WINDOWFUNCNOOP(first_value, 1, 0),
- WINDOWFUNCNOOP(lead, 1, 0),
- WINDOWFUNCNOOP(lead, 2, 0),
- WINDOWFUNCNOOP(lead, 3, 0),
- WINDOWFUNCNOOP(lag, 1, 0),
- WINDOWFUNCNOOP(lag, 2, 0),
- WINDOWFUNCNOOP(lag, 3, 0),
- };
- sqlite3InsertBuiltinFuncs(aWindowFuncs, ArraySize(aWindowFuncs));
-}
-
-/*
-** This function is called immediately after resolving the function name
-** for a window function within a SELECT statement. Argument pList is a
-** linked list of WINDOW definitions for the current SELECT statement.
-** Argument pFunc is the function definition just resolved and pWin
-** is the Window object representing the associated OVER clause. This
-** function updates the contents of pWin as follows:
-**
-** * If the OVER clause refered to a named window (as in "max(x) OVER win"),
-** search list pList for a matching WINDOW definition, and update pWin
-** accordingly. If no such WINDOW clause can be found, leave an error
-** in pParse.
-**
-** * If the function is a built-in window function that requires the
-** window to be coerced (see "BUILT-IN WINDOW FUNCTIONS" at the top
-** of this file), pWin is updated here.
-*/
-SQLITE_PRIVATE void sqlite3WindowUpdate(
- Parse *pParse,
- Window *pList, /* List of named windows for this SELECT */
- Window *pWin, /* Window frame to update */
- FuncDef *pFunc /* Window function definition */
-){
- if( pWin->zName && pWin->eType==0 ){
- Window *p;
- for(p=pList; p; p=p->pNextWin){
- if( sqlite3StrICmp(p->zName, pWin->zName)==0 ) break;
- }
- if( p==0 ){
- sqlite3ErrorMsg(pParse, "no such window: %s", pWin->zName);
- return;
- }
- pWin->pPartition = sqlite3ExprListDup(pParse->db, p->pPartition, 0);
- pWin->pOrderBy = sqlite3ExprListDup(pParse->db, p->pOrderBy, 0);
- pWin->pStart = sqlite3ExprDup(pParse->db, p->pStart, 0);
- pWin->pEnd = sqlite3ExprDup(pParse->db, p->pEnd, 0);
- pWin->eStart = p->eStart;
- pWin->eEnd = p->eEnd;
- pWin->eType = p->eType;
- }
- if( pFunc->funcFlags & SQLITE_FUNC_WINDOW ){
- sqlite3 *db = pParse->db;
- if( pWin->pFilter ){
- sqlite3ErrorMsg(pParse,
- "FILTER clause may only be used with aggregate window functions"
- );
- }else
- if( pFunc->zName==row_numberName || pFunc->zName==ntileName ){
- sqlite3ExprDelete(db, pWin->pStart);
- sqlite3ExprDelete(db, pWin->pEnd);
- pWin->pStart = pWin->pEnd = 0;
- pWin->eType = TK_ROWS;
- pWin->eStart = TK_UNBOUNDED;
- pWin->eEnd = TK_CURRENT;
- }else
-
- if( pFunc->zName==dense_rankName || pFunc->zName==rankName
- || pFunc->zName==percent_rankName || pFunc->zName==cume_distName
- ){
- sqlite3ExprDelete(db, pWin->pStart);
- sqlite3ExprDelete(db, pWin->pEnd);
- pWin->pStart = pWin->pEnd = 0;
- pWin->eType = TK_RANGE;
- pWin->eStart = TK_UNBOUNDED;
- pWin->eEnd = TK_CURRENT;
- }
- }
- pWin->pFunc = pFunc;
-}
-
-/*
-** Context object passed through sqlite3WalkExprList() to
-** selectWindowRewriteExprCb() by selectWindowRewriteEList().
-*/
-typedef struct WindowRewrite WindowRewrite;
-struct WindowRewrite {
- Window *pWin;
- SrcList *pSrc;
- ExprList *pSub;
- Select *pSubSelect; /* Current sub-select, if any */
-};
-
-/*
-** Callback function used by selectWindowRewriteEList(). If necessary,
-** this function appends to the output expression-list and updates
-** expression (*ppExpr) in place.
-*/
-static int selectWindowRewriteExprCb(Walker *pWalker, Expr *pExpr){
- struct WindowRewrite *p = pWalker->u.pRewrite;
- Parse *pParse = pWalker->pParse;
-
- /* If this function is being called from within a scalar sub-select
- ** that used by the SELECT statement being processed, only process
- ** TK_COLUMN expressions that refer to it (the outer SELECT). Do
- ** not process aggregates or window functions at all, as they belong
- ** to the scalar sub-select. */
- if( p->pSubSelect ){
- if( pExpr->op!=TK_COLUMN ){
- return WRC_Continue;
- }else{
- int nSrc = p->pSrc->nSrc;
- int i;
-      for(i=0; i<nSrc; i++){
-        if( pExpr->iTable==p->pSrc->a[i].iCursor ) break;
- }
- if( i==nSrc ) return WRC_Continue;
- }
- }
-
- switch( pExpr->op ){
-
- case TK_FUNCTION:
- if( pExpr->pWin==0 ){
- break;
- }else{
- Window *pWin;
- for(pWin=p->pWin; pWin; pWin=pWin->pNextWin){
- if( pExpr->pWin==pWin ){
- assert( pWin->pOwner==pExpr );
- return WRC_Prune;
- }
- }
- }
- /* Fall through. */
-
- case TK_AGG_FUNCTION:
- case TK_COLUMN: {
- Expr *pDup = sqlite3ExprDup(pParse->db, pExpr, 0);
- p->pSub = sqlite3ExprListAppend(pParse, p->pSub, pDup);
- if( p->pSub ){
- assert( ExprHasProperty(pExpr, EP_Static)==0 );
- ExprSetProperty(pExpr, EP_Static);
- sqlite3ExprDelete(pParse->db, pExpr);
- ExprClearProperty(pExpr, EP_Static);
- memset(pExpr, 0, sizeof(Expr));
-
- pExpr->op = TK_COLUMN;
- pExpr->iColumn = p->pSub->nExpr-1;
- pExpr->iTable = p->pWin->iEphCsr;
- }
-
- break;
- }
-
- default: /* no-op */
- break;
- }
-
- return WRC_Continue;
-}
-static int selectWindowRewriteSelectCb(Walker *pWalker, Select *pSelect){
- struct WindowRewrite *p = pWalker->u.pRewrite;
- Select *pSave = p->pSubSelect;
- if( pSave==pSelect ){
- return WRC_Continue;
- }else{
- p->pSubSelect = pSelect;
- sqlite3WalkSelect(pWalker, pSelect);
- p->pSubSelect = pSave;
- }
- return WRC_Prune;
-}
-
-
-/*
-** Iterate through each expression in expression-list pEList. For each:
-**
-** * TK_COLUMN,
-** * aggregate function, or
-** * window function with a Window object that is not a member of the
-** Window list passed as the second argument (pWin).
-**
-** Append the node to output expression-list (*ppSub). And replace it
-** with a TK_COLUMN that reads the (N-1)th element of table
-** pWin->iEphCsr, where N is the number of elements in (*ppSub) after
-** appending the new one.
-*/
-static void selectWindowRewriteEList(
- Parse *pParse,
- Window *pWin,
- SrcList *pSrc,
- ExprList *pEList, /* Rewrite expressions in this list */
- ExprList **ppSub /* IN/OUT: Sub-select expression-list */
-){
- Walker sWalker;
- WindowRewrite sRewrite;
-
- memset(&sWalker, 0, sizeof(Walker));
- memset(&sRewrite, 0, sizeof(WindowRewrite));
-
- sRewrite.pSub = *ppSub;
- sRewrite.pWin = pWin;
- sRewrite.pSrc = pSrc;
-
- sWalker.pParse = pParse;
- sWalker.xExprCallback = selectWindowRewriteExprCb;
- sWalker.xSelectCallback = selectWindowRewriteSelectCb;
- sWalker.u.pRewrite = &sRewrite;
-
- (void)sqlite3WalkExprList(&sWalker, pEList);
-
- *ppSub = sRewrite.pSub;
-}
-
-/*
-** Append a copy of each expression in expression-list pAppend to
-** expression list pList. Return a pointer to the result list.
-*/
-static ExprList *exprListAppendList(
- Parse *pParse, /* Parsing context */
- ExprList *pList, /* List to which to append. Might be NULL */
- ExprList *pAppend /* List of values to append. Might be NULL */
-){
- if( pAppend ){
- int i;
- int nInit = pList ? pList->nExpr : 0;
-    for(i=0; i<pAppend->nExpr; i++){
- Expr *pDup = sqlite3ExprDup(pParse->db, pAppend->a[i].pExpr, 0);
- pList = sqlite3ExprListAppend(pParse, pList, pDup);
- if( pList ) pList->a[nInit+i].sortOrder = pAppend->a[i].sortOrder;
- }
- }
- return pList;
-}
-
-/*
-** If the SELECT statement passed as the second argument does not invoke
-** any SQL window functions, this function is a no-op. Otherwise, it
-** rewrites the SELECT statement so that window function xStep functions
-** are invoked in the correct order as described under "SELECT REWRITING"
-** at the top of this file.
-*/
-SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
- int rc = SQLITE_OK;
- if( p->pWin ){
- Vdbe *v = sqlite3GetVdbe(pParse);
- sqlite3 *db = pParse->db;
- Select *pSub = 0; /* The subquery */
- SrcList *pSrc = p->pSrc;
- Expr *pWhere = p->pWhere;
- ExprList *pGroupBy = p->pGroupBy;
- Expr *pHaving = p->pHaving;
- ExprList *pSort = 0;
-
- ExprList *pSublist = 0; /* Expression list for sub-query */
- Window *pMWin = p->pWin; /* Master window object */
- Window *pWin; /* Window object iterator */
-
- p->pSrc = 0;
- p->pWhere = 0;
- p->pGroupBy = 0;
- p->pHaving = 0;
-
- /* Create the ORDER BY clause for the sub-select. This is the concatenation
- ** of the window PARTITION and ORDER BY clauses. Then, if this makes it
- ** redundant, remove the ORDER BY from the parent SELECT. */
- pSort = sqlite3ExprListDup(db, pMWin->pPartition, 0);
- pSort = exprListAppendList(pParse, pSort, pMWin->pOrderBy);
- if( pSort && p->pOrderBy ){
- if( sqlite3ExprListCompare(pSort, p->pOrderBy, -1)==0 ){
- sqlite3ExprListDelete(db, p->pOrderBy);
- p->pOrderBy = 0;
- }
- }
-
- /* Assign a cursor number for the ephemeral table used to buffer rows.
- ** The OpenEphemeral instruction is coded later, after it is known how
- ** many columns the table will have. */
- pMWin->iEphCsr = pParse->nTab++;
-
- selectWindowRewriteEList(pParse, pMWin, pSrc, p->pEList, &pSublist);
- selectWindowRewriteEList(pParse, pMWin, pSrc, p->pOrderBy, &pSublist);
- pMWin->nBufferCol = (pSublist ? pSublist->nExpr : 0);
-
- /* Append the PARTITION BY and ORDER BY expressions to the to the
- ** sub-select expression list. They are required to figure out where
- ** boundaries for partitions and sets of peer rows lie. */
- pSublist = exprListAppendList(pParse, pSublist, pMWin->pPartition);
- pSublist = exprListAppendList(pParse, pSublist, pMWin->pOrderBy);
-
- /* Append the arguments passed to each window function to the
- ** sub-select expression list. Also allocate two registers for each
- ** window function - one for the accumulator, another for interim
- ** results. */
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
- pSublist = exprListAppendList(pParse, pSublist, pWin->pOwner->x.pList);
- if( pWin->pFilter ){
- Expr *pFilter = sqlite3ExprDup(db, pWin->pFilter, 0);
- pSublist = sqlite3ExprListAppend(pParse, pSublist, pFilter);
- }
- pWin->regAccum = ++pParse->nMem;
- pWin->regResult = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
- }
-
- /* If there is no ORDER BY or PARTITION BY clause, and the window
- ** function accepts zero arguments, and there are no other columns
- ** selected (e.g. "SELECT row_number() OVER () FROM t1"), it is possible
- ** that pSublist is still NULL here. Add a constant expression here to
- ** keep everything legal in this case.
- */
- if( pSublist==0 ){
- pSublist = sqlite3ExprListAppend(pParse, 0,
- sqlite3ExprAlloc(db, TK_INTEGER, &sqlite3IntTokens[0], 0)
- );
- }
-
- pSub = sqlite3SelectNew(
- pParse, pSublist, pSrc, pWhere, pGroupBy, pHaving, pSort, 0, 0
- );
- p->pSrc = sqlite3SrcListAppend(db, 0, 0, 0);
- assert( p->pSrc || db->mallocFailed );
- if( p->pSrc ){
- p->pSrc->a[0].pSelect = pSub;
- sqlite3SrcListAssignCursors(pParse, p->pSrc);
- if( sqlite3ExpandSubquery(pParse, &p->pSrc->a[0]) ){
- rc = SQLITE_NOMEM;
- }else{
- pSub->selFlags |= SF_Expanded;
- p->selFlags &= ~SF_Aggregate;
- sqlite3SelectPrep(pParse, pSub, 0);
- }
-
- sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pMWin->iEphCsr, pSublist->nExpr);
- }else{
- sqlite3SelectDelete(db, pSub);
- }
- if( db->mallocFailed ) rc = SQLITE_NOMEM;
- }
-
- return rc;
-}
-
-/*
-** Free the Window object passed as the second argument.
-*/
-SQLITE_PRIVATE void sqlite3WindowDelete(sqlite3 *db, Window *p){
- if( p ){
- sqlite3ExprDelete(db, p->pFilter);
- sqlite3ExprListDelete(db, p->pPartition);
- sqlite3ExprListDelete(db, p->pOrderBy);
- sqlite3ExprDelete(db, p->pEnd);
- sqlite3ExprDelete(db, p->pStart);
- sqlite3DbFree(db, p->zName);
- sqlite3DbFree(db, p);
- }
-}
-
-/*
-** Free the linked list of Window objects starting at the second argument.
-*/
-SQLITE_PRIVATE void sqlite3WindowListDelete(sqlite3 *db, Window *p){
- while( p ){
- Window *pNext = p->pNextWin;
- sqlite3WindowDelete(db, p);
- p = pNext;
- }
-}
-
-/*
-** The argument expression is an PRECEDING or FOLLOWING offset. The
-** value should be a non-negative integer. If the value is not a
-** constant, change it to NULL. The fact that it is then a non-negative
-** integer will be caught later. But it is important not to leave
-** variable values in the expression tree.
-*/
-static Expr *sqlite3WindowOffsetExpr(Parse *pParse, Expr *pExpr){
- if( 0==sqlite3ExprIsConstant(pExpr) ){
- sqlite3ExprDelete(pParse->db, pExpr);
- pExpr = sqlite3ExprAlloc(pParse->db, TK_NULL, 0, 0);
- }
- return pExpr;
-}
-
-/*
-** Allocate and return a new Window object describing a Window Definition.
-*/
-SQLITE_PRIVATE Window *sqlite3WindowAlloc(
- Parse *pParse, /* Parsing context */
- int eType, /* Frame type. TK_RANGE or TK_ROWS */
- int eStart, /* Start type: CURRENT, PRECEDING, FOLLOWING, UNBOUNDED */
- Expr *pStart, /* Start window size if TK_PRECEDING or FOLLOWING */
- int eEnd, /* End type: CURRENT, FOLLOWING, TK_UNBOUNDED, PRECEDING */
- Expr *pEnd /* End window size if TK_FOLLOWING or PRECEDING */
-){
- Window *pWin = 0;
-
- /* Parser assures the following: */
- assert( eType==TK_RANGE || eType==TK_ROWS );
- assert( eStart==TK_CURRENT || eStart==TK_PRECEDING
- || eStart==TK_UNBOUNDED || eStart==TK_FOLLOWING );
- assert( eEnd==TK_CURRENT || eEnd==TK_FOLLOWING
- || eEnd==TK_UNBOUNDED || eEnd==TK_PRECEDING );
- assert( (eStart==TK_PRECEDING || eStart==TK_FOLLOWING)==(pStart!=0) );
- assert( (eEnd==TK_FOLLOWING || eEnd==TK_PRECEDING)==(pEnd!=0) );
-
-
- /* If a frame is declared "RANGE" (not "ROWS"), then it may not use
-  ** either "<expr> PRECEDING" or "<expr> FOLLOWING".
- */
- if( eType==TK_RANGE && (pStart!=0 || pEnd!=0) ){
- sqlite3ErrorMsg(pParse, "RANGE must use only UNBOUNDED or CURRENT ROW");
- goto windowAllocErr;
- }
-
- /* Additionally, the
- ** starting boundary type may not occur earlier in the following list than
- ** the ending boundary type:
- **
- ** UNBOUNDED PRECEDING
-  **     <expr> PRECEDING
- ** CURRENT ROW
-  **     <expr> FOLLOWING
- ** UNBOUNDED FOLLOWING
- **
- ** The parser ensures that "UNBOUNDED PRECEDING" cannot be used as an ending
- ** boundary, and than "UNBOUNDED FOLLOWING" cannot be used as a starting
- ** frame boundary.
- */
- if( (eStart==TK_CURRENT && eEnd==TK_PRECEDING)
- || (eStart==TK_FOLLOWING && (eEnd==TK_PRECEDING || eEnd==TK_CURRENT))
- ){
- sqlite3ErrorMsg(pParse, "unsupported frame delimiter for ROWS");
- goto windowAllocErr;
- }
-
- pWin = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
- if( pWin==0 ) goto windowAllocErr;
- pWin->eType = eType;
- pWin->eStart = eStart;
- pWin->eEnd = eEnd;
- pWin->pEnd = sqlite3WindowOffsetExpr(pParse, pEnd);
- pWin->pStart = sqlite3WindowOffsetExpr(pParse, pStart);
- return pWin;
-
-windowAllocErr:
- sqlite3ExprDelete(pParse->db, pEnd);
- sqlite3ExprDelete(pParse->db, pStart);
- return 0;
-}
-
-/*
-** Attach window object pWin to expression p.
-*/
-SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){
- if( p ){
- /* This routine is only called for the parser. If pWin was not
- ** allocated due to an OOM, then the parser would fail before ever
- ** invoking this routine */
- if( ALWAYS(pWin) ){
- p->pWin = pWin;
- pWin->pOwner = p;
- if( p->flags & EP_Distinct ){
- sqlite3ErrorMsg(pParse,
- "DISTINCT is not supported for window functions");
- }
- }
- }else{
- sqlite3WindowDelete(pParse->db, pWin);
- }
-}
-
-/*
-** Return 0 if the two window objects are identical, or non-zero otherwise.
-** Identical window objects can be processed in a single scan.
-*/
-SQLITE_PRIVATE int sqlite3WindowCompare(Parse *pParse, Window *p1, Window *p2){
- if( p1->eType!=p2->eType ) return 1;
- if( p1->eStart!=p2->eStart ) return 1;
- if( p1->eEnd!=p2->eEnd ) return 1;
- if( sqlite3ExprCompare(pParse, p1->pStart, p2->pStart, -1) ) return 1;
- if( sqlite3ExprCompare(pParse, p1->pEnd, p2->pEnd, -1) ) return 1;
- if( sqlite3ExprListCompare(p1->pPartition, p2->pPartition, -1) ) return 1;
- if( sqlite3ExprListCompare(p1->pOrderBy, p2->pOrderBy, -1) ) return 1;
- return 0;
-}
-
-
-/*
-** This is called by code in select.c before it calls sqlite3WhereBegin()
-** to begin iterating through the sub-query results. It is used to allocate
-** and initialize registers and cursors used by sqlite3WindowCodeStep().
-*/
-SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Window *pMWin){
- Window *pWin;
- Vdbe *v = sqlite3GetVdbe(pParse);
- int nPart = (pMWin->pPartition ? pMWin->pPartition->nExpr : 0);
- nPart += (pMWin->pOrderBy ? pMWin->pOrderBy->nExpr : 0);
- if( nPart ){
- pMWin->regPart = pParse->nMem+1;
- pParse->nMem += nPart;
- sqlite3VdbeAddOp3(v, OP_Null, 0, pMWin->regPart, pMWin->regPart+nPart-1);
- }
-
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *p = pWin->pFunc;
- if( (p->funcFlags & SQLITE_FUNC_MINMAX) && pWin->eStart!=TK_UNBOUNDED ){
- /* The inline versions of min() and max() require a single ephemeral
- ** table and 3 registers. The registers are used as follows:
- **
- ** regApp+0: slot to copy min()/max() argument to for MakeRecord
- ** regApp+1: integer value used to ensure keys are unique
- ** regApp+2: output of MakeRecord
- */
- ExprList *pList = pWin->pOwner->x.pList;
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pList, 0, 0);
- pWin->csrApp = pParse->nTab++;
- pWin->regApp = pParse->nMem+1;
- pParse->nMem += 3;
- if( pKeyInfo && pWin->pFunc->zName[1]=='i' ){
- assert( pKeyInfo->aSortOrder[0]==0 );
- pKeyInfo->aSortOrder[0] = 1;
- }
- sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pWin->csrApp, 2);
- sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
- }
- else if( p->zName==nth_valueName || p->zName==first_valueName ){
- /* Allocate two registers at pWin->regApp. These will be used to
- ** store the start and end index of the current frame. */
- assert( pMWin->iEphCsr );
- pWin->regApp = pParse->nMem+1;
- pWin->csrApp = pParse->nTab++;
- pParse->nMem += 2;
- sqlite3VdbeAddOp2(v, OP_OpenDup, pWin->csrApp, pMWin->iEphCsr);
- }
- else if( p->zName==leadName || p->zName==lagName ){
- assert( pMWin->iEphCsr );
- pWin->csrApp = pParse->nTab++;
- sqlite3VdbeAddOp2(v, OP_OpenDup, pWin->csrApp, pMWin->iEphCsr);
- }
- }
-}
-
-/*
-** A "PRECEDING <expr>" (eCond==0) or "FOLLOWING <expr>" (eCond==1) or the
-** value of the second argument to nth_value() (eCond==2) has just been
-** evaluated and the result left in register reg. This function generates VM
-** code to check that the value is a non-negative integer and throws an
-** exception if it is not.
-*/
-static void windowCheckIntValue(Parse *pParse, int reg, int eCond){
- static const char *azErr[] = {
- "frame starting offset must be a non-negative integer",
- "frame ending offset must be a non-negative integer",
- "second argument to nth_value must be a positive integer"
- };
- static int aOp[] = { OP_Ge, OP_Ge, OP_Gt };
- Vdbe *v = sqlite3GetVdbe(pParse);
- int regZero = sqlite3GetTempReg(pParse);
- assert( eCond==0 || eCond==1 || eCond==2 );
- sqlite3VdbeAddOp2(v, OP_Integer, 0, regZero);
- sqlite3VdbeAddOp2(v, OP_MustBeInt, reg, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverageIf(v, eCond==0);
- VdbeCoverageIf(v, eCond==1);
- VdbeCoverageIf(v, eCond==2);
- sqlite3VdbeAddOp3(v, aOp[eCond], regZero, sqlite3VdbeCurrentAddr(v)+2, reg);
- VdbeCoverageNeverNullIf(v, eCond==0);
- VdbeCoverageNeverNullIf(v, eCond==1);
- VdbeCoverageNeverNullIf(v, eCond==2);
- sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_ERROR, OE_Abort);
- sqlite3VdbeAppendP4(v, (void*)azErr[eCond], P4_STATIC);
- sqlite3ReleaseTempReg(pParse, regZero);
-}
-
-/*
-** Return the number of arguments passed to the window-function associated
-** with the object passed as the only argument to this function.
-*/
-static int windowArgCount(Window *pWin){
- ExprList *pList = pWin->pOwner->x.pList;
- return (pList ? pList->nExpr : 0);
-}
-
-/*
-** Generate VM code to invoke either xStep() (if bInverse is 0) or
-** xInverse (if bInverse is non-zero) for each window function in the
-** linked list starting at pMWin. Or, for built-in window functions
-** that do not use the standard function API, generate the required
-** inline VM code.
-**
-** If argument csr is greater than or equal to 0, then argument reg is
-** the first register in an array of registers guaranteed to be large
-** enough to hold the array of arguments for each function. In this case
-** the arguments are extracted from the current row of csr into the
-** array of registers before invoking OP_AggStep or OP_AggInverse
-**
-** Or, if csr is less than zero, then the array of registers at reg is
-** already populated with all columns from the current row of the sub-query.
-**
-** If argument regPartSize is non-zero, then it is a register containing the
-** number of rows in the current partition.
-*/
-static void windowAggStep(
- Parse *pParse,
- Window *pMWin, /* Linked list of window functions */
- int csr, /* Read arguments from this cursor */
- int bInverse, /* True to invoke xInverse instead of xStep */
- int reg, /* Array of registers */
- int regPartSize /* Register containing size of partition */
-){
- Vdbe *v = sqlite3GetVdbe(pParse);
- Window *pWin;
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- int flags = pWin->pFunc->funcFlags;
- int regArg;
- int nArg = windowArgCount(pWin);
-
- if( csr>=0 ){
- int i;
-      for(i=0; i<nArg; i++){
-        sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+i, reg+i);
- }
- regArg = reg;
- if( flags & SQLITE_FUNC_WINDOW_SIZE ){
- if( nArg==0 ){
- regArg = regPartSize;
- }else{
- sqlite3VdbeAddOp2(v, OP_SCopy, regPartSize, reg+nArg);
- }
- nArg++;
- }
- }else{
- assert( !(flags & SQLITE_FUNC_WINDOW_SIZE) );
- regArg = reg + pWin->iArgCol;
- }
-
- if( (pWin->pFunc->funcFlags & SQLITE_FUNC_MINMAX)
- && pWin->eStart!=TK_UNBOUNDED
- ){
- int addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regArg);
- VdbeCoverage(v);
- if( bInverse==0 ){
- sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1, 1);
- sqlite3VdbeAddOp2(v, OP_SCopy, regArg, pWin->regApp);
- sqlite3VdbeAddOp3(v, OP_MakeRecord, pWin->regApp, 2, pWin->regApp+2);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, pWin->csrApp, pWin->regApp+2);
- }else{
- sqlite3VdbeAddOp4Int(v, OP_SeekGE, pWin->csrApp, 0, regArg, 1);
- VdbeCoverageNeverTaken(v);
- sqlite3VdbeAddOp1(v, OP_Delete, pWin->csrApp);
- sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
- }
- sqlite3VdbeJumpHere(v, addrIsNull);
- }else if( pWin->regApp ){
- assert( pWin->pFunc->zName==nth_valueName
- || pWin->pFunc->zName==first_valueName
- );
- assert( bInverse==0 || bInverse==1 );
- sqlite3VdbeAddOp2(v, OP_AddImm, pWin->regApp+1-bInverse, 1);
- }else if( pWin->pFunc->zName==leadName
- || pWin->pFunc->zName==lagName
- ){
- /* no-op */
- }else{
- int addrIf = 0;
- if( pWin->pFilter ){
- int regTmp;
- assert( nArg==0 || nArg==pWin->pOwner->x.pList->nExpr );
- assert( nArg || pWin->pOwner->x.pList==0 );
- if( csr>0 ){
- regTmp = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol+nArg,regTmp);
- }else{
- regTmp = regArg + nArg;
- }
- addrIf = sqlite3VdbeAddOp3(v, OP_IfNot, regTmp, 0, 1);
- VdbeCoverage(v);
- if( csr>0 ){
- sqlite3ReleaseTempReg(pParse, regTmp);
- }
- }
- if( pWin->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
- CollSeq *pColl;
- assert( nArg>0 );
- pColl = sqlite3ExprNNCollSeq(pParse, pWin->pOwner->x.pList->a[0].pExpr);
- sqlite3VdbeAddOp4(v, OP_CollSeq, 0,0,0, (const char*)pColl, P4_COLLSEQ);
- }
- sqlite3VdbeAddOp3(v, bInverse? OP_AggInverse : OP_AggStep,
- bInverse, regArg, pWin->regAccum);
- sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
- sqlite3VdbeChangeP5(v, (u8)nArg);
- if( addrIf ) sqlite3VdbeJumpHere(v, addrIf);
- }
- }
-}
-
-/*
-** Generate VM code to invoke either xValue() (bFinal==0) or xFinalize()
-** (bFinal==1) for each window function in the linked list starting at
-** pMWin. Or, for built-in window-functions that do not use the standard
-** API, generate the equivalent VM code.
-*/
-static void windowAggFinal(Parse *pParse, Window *pMWin, int bFinal){
- Vdbe *v = sqlite3GetVdbe(pParse);
- Window *pWin;
-
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- if( (pWin->pFunc->funcFlags & SQLITE_FUNC_MINMAX)
- && pWin->eStart!=TK_UNBOUNDED
- ){
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
- sqlite3VdbeAddOp1(v, OP_Last, pWin->csrApp);
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_Column, pWin->csrApp, 0, pWin->regResult);
- sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
- if( bFinal ){
- sqlite3VdbeAddOp1(v, OP_ResetSorter, pWin->csrApp);
- }
- }else if( pWin->regApp ){
- }else{
- if( bFinal ){
- sqlite3VdbeAddOp2(v, OP_AggFinal, pWin->regAccum, windowArgCount(pWin));
- sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
- sqlite3VdbeAddOp2(v, OP_Copy, pWin->regAccum, pWin->regResult);
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
- }else{
- sqlite3VdbeAddOp3(v, OP_AggValue, pWin->regAccum, windowArgCount(pWin),
- pWin->regResult);
- sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
- }
- }
- }
-}
-
-/*
-** This function generates VM code to invoke the sub-routine at address
-** lblFlushPart once for each partition with the entire partition cached in
-** the Window.iEphCsr temp table.
-*/
-static void windowPartitionCache(
- Parse *pParse,
- Select *p, /* The rewritten SELECT statement */
- WhereInfo *pWInfo, /* WhereInfo to call WhereEnd() on */
- int regFlushPart, /* Register to use with Gosub lblFlushPart */
- int lblFlushPart, /* Subroutine to Gosub to */
- int *pRegSize /* OUT: Register containing partition size */
-){
- Window *pMWin = p->pWin;
- Vdbe *v = sqlite3GetVdbe(pParse);
- int iSubCsr = p->pSrc->a[0].iCursor;
- int nSub = p->pSrc->a[0].pTab->nCol;
- int k;
-
- int reg = pParse->nMem+1;
- int regRecord = reg+nSub;
- int regRowid = regRecord+1;
-
- *pRegSize = regRowid;
- pParse->nMem += nSub + 2;
-
- /* Load the column values for the row returned by the sub-select
- ** into an array of registers starting at reg. */
-  for(k=0; k<nSub; k++){
-    sqlite3VdbeAddOp3(v, OP_Column, iSubCsr, k, reg+k);
-  }
-  sqlite3VdbeAddOp3(v, OP_MakeRecord, reg, nSub, regRecord);
-
-  /* Check if this is the start of a new partition. If so, call the
-  ** flush_partition sub-routine.  */
-  if( pMWin->pPartition ){
- int addr;
- ExprList *pPart = pMWin->pPartition;
- int nPart = pPart->nExpr;
- int regNewPart = reg + pMWin->nBufferCol;
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pPart, 0, 0);
-
- addr = sqlite3VdbeAddOp3(v, OP_Compare, regNewPart, pMWin->regPart,nPart);
- sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
- sqlite3VdbeAddOp3(v, OP_Jump, addr+2, addr+4, addr+2);
- VdbeCoverageEqNe(v);
- sqlite3VdbeAddOp3(v, OP_Copy, regNewPart, pMWin->regPart, nPart-1);
- sqlite3VdbeAddOp2(v, OP_Gosub, regFlushPart, lblFlushPart);
- VdbeComment((v, "call flush_partition"));
- }
-
- /* Buffer the current row in the ephemeral table. */
- sqlite3VdbeAddOp2(v, OP_NewRowid, pMWin->iEphCsr, regRowid);
- sqlite3VdbeAddOp3(v, OP_Insert, pMWin->iEphCsr, regRecord, regRowid);
-
- /* End of the input loop */
- sqlite3WhereEnd(pWInfo);
-
- /* Invoke "flush_partition" to deal with the final (or only) partition */
- sqlite3VdbeAddOp2(v, OP_Gosub, regFlushPart, lblFlushPart);
- VdbeComment((v, "call flush_partition"));
-}
-
-/*
-** Invoke the sub-routine at regGosub (generated by code in select.c) to
-** return the current row of Window.iEphCsr. If all window functions are
-** aggregate window functions that use the standard API, a single
-** OP_Gosub instruction is all that this routine generates. Extra VM code
-** for per-row processing is only generated for the following built-in window
-** functions:
-**
-** nth_value()
-** first_value()
-** lag()
-** lead()
-*/
-static void windowReturnOneRow(
- Parse *pParse,
- Window *pMWin,
- int regGosub,
- int addrGosub
-){
- Vdbe *v = sqlite3GetVdbe(pParse);
- Window *pWin;
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
- if( pFunc->zName==nth_valueName
- || pFunc->zName==first_valueName
- ){
- int csr = pWin->csrApp;
- int lbl = sqlite3VdbeMakeLabel(v);
- int tmpReg = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
-
- if( pFunc->zName==nth_valueName ){
- sqlite3VdbeAddOp3(v, OP_Column, pMWin->iEphCsr, pWin->iArgCol+1,tmpReg);
- windowCheckIntValue(pParse, tmpReg, 2);
- }else{
- sqlite3VdbeAddOp2(v, OP_Integer, 1, tmpReg);
- }
- sqlite3VdbeAddOp3(v, OP_Add, tmpReg, pWin->regApp, tmpReg);
- sqlite3VdbeAddOp3(v, OP_Gt, pWin->regApp+1, lbl, tmpReg);
- VdbeCoverageNeverNull(v);
- sqlite3VdbeAddOp3(v, OP_SeekRowid, csr, 0, tmpReg);
- VdbeCoverageNeverTaken(v);
- sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol, pWin->regResult);
- sqlite3VdbeResolveLabel(v, lbl);
- sqlite3ReleaseTempReg(pParse, tmpReg);
- }
- else if( pFunc->zName==leadName || pFunc->zName==lagName ){
- int nArg = pWin->pOwner->x.pList->nExpr;
- int iEph = pMWin->iEphCsr;
- int csr = pWin->csrApp;
- int lbl = sqlite3VdbeMakeLabel(v);
- int tmpReg = sqlite3GetTempReg(pParse);
-
- if( nArg<3 ){
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
- }else{
- sqlite3VdbeAddOp3(v, OP_Column, iEph, pWin->iArgCol+2, pWin->regResult);
- }
- sqlite3VdbeAddOp2(v, OP_Rowid, iEph, tmpReg);
- if( nArg<2 ){
- int val = (pFunc->zName==leadName ? 1 : -1);
- sqlite3VdbeAddOp2(v, OP_AddImm, tmpReg, val);
- }else{
- int op = (pFunc->zName==leadName ? OP_Add : OP_Subtract);
- int tmpReg2 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_Column, iEph, pWin->iArgCol+1, tmpReg2);
- sqlite3VdbeAddOp3(v, op, tmpReg2, tmpReg, tmpReg);
- sqlite3ReleaseTempReg(pParse, tmpReg2);
- }
-
- sqlite3VdbeAddOp3(v, OP_SeekRowid, csr, lbl, tmpReg);
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_Column, csr, pWin->iArgCol, pWin->regResult);
- sqlite3VdbeResolveLabel(v, lbl);
- sqlite3ReleaseTempReg(pParse, tmpReg);
- }
- }
- sqlite3VdbeAddOp2(v, OP_Gosub, regGosub, addrGosub);
-}
-
-/*
-** Invoke the code generated by windowReturnOneRow() and, optionally, the
-** xInverse() function for each window function, for one or more rows
-** from the Window.iEphCsr temp table. This routine generates VM code
-** similar to:
-**
-** while( regCtr>0 ){
-** regCtr--;
-** windowReturnOneRow()
-** if( bInverse ){
-** AggInverse
-** }
-** Next (Window.iEphCsr)
-** }
-*/
-static void windowReturnRows(
- Parse *pParse,
- Window *pMWin, /* List of window functions */
- int regCtr, /* Register containing number of rows */
- int regGosub, /* Register for Gosub addrGosub */
- int addrGosub, /* Address of sub-routine for ReturnOneRow */
- int regInvArg, /* Array of registers for xInverse args */
- int regInvSize /* Register containing size of partition */
-){
- int addr;
- Vdbe *v = sqlite3GetVdbe(pParse);
- windowAggFinal(pParse, pMWin, 0);
- addr = sqlite3VdbeAddOp3(v, OP_IfPos, regCtr, sqlite3VdbeCurrentAddr(v)+2 ,1);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Goto, 0, 0);
- windowReturnOneRow(pParse, pMWin, regGosub, addrGosub);
- if( regInvArg ){
- windowAggStep(pParse, pMWin, pMWin->iEphCsr, 1, regInvArg, regInvSize);
- }
- sqlite3VdbeAddOp2(v, OP_Next, pMWin->iEphCsr, addr);
- VdbeCoverage(v);
- sqlite3VdbeJumpHere(v, addr+1); /* The OP_Goto */
-}
-
-/*
-** Generate code to set the accumulator register for each window function
-** in the linked list passed as the second argument to NULL. And perform
-** any equivalent initialization required by any built-in window functions
-** in the list.
-*/
-static int windowInitAccum(Parse *pParse, Window *pMWin){
- Vdbe *v = sqlite3GetVdbe(pParse);
- int regArg;
- int nArg = 0;
- Window *pWin;
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
- sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
- nArg = MAX(nArg, windowArgCount(pWin));
- if( pFunc->zName==nth_valueName
- || pFunc->zName==first_valueName
- ){
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp);
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
- }
-
- if( (pFunc->funcFlags & SQLITE_FUNC_MINMAX) && pWin->csrApp ){
- assert( pWin->eStart!=TK_UNBOUNDED );
- sqlite3VdbeAddOp1(v, OP_ResetSorter, pWin->csrApp);
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pWin->regApp+1);
- }
- }
- regArg = pParse->nMem+1;
- pParse->nMem += nArg;
- return regArg;
-}
-
-
-/*
-** This function does the work of sqlite3WindowCodeStep() for all "ROWS"
-** window frame types except for "BETWEEN UNBOUNDED PRECEDING AND CURRENT
-** ROW". Pseudo-code for each follows.
-**
-**   ROWS BETWEEN <expr1> PRECEDING AND <expr2> FOLLOWING
-**
-** ...
-** if( new partition ){
-** Gosub flush_partition
-** }
-** Insert (record in eph-table)
-** sqlite3WhereEnd()
-** Gosub flush_partition
-**
-** flush_partition:
-** Once {
-** OpenDup (iEphCsr -> csrStart)
-** OpenDup (iEphCsr -> csrEnd)
-** }
-**     regStart = <expr1>            // PRECEDING expression
-**     regEnd = <expr2>              // FOLLOWING expression
-** if( regStart<0 || regEnd<0 ){ error! }
-** Rewind (csr,csrStart,csrEnd) // if EOF goto flush_partition_done
-** Next(csrEnd) // if EOF skip Aggstep
-** Aggstep (csrEnd)
-** if( (regEnd--)<=0 ){
-** AggFinal (xValue)
-** Gosub addrGosub
-** Next(csr) // if EOF goto flush_partition_done
-** if( (regStart--)<=0 ){
-** AggInverse (csrStart)
-** Next(csrStart)
-** }
-** }
-** flush_partition_done:
-** ResetSorter (csr)
-** Return
-**
-**   ROWS BETWEEN <expr> PRECEDING AND CURRENT ROW
-**   ROWS BETWEEN CURRENT ROW AND <expr> FOLLOWING
-**   ROWS BETWEEN UNBOUNDED PRECEDING AND <expr> FOLLOWING
-**
-** These are similar to the above. For "CURRENT ROW", intialize the
-** register to 0. For "UNBOUNDED PRECEDING" to infinity.
-**
-**   ROWS BETWEEN <expr> PRECEDING AND UNBOUNDED FOLLOWING
-** ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
-**
-** Rewind (csr,csrStart,csrEnd) // if EOF goto flush_partition_done
-** while( 1 ){
-** Next(csrEnd) // Exit while(1) at EOF
-** Aggstep (csrEnd)
-** }
-** while( 1 ){
-** AggFinal (xValue)
-** Gosub addrGosub
-** Next(csr) // if EOF goto flush_partition_done
-** if( (regStart--)<=0 ){
-** AggInverse (csrStart)
-** Next(csrStart)
-** }
-** }
-**
-** For the "CURRENT ROW AND UNBOUNDED FOLLOWING" case, the final if()
-** condition is always true (as if regStart were initialized to 0).
-**
-** RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
-**
-** This is the only RANGE case handled by this routine. It modifies the
-** second while( 1 ) loop in "ROWS BETWEEN CURRENT ... UNBOUNDED..." to
-** be:
-**
-** while( 1 ){
-** AggFinal (xValue)
-** while( 1 ){
-** regPeer++
-** Gosub addrGosub
-** Next(csr) // if EOF goto flush_partition_done
-** if( new peer ) break;
-** }
-** while( (regPeer--)>0 ){
-** AggInverse (csrStart)
-** Next(csrStart)
-** }
-** }
-**
-**   ROWS BETWEEN <expr1> FOLLOWING AND <expr2> FOLLOWING
-**
-** regEnd = regEnd - regStart
-** Rewind (csr,csrStart,csrEnd) // if EOF goto flush_partition_done
-** Aggstep (csrEnd)
-** Next(csrEnd) // if EOF fall-through
-** if( (regEnd--)<=0 ){
-** if( (regStart--)<=0 ){
-** AggFinal (xValue)
-** Gosub addrGosub
-** Next(csr) // if EOF goto flush_partition_done
-** }
-** AggInverse (csrStart)
-** Next (csrStart)
-** }
-**
-**   ROWS BETWEEN <expr1> PRECEDING AND <expr2> PRECEDING
-**
-** Replace the bit after "Rewind" in the above with:
-**
-** if( (regEnd--)<=0 ){
-** AggStep (csrEnd)
-** Next (csrEnd)
-** }
-** AggFinal (xValue)
-** Gosub addrGosub
-** Next(csr) // if EOF goto flush_partition_done
-** if( (regStart--)<=0 ){
-** AggInverse (csr2)
-** Next (csr2)
-** }
-**
-*/
-static void windowCodeRowExprStep(
- Parse *pParse,
- Select *p,
- WhereInfo *pWInfo,
- int regGosub,
- int addrGosub
-){
- Window *pMWin = p->pWin;
- Vdbe *v = sqlite3GetVdbe(pParse);
- int regFlushPart; /* Register for "Gosub flush_partition" */
- int lblFlushPart; /* Label for "Gosub flush_partition" */
- int lblFlushDone; /* Label for "Gosub flush_partition_done" */
-
- int regArg;
- int addr;
- int csrStart = pParse->nTab++;
- int csrEnd = pParse->nTab++;
- int regStart; /* Value of PRECEDING */
- int regEnd; /* Value of FOLLOWING */
- int addrGoto;
- int addrTop;
- int addrIfPos1 = 0;
- int addrIfPos2 = 0;
- int regSize = 0;
-
- assert( pMWin->eStart==TK_PRECEDING
- || pMWin->eStart==TK_CURRENT
- || pMWin->eStart==TK_FOLLOWING
- || pMWin->eStart==TK_UNBOUNDED
- );
- assert( pMWin->eEnd==TK_FOLLOWING
- || pMWin->eEnd==TK_CURRENT
- || pMWin->eEnd==TK_UNBOUNDED
- || pMWin->eEnd==TK_PRECEDING
- );
-
- /* Allocate register and label for the "flush_partition" sub-routine. */
- regFlushPart = ++pParse->nMem;
- lblFlushPart = sqlite3VdbeMakeLabel(v);
- lblFlushDone = sqlite3VdbeMakeLabel(v);
-
- regStart = ++pParse->nMem;
- regEnd = ++pParse->nMem;
-
-  windowPartitionCache(pParse, p, pWInfo, regFlushPart, lblFlushPart, &regSize);
-
- addrGoto = sqlite3VdbeAddOp0(v, OP_Goto);
-
- /* Start of "flush_partition" */
- sqlite3VdbeResolveLabel(v, lblFlushPart);
- sqlite3VdbeAddOp2(v, OP_Once, 0, sqlite3VdbeCurrentAddr(v)+3);
- VdbeCoverage(v);
- VdbeComment((v, "Flush_partition subroutine"));
- sqlite3VdbeAddOp2(v, OP_OpenDup, csrStart, pMWin->iEphCsr);
- sqlite3VdbeAddOp2(v, OP_OpenDup, csrEnd, pMWin->iEphCsr);
-
- /* If either regStart or regEnd are not non-negative integers, throw
- ** an exception. */
- if( pMWin->pStart ){
- sqlite3ExprCode(pParse, pMWin->pStart, regStart);
- windowCheckIntValue(pParse, regStart, 0);
- }
- if( pMWin->pEnd ){
- sqlite3ExprCode(pParse, pMWin->pEnd, regEnd);
- windowCheckIntValue(pParse, regEnd, 1);
- }
-
-  /* If this is "ROWS <expr1> FOLLOWING AND ROWS <expr2> FOLLOWING", do:
-  **
-  **   if( regEnd<regStart ){
-  **     regStart = regSize;
-  **   }
-  **   regEnd = regEnd - regStart;
-  */
-  if( pMWin->pEnd && pMWin->eStart==TK_FOLLOWING ){
- assert( pMWin->pStart!=0 );
- assert( pMWin->eEnd==TK_FOLLOWING );
- sqlite3VdbeAddOp3(v, OP_Ge, regStart, sqlite3VdbeCurrentAddr(v)+2, regEnd);
- VdbeCoverageNeverNull(v);
- sqlite3VdbeAddOp2(v, OP_Copy, regSize, regStart);
- sqlite3VdbeAddOp3(v, OP_Subtract, regStart, regEnd, regEnd);
- }
-
- if( pMWin->pStart && pMWin->eEnd==TK_PRECEDING ){
- assert( pMWin->pEnd!=0 );
- assert( pMWin->eStart==TK_PRECEDING );
- sqlite3VdbeAddOp3(v, OP_Le, regStart, sqlite3VdbeCurrentAddr(v)+3, regEnd);
- VdbeCoverageNeverNull(v);
- sqlite3VdbeAddOp2(v, OP_Copy, regSize, regStart);
- sqlite3VdbeAddOp2(v, OP_Copy, regSize, regEnd);
- }
-
- /* Initialize the accumulator register for each window function to NULL */
- regArg = windowInitAccum(pParse, pMWin);
-
- sqlite3VdbeAddOp2(v, OP_Rewind, pMWin->iEphCsr, lblFlushDone);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, csrStart, lblFlushDone);
- VdbeCoverageNeverTaken(v);
- sqlite3VdbeChangeP5(v, 1);
- sqlite3VdbeAddOp2(v, OP_Rewind, csrEnd, lblFlushDone);
- VdbeCoverageNeverTaken(v);
- sqlite3VdbeChangeP5(v, 1);
-
- /* Invoke AggStep function for each window function using the row that
- ** csrEnd currently points to. Or, if csrEnd is already at EOF,
- ** do nothing. */
- addrTop = sqlite3VdbeCurrentAddr(v);
- if( pMWin->eEnd==TK_PRECEDING ){
- addrIfPos1 = sqlite3VdbeAddOp3(v, OP_IfPos, regEnd, 0 , 1);
- VdbeCoverage(v);
- }
- sqlite3VdbeAddOp2(v, OP_Next, csrEnd, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
- addr = sqlite3VdbeAddOp0(v, OP_Goto);
- windowAggStep(pParse, pMWin, csrEnd, 0, regArg, regSize);
- if( pMWin->eEnd==TK_UNBOUNDED ){
- sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop);
- sqlite3VdbeJumpHere(v, addr);
- addrTop = sqlite3VdbeCurrentAddr(v);
- }else{
- sqlite3VdbeJumpHere(v, addr);
- if( pMWin->eEnd==TK_PRECEDING ){
- sqlite3VdbeJumpHere(v, addrIfPos1);
- }
- }
-
- if( pMWin->eEnd==TK_FOLLOWING ){
- addrIfPos1 = sqlite3VdbeAddOp3(v, OP_IfPos, regEnd, 0 , 1);
- VdbeCoverage(v);
- }
- if( pMWin->eStart==TK_FOLLOWING ){
- addrIfPos2 = sqlite3VdbeAddOp3(v, OP_IfPos, regStart, 0 , 1);
- VdbeCoverage(v);
- }
- windowAggFinal(pParse, pMWin, 0);
- windowReturnOneRow(pParse, pMWin, regGosub, addrGosub);
- sqlite3VdbeAddOp2(v, OP_Next, pMWin->iEphCsr, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Goto, 0, lblFlushDone);
- if( pMWin->eStart==TK_FOLLOWING ){
- sqlite3VdbeJumpHere(v, addrIfPos2);
- }
-
- if( pMWin->eStart==TK_CURRENT
- || pMWin->eStart==TK_PRECEDING
- || pMWin->eStart==TK_FOLLOWING
- ){
- int lblSkipInverse = sqlite3VdbeMakeLabel(v);;
- if( pMWin->eStart==TK_PRECEDING ){
- sqlite3VdbeAddOp3(v, OP_IfPos, regStart, lblSkipInverse, 1);
- VdbeCoverage(v);
- }
- if( pMWin->eStart==TK_FOLLOWING ){
- sqlite3VdbeAddOp2(v, OP_Next, csrStart, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Goto, 0, lblSkipInverse);
- }else{
- sqlite3VdbeAddOp2(v, OP_Next, csrStart, sqlite3VdbeCurrentAddr(v)+1);
- VdbeCoverageAlwaysTaken(v);
- }
- windowAggStep(pParse, pMWin, csrStart, 1, regArg, regSize);
- sqlite3VdbeResolveLabel(v, lblSkipInverse);
- }
- if( pMWin->eEnd==TK_FOLLOWING ){
- sqlite3VdbeJumpHere(v, addrIfPos1);
- }
- sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop);
-
- /* flush_partition_done: */
- sqlite3VdbeResolveLabel(v, lblFlushDone);
- sqlite3VdbeAddOp1(v, OP_ResetSorter, pMWin->iEphCsr);
- sqlite3VdbeAddOp1(v, OP_Return, regFlushPart);
- VdbeComment((v, "end flush_partition subroutine"));
-
- /* Jump to here to skip over flush_partition */
- sqlite3VdbeJumpHere(v, addrGoto);
-}
-
-/*
-** This function does the work of sqlite3WindowCodeStep() for cases that
-** would normally be handled by windowCodeDefaultStep() when there are
-** one or more built-in window-functions that require the entire partition
-** to be cached in a temp table before any rows can be returned. Additionally.
-** "RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING" is always handled by
-** this function.
-**
-** Pseudo-code corresponding to the VM code generated by this function
-** for each type of window follows.
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-**
-** flush_partition:
-** Once {
-** OpenDup (iEphCsr -> csrLead)
-** }
-** Integer ctr 0
-** foreach row (csrLead){
-** if( new peer ){
-** AggFinal (xValue)
-**         for(i=0; i<ctr; i++){
-**           Gosub addrGosub
-**           Next iEphCsr
-**         }
-**         Integer ctr 0
-**       }
-**       AggStep (csrLead)
-**       Incr ctr
-**     }
-**
-**     AggFinal (xFinalize)
-**     for(i=0; i<ctr; i++){
-**       Gosub addrGosub
-**       Next iEphCsr
-**     }
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
-**
-**   flush_partition:
-**     Once {
-**       OpenDup (iEphCsr -> csrLead)
-**     }
-** foreach row (csrLead) {
-** AggStep (csrLead)
-** }
-** foreach row (iEphCsr) {
-** Gosub addrGosub
-** }
-**
-** RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
-**
-** flush_partition:
-** Once {
-** OpenDup (iEphCsr -> csrLead)
-** }
-** foreach row (csrLead){
-** AggStep (csrLead)
-** }
-** Rewind (csrLead)
-** Integer ctr 0
-** foreach row (csrLead){
-** if( new peer ){
-** AggFinal (xValue)
-**         for(i=0; i<ctr; i++){
-**           Gosub addrGosub
-**           Next iEphCsr
-**         }
-**         Integer ctr 0
-**       }
-**       AggInverse (csrLead)
-**       Incr ctr
-**     }
-**
-**     AggFinal (xFinalize)
-**     for(i=0; i<ctr; i++){
-**       Gosub addrGosub
-**       Next iEphCsr
-**     }
-*/
-static void windowCodeCacheStep(
-  Parse *pParse,
-  Select *p,
-  WhereInfo *pWInfo,
-  int regGosub,
-  int addrGosub
-){
-  Window *pMWin = p->pWin;
- Vdbe *v = sqlite3GetVdbe(pParse);
- int k;
- int addr;
- ExprList *pPart = pMWin->pPartition;
- ExprList *pOrderBy = pMWin->pOrderBy;
- int nPeer = pOrderBy ? pOrderBy->nExpr : 0;
- int regNewPeer;
-
- int addrGoto; /* Address of Goto used to jump flush_par.. */
- int addrNext; /* Jump here for next iteration of loop */
- int regFlushPart;
- int lblFlushPart;
- int csrLead;
- int regCtr;
- int regArg; /* Register array to martial function args */
- int regSize;
- int lblEmpty;
- int bReverse = pMWin->pOrderBy && pMWin->eStart==TK_CURRENT
- && pMWin->eEnd==TK_UNBOUNDED;
-
- assert( (pMWin->eStart==TK_UNBOUNDED && pMWin->eEnd==TK_CURRENT)
- || (pMWin->eStart==TK_UNBOUNDED && pMWin->eEnd==TK_UNBOUNDED)
- || (pMWin->eStart==TK_CURRENT && pMWin->eEnd==TK_CURRENT)
- || (pMWin->eStart==TK_CURRENT && pMWin->eEnd==TK_UNBOUNDED)
- );
-
- lblEmpty = sqlite3VdbeMakeLabel(v);
- regNewPeer = pParse->nMem+1;
- pParse->nMem += nPeer;
-
- /* Allocate register and label for the "flush_partition" sub-routine. */
- regFlushPart = ++pParse->nMem;
- lblFlushPart = sqlite3VdbeMakeLabel(v);
-
- csrLead = pParse->nTab++;
- regCtr = ++pParse->nMem;
-
-  windowPartitionCache(pParse, p, pWInfo, regFlushPart, lblFlushPart, &regSize);
- addrGoto = sqlite3VdbeAddOp0(v, OP_Goto);
-
- /* Start of "flush_partition" */
- sqlite3VdbeResolveLabel(v, lblFlushPart);
- sqlite3VdbeAddOp2(v, OP_Once, 0, sqlite3VdbeCurrentAddr(v)+2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_OpenDup, csrLead, pMWin->iEphCsr);
-
- /* Initialize the accumulator register for each window function to NULL */
- regArg = windowInitAccum(pParse, pMWin);
-
- sqlite3VdbeAddOp2(v, OP_Integer, 0, regCtr);
- sqlite3VdbeAddOp2(v, OP_Rewind, csrLead, lblEmpty);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, pMWin->iEphCsr, lblEmpty);
- VdbeCoverageNeverTaken(v);
-
- if( bReverse ){
- int addr2 = sqlite3VdbeCurrentAddr(v);
- windowAggStep(pParse, pMWin, csrLead, 0, regArg, regSize);
- sqlite3VdbeAddOp2(v, OP_Next, csrLead, addr2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, csrLead, lblEmpty);
- VdbeCoverageNeverTaken(v);
- }
- addrNext = sqlite3VdbeCurrentAddr(v);
-
- if( pOrderBy && (pMWin->eEnd==TK_CURRENT || pMWin->eStart==TK_CURRENT) ){
- int bCurrent = (pMWin->eStart==TK_CURRENT);
- int addrJump = 0; /* Address of OP_Jump below */
- if( pMWin->eType==TK_RANGE ){
- int iOff = pMWin->nBufferCol + (pPart ? pPart->nExpr : 0);
- int regPeer = pMWin->regPart + (pPart ? pPart->nExpr : 0);
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOrderBy, 0, 0);
-      for(k=0; k<nPeer; k++){
-        sqlite3VdbeAddOp3(v, OP_Column, csrLead, iOff+k, regNewPeer+k);
-      }
-      addr = sqlite3VdbeAddOp3(v, OP_Compare, regNewPeer, regPeer, nPeer);
-      sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
-      addrJump = sqlite3VdbeAddOp3(v, OP_Jump, addr+2, 0, addr+2);
-      VdbeCoverage(v);
-      sqlite3VdbeAddOp3(v, OP_Copy, regNewPeer, regPeer, nPeer-1);
-    }
-
-    windowReturnRows(pParse, pMWin, regCtr, regGosub, addrGosub,
-        (bCurrent ? regArg : 0), (bCurrent ? regSize : 0)
-    );
-    if( addrJump ) sqlite3VdbeJumpHere(v, addrJump);
-  }
-
-  if( bReverse==0 ){
-    windowAggStep(pParse, pMWin, csrLead, 0, regArg, regSize);
-  }
-  sqlite3VdbeAddOp2(v, OP_AddImm, regCtr, 1);
-  sqlite3VdbeAddOp2(v, OP_Next, csrLead, addrNext);
-  VdbeCoverage(v);
-
-  windowAggFinal(pParse, pMWin, 1);
-  windowReturnRows(pParse, pMWin, regCtr, regGosub, addrGosub, 0, 0);
-
-  sqlite3VdbeResolveLabel(v, lblEmpty);
-  sqlite3VdbeAddOp1(v, OP_ResetSorter, pMWin->iEphCsr);
- sqlite3VdbeAddOp1(v, OP_Return, regFlushPart);
-
- /* Jump to here to skip over flush_partition */
- sqlite3VdbeJumpHere(v, addrGoto);
-}
-
-
-/*
-** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-**
-** ...
-** if( new partition ){
-** AggFinal (xFinalize)
-** Gosub addrGosub
-** ResetSorter eph-table
-** }
-** else if( new peer ){
-** AggFinal (xValue)
-** Gosub addrGosub
-** ResetSorter eph-table
-** }
-** AggStep
-** Insert (record into eph-table)
-** sqlite3WhereEnd()
-** AggFinal (xFinalize)
-** Gosub addrGosub
-**
-** RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
-**
-** As above, except take no action for a "new peer". Invoke
-** the sub-routine once only for each partition.
-**
-** RANGE BETWEEN CURRENT ROW AND CURRENT ROW
-**
-** As above, except that the "new peer" condition is handled in the
-** same way as "new partition" (so there is no "else if" block).
-**
-** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
-**
-** As above, except assume every row is a "new peer".
-*/
-static void windowCodeDefaultStep(
- Parse *pParse,
- Select *p,
- WhereInfo *pWInfo,
- int regGosub,
- int addrGosub
-){
- Window *pMWin = p->pWin;
- Vdbe *v = sqlite3GetVdbe(pParse);
- int k;
- int iSubCsr = p->pSrc->a[0].iCursor;
- int nSub = p->pSrc->a[0].pTab->nCol;
- int reg = pParse->nMem+1;
- int regRecord = reg+nSub;
- int regRowid = regRecord+1;
- int addr;
- ExprList *pPart = pMWin->pPartition;
- ExprList *pOrderBy = pMWin->pOrderBy;
-
- assert( pMWin->eType==TK_RANGE
- || (pMWin->eStart==TK_UNBOUNDED && pMWin->eEnd==TK_CURRENT)
- );
-
- assert( (pMWin->eStart==TK_UNBOUNDED && pMWin->eEnd==TK_CURRENT)
- || (pMWin->eStart==TK_UNBOUNDED && pMWin->eEnd==TK_UNBOUNDED)
- || (pMWin->eStart==TK_CURRENT && pMWin->eEnd==TK_CURRENT)
- || (pMWin->eStart==TK_CURRENT && pMWin->eEnd==TK_UNBOUNDED && !pOrderBy)
- );
-
- if( pMWin->eEnd==TK_UNBOUNDED ){
- pOrderBy = 0;
- }
-
- pParse->nMem += nSub + 2;
-
- /* Load the individual column values of the row returned by
- ** the sub-select into an array of registers. */
-  for(k=0; k<nSub; k++){
-    sqlite3VdbeAddOp3(v, OP_Column, iSubCsr, k, reg+k);
-  }
-
-  /* Check if this is the start of a new partition or peer group. */
-  if( pPart || pOrderBy ){
-    int nPart = (pPart ? pPart->nExpr : 0);
- int addrGoto = 0;
- int addrJump = 0;
- int nPeer = (pOrderBy ? pOrderBy->nExpr : 0);
-
- if( pPart ){
- int regNewPart = reg + pMWin->nBufferCol;
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pPart, 0, 0);
- addr = sqlite3VdbeAddOp3(v, OP_Compare, regNewPart, pMWin->regPart,nPart);
- sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
- addrJump = sqlite3VdbeAddOp3(v, OP_Jump, addr+2, 0, addr+2);
- VdbeCoverageEqNe(v);
- windowAggFinal(pParse, pMWin, 1);
- if( pOrderBy ){
- addrGoto = sqlite3VdbeAddOp0(v, OP_Goto);
- }
- }
-
- if( pOrderBy ){
- int regNewPeer = reg + pMWin->nBufferCol + nPart;
- int regPeer = pMWin->regPart + nPart;
-
- if( addrJump ) sqlite3VdbeJumpHere(v, addrJump);
- if( pMWin->eType==TK_RANGE ){
- KeyInfo *pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOrderBy, 0, 0);
- addr = sqlite3VdbeAddOp3(v, OP_Compare, regNewPeer, regPeer, nPeer);
- sqlite3VdbeAppendP4(v, (void*)pKeyInfo, P4_KEYINFO);
- addrJump = sqlite3VdbeAddOp3(v, OP_Jump, addr+2, 0, addr+2);
- VdbeCoverage(v);
- }else{
- addrJump = 0;
- }
- windowAggFinal(pParse, pMWin, pMWin->eStart==TK_CURRENT);
- if( addrGoto ) sqlite3VdbeJumpHere(v, addrGoto);
- }
-
- sqlite3VdbeAddOp2(v, OP_Rewind, pMWin->iEphCsr,sqlite3VdbeCurrentAddr(v)+3);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Gosub, regGosub, addrGosub);
- sqlite3VdbeAddOp2(v, OP_Next, pMWin->iEphCsr, sqlite3VdbeCurrentAddr(v)-1);
- VdbeCoverage(v);
-
- sqlite3VdbeAddOp1(v, OP_ResetSorter, pMWin->iEphCsr);
- sqlite3VdbeAddOp3(
- v, OP_Copy, reg+pMWin->nBufferCol, pMWin->regPart, nPart+nPeer-1
- );
-
- if( addrJump ) sqlite3VdbeJumpHere(v, addrJump);
- }
-
- /* Invoke step function for window functions */
- windowAggStep(pParse, pMWin, -1, 0, reg, 0);
-
- /* Buffer the current row in the ephemeral table. */
- if( pMWin->nBufferCol>0 ){
- sqlite3VdbeAddOp3(v, OP_MakeRecord, reg, pMWin->nBufferCol, regRecord);
- }else{
- sqlite3VdbeAddOp2(v, OP_Blob, 0, regRecord);
- sqlite3VdbeAppendP4(v, (void*)"", 0);
- }
- sqlite3VdbeAddOp2(v, OP_NewRowid, pMWin->iEphCsr, regRowid);
- sqlite3VdbeAddOp3(v, OP_Insert, pMWin->iEphCsr, regRecord, regRowid);
-
- /* End the database scan loop. */
- sqlite3WhereEnd(pWInfo);
-
- windowAggFinal(pParse, pMWin, 1);
- sqlite3VdbeAddOp2(v, OP_Rewind, pMWin->iEphCsr,sqlite3VdbeCurrentAddr(v)+3);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Gosub, regGosub, addrGosub);
- sqlite3VdbeAddOp2(v, OP_Next, pMWin->iEphCsr, sqlite3VdbeCurrentAddr(v)-1);
- VdbeCoverage(v);
-}
-
-/*
-** Allocate and return a duplicate of the Window object indicated by the
-** third argument. Set the Window.pOwner field of the new object to
-** pOwner.
-*/
-SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p){
- Window *pNew = 0;
- if( p ){
- pNew = sqlite3DbMallocZero(db, sizeof(Window));
- if( pNew ){
- pNew->zName = sqlite3DbStrDup(db, p->zName);
- pNew->pFilter = sqlite3ExprDup(db, p->pFilter, 0);
- pNew->pPartition = sqlite3ExprListDup(db, p->pPartition, 0);
- pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, 0);
- pNew->eType = p->eType;
- pNew->eEnd = p->eEnd;
- pNew->eStart = p->eStart;
- pNew->pStart = sqlite3ExprDup(db, p->pStart, 0);
- pNew->pEnd = sqlite3ExprDup(db, p->pEnd, 0);
- pNew->pOwner = pOwner;
- }
- }
- return pNew;
-}
-
-/*
-** Return a copy of the linked list of Window objects passed as the
-** second argument.
-*/
-SQLITE_PRIVATE Window *sqlite3WindowListDup(sqlite3 *db, Window *p){
- Window *pWin;
- Window *pRet = 0;
- Window **pp = &pRet;
-
- for(pWin=p; pWin; pWin=pWin->pNextWin){
- *pp = sqlite3WindowDup(db, 0, pWin);
- if( *pp==0 ) break;
- pp = &((*pp)->pNextWin);
- }
-
- return pRet;
-}
-
-/*
-** sqlite3WhereBegin() has already been called for the SELECT statement
-** passed as the second argument when this function is invoked. It generates
-** code to populate the Window.regResult register for each window function and
-** invoke the sub-routine at instruction addrGosub once for each row.
-** This function calls sqlite3WhereEnd() before returning.
-*/
-SQLITE_PRIVATE void sqlite3WindowCodeStep(
- Parse *pParse, /* Parse context */
- Select *p, /* Rewritten SELECT statement */
- WhereInfo *pWInfo, /* Context returned by sqlite3WhereBegin() */
- int regGosub, /* Register for OP_Gosub */
- int addrGosub /* OP_Gosub here to return each row */
-){
- Window *pMWin = p->pWin;
-
- /* There are three different functions that may be used to do the work
- ** of this one, depending on the window frame and the specific built-in
- ** window functions used (if any).
- **
- ** windowCodeRowExprStep() handles all "ROWS" window frames, except for:
- **
- ** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
- **
- ** The exception is because windowCodeRowExprStep() implements all window
- ** frame types by caching the entire partition in a temp table, and
- ** "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW" is easy enough to
- ** implement without such a cache.
- **
- ** windowCodeCacheStep() is used for:
- **
- ** RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
- **
- ** It is also used for anything not handled by windowCodeRowExprStep()
- ** that invokes a built-in window function that requires the entire
- ** partition to be cached in a temp table before any rows are returned
- ** (e.g. nth_value() or percent_rank()).
- **
- ** Finally, assuming there is no built-in window function that requires
- ** the partition to be cached, windowCodeDefaultStep() is used for:
- **
- ** RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
- ** RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
- ** RANGE BETWEEN CURRENT ROW AND CURRENT ROW
- ** ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
- **
- ** windowCodeDefaultStep() is the only one of the three functions that
- ** does not cache each partition in a temp table before beginning to
- ** return rows.
- */
- if( pMWin->eType==TK_ROWS
- && (pMWin->eStart!=TK_UNBOUNDED||pMWin->eEnd!=TK_CURRENT||!pMWin->pOrderBy)
- ){
- VdbeModuleComment((pParse->pVdbe, "Begin RowExprStep()"));
- windowCodeRowExprStep(pParse, p, pWInfo, regGosub, addrGosub);
- }else{
- Window *pWin;
- int bCache = 0; /* True to use CacheStep() */
-
- if( pMWin->eStart==TK_CURRENT && pMWin->eEnd==TK_UNBOUNDED ){
- bCache = 1;
- }else{
- for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
- if( (pFunc->funcFlags & SQLITE_FUNC_WINDOW_SIZE)
- || (pFunc->zName==nth_valueName)
- || (pFunc->zName==first_valueName)
- || (pFunc->zName==leadName)
- || (pFunc->zName==lagName)
- ){
- bCache = 1;
- break;
- }
- }
- }
-
- /* Otherwise, call windowCodeDefaultStep(). */
- if( bCache ){
- VdbeModuleComment((pParse->pVdbe, "Begin CacheStep()"));
- windowCodeCacheStep(pParse, p, pWInfo, regGosub, addrGosub);
- }else{
- VdbeModuleComment((pParse->pVdbe, "Begin DefaultStep()"));
- windowCodeDefaultStep(pParse, p, pWInfo, regGosub, addrGosub);
- }
- }
-}
-
-#endif /* SQLITE_OMIT_WINDOWFUNC */
-
-/************** End of window.c **********************************************/
/************** Begin file parse.c *******************************************/
/*
** 2000-05-29
@@ -145982,8 +141457,6 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
*/
struct TrigEvent { int a; IdList * b; };
-struct FrameBound { int eType; Expr *pExpr; };
-
/*
** Disable lookaside memory allocation for objects that might be
** shared across database connections.
@@ -146024,21 +141497,10 @@ static void disableLookaside(Parse *pParse){
static Expr *tokenExpr(Parse *pParse, int op, Token t){
Expr *p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)+t.n+1);
if( p ){
- /* memset(p, 0, sizeof(Expr)); */
+ memset(p, 0, sizeof(Expr));
p->op = (u8)op;
- p->affinity = 0;
p->flags = EP_Leaf;
p->iAgg = -1;
- p->pLeft = p->pRight = 0;
- p->x.pList = 0;
- p->pAggInfo = 0;
- p->pTab = 0;
- p->op2 = 0;
- p->iTable = 0;
- p->iColumn = 0;
-#ifndef SQLITE_OMIT_WINDOWFUNC
- p->pWin = 0;
-#endif
p->u.zToken = (char*)&p[1];
memcpy(p->u.zToken, t.z, t.n);
p->u.zToken[t.n] = 0;
@@ -146049,19 +141511,15 @@ static void disableLookaside(Parse *pParse){
#if SQLITE_MAX_EXPR_DEPTH>0
p->nHeight = 1;
#endif
- if( IN_RENAME_OBJECT ){
- return (Expr*)sqlite3RenameTokenMap(pParse, (void*)p, &t);
- }
}
return p;
}
-
/* A routine to convert a binary TK_IS or TK_ISNOT expression into a
** unary TK_ISNULL or TK_NOTNULL expression. */
static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){
sqlite3 *db = pParse->db;
- if( pA && pY && pY->op==TK_NULL && !IN_RENAME_OBJECT ){
+ if( pA && pY && pY->op==TK_NULL ){
pA->op = (u8)op;
sqlite3ExprDelete(db, pA->pRight);
pA->pRight = 0;
@@ -146152,28 +141610,26 @@ static void disableLookaside(Parse *pParse){
# define INTERFACE 1
#endif
/************* Begin control #defines *****************************************/
-#define YYCODETYPE unsigned short int
-#define YYNOCODE 277
+#define YYCODETYPE unsigned char
+#define YYNOCODE 255
#define YYACTIONTYPE unsigned short int
-#define YYWILDCARD 91
+#define YYWILDCARD 84
#define sqlite3ParserTOKENTYPE Token
typedef union {
int yyinit;
sqlite3ParserTOKENTYPE yy0;
- Expr* yy18;
- struct TrigEvent yy34;
- IdList* yy48;
- int yy70;
- struct {int value; int mask;} yy111;
- struct FrameBound yy119;
- SrcList* yy135;
- TriggerStep* yy207;
- Window* yy327;
- Upsert* yy340;
- const char* yy392;
- ExprList* yy420;
- With* yy449;
- Select* yy489;
+ const char* yy36;
+ TriggerStep* yy47;
+ With* yy91;
+ struct {int value; int mask;} yy107;
+ Expr* yy182;
+ Upsert* yy198;
+ ExprList* yy232;
+ struct TrigEvent yy300;
+ Select* yy399;
+ SrcList* yy427;
+ int yy502;
+ IdList* yy510;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -146189,19 +141645,18 @@ typedef union {
#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
#define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
#define YYFALLBACK 1
-#define YYNSTATE 521
-#define YYNRULE 367
-#define YYNTOKEN 155
-#define YY_MAX_SHIFT 520
-#define YY_MIN_SHIFTREDUCE 756
-#define YY_MAX_SHIFTREDUCE 1122
-#define YY_ERROR_ACTION 1123
-#define YY_ACCEPT_ACTION 1124
-#define YY_NO_ACTION 1125
-#define YY_MIN_REDUCE 1126
-#define YY_MAX_REDUCE 1492
+#define YYNSTATE 490
+#define YYNRULE 341
+#define YYNTOKEN 145
+#define YY_MAX_SHIFT 489
+#define YY_MIN_SHIFTREDUCE 705
+#define YY_MAX_SHIFTREDUCE 1045
+#define YY_ERROR_ACTION 1046
+#define YY_ACCEPT_ACTION 1047
+#define YY_NO_ACTION 1048
+#define YY_MIN_REDUCE 1049
+#define YY_MAX_REDUCE 1389
/************* End control #defines *******************************************/
-#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
/* Define the yytestcase() macro to be a no-op if is not already defined
** otherwise.
@@ -146266,568 +141721,503 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2009)
+#define YY_ACTTAB_COUNT (1657)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 368, 105, 102, 197, 105, 102, 197, 515, 1124, 1,
- /* 10 */ 1, 520, 2, 1128, 515, 1192, 1171, 1456, 275, 370,
- /* 20 */ 127, 1389, 1197, 1197, 1192, 1166, 178, 1205, 64, 64,
- /* 30 */ 477, 887, 322, 428, 348, 37, 37, 808, 362, 888,
- /* 40 */ 509, 509, 509, 112, 113, 103, 1100, 1100, 953, 956,
- /* 50 */ 946, 946, 110, 110, 111, 111, 111, 111, 365, 252,
- /* 60 */ 252, 515, 252, 252, 497, 515, 309, 515, 459, 515,
- /* 70 */ 1079, 491, 512, 478, 6, 512, 809, 134, 498, 228,
- /* 80 */ 194, 428, 37, 37, 515, 208, 64, 64, 64, 64,
- /* 90 */ 13, 13, 109, 109, 109, 109, 108, 108, 107, 107,
- /* 100 */ 107, 106, 401, 258, 381, 13, 13, 398, 397, 428,
- /* 110 */ 252, 252, 370, 476, 405, 1104, 1079, 1080, 1081, 386,
- /* 120 */ 1106, 390, 497, 512, 497, 1423, 1419, 304, 1105, 307,
- /* 130 */ 1256, 496, 370, 499, 16, 16, 112, 113, 103, 1100,
- /* 140 */ 1100, 953, 956, 946, 946, 110, 110, 111, 111, 111,
- /* 150 */ 111, 262, 1107, 495, 1107, 401, 112, 113, 103, 1100,
- /* 160 */ 1100, 953, 956, 946, 946, 110, 110, 111, 111, 111,
- /* 170 */ 111, 129, 1425, 343, 1420, 339, 1059, 492, 1057, 263,
- /* 180 */ 73, 105, 102, 197, 994, 109, 109, 109, 109, 108,
- /* 190 */ 108, 107, 107, 107, 106, 401, 370, 111, 111, 111,
- /* 200 */ 111, 104, 492, 89, 1432, 109, 109, 109, 109, 108,
- /* 210 */ 108, 107, 107, 107, 106, 401, 111, 111, 111, 111,
- /* 220 */ 112, 113, 103, 1100, 1100, 953, 956, 946, 946, 110,
- /* 230 */ 110, 111, 111, 111, 111, 109, 109, 109, 109, 108,
- /* 240 */ 108, 107, 107, 107, 106, 401, 114, 108, 108, 107,
- /* 250 */ 107, 107, 106, 401, 109, 109, 109, 109, 108, 108,
- /* 260 */ 107, 107, 107, 106, 401, 152, 399, 399, 399, 109,
- /* 270 */ 109, 109, 109, 108, 108, 107, 107, 107, 106, 401,
- /* 280 */ 178, 493, 1412, 434, 1037, 1486, 1079, 515, 1486, 370,
- /* 290 */ 421, 297, 357, 412, 74, 1079, 109, 109, 109, 109,
- /* 300 */ 108, 108, 107, 107, 107, 106, 401, 1413, 37, 37,
- /* 310 */ 1431, 274, 506, 112, 113, 103, 1100, 1100, 953, 956,
- /* 320 */ 946, 946, 110, 110, 111, 111, 111, 111, 1436, 520,
- /* 330 */ 2, 1128, 1079, 1080, 1081, 430, 275, 1079, 127, 366,
- /* 340 */ 933, 1079, 1080, 1081, 220, 1205, 913, 458, 455, 454,
- /* 350 */ 392, 167, 515, 1035, 152, 445, 924, 453, 152, 874,
- /* 360 */ 923, 289, 109, 109, 109, 109, 108, 108, 107, 107,
- /* 370 */ 107, 106, 401, 13, 13, 261, 853, 252, 252, 227,
- /* 380 */ 106, 401, 370, 1079, 1080, 1081, 311, 388, 1079, 296,
- /* 390 */ 512, 923, 923, 925, 231, 323, 1255, 1388, 1423, 490,
- /* 400 */ 274, 506, 12, 208, 274, 506, 112, 113, 103, 1100,
- /* 410 */ 1100, 953, 956, 946, 946, 110, 110, 111, 111, 111,
- /* 420 */ 111, 1440, 286, 1128, 288, 1079, 1097, 247, 275, 1098,
- /* 430 */ 127, 387, 405, 389, 1079, 1080, 1081, 1205, 159, 238,
- /* 440 */ 255, 321, 461, 316, 460, 225, 790, 105, 102, 197,
- /* 450 */ 513, 314, 842, 842, 445, 109, 109, 109, 109, 108,
- /* 460 */ 108, 107, 107, 107, 106, 401, 515, 514, 515, 252,
- /* 470 */ 252, 1079, 1080, 1081, 435, 370, 1098, 933, 1460, 794,
- /* 480 */ 274, 506, 512, 105, 102, 197, 336, 63, 63, 64,
- /* 490 */ 64, 27, 790, 924, 287, 208, 1354, 923, 515, 112,
- /* 500 */ 113, 103, 1100, 1100, 953, 956, 946, 946, 110, 110,
- /* 510 */ 111, 111, 111, 111, 107, 107, 107, 106, 401, 49,
- /* 520 */ 49, 515, 28, 1079, 405, 497, 421, 297, 923, 923,
- /* 530 */ 925, 186, 468, 1079, 467, 999, 999, 442, 515, 1079,
- /* 540 */ 334, 515, 45, 45, 1083, 342, 173, 168, 109, 109,
- /* 550 */ 109, 109, 108, 108, 107, 107, 107, 106, 401, 13,
- /* 560 */ 13, 205, 13, 13, 252, 252, 1195, 1195, 370, 1079,
- /* 570 */ 1080, 1081, 787, 265, 5, 359, 494, 512, 469, 1079,
- /* 580 */ 1080, 1081, 398, 397, 1079, 1079, 1080, 1081, 3, 282,
- /* 590 */ 1079, 1083, 112, 113, 103, 1100, 1100, 953, 956, 946,
- /* 600 */ 946, 110, 110, 111, 111, 111, 111, 252, 252, 1015,
- /* 610 */ 220, 1079, 873, 458, 455, 454, 943, 943, 954, 957,
- /* 620 */ 512, 252, 252, 453, 1016, 1079, 445, 1107, 1209, 1107,
- /* 630 */ 1079, 1080, 1081, 515, 512, 426, 1079, 1080, 1081, 1017,
- /* 640 */ 512, 109, 109, 109, 109, 108, 108, 107, 107, 107,
- /* 650 */ 106, 401, 1052, 515, 50, 50, 515, 1079, 1080, 1081,
- /* 660 */ 828, 370, 1051, 379, 411, 1064, 1358, 207, 408, 773,
- /* 670 */ 829, 1079, 1080, 1081, 64, 64, 322, 64, 64, 1302,
- /* 680 */ 947, 411, 410, 1358, 1360, 112, 113, 103, 1100, 1100,
- /* 690 */ 953, 956, 946, 946, 110, 110, 111, 111, 111, 111,
- /* 700 */ 294, 482, 515, 1037, 1487, 515, 434, 1487, 354, 1120,
- /* 710 */ 483, 996, 913, 485, 466, 996, 132, 178, 33, 450,
- /* 720 */ 1203, 136, 406, 64, 64, 479, 64, 64, 419, 369,
- /* 730 */ 283, 1146, 252, 252, 109, 109, 109, 109, 108, 108,
- /* 740 */ 107, 107, 107, 106, 401, 512, 224, 440, 411, 266,
- /* 750 */ 1358, 266, 252, 252, 370, 296, 416, 284, 934, 396,
- /* 760 */ 976, 470, 400, 252, 252, 512, 9, 473, 231, 500,
- /* 770 */ 354, 1036, 1035, 1488, 355, 374, 512, 1121, 112, 113,
- /* 780 */ 103, 1100, 1100, 953, 956, 946, 946, 110, 110, 111,
- /* 790 */ 111, 111, 111, 252, 252, 1015, 515, 1347, 295, 252,
- /* 800 */ 252, 252, 252, 1098, 375, 249, 512, 445, 872, 322,
- /* 810 */ 1016, 480, 512, 195, 512, 434, 273, 15, 15, 515,
- /* 820 */ 314, 515, 95, 515, 93, 1017, 367, 109, 109, 109,
- /* 830 */ 109, 108, 108, 107, 107, 107, 106, 401, 515, 1121,
- /* 840 */ 39, 39, 51, 51, 52, 52, 503, 370, 515, 1204,
- /* 850 */ 1098, 918, 439, 341, 133, 436, 223, 222, 221, 53,
- /* 860 */ 53, 322, 1400, 761, 762, 763, 515, 370, 88, 54,
- /* 870 */ 54, 112, 113, 103, 1100, 1100, 953, 956, 946, 946,
- /* 880 */ 110, 110, 111, 111, 111, 111, 407, 55, 55, 196,
- /* 890 */ 515, 112, 113, 103, 1100, 1100, 953, 956, 946, 946,
- /* 900 */ 110, 110, 111, 111, 111, 111, 135, 264, 1149, 376,
- /* 910 */ 515, 40, 40, 515, 872, 515, 993, 515, 993, 116,
- /* 920 */ 109, 109, 109, 109, 108, 108, 107, 107, 107, 106,
- /* 930 */ 401, 41, 41, 515, 43, 43, 44, 44, 56, 56,
- /* 940 */ 109, 109, 109, 109, 108, 108, 107, 107, 107, 106,
- /* 950 */ 401, 515, 379, 515, 57, 57, 515, 799, 515, 379,
- /* 960 */ 515, 445, 200, 515, 323, 515, 1397, 515, 1459, 515,
- /* 970 */ 1287, 817, 58, 58, 14, 14, 515, 59, 59, 118,
- /* 980 */ 118, 60, 60, 515, 46, 46, 61, 61, 62, 62,
- /* 990 */ 47, 47, 515, 190, 189, 91, 515, 140, 140, 515,
- /* 1000 */ 394, 515, 277, 1200, 141, 141, 515, 1115, 515, 992,
- /* 1010 */ 515, 992, 515, 69, 69, 370, 278, 48, 48, 259,
- /* 1020 */ 65, 65, 119, 119, 246, 246, 260, 66, 66, 120,
- /* 1030 */ 120, 121, 121, 117, 117, 370, 515, 512, 383, 112,
- /* 1040 */ 113, 103, 1100, 1100, 953, 956, 946, 946, 110, 110,
- /* 1050 */ 111, 111, 111, 111, 515, 872, 515, 139, 139, 112,
- /* 1060 */ 113, 103, 1100, 1100, 953, 956, 946, 946, 110, 110,
- /* 1070 */ 111, 111, 111, 111, 1287, 138, 138, 125, 125, 515,
- /* 1080 */ 12, 515, 281, 1287, 515, 445, 131, 1287, 109, 109,
- /* 1090 */ 109, 109, 108, 108, 107, 107, 107, 106, 401, 515,
- /* 1100 */ 124, 124, 122, 122, 515, 123, 123, 515, 109, 109,
- /* 1110 */ 109, 109, 108, 108, 107, 107, 107, 106, 401, 515,
- /* 1120 */ 68, 68, 463, 783, 515, 70, 70, 302, 67, 67,
- /* 1130 */ 1032, 253, 253, 356, 1287, 191, 196, 1433, 465, 1301,
- /* 1140 */ 38, 38, 384, 94, 512, 42, 42, 177, 848, 274,
- /* 1150 */ 506, 385, 420, 847, 1356, 441, 508, 376, 377, 153,
- /* 1160 */ 423, 872, 432, 370, 224, 251, 194, 887, 182, 293,
- /* 1170 */ 783, 848, 88, 254, 466, 888, 847, 915, 807, 806,
- /* 1180 */ 230, 1241, 910, 370, 17, 413, 797, 112, 113, 103,
- /* 1190 */ 1100, 1100, 953, 956, 946, 946, 110, 110, 111, 111,
- /* 1200 */ 111, 111, 395, 814, 815, 1175, 983, 112, 101, 103,
- /* 1210 */ 1100, 1100, 953, 956, 946, 946, 110, 110, 111, 111,
- /* 1220 */ 111, 111, 375, 422, 427, 429, 298, 230, 230, 88,
- /* 1230 */ 1240, 451, 312, 797, 226, 88, 109, 109, 109, 109,
- /* 1240 */ 108, 108, 107, 107, 107, 106, 401, 86, 433, 979,
- /* 1250 */ 927, 881, 226, 983, 230, 415, 109, 109, 109, 109,
- /* 1260 */ 108, 108, 107, 107, 107, 106, 401, 320, 845, 781,
- /* 1270 */ 846, 100, 130, 100, 1403, 290, 370, 319, 1377, 1376,
- /* 1280 */ 437, 1449, 299, 1237, 303, 306, 308, 310, 1188, 1174,
- /* 1290 */ 1173, 1172, 315, 324, 325, 1228, 370, 927, 1249, 271,
- /* 1300 */ 1286, 113, 103, 1100, 1100, 953, 956, 946, 946, 110,
- /* 1310 */ 110, 111, 111, 111, 111, 1224, 1235, 502, 501, 1292,
- /* 1320 */ 1221, 1155, 103, 1100, 1100, 953, 956, 946, 946, 110,
- /* 1330 */ 110, 111, 111, 111, 111, 1148, 1137, 1136, 1138, 1443,
- /* 1340 */ 446, 244, 184, 98, 507, 188, 4, 353, 327, 109,
- /* 1350 */ 109, 109, 109, 108, 108, 107, 107, 107, 106, 401,
- /* 1360 */ 510, 329, 331, 199, 414, 456, 292, 285, 318, 109,
- /* 1370 */ 109, 109, 109, 108, 108, 107, 107, 107, 106, 401,
- /* 1380 */ 11, 1271, 1279, 402, 361, 192, 1171, 1351, 431, 505,
- /* 1390 */ 346, 1350, 333, 98, 507, 504, 4, 187, 1446, 1115,
- /* 1400 */ 233, 1396, 155, 1394, 1112, 152, 72, 75, 378, 425,
- /* 1410 */ 510, 165, 149, 157, 933, 1276, 86, 30, 1268, 417,
- /* 1420 */ 96, 96, 8, 160, 161, 162, 163, 97, 418, 402,
- /* 1430 */ 517, 516, 449, 402, 923, 210, 358, 424, 1282, 438,
- /* 1440 */ 169, 214, 360, 1345, 80, 504, 31, 444, 1365, 301,
- /* 1450 */ 245, 274, 506, 216, 174, 305, 488, 447, 217, 462,
- /* 1460 */ 1139, 487, 218, 363, 933, 923, 923, 925, 926, 24,
- /* 1470 */ 96, 96, 1191, 1190, 1189, 391, 1182, 97, 1163, 402,
- /* 1480 */ 517, 516, 799, 364, 923, 1162, 317, 1161, 98, 507,
- /* 1490 */ 1181, 4, 1458, 472, 393, 269, 270, 475, 481, 1232,
- /* 1500 */ 85, 1233, 326, 328, 232, 510, 495, 1231, 330, 98,
- /* 1510 */ 507, 1230, 4, 486, 335, 923, 923, 925, 926, 24,
- /* 1520 */ 1435, 1068, 404, 181, 336, 256, 510, 115, 402, 332,
- /* 1530 */ 352, 352, 351, 241, 349, 1214, 1414, 770, 338, 10,
- /* 1540 */ 504, 340, 272, 92, 1331, 1213, 87, 183, 484, 402,
- /* 1550 */ 201, 488, 280, 239, 344, 345, 489, 1145, 29, 933,
- /* 1560 */ 279, 504, 1074, 518, 240, 96, 96, 242, 243, 519,
- /* 1570 */ 1134, 1129, 97, 154, 402, 517, 516, 372, 373, 923,
- /* 1580 */ 933, 142, 143, 128, 1381, 267, 96, 96, 852, 757,
- /* 1590 */ 203, 144, 403, 97, 1382, 402, 517, 516, 204, 1380,
- /* 1600 */ 923, 146, 1379, 1159, 1158, 71, 1156, 276, 202, 185,
- /* 1610 */ 923, 923, 925, 926, 24, 198, 257, 126, 991, 989,
- /* 1620 */ 907, 98, 507, 156, 4, 145, 158, 206, 831, 209,
- /* 1630 */ 291, 923, 923, 925, 926, 24, 1005, 911, 510, 164,
- /* 1640 */ 147, 380, 371, 382, 166, 76, 77, 274, 506, 148,
- /* 1650 */ 78, 79, 1008, 211, 212, 1004, 137, 213, 18, 300,
- /* 1660 */ 230, 402, 997, 1109, 443, 215, 32, 170, 171, 772,
- /* 1670 */ 409, 448, 319, 504, 219, 172, 452, 81, 19, 457,
- /* 1680 */ 313, 20, 82, 268, 488, 150, 810, 179, 83, 487,
- /* 1690 */ 464, 151, 933, 180, 959, 84, 1040, 34, 96, 96,
- /* 1700 */ 471, 1041, 35, 474, 193, 97, 248, 402, 517, 516,
- /* 1710 */ 1068, 404, 923, 250, 256, 880, 229, 175, 875, 352,
- /* 1720 */ 352, 351, 241, 349, 100, 21, 770, 22, 1054, 1056,
- /* 1730 */ 7, 98, 507, 1045, 4, 337, 1058, 23, 974, 201,
- /* 1740 */ 176, 280, 88, 923, 923, 925, 926, 24, 510, 279,
- /* 1750 */ 960, 958, 962, 1014, 963, 1013, 235, 234, 25, 36,
- /* 1760 */ 99, 90, 507, 928, 4, 511, 350, 782, 26, 841,
- /* 1770 */ 236, 402, 347, 1069, 237, 1125, 1125, 1451, 510, 203,
- /* 1780 */ 1450, 1125, 1125, 504, 1125, 1125, 1125, 204, 1125, 1125,
- /* 1790 */ 146, 1125, 1125, 1125, 1125, 1125, 1125, 202, 1125, 1125,
- /* 1800 */ 1125, 402, 933, 1125, 1125, 1125, 1125, 1125, 96, 96,
- /* 1810 */ 1125, 1125, 1125, 504, 1125, 97, 1125, 402, 517, 516,
- /* 1820 */ 1125, 1125, 923, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1830 */ 1125, 371, 933, 1125, 1125, 1125, 274, 506, 96, 96,
- /* 1840 */ 1125, 1125, 1125, 1125, 1125, 97, 1125, 402, 517, 516,
- /* 1850 */ 1125, 1125, 923, 923, 923, 925, 926, 24, 1125, 409,
- /* 1860 */ 1125, 1125, 1125, 256, 1125, 1125, 1125, 1125, 352, 352,
- /* 1870 */ 351, 241, 349, 1125, 1125, 770, 1125, 1125, 1125, 1125,
- /* 1880 */ 1125, 1125, 1125, 923, 923, 925, 926, 24, 201, 1125,
- /* 1890 */ 280, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 279, 1125,
- /* 1900 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1910 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1920 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 203, 1125,
- /* 1930 */ 1125, 1125, 1125, 1125, 1125, 1125, 204, 1125, 1125, 146,
- /* 1940 */ 1125, 1125, 1125, 1125, 1125, 1125, 202, 1125, 1125, 1125,
- /* 1950 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1960 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1970 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 1980 */ 371, 1125, 1125, 1125, 1125, 274, 506, 1125, 1125, 1125,
- /* 1990 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125,
- /* 2000 */ 1125, 1125, 1125, 1125, 1125, 1125, 1125, 1125, 409,
+ /* 0 */ 349, 99, 96, 185, 99, 96, 185, 233, 1047, 1,
+ /* 10 */ 1, 489, 2, 1051, 484, 477, 477, 477, 260, 351,
+ /* 20 */ 121, 1310, 1120, 1120, 1178, 1115, 1094, 1128, 380, 380,
+ /* 30 */ 380, 835, 454, 410, 1115, 59, 59, 1357, 425, 836,
+ /* 40 */ 710, 711, 712, 106, 107, 97, 1023, 1023, 900, 903,
+ /* 50 */ 892, 892, 104, 104, 105, 105, 105, 105, 346, 238,
+ /* 60 */ 238, 99, 96, 185, 238, 238, 889, 889, 901, 904,
+ /* 70 */ 460, 481, 351, 99, 96, 185, 481, 347, 1177, 82,
+ /* 80 */ 388, 214, 182, 23, 194, 103, 103, 103, 103, 102,
+ /* 90 */ 102, 101, 101, 101, 100, 381, 106, 107, 97, 1023,
+ /* 100 */ 1023, 900, 903, 892, 892, 104, 104, 105, 105, 105,
+ /* 110 */ 105, 10, 385, 484, 24, 484, 1333, 489, 2, 1051,
+ /* 120 */ 335, 1043, 108, 893, 260, 351, 121, 99, 96, 185,
+ /* 130 */ 100, 381, 386, 1128, 59, 59, 59, 59, 103, 103,
+ /* 140 */ 103, 103, 102, 102, 101, 101, 101, 100, 381, 106,
+ /* 150 */ 107, 97, 1023, 1023, 900, 903, 892, 892, 104, 104,
+ /* 160 */ 105, 105, 105, 105, 360, 238, 238, 170, 170, 467,
+ /* 170 */ 455, 467, 464, 67, 381, 329, 169, 481, 351, 343,
+ /* 180 */ 338, 400, 1044, 68, 101, 101, 101, 100, 381, 393,
+ /* 190 */ 194, 103, 103, 103, 103, 102, 102, 101, 101, 101,
+ /* 200 */ 100, 381, 106, 107, 97, 1023, 1023, 900, 903, 892,
+ /* 210 */ 892, 104, 104, 105, 105, 105, 105, 483, 385, 103,
+ /* 220 */ 103, 103, 103, 102, 102, 101, 101, 101, 100, 381,
+ /* 230 */ 268, 351, 946, 946, 422, 296, 102, 102, 101, 101,
+ /* 240 */ 101, 100, 381, 861, 103, 103, 103, 103, 102, 102,
+ /* 250 */ 101, 101, 101, 100, 381, 106, 107, 97, 1023, 1023,
+ /* 260 */ 900, 903, 892, 892, 104, 104, 105, 105, 105, 105,
+ /* 270 */ 484, 983, 1383, 206, 1353, 1383, 438, 435, 434, 281,
+ /* 280 */ 396, 269, 1089, 941, 351, 1002, 433, 861, 743, 401,
+ /* 290 */ 282, 57, 57, 482, 145, 791, 791, 103, 103, 103,
+ /* 300 */ 103, 102, 102, 101, 101, 101, 100, 381, 106, 107,
+ /* 310 */ 97, 1023, 1023, 900, 903, 892, 892, 104, 104, 105,
+ /* 320 */ 105, 105, 105, 281, 1002, 1003, 1004, 206, 879, 319,
+ /* 330 */ 438, 435, 434, 981, 259, 474, 360, 351, 1118, 1118,
+ /* 340 */ 433, 736, 379, 378, 872, 1002, 1356, 322, 871, 766,
+ /* 350 */ 103, 103, 103, 103, 102, 102, 101, 101, 101, 100,
+ /* 360 */ 381, 106, 107, 97, 1023, 1023, 900, 903, 892, 892,
+ /* 370 */ 104, 104, 105, 105, 105, 105, 484, 801, 484, 871,
+ /* 380 */ 871, 873, 401, 282, 1002, 1003, 1004, 1030, 360, 1030,
+ /* 390 */ 351, 983, 1384, 213, 880, 1384, 145, 59, 59, 59,
+ /* 400 */ 59, 1002, 244, 103, 103, 103, 103, 102, 102, 101,
+ /* 410 */ 101, 101, 100, 381, 106, 107, 97, 1023, 1023, 900,
+ /* 420 */ 903, 892, 892, 104, 104, 105, 105, 105, 105, 274,
+ /* 430 */ 484, 110, 467, 479, 467, 444, 259, 474, 232, 232,
+ /* 440 */ 1002, 1003, 1004, 351, 210, 335, 982, 866, 1385, 336,
+ /* 450 */ 481, 59, 59, 981, 245, 307, 103, 103, 103, 103,
+ /* 460 */ 102, 102, 101, 101, 101, 100, 381, 106, 107, 97,
+ /* 470 */ 1023, 1023, 900, 903, 892, 892, 104, 104, 105, 105,
+ /* 480 */ 105, 105, 453, 459, 484, 408, 377, 259, 474, 271,
+ /* 490 */ 183, 273, 209, 208, 207, 356, 351, 307, 178, 177,
+ /* 500 */ 127, 1006, 1098, 14, 14, 43, 43, 1044, 425, 103,
+ /* 510 */ 103, 103, 103, 102, 102, 101, 101, 101, 100, 381,
+ /* 520 */ 106, 107, 97, 1023, 1023, 900, 903, 892, 892, 104,
+ /* 530 */ 104, 105, 105, 105, 105, 294, 1132, 408, 160, 484,
+ /* 540 */ 408, 1006, 129, 962, 1209, 239, 239, 481, 307, 425,
+ /* 550 */ 1309, 1097, 351, 235, 243, 272, 820, 481, 963, 425,
+ /* 560 */ 11, 11, 103, 103, 103, 103, 102, 102, 101, 101,
+ /* 570 */ 101, 100, 381, 964, 362, 1002, 106, 107, 97, 1023,
+ /* 580 */ 1023, 900, 903, 892, 892, 104, 104, 105, 105, 105,
+ /* 590 */ 105, 1275, 161, 126, 777, 289, 1209, 292, 1072, 357,
+ /* 600 */ 1209, 1127, 476, 357, 778, 425, 247, 425, 351, 248,
+ /* 610 */ 414, 364, 414, 171, 1002, 1003, 1004, 84, 103, 103,
+ /* 620 */ 103, 103, 102, 102, 101, 101, 101, 100, 381, 1002,
+ /* 630 */ 184, 484, 106, 107, 97, 1023, 1023, 900, 903, 892,
+ /* 640 */ 892, 104, 104, 105, 105, 105, 105, 1123, 1209, 287,
+ /* 650 */ 484, 1209, 11, 11, 179, 820, 259, 474, 307, 237,
+ /* 660 */ 182, 351, 321, 365, 414, 308, 367, 366, 1002, 1003,
+ /* 670 */ 1004, 44, 44, 87, 103, 103, 103, 103, 102, 102,
+ /* 680 */ 101, 101, 101, 100, 381, 106, 107, 97, 1023, 1023,
+ /* 690 */ 900, 903, 892, 892, 104, 104, 105, 105, 105, 105,
+ /* 700 */ 246, 368, 280, 128, 10, 358, 146, 796, 835, 258,
+ /* 710 */ 1020, 88, 795, 86, 351, 421, 836, 943, 376, 348,
+ /* 720 */ 191, 943, 1318, 267, 308, 279, 456, 103, 103, 103,
+ /* 730 */ 103, 102, 102, 101, 101, 101, 100, 381, 106, 95,
+ /* 740 */ 97, 1023, 1023, 900, 903, 892, 892, 104, 104, 105,
+ /* 750 */ 105, 105, 105, 420, 249, 238, 238, 238, 238, 79,
+ /* 760 */ 375, 125, 305, 29, 262, 978, 351, 481, 337, 481,
+ /* 770 */ 756, 755, 304, 278, 415, 15, 81, 940, 1126, 940,
+ /* 780 */ 103, 103, 103, 103, 102, 102, 101, 101, 101, 100,
+ /* 790 */ 381, 107, 97, 1023, 1023, 900, 903, 892, 892, 104,
+ /* 800 */ 104, 105, 105, 105, 105, 457, 263, 484, 174, 484,
+ /* 810 */ 238, 238, 863, 407, 402, 216, 216, 351, 409, 193,
+ /* 820 */ 283, 216, 481, 81, 763, 764, 266, 5, 13, 13,
+ /* 830 */ 34, 34, 103, 103, 103, 103, 102, 102, 101, 101,
+ /* 840 */ 101, 100, 381, 97, 1023, 1023, 900, 903, 892, 892,
+ /* 850 */ 104, 104, 105, 105, 105, 105, 93, 475, 1002, 4,
+ /* 860 */ 403, 1002, 340, 431, 1002, 297, 212, 1277, 81, 746,
+ /* 870 */ 1163, 152, 926, 478, 166, 212, 757, 829, 930, 939,
+ /* 880 */ 216, 939, 858, 103, 103, 103, 103, 102, 102, 101,
+ /* 890 */ 101, 101, 100, 381, 238, 238, 382, 1002, 1003, 1004,
+ /* 900 */ 1002, 1003, 1004, 1002, 1003, 1004, 481, 439, 472, 746,
+ /* 910 */ 105, 105, 105, 105, 98, 758, 1162, 145, 930, 412,
+ /* 920 */ 879, 406, 793, 81, 395, 89, 90, 91, 105, 105,
+ /* 930 */ 105, 105, 1323, 92, 484, 382, 486, 485, 240, 275,
+ /* 940 */ 871, 103, 103, 103, 103, 102, 102, 101, 101, 101,
+ /* 950 */ 100, 381, 1096, 371, 355, 45, 45, 259, 474, 103,
+ /* 960 */ 103, 103, 103, 102, 102, 101, 101, 101, 100, 381,
+ /* 970 */ 1150, 871, 871, 873, 874, 21, 1332, 991, 384, 730,
+ /* 980 */ 722, 242, 123, 1298, 124, 875, 333, 333, 332, 227,
+ /* 990 */ 330, 991, 384, 719, 256, 242, 484, 391, 413, 1297,
+ /* 1000 */ 333, 333, 332, 227, 330, 748, 187, 719, 265, 470,
+ /* 1010 */ 1279, 1002, 484, 417, 391, 390, 264, 11, 11, 284,
+ /* 1020 */ 187, 732, 265, 93, 475, 875, 4, 1279, 1281, 419,
+ /* 1030 */ 264, 369, 416, 11, 11, 1159, 288, 484, 399, 1346,
+ /* 1040 */ 478, 379, 378, 291, 484, 293, 189, 250, 295, 1027,
+ /* 1050 */ 1002, 1003, 1004, 190, 1029, 1111, 140, 188, 11, 11,
+ /* 1060 */ 189, 732, 1028, 382, 923, 46, 46, 190, 1095, 230,
+ /* 1070 */ 140, 188, 462, 93, 475, 472, 4, 300, 309, 391,
+ /* 1080 */ 373, 6, 1069, 217, 739, 310, 1030, 879, 1030, 1171,
+ /* 1090 */ 478, 352, 1279, 90, 91, 800, 259, 474, 1208, 484,
+ /* 1100 */ 92, 1268, 382, 486, 485, 352, 1002, 871, 879, 426,
+ /* 1110 */ 259, 474, 172, 382, 238, 238, 1146, 170, 1021, 389,
+ /* 1120 */ 47, 47, 1157, 739, 872, 472, 481, 469, 871, 350,
+ /* 1130 */ 1214, 83, 475, 389, 4, 1078, 1071, 879, 871, 871,
+ /* 1140 */ 873, 874, 21, 90, 91, 1002, 1003, 1004, 478, 251,
+ /* 1150 */ 92, 251, 382, 486, 485, 443, 370, 871, 1021, 871,
+ /* 1160 */ 871, 873, 224, 241, 306, 441, 301, 440, 211, 1060,
+ /* 1170 */ 820, 382, 822, 447, 299, 1059, 484, 1061, 1143, 962,
+ /* 1180 */ 430, 796, 484, 472, 1340, 312, 795, 465, 871, 871,
+ /* 1190 */ 873, 874, 21, 314, 963, 879, 316, 59, 59, 1002,
+ /* 1200 */ 9, 90, 91, 48, 48, 238, 238, 210, 92, 964,
+ /* 1210 */ 382, 486, 485, 176, 334, 871, 242, 481, 1193, 238,
+ /* 1220 */ 238, 333, 333, 332, 227, 330, 394, 270, 719, 277,
+ /* 1230 */ 471, 481, 467, 466, 484, 145, 217, 1201, 1002, 1003,
+ /* 1240 */ 1004, 187, 3, 265, 184, 445, 871, 871, 873, 874,
+ /* 1250 */ 21, 264, 1337, 450, 1051, 39, 39, 392, 356, 260,
+ /* 1260 */ 342, 121, 468, 411, 436, 821, 180, 1094, 1128, 820,
+ /* 1270 */ 303, 1021, 1272, 1271, 299, 259, 474, 238, 238, 1002,
+ /* 1280 */ 473, 189, 484, 318, 327, 238, 238, 484, 190, 481,
+ /* 1290 */ 446, 140, 188, 1343, 238, 238, 1038, 481, 148, 175,
+ /* 1300 */ 238, 238, 484, 49, 49, 219, 481, 484, 35, 35,
+ /* 1310 */ 1317, 1021, 481, 484, 1035, 484, 1315, 484, 1002, 1003,
+ /* 1320 */ 1004, 484, 66, 36, 36, 194, 352, 484, 38, 38,
+ /* 1330 */ 484, 259, 474, 69, 50, 50, 51, 51, 52, 52,
+ /* 1340 */ 359, 484, 12, 12, 484, 1198, 484, 158, 53, 53,
+ /* 1350 */ 405, 112, 112, 385, 389, 484, 26, 484, 143, 484,
+ /* 1360 */ 150, 484, 54, 54, 397, 40, 40, 55, 55, 484,
+ /* 1370 */ 79, 484, 153, 1190, 484, 154, 56, 56, 41, 41,
+ /* 1380 */ 58, 58, 133, 133, 484, 398, 484, 429, 484, 155,
+ /* 1390 */ 134, 134, 135, 135, 484, 63, 63, 484, 341, 484,
+ /* 1400 */ 339, 484, 196, 484, 156, 42, 42, 113, 113, 60,
+ /* 1410 */ 60, 484, 404, 484, 27, 114, 114, 1204, 115, 115,
+ /* 1420 */ 111, 111, 132, 132, 131, 131, 1266, 418, 484, 162,
+ /* 1430 */ 484, 200, 119, 119, 118, 118, 484, 74, 424, 484,
+ /* 1440 */ 1286, 484, 231, 484, 202, 484, 167, 286, 427, 116,
+ /* 1450 */ 116, 117, 117, 290, 203, 442, 1062, 62, 62, 204,
+ /* 1460 */ 64, 64, 61, 61, 33, 33, 37, 37, 344, 372,
+ /* 1470 */ 1114, 1105, 748, 1113, 374, 1112, 254, 458, 1086, 255,
+ /* 1480 */ 345, 1085, 302, 1084, 1355, 78, 1154, 311, 1104, 449,
+ /* 1490 */ 452, 1155, 1153, 218, 7, 313, 315, 320, 1152, 85,
+ /* 1500 */ 1252, 317, 109, 80, 463, 225, 461, 1068, 25, 487,
+ /* 1510 */ 997, 323, 257, 226, 229, 228, 1136, 324, 325, 326,
+ /* 1520 */ 488, 136, 1057, 1052, 1302, 1303, 1301, 706, 1300, 137,
+ /* 1530 */ 122, 138, 383, 173, 1082, 261, 186, 252, 1081, 65,
+ /* 1540 */ 387, 120, 938, 936, 855, 353, 149, 1079, 139, 151,
+ /* 1550 */ 192, 780, 195, 276, 952, 157, 141, 361, 70, 363,
+ /* 1560 */ 859, 159, 71, 72, 142, 73, 955, 354, 147, 197,
+ /* 1570 */ 198, 951, 130, 16, 199, 285, 216, 1032, 201, 423,
+ /* 1580 */ 164, 944, 163, 28, 721, 428, 304, 165, 205, 759,
+ /* 1590 */ 75, 432, 298, 17, 18, 437, 76, 253, 878, 144,
+ /* 1600 */ 877, 906, 77, 986, 30, 448, 987, 31, 451, 181,
+ /* 1610 */ 234, 236, 168, 828, 823, 89, 910, 921, 81, 907,
+ /* 1620 */ 215, 905, 909, 961, 960, 19, 221, 20, 220, 22,
+ /* 1630 */ 32, 331, 876, 731, 94, 790, 794, 8, 992, 222,
+ /* 1640 */ 480, 328, 1048, 1048, 1048, 1048, 1048, 1048, 1048, 1048,
+ /* 1650 */ 223, 1048, 1048, 1048, 1048, 1348, 1347,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 184, 238, 239, 240, 238, 239, 240, 163, 155, 156,
- /* 10 */ 157, 158, 159, 160, 163, 191, 192, 183, 165, 19,
- /* 20 */ 167, 258, 202, 203, 200, 191, 163, 174, 184, 185,
- /* 30 */ 174, 31, 163, 163, 171, 184, 185, 35, 175, 39,
- /* 40 */ 179, 180, 181, 43, 44, 45, 46, 47, 48, 49,
- /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 184, 206,
- /* 60 */ 207, 163, 206, 207, 220, 163, 16, 163, 66, 163,
- /* 70 */ 59, 270, 219, 229, 273, 219, 74, 208, 174, 223,
- /* 80 */ 224, 163, 184, 185, 163, 232, 184, 185, 184, 185,
- /* 90 */ 184, 185, 92, 93, 94, 95, 96, 97, 98, 99,
- /* 100 */ 100, 101, 102, 233, 198, 184, 185, 96, 97, 163,
- /* 110 */ 206, 207, 19, 163, 261, 104, 105, 106, 107, 198,
- /* 120 */ 109, 119, 220, 219, 220, 274, 275, 77, 117, 79,
- /* 130 */ 187, 229, 19, 229, 184, 185, 43, 44, 45, 46,
- /* 140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 150 */ 57, 233, 141, 134, 143, 102, 43, 44, 45, 46,
- /* 160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 170 */ 57, 152, 274, 216, 276, 218, 83, 163, 85, 233,
- /* 180 */ 67, 238, 239, 240, 11, 92, 93, 94, 95, 96,
- /* 190 */ 97, 98, 99, 100, 101, 102, 19, 54, 55, 56,
- /* 200 */ 57, 58, 163, 26, 163, 92, 93, 94, 95, 96,
- /* 210 */ 97, 98, 99, 100, 101, 102, 54, 55, 56, 57,
- /* 220 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 230 */ 53, 54, 55, 56, 57, 92, 93, 94, 95, 96,
- /* 240 */ 97, 98, 99, 100, 101, 102, 69, 96, 97, 98,
- /* 250 */ 99, 100, 101, 102, 92, 93, 94, 95, 96, 97,
- /* 260 */ 98, 99, 100, 101, 102, 81, 179, 180, 181, 92,
- /* 270 */ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
- /* 280 */ 163, 267, 268, 163, 22, 23, 59, 163, 26, 19,
- /* 290 */ 117, 118, 175, 109, 24, 59, 92, 93, 94, 95,
- /* 300 */ 96, 97, 98, 99, 100, 101, 102, 268, 184, 185,
- /* 310 */ 269, 127, 128, 43, 44, 45, 46, 47, 48, 49,
- /* 320 */ 50, 51, 52, 53, 54, 55, 56, 57, 157, 158,
- /* 330 */ 159, 160, 105, 106, 107, 163, 165, 59, 167, 184,
- /* 340 */ 90, 105, 106, 107, 108, 174, 73, 111, 112, 113,
- /* 350 */ 19, 22, 163, 91, 81, 163, 106, 121, 81, 132,
- /* 360 */ 110, 16, 92, 93, 94, 95, 96, 97, 98, 99,
- /* 370 */ 100, 101, 102, 184, 185, 255, 98, 206, 207, 26,
- /* 380 */ 101, 102, 19, 105, 106, 107, 23, 198, 59, 116,
- /* 390 */ 219, 141, 142, 143, 24, 163, 187, 205, 274, 275,
- /* 400 */ 127, 128, 182, 232, 127, 128, 43, 44, 45, 46,
- /* 410 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 420 */ 57, 158, 77, 160, 79, 59, 26, 182, 165, 59,
- /* 430 */ 167, 199, 261, 102, 105, 106, 107, 174, 72, 108,
- /* 440 */ 109, 110, 111, 112, 113, 114, 59, 238, 239, 240,
- /* 450 */ 123, 120, 125, 126, 163, 92, 93, 94, 95, 96,
- /* 460 */ 97, 98, 99, 100, 101, 102, 163, 163, 163, 206,
- /* 470 */ 207, 105, 106, 107, 254, 19, 106, 90, 197, 23,
- /* 480 */ 127, 128, 219, 238, 239, 240, 22, 184, 185, 184,
- /* 490 */ 185, 22, 105, 106, 149, 232, 205, 110, 163, 43,
- /* 500 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 510 */ 54, 55, 56, 57, 98, 99, 100, 101, 102, 184,
- /* 520 */ 185, 163, 53, 59, 261, 220, 117, 118, 141, 142,
- /* 530 */ 143, 131, 174, 59, 229, 116, 117, 118, 163, 59,
- /* 540 */ 163, 163, 184, 185, 59, 242, 72, 22, 92, 93,
- /* 550 */ 94, 95, 96, 97, 98, 99, 100, 101, 102, 184,
- /* 560 */ 185, 24, 184, 185, 206, 207, 202, 203, 19, 105,
- /* 570 */ 106, 107, 23, 198, 22, 174, 198, 219, 220, 105,
- /* 580 */ 106, 107, 96, 97, 59, 105, 106, 107, 22, 174,
- /* 590 */ 59, 106, 43, 44, 45, 46, 47, 48, 49, 50,
- /* 600 */ 51, 52, 53, 54, 55, 56, 57, 206, 207, 12,
- /* 610 */ 108, 59, 132, 111, 112, 113, 46, 47, 48, 49,
- /* 620 */ 219, 206, 207, 121, 27, 59, 163, 141, 207, 143,
- /* 630 */ 105, 106, 107, 163, 219, 234, 105, 106, 107, 42,
- /* 640 */ 219, 92, 93, 94, 95, 96, 97, 98, 99, 100,
- /* 650 */ 101, 102, 76, 163, 184, 185, 163, 105, 106, 107,
- /* 660 */ 63, 19, 86, 163, 163, 23, 163, 130, 205, 21,
- /* 670 */ 73, 105, 106, 107, 184, 185, 163, 184, 185, 237,
- /* 680 */ 110, 180, 181, 180, 181, 43, 44, 45, 46, 47,
+ /* 0 */ 174, 226, 227, 228, 226, 227, 228, 172, 145, 146,
+ /* 10 */ 147, 148, 149, 150, 153, 169, 170, 171, 155, 19,
+ /* 20 */ 157, 246, 192, 193, 177, 181, 182, 164, 169, 170,
+ /* 30 */ 171, 31, 164, 153, 190, 174, 175, 187, 153, 39,
+ /* 40 */ 7, 8, 9, 43, 44, 45, 46, 47, 48, 49,
+ /* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 174, 196,
+ /* 60 */ 197, 226, 227, 228, 196, 197, 46, 47, 48, 49,
+ /* 70 */ 209, 208, 19, 226, 227, 228, 208, 174, 177, 26,
+ /* 80 */ 195, 213, 214, 22, 221, 85, 86, 87, 88, 89,
+ /* 90 */ 90, 91, 92, 93, 94, 95, 43, 44, 45, 46,
+ /* 100 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ /* 110 */ 57, 172, 249, 153, 53, 153, 147, 148, 149, 150,
+ /* 120 */ 22, 23, 69, 103, 155, 19, 157, 226, 227, 228,
+ /* 130 */ 94, 95, 247, 164, 174, 175, 174, 175, 85, 86,
+ /* 140 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 43,
+ /* 150 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ /* 160 */ 54, 55, 56, 57, 153, 196, 197, 153, 153, 209,
+ /* 170 */ 210, 209, 210, 67, 95, 161, 237, 208, 19, 165,
+ /* 180 */ 165, 242, 84, 24, 91, 92, 93, 94, 95, 223,
+ /* 190 */ 221, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 200 */ 94, 95, 43, 44, 45, 46, 47, 48, 49, 50,
+ /* 210 */ 51, 52, 53, 54, 55, 56, 57, 153, 249, 85,
+ /* 220 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ /* 230 */ 219, 19, 109, 110, 111, 23, 89, 90, 91, 92,
+ /* 240 */ 93, 94, 95, 73, 85, 86, 87, 88, 89, 90,
+ /* 250 */ 91, 92, 93, 94, 95, 43, 44, 45, 46, 47,
+ /* 260 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ /* 270 */ 153, 22, 23, 101, 173, 26, 104, 105, 106, 109,
+ /* 280 */ 110, 111, 181, 11, 19, 59, 114, 73, 23, 110,
+ /* 290 */ 111, 174, 175, 116, 80, 118, 119, 85, 86, 87,
+ /* 300 */ 88, 89, 90, 91, 92, 93, 94, 95, 43, 44,
+ /* 310 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ /* 320 */ 55, 56, 57, 109, 98, 99, 100, 101, 83, 153,
+ /* 330 */ 104, 105, 106, 84, 120, 121, 153, 19, 192, 193,
+ /* 340 */ 114, 23, 89, 90, 99, 59, 23, 230, 103, 26,
+ /* 350 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ /* 360 */ 95, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ /* 370 */ 52, 53, 54, 55, 56, 57, 153, 91, 153, 134,
+ /* 380 */ 135, 136, 110, 111, 98, 99, 100, 134, 153, 136,
+ /* 390 */ 19, 22, 23, 26, 23, 26, 80, 174, 175, 174,
+ /* 400 */ 175, 59, 219, 85, 86, 87, 88, 89, 90, 91,
+ /* 410 */ 92, 93, 94, 95, 43, 44, 45, 46, 47, 48,
+ /* 420 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 16,
+ /* 430 */ 153, 22, 209, 210, 209, 210, 120, 121, 196, 197,
+ /* 440 */ 98, 99, 100, 19, 46, 22, 23, 23, 252, 253,
+ /* 450 */ 208, 174, 175, 84, 219, 153, 85, 86, 87, 88,
+ /* 460 */ 89, 90, 91, 92, 93, 94, 95, 43, 44, 45,
+ /* 470 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+ /* 480 */ 56, 57, 153, 153, 153, 153, 209, 120, 121, 76,
+ /* 490 */ 153, 78, 109, 110, 111, 97, 19, 153, 89, 90,
+ /* 500 */ 198, 59, 183, 174, 175, 174, 175, 84, 153, 85,
+ /* 510 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ /* 520 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 530 */ 53, 54, 55, 56, 57, 16, 197, 153, 22, 153,
+ /* 540 */ 153, 99, 198, 12, 153, 196, 197, 208, 153, 153,
+ /* 550 */ 195, 183, 19, 23, 222, 142, 26, 208, 27, 153,
+ /* 560 */ 174, 175, 85, 86, 87, 88, 89, 90, 91, 92,
+ /* 570 */ 93, 94, 95, 42, 188, 59, 43, 44, 45, 46,
+ /* 580 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ /* 590 */ 57, 195, 22, 198, 63, 76, 153, 78, 167, 168,
+ /* 600 */ 153, 195, 167, 168, 73, 153, 222, 153, 19, 222,
+ /* 610 */ 153, 220, 153, 24, 98, 99, 100, 140, 85, 86,
+ /* 620 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 59,
+ /* 630 */ 100, 153, 43, 44, 45, 46, 47, 48, 49, 50,
+ /* 640 */ 51, 52, 53, 54, 55, 56, 57, 195, 153, 195,
+ /* 650 */ 153, 153, 174, 175, 26, 125, 120, 121, 153, 213,
+ /* 660 */ 214, 19, 153, 220, 153, 153, 188, 220, 98, 99,
+ /* 670 */ 100, 174, 175, 140, 85, 86, 87, 88, 89, 90,
+ /* 680 */ 91, 92, 93, 94, 95, 43, 44, 45, 46, 47,
/* 690 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 700 */ 174, 163, 163, 22, 23, 163, 163, 26, 22, 23,
- /* 710 */ 220, 29, 73, 220, 272, 33, 22, 163, 24, 19,
- /* 720 */ 174, 208, 259, 184, 185, 19, 184, 185, 80, 175,
- /* 730 */ 230, 174, 206, 207, 92, 93, 94, 95, 96, 97,
- /* 740 */ 98, 99, 100, 101, 102, 219, 46, 65, 247, 195,
- /* 750 */ 247, 197, 206, 207, 19, 116, 117, 118, 23, 220,
- /* 760 */ 112, 174, 220, 206, 207, 219, 22, 174, 24, 174,
- /* 770 */ 22, 23, 91, 264, 265, 168, 219, 91, 43, 44,
- /* 780 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
- /* 790 */ 55, 56, 57, 206, 207, 12, 163, 149, 255, 206,
- /* 800 */ 207, 206, 207, 59, 104, 23, 219, 163, 26, 163,
- /* 810 */ 27, 105, 219, 163, 219, 163, 211, 184, 185, 163,
- /* 820 */ 120, 163, 146, 163, 148, 42, 221, 92, 93, 94,
- /* 830 */ 95, 96, 97, 98, 99, 100, 101, 102, 163, 91,
- /* 840 */ 184, 185, 184, 185, 184, 185, 63, 19, 163, 205,
- /* 850 */ 106, 23, 245, 163, 208, 248, 116, 117, 118, 184,
- /* 860 */ 185, 163, 163, 7, 8, 9, 163, 19, 26, 184,
- /* 870 */ 185, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- /* 880 */ 52, 53, 54, 55, 56, 57, 163, 184, 185, 107,
- /* 890 */ 163, 43, 44, 45, 46, 47, 48, 49, 50, 51,
- /* 900 */ 52, 53, 54, 55, 56, 57, 208, 255, 177, 178,
- /* 910 */ 163, 184, 185, 163, 132, 163, 141, 163, 143, 22,
- /* 920 */ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- /* 930 */ 102, 184, 185, 163, 184, 185, 184, 185, 184, 185,
- /* 940 */ 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
- /* 950 */ 102, 163, 163, 163, 184, 185, 163, 115, 163, 163,
- /* 960 */ 163, 163, 15, 163, 163, 163, 163, 163, 23, 163,
- /* 970 */ 163, 26, 184, 185, 184, 185, 163, 184, 185, 184,
- /* 980 */ 185, 184, 185, 163, 184, 185, 184, 185, 184, 185,
- /* 990 */ 184, 185, 163, 96, 97, 147, 163, 184, 185, 163,
- /* 1000 */ 199, 163, 163, 205, 184, 185, 163, 60, 163, 141,
- /* 1010 */ 163, 143, 163, 184, 185, 19, 163, 184, 185, 230,
- /* 1020 */ 184, 185, 184, 185, 206, 207, 230, 184, 185, 184,
- /* 1030 */ 185, 184, 185, 184, 185, 19, 163, 219, 231, 43,
- /* 1040 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 1050 */ 54, 55, 56, 57, 163, 26, 163, 184, 185, 43,
- /* 1060 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 1070 */ 54, 55, 56, 57, 163, 184, 185, 184, 185, 163,
- /* 1080 */ 182, 163, 163, 163, 163, 163, 22, 163, 92, 93,
- /* 1090 */ 94, 95, 96, 97, 98, 99, 100, 101, 102, 163,
- /* 1100 */ 184, 185, 184, 185, 163, 184, 185, 163, 92, 93,
- /* 1110 */ 94, 95, 96, 97, 98, 99, 100, 101, 102, 163,
- /* 1120 */ 184, 185, 98, 59, 163, 184, 185, 205, 184, 185,
- /* 1130 */ 23, 206, 207, 26, 163, 26, 107, 153, 154, 237,
- /* 1140 */ 184, 185, 231, 147, 219, 184, 185, 249, 124, 127,
- /* 1150 */ 128, 231, 254, 129, 163, 231, 177, 178, 262, 263,
- /* 1160 */ 118, 132, 19, 19, 46, 223, 224, 31, 24, 23,
- /* 1170 */ 106, 124, 26, 22, 272, 39, 129, 23, 109, 110,
- /* 1180 */ 26, 163, 140, 19, 22, 234, 59, 43, 44, 45,
- /* 1190 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- /* 1200 */ 56, 57, 231, 7, 8, 193, 59, 43, 44, 45,
- /* 1210 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- /* 1220 */ 56, 57, 104, 61, 23, 23, 23, 26, 26, 26,
- /* 1230 */ 163, 23, 23, 106, 26, 26, 92, 93, 94, 95,
- /* 1240 */ 96, 97, 98, 99, 100, 101, 102, 138, 105, 23,
- /* 1250 */ 59, 23, 26, 106, 26, 163, 92, 93, 94, 95,
- /* 1260 */ 96, 97, 98, 99, 100, 101, 102, 110, 23, 23,
- /* 1270 */ 23, 26, 26, 26, 163, 163, 19, 120, 163, 163,
- /* 1280 */ 163, 130, 163, 163, 163, 163, 163, 163, 163, 193,
- /* 1290 */ 193, 163, 163, 163, 163, 225, 19, 106, 163, 222,
- /* 1300 */ 163, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 1310 */ 53, 54, 55, 56, 57, 163, 163, 203, 163, 163,
- /* 1320 */ 222, 163, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 1330 */ 53, 54, 55, 56, 57, 163, 163, 163, 163, 163,
- /* 1340 */ 251, 250, 209, 19, 20, 182, 22, 161, 222, 92,
- /* 1350 */ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
- /* 1360 */ 36, 222, 222, 260, 226, 188, 256, 226, 187, 92,
- /* 1370 */ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
- /* 1380 */ 210, 213, 213, 59, 213, 196, 192, 187, 256, 244,
- /* 1390 */ 212, 187, 226, 19, 20, 71, 22, 210, 166, 60,
- /* 1400 */ 130, 170, 260, 170, 38, 81, 257, 257, 170, 104,
- /* 1410 */ 36, 22, 43, 201, 90, 236, 138, 235, 213, 18,
- /* 1420 */ 96, 97, 48, 204, 204, 204, 204, 103, 170, 105,
- /* 1430 */ 106, 107, 18, 59, 110, 169, 213, 213, 201, 170,
- /* 1440 */ 201, 169, 236, 213, 146, 71, 235, 62, 253, 252,
- /* 1450 */ 170, 127, 128, 169, 22, 170, 82, 189, 169, 104,
- /* 1460 */ 170, 87, 169, 189, 90, 141, 142, 143, 144, 145,
- /* 1470 */ 96, 97, 186, 186, 186, 64, 194, 103, 186, 105,
- /* 1480 */ 106, 107, 115, 189, 110, 188, 186, 186, 19, 20,
- /* 1490 */ 194, 22, 186, 189, 102, 246, 246, 189, 133, 228,
- /* 1500 */ 104, 228, 227, 227, 170, 36, 134, 228, 227, 19,
- /* 1510 */ 20, 228, 22, 84, 271, 141, 142, 143, 144, 145,
- /* 1520 */ 0, 1, 2, 216, 22, 5, 36, 137, 59, 227,
- /* 1530 */ 10, 11, 12, 13, 14, 217, 269, 17, 216, 22,
- /* 1540 */ 71, 170, 243, 146, 241, 217, 136, 215, 135, 59,
- /* 1550 */ 30, 82, 32, 25, 214, 213, 87, 173, 26, 90,
- /* 1560 */ 40, 71, 13, 172, 164, 96, 97, 164, 6, 162,
- /* 1570 */ 162, 162, 103, 263, 105, 106, 107, 266, 266, 110,
- /* 1580 */ 90, 176, 176, 190, 182, 190, 96, 97, 98, 4,
- /* 1590 */ 70, 176, 3, 103, 182, 105, 106, 107, 78, 182,
- /* 1600 */ 110, 81, 182, 182, 182, 182, 182, 151, 88, 22,
- /* 1610 */ 141, 142, 143, 144, 145, 15, 89, 16, 23, 23,
- /* 1620 */ 128, 19, 20, 139, 22, 119, 131, 24, 20, 133,
- /* 1630 */ 16, 141, 142, 143, 144, 145, 1, 140, 36, 131,
- /* 1640 */ 119, 61, 122, 37, 139, 53, 53, 127, 128, 119,
- /* 1650 */ 53, 53, 105, 34, 130, 1, 5, 104, 22, 149,
- /* 1660 */ 26, 59, 68, 75, 41, 130, 24, 68, 104, 20,
- /* 1670 */ 150, 19, 120, 71, 114, 22, 67, 22, 22, 67,
- /* 1680 */ 23, 22, 22, 67, 82, 37, 28, 23, 138, 87,
- /* 1690 */ 22, 153, 90, 23, 23, 26, 23, 22, 96, 97,
- /* 1700 */ 24, 23, 22, 24, 130, 103, 23, 105, 106, 107,
- /* 1710 */ 1, 2, 110, 23, 5, 105, 34, 22, 132, 10,
- /* 1720 */ 11, 12, 13, 14, 26, 34, 17, 34, 85, 83,
- /* 1730 */ 44, 19, 20, 23, 22, 24, 75, 34, 23, 30,
- /* 1740 */ 26, 32, 26, 141, 142, 143, 144, 145, 36, 40,
- /* 1750 */ 23, 23, 23, 23, 11, 23, 22, 26, 22, 22,
- /* 1760 */ 22, 19, 20, 23, 22, 26, 15, 23, 22, 124,
- /* 1770 */ 130, 59, 23, 1, 130, 277, 277, 130, 36, 70,
- /* 1780 */ 130, 277, 277, 71, 277, 277, 277, 78, 277, 277,
- /* 1790 */ 81, 277, 277, 277, 277, 277, 277, 88, 277, 277,
- /* 1800 */ 277, 59, 90, 277, 277, 277, 277, 277, 96, 97,
- /* 1810 */ 277, 277, 277, 71, 277, 103, 277, 105, 106, 107,
- /* 1820 */ 277, 277, 110, 277, 277, 277, 277, 277, 277, 277,
- /* 1830 */ 277, 122, 90, 277, 277, 277, 127, 128, 96, 97,
- /* 1840 */ 277, 277, 277, 277, 277, 103, 277, 105, 106, 107,
- /* 1850 */ 277, 277, 110, 141, 142, 143, 144, 145, 277, 150,
- /* 1860 */ 277, 277, 277, 5, 277, 277, 277, 277, 10, 11,
- /* 1870 */ 12, 13, 14, 277, 277, 17, 277, 277, 277, 277,
- /* 1880 */ 277, 277, 277, 141, 142, 143, 144, 145, 30, 277,
- /* 1890 */ 32, 277, 277, 277, 277, 277, 277, 277, 40, 277,
- /* 1900 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 1910 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 1920 */ 277, 277, 277, 277, 277, 277, 277, 277, 70, 277,
- /* 1930 */ 277, 277, 277, 277, 277, 277, 78, 277, 277, 81,
- /* 1940 */ 277, 277, 277, 277, 277, 277, 88, 277, 277, 277,
- /* 1950 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 1960 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 1970 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 1980 */ 122, 277, 277, 277, 277, 127, 128, 277, 277, 277,
- /* 1990 */ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
- /* 2000 */ 277, 277, 277, 277, 277, 277, 277, 277, 150, 277,
- /* 2010 */ 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ /* 700 */ 243, 189, 243, 198, 172, 250, 251, 117, 31, 201,
+ /* 710 */ 26, 139, 122, 141, 19, 220, 39, 29, 220, 211,
+ /* 720 */ 24, 33, 153, 164, 153, 164, 19, 85, 86, 87,
+ /* 730 */ 88, 89, 90, 91, 92, 93, 94, 95, 43, 44,
+ /* 740 */ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ /* 750 */ 55, 56, 57, 65, 243, 196, 197, 196, 197, 131,
+ /* 760 */ 189, 22, 103, 24, 153, 23, 19, 208, 26, 208,
+ /* 770 */ 102, 103, 113, 23, 242, 22, 26, 134, 164, 136,
+ /* 780 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ /* 790 */ 95, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 800 */ 53, 54, 55, 56, 57, 98, 153, 153, 124, 153,
+ /* 810 */ 196, 197, 23, 23, 61, 26, 26, 19, 23, 123,
+ /* 820 */ 23, 26, 208, 26, 7, 8, 153, 22, 174, 175,
+ /* 830 */ 174, 175, 85, 86, 87, 88, 89, 90, 91, 92,
+ /* 840 */ 93, 94, 95, 45, 46, 47, 48, 49, 50, 51,
+ /* 850 */ 52, 53, 54, 55, 56, 57, 19, 20, 59, 22,
+ /* 860 */ 111, 59, 164, 23, 59, 23, 26, 153, 26, 59,
+ /* 870 */ 153, 72, 23, 36, 72, 26, 35, 23, 59, 134,
+ /* 880 */ 26, 136, 133, 85, 86, 87, 88, 89, 90, 91,
+ /* 890 */ 92, 93, 94, 95, 196, 197, 59, 98, 99, 100,
+ /* 900 */ 98, 99, 100, 98, 99, 100, 208, 66, 71, 99,
+ /* 910 */ 54, 55, 56, 57, 58, 74, 153, 80, 99, 19,
+ /* 920 */ 83, 223, 23, 26, 153, 26, 89, 90, 54, 55,
+ /* 930 */ 56, 57, 153, 96, 153, 98, 99, 100, 22, 153,
+ /* 940 */ 103, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+ /* 950 */ 94, 95, 183, 112, 158, 174, 175, 120, 121, 85,
+ /* 960 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ /* 970 */ 215, 134, 135, 136, 137, 138, 0, 1, 2, 23,
+ /* 980 */ 21, 5, 26, 153, 22, 59, 10, 11, 12, 13,
+ /* 990 */ 14, 1, 2, 17, 212, 5, 153, 153, 98, 153,
+ /* 1000 */ 10, 11, 12, 13, 14, 108, 30, 17, 32, 193,
+ /* 1010 */ 153, 59, 153, 153, 170, 171, 40, 174, 175, 153,
+ /* 1020 */ 30, 59, 32, 19, 20, 99, 22, 170, 171, 233,
+ /* 1030 */ 40, 188, 236, 174, 175, 153, 153, 153, 79, 123,
+ /* 1040 */ 36, 89, 90, 153, 153, 153, 70, 188, 153, 97,
+ /* 1050 */ 98, 99, 100, 77, 102, 153, 80, 81, 174, 175,
+ /* 1060 */ 70, 99, 110, 59, 105, 174, 175, 77, 153, 238,
+ /* 1070 */ 80, 81, 188, 19, 20, 71, 22, 153, 153, 235,
+ /* 1080 */ 19, 22, 164, 24, 59, 153, 134, 83, 136, 153,
+ /* 1090 */ 36, 115, 235, 89, 90, 91, 120, 121, 153, 153,
+ /* 1100 */ 96, 142, 98, 99, 100, 115, 59, 103, 83, 239,
+ /* 1110 */ 120, 121, 199, 59, 196, 197, 153, 153, 59, 143,
+ /* 1120 */ 174, 175, 153, 98, 99, 71, 208, 153, 103, 165,
+ /* 1130 */ 153, 19, 20, 143, 22, 153, 153, 83, 134, 135,
+ /* 1140 */ 136, 137, 138, 89, 90, 98, 99, 100, 36, 185,
+ /* 1150 */ 96, 187, 98, 99, 100, 91, 95, 103, 99, 134,
+ /* 1160 */ 135, 136, 101, 102, 103, 104, 105, 106, 107, 153,
+ /* 1170 */ 26, 59, 125, 164, 113, 153, 153, 153, 212, 12,
+ /* 1180 */ 19, 117, 153, 71, 153, 212, 122, 164, 134, 135,
+ /* 1190 */ 136, 137, 138, 212, 27, 83, 212, 174, 175, 59,
+ /* 1200 */ 200, 89, 90, 174, 175, 196, 197, 46, 96, 42,
+ /* 1210 */ 98, 99, 100, 172, 151, 103, 5, 208, 203, 196,
+ /* 1220 */ 197, 10, 11, 12, 13, 14, 216, 216, 17, 244,
+ /* 1230 */ 63, 208, 209, 210, 153, 80, 24, 203, 98, 99,
+ /* 1240 */ 100, 30, 22, 32, 100, 164, 134, 135, 136, 137,
+ /* 1250 */ 138, 40, 148, 164, 150, 174, 175, 102, 97, 155,
+ /* 1260 */ 203, 157, 164, 244, 178, 125, 186, 182, 164, 125,
+ /* 1270 */ 177, 59, 177, 177, 113, 120, 121, 196, 197, 59,
+ /* 1280 */ 232, 70, 153, 216, 202, 196, 197, 153, 77, 208,
+ /* 1290 */ 209, 80, 81, 156, 196, 197, 60, 208, 248, 200,
+ /* 1300 */ 196, 197, 153, 174, 175, 123, 208, 153, 174, 175,
+ /* 1310 */ 160, 99, 208, 153, 38, 153, 160, 153, 98, 99,
+ /* 1320 */ 100, 153, 245, 174, 175, 221, 115, 153, 174, 175,
+ /* 1330 */ 153, 120, 121, 245, 174, 175, 174, 175, 174, 175,
+ /* 1340 */ 160, 153, 174, 175, 153, 225, 153, 22, 174, 175,
+ /* 1350 */ 97, 174, 175, 249, 143, 153, 224, 153, 43, 153,
+ /* 1360 */ 191, 153, 174, 175, 18, 174, 175, 174, 175, 153,
+ /* 1370 */ 131, 153, 194, 203, 153, 194, 174, 175, 174, 175,
+ /* 1380 */ 174, 175, 174, 175, 153, 160, 153, 18, 153, 194,
+ /* 1390 */ 174, 175, 174, 175, 153, 174, 175, 153, 225, 153,
+ /* 1400 */ 203, 153, 159, 153, 194, 174, 175, 174, 175, 174,
+ /* 1410 */ 175, 153, 203, 153, 224, 174, 175, 191, 174, 175,
+ /* 1420 */ 174, 175, 174, 175, 174, 175, 203, 160, 153, 191,
+ /* 1430 */ 153, 159, 174, 175, 174, 175, 153, 139, 62, 153,
+ /* 1440 */ 241, 153, 160, 153, 159, 153, 22, 240, 179, 174,
+ /* 1450 */ 175, 174, 175, 160, 159, 97, 160, 174, 175, 159,
+ /* 1460 */ 174, 175, 174, 175, 174, 175, 174, 175, 179, 64,
+ /* 1470 */ 176, 184, 108, 176, 95, 176, 234, 126, 176, 234,
+ /* 1480 */ 179, 178, 176, 176, 176, 97, 218, 217, 184, 179,
+ /* 1490 */ 179, 218, 218, 160, 22, 217, 217, 160, 218, 139,
+ /* 1500 */ 229, 217, 130, 129, 127, 25, 128, 163, 26, 162,
+ /* 1510 */ 13, 206, 231, 154, 6, 154, 207, 205, 204, 203,
+ /* 1520 */ 152, 166, 152, 152, 172, 172, 172, 4, 172, 166,
+ /* 1530 */ 180, 166, 3, 22, 172, 144, 15, 180, 172, 172,
+ /* 1540 */ 82, 16, 23, 23, 121, 254, 132, 172, 112, 124,
+ /* 1550 */ 24, 20, 126, 16, 1, 124, 112, 61, 53, 37,
+ /* 1560 */ 133, 132, 53, 53, 112, 53, 98, 254, 251, 34,
+ /* 1570 */ 123, 1, 5, 22, 97, 142, 26, 75, 123, 41,
+ /* 1580 */ 97, 68, 68, 24, 20, 19, 113, 22, 107, 28,
+ /* 1590 */ 22, 67, 23, 22, 22, 67, 22, 67, 23, 37,
+ /* 1600 */ 23, 23, 26, 23, 22, 24, 23, 22, 24, 123,
+ /* 1610 */ 23, 23, 22, 98, 125, 26, 11, 23, 26, 23,
+ /* 1620 */ 34, 23, 23, 23, 23, 34, 22, 34, 26, 22,
+ /* 1630 */ 22, 15, 23, 23, 22, 117, 23, 22, 1, 123,
+ /* 1640 */ 26, 23, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1650 */ 123, 255, 255, 255, 255, 123, 123, 255, 255, 255,
+ /* 1660 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1670 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1680 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1690 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1700 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1710 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1720 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1730 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1740 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1750 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1760 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1770 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1780 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1790 */ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ /* 1800 */ 255, 255,
};
-#define YY_SHIFT_COUNT (520)
+#define YY_SHIFT_COUNT (489)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (1858)
+#define YY_SHIFT_MAX (1637)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 1709, 1520, 1858, 1324, 1324, 277, 1374, 1469, 1602, 1712,
- /* 10 */ 1712, 1712, 273, 0, 0, 113, 1016, 1712, 1712, 1712,
- /* 20 */ 1712, 1712, 1712, 1712, 1712, 1712, 1712, 11, 11, 236,
- /* 30 */ 184, 277, 277, 277, 277, 277, 277, 93, 177, 270,
- /* 40 */ 363, 456, 549, 642, 735, 828, 848, 996, 1144, 1016,
- /* 50 */ 1016, 1016, 1016, 1016, 1016, 1016, 1016, 1016, 1016, 1016,
- /* 60 */ 1016, 1016, 1016, 1016, 1016, 1016, 1164, 1016, 1257, 1277,
- /* 70 */ 1277, 1490, 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712,
- /* 80 */ 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712,
- /* 90 */ 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712, 1712,
- /* 100 */ 1712, 1712, 1712, 1742, 1712, 1712, 1712, 1712, 1712, 1712,
- /* 110 */ 1712, 1712, 1712, 1712, 1712, 1712, 1712, 143, 162, 162,
- /* 120 */ 162, 162, 162, 204, 151, 416, 531, 648, 700, 531,
- /* 130 */ 486, 486, 531, 353, 353, 353, 353, 409, 279, 53,
- /* 140 */ 2009, 2009, 331, 331, 331, 329, 366, 329, 329, 597,
- /* 150 */ 597, 464, 474, 262, 681, 531, 531, 531, 531, 531,
- /* 160 */ 531, 531, 531, 531, 531, 531, 531, 531, 531, 531,
- /* 170 */ 531, 531, 531, 531, 531, 531, 531, 173, 485, 984,
- /* 180 */ 984, 576, 485, 19, 1022, 2009, 2009, 2009, 387, 250,
- /* 190 */ 250, 525, 502, 278, 552, 227, 480, 566, 531, 531,
- /* 200 */ 531, 531, 531, 531, 531, 531, 531, 531, 639, 531,
- /* 210 */ 531, 531, 531, 531, 531, 531, 531, 531, 531, 531,
- /* 220 */ 531, 2, 2, 2, 531, 531, 531, 531, 782, 531,
- /* 230 */ 531, 531, 744, 531, 531, 783, 531, 531, 531, 531,
- /* 240 */ 531, 531, 531, 531, 419, 682, 327, 370, 370, 370,
- /* 250 */ 370, 1029, 327, 327, 1024, 897, 856, 947, 1109, 706,
- /* 260 */ 706, 1143, 1109, 1109, 1143, 842, 945, 1118, 1136, 1136,
- /* 270 */ 1136, 706, 676, 400, 1047, 694, 1339, 1270, 1270, 1366,
- /* 280 */ 1366, 1270, 1305, 1389, 1369, 1278, 1401, 1401, 1401, 1401,
- /* 290 */ 1270, 1414, 1278, 1278, 1305, 1389, 1369, 1369, 1278, 1270,
- /* 300 */ 1414, 1298, 1385, 1270, 1414, 1432, 1270, 1414, 1270, 1414,
- /* 310 */ 1432, 1355, 1355, 1355, 1411, 1432, 1355, 1367, 1355, 1411,
- /* 320 */ 1355, 1355, 1432, 1392, 1392, 1432, 1365, 1396, 1365, 1396,
- /* 330 */ 1365, 1396, 1365, 1396, 1270, 1372, 1429, 1502, 1390, 1372,
- /* 340 */ 1517, 1270, 1397, 1390, 1410, 1413, 1278, 1528, 1532, 1549,
- /* 350 */ 1549, 1562, 1562, 1562, 2009, 2009, 2009, 2009, 2009, 2009,
- /* 360 */ 2009, 2009, 2009, 2009, 2009, 2009, 2009, 2009, 2009, 2009,
- /* 370 */ 570, 345, 686, 748, 50, 740, 1064, 1107, 469, 537,
- /* 380 */ 1042, 1146, 1162, 1154, 1201, 1202, 1203, 1208, 1209, 1127,
- /* 390 */ 1069, 1196, 1157, 1147, 1226, 1228, 1245, 775, 868, 1246,
- /* 400 */ 1247, 1191, 1151, 1585, 1589, 1587, 1456, 1600, 1527, 1601,
- /* 410 */ 1595, 1596, 1492, 1484, 1506, 1603, 1495, 1608, 1496, 1614,
- /* 420 */ 1635, 1508, 1497, 1521, 1580, 1606, 1505, 1592, 1593, 1597,
- /* 430 */ 1598, 1530, 1547, 1619, 1524, 1654, 1651, 1636, 1553, 1510,
- /* 440 */ 1594, 1634, 1599, 1588, 1623, 1535, 1564, 1642, 1649, 1652,
- /* 450 */ 1552, 1560, 1653, 1609, 1655, 1656, 1657, 1659, 1612, 1658,
- /* 460 */ 1660, 1616, 1648, 1664, 1550, 1668, 1538, 1670, 1671, 1669,
- /* 470 */ 1673, 1675, 1676, 1678, 1680, 1679, 1574, 1683, 1690, 1610,
- /* 480 */ 1682, 1695, 1586, 1698, 1691, 1698, 1693, 1643, 1661, 1646,
- /* 490 */ 1686, 1710, 1711, 1714, 1716, 1703, 1715, 1698, 1727, 1728,
- /* 500 */ 1729, 1730, 1731, 1732, 1734, 1743, 1736, 1737, 1740, 1744,
- /* 510 */ 1738, 1746, 1739, 1645, 1640, 1644, 1647, 1650, 1749, 1751,
- /* 520 */ 1772,
+ /* 0 */ 990, 976, 1211, 837, 837, 316, 1054, 1054, 1054, 1054,
+ /* 10 */ 214, 0, 0, 106, 642, 1054, 1054, 1054, 1054, 1054,
+ /* 20 */ 1054, 1054, 1054, 952, 952, 226, 1155, 316, 316, 316,
+ /* 30 */ 316, 316, 316, 53, 159, 212, 265, 318, 371, 424,
+ /* 40 */ 477, 533, 589, 642, 642, 642, 642, 642, 642, 642,
+ /* 50 */ 642, 642, 642, 642, 642, 642, 642, 642, 642, 642,
+ /* 60 */ 695, 642, 747, 798, 798, 1004, 1054, 1054, 1054, 1054,
+ /* 70 */ 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054,
+ /* 80 */ 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054,
+ /* 90 */ 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1112, 1054, 1054,
+ /* 100 */ 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054, 1054,
+ /* 110 */ 1054, 856, 874, 874, 874, 874, 874, 134, 147, 93,
+ /* 120 */ 342, 959, 1161, 253, 253, 342, 367, 367, 367, 367,
+ /* 130 */ 179, 36, 79, 1657, 1657, 1657, 1061, 1061, 1061, 516,
+ /* 140 */ 799, 516, 516, 531, 531, 802, 249, 369, 342, 342,
+ /* 150 */ 342, 342, 342, 342, 342, 342, 342, 342, 342, 342,
+ /* 160 */ 342, 342, 342, 342, 342, 342, 342, 342, 342, 272,
+ /* 170 */ 442, 442, 536, 1657, 1657, 1657, 1025, 245, 245, 570,
+ /* 180 */ 172, 286, 805, 1047, 1140, 1220, 342, 342, 342, 342,
+ /* 190 */ 342, 342, 342, 342, 170, 342, 342, 342, 342, 342,
+ /* 200 */ 342, 342, 342, 342, 342, 342, 342, 841, 841, 841,
+ /* 210 */ 342, 342, 342, 342, 530, 342, 342, 342, 1059, 342,
+ /* 220 */ 342, 1167, 342, 342, 342, 342, 342, 342, 342, 342,
+ /* 230 */ 123, 688, 177, 1212, 1212, 1212, 1212, 1144, 177, 177,
+ /* 240 */ 1064, 409, 33, 628, 707, 707, 900, 628, 628, 900,
+ /* 250 */ 897, 323, 398, 677, 677, 677, 707, 572, 684, 590,
+ /* 260 */ 739, 1236, 1182, 1182, 1276, 1276, 1182, 1253, 1325, 1315,
+ /* 270 */ 1239, 1346, 1346, 1346, 1346, 1182, 1369, 1239, 1239, 1253,
+ /* 280 */ 1325, 1315, 1315, 1239, 1182, 1369, 1298, 1376, 1182, 1369,
+ /* 290 */ 1424, 1182, 1369, 1182, 1369, 1424, 1358, 1358, 1358, 1405,
+ /* 300 */ 1424, 1358, 1364, 1358, 1405, 1358, 1358, 1424, 1379, 1379,
+ /* 310 */ 1424, 1351, 1388, 1351, 1388, 1351, 1388, 1351, 1388, 1182,
+ /* 320 */ 1472, 1182, 1360, 1372, 1377, 1374, 1378, 1239, 1480, 1482,
+ /* 330 */ 1497, 1497, 1508, 1508, 1508, 1657, 1657, 1657, 1657, 1657,
+ /* 340 */ 1657, 1657, 1657, 1657, 1657, 1657, 1657, 1657, 1657, 1657,
+ /* 350 */ 1657, 20, 413, 98, 423, 519, 383, 962, 742, 61,
+ /* 360 */ 696, 749, 750, 753, 789, 790, 795, 797, 840, 842,
+ /* 370 */ 810, 668, 817, 659, 819, 849, 854, 899, 643, 745,
+ /* 380 */ 956, 926, 916, 1523, 1529, 1511, 1391, 1521, 1458, 1525,
+ /* 390 */ 1519, 1520, 1423, 1414, 1436, 1526, 1425, 1531, 1426, 1537,
+ /* 400 */ 1553, 1431, 1427, 1444, 1496, 1522, 1429, 1505, 1509, 1510,
+ /* 410 */ 1512, 1452, 1468, 1535, 1447, 1570, 1567, 1551, 1477, 1433,
+ /* 420 */ 1513, 1550, 1514, 1502, 1538, 1455, 1483, 1559, 1564, 1566,
+ /* 430 */ 1473, 1481, 1565, 1524, 1568, 1571, 1569, 1572, 1528, 1561,
+ /* 440 */ 1574, 1530, 1562, 1575, 1577, 1578, 1576, 1580, 1582, 1581,
+ /* 450 */ 1583, 1585, 1584, 1486, 1587, 1588, 1515, 1586, 1590, 1489,
+ /* 460 */ 1589, 1591, 1592, 1593, 1594, 1596, 1598, 1589, 1599, 1600,
+ /* 470 */ 1602, 1601, 1604, 1605, 1607, 1608, 1609, 1610, 1612, 1613,
+ /* 480 */ 1615, 1614, 1518, 1516, 1527, 1532, 1533, 1618, 1616, 1637,
};
-#define YY_REDUCE_COUNT (369)
-#define YY_REDUCE_MIN (-237)
-#define YY_REDUCE_MAX (1424)
+#define YY_REDUCE_COUNT (350)
+#define YY_REDUCE_MIN (-225)
+#define YY_REDUCE_MAX (1375)
static const short yy_reduce_ofst[] = {
- /* 0 */ -147, 171, 263, -96, 358, -144, -149, -102, 124, -156,
- /* 10 */ -98, 305, 401, -57, 209, -237, 245, -94, -79, 189,
- /* 20 */ 375, 490, 493, 378, 303, 539, 542, 501, 503, 554,
- /* 30 */ 415, 526, 546, 557, 587, 593, 595, -234, -234, -234,
- /* 40 */ -234, -234, -234, -234, -234, -234, -234, -234, -234, -234,
- /* 50 */ -234, -234, -234, -234, -234, -234, -234, -234, -234, -234,
- /* 60 */ -234, -234, -234, -234, -234, -234, -234, -234, -234, -234,
- /* 70 */ -234, -50, 335, 470, 633, 656, 658, 660, 675, 685,
- /* 80 */ 703, 727, 747, 750, 752, 754, 770, 788, 790, 793,
- /* 90 */ 795, 797, 800, 802, 804, 806, 813, 820, 829, 833,
- /* 100 */ 836, 838, 843, 845, 847, 849, 873, 891, 893, 916,
- /* 110 */ 918, 921, 936, 941, 944, 956, 961, -234, -234, -234,
- /* 120 */ -234, -234, -234, -234, -234, -234, 463, 607, -176, 14,
- /* 130 */ -139, 87, -137, 818, 925, 818, 925, 898, -234, -234,
- /* 140 */ -234, -234, -166, -166, -166, -130, -131, -82, -54, -180,
- /* 150 */ 364, 41, 513, 509, 509, 117, 500, 789, 796, 646,
- /* 160 */ 192, 291, 644, 798, 120, 807, 543, 911, 920, 652,
- /* 170 */ 924, 922, 232, 698, 801, 971, 39, 220, 731, 442,
- /* 180 */ 902, -199, 979, -43, 421, 896, 942, 605, -184, -126,
- /* 190 */ 155, 172, 281, 304, 377, 538, 650, 690, 699, 723,
- /* 200 */ 803, 839, 853, 919, 991, 1018, 1067, 1092, 951, 1111,
- /* 210 */ 1112, 1115, 1116, 1117, 1119, 1120, 1121, 1122, 1123, 1124,
- /* 220 */ 1125, 1012, 1096, 1097, 1128, 1129, 1130, 1131, 1070, 1135,
- /* 230 */ 1137, 1152, 1077, 1153, 1155, 1114, 1156, 304, 1158, 1172,
- /* 240 */ 1173, 1174, 1175, 1176, 1089, 1091, 1133, 1098, 1126, 1139,
- /* 250 */ 1140, 1070, 1133, 1133, 1170, 1163, 1186, 1103, 1168, 1138,
- /* 260 */ 1141, 1110, 1169, 1171, 1132, 1177, 1189, 1194, 1181, 1200,
- /* 270 */ 1204, 1166, 1145, 1178, 1187, 1232, 1142, 1231, 1233, 1149,
- /* 280 */ 1150, 1238, 1179, 1182, 1212, 1205, 1219, 1220, 1221, 1222,
- /* 290 */ 1258, 1266, 1223, 1224, 1206, 1211, 1237, 1239, 1230, 1269,
- /* 300 */ 1272, 1195, 1197, 1280, 1284, 1268, 1285, 1289, 1290, 1293,
- /* 310 */ 1274, 1286, 1287, 1288, 1282, 1294, 1292, 1297, 1300, 1296,
- /* 320 */ 1301, 1306, 1304, 1249, 1250, 1308, 1271, 1275, 1273, 1276,
- /* 330 */ 1279, 1281, 1283, 1302, 1334, 1307, 1243, 1267, 1318, 1322,
- /* 340 */ 1303, 1371, 1299, 1328, 1332, 1340, 1342, 1384, 1391, 1400,
- /* 350 */ 1403, 1407, 1408, 1409, 1311, 1312, 1310, 1405, 1402, 1412,
- /* 360 */ 1417, 1420, 1406, 1393, 1395, 1421, 1422, 1423, 1424, 1415,
+ /* 0 */ -137, -31, 1104, 1023, 1081, -132, -40, -38, 223, 225,
+ /* 10 */ 698, -153, -99, -225, -165, 386, 478, 843, 859, -139,
+ /* 20 */ 884, 117, 277, 844, 857, 964, 559, 561, 614, 918,
+ /* 30 */ 1009, 1089, 1098, -222, -222, -222, -222, -222, -222, -222,
+ /* 40 */ -222, -222, -222, -222, -222, -222, -222, -222, -222, -222,
+ /* 50 */ -222, -222, -222, -222, -222, -222, -222, -222, -222, -222,
+ /* 60 */ -222, -222, -222, -222, -222, 329, 331, 497, 654, 656,
+ /* 70 */ 781, 891, 946, 1029, 1129, 1134, 1149, 1154, 1160, 1162,
+ /* 80 */ 1164, 1168, 1174, 1177, 1188, 1191, 1193, 1202, 1204, 1206,
+ /* 90 */ 1208, 1216, 1218, 1221, 1231, 1233, 1235, 1241, 1244, 1246,
+ /* 100 */ 1248, 1250, 1258, 1260, 1275, 1277, 1283, 1286, 1288, 1290,
+ /* 110 */ 1292, -222, -222, -222, -222, -222, -222, -222, -222, -222,
+ /* 120 */ -115, 796, -156, -154, -141, 14, 242, 349, 242, 349,
+ /* 130 */ -61, -222, -222, -222, -222, -222, 101, 101, 101, 332,
+ /* 140 */ 302, 384, 387, -170, 146, 344, 196, 196, 15, 11,
+ /* 150 */ 183, 235, 395, 355, 396, 406, 452, 457, 391, 459,
+ /* 160 */ 443, 447, 511, 495, 454, 512, 505, 571, 498, 532,
+ /* 170 */ 431, 435, 339, 455, 446, 508, -174, -116, -97, -120,
+ /* 180 */ -150, 64, 176, 330, 337, 509, 569, 611, 653, 673,
+ /* 190 */ 714, 717, 763, 771, -34, 779, 786, 830, 846, 860,
+ /* 200 */ 866, 882, 883, 890, 892, 895, 902, 319, 368, 769,
+ /* 210 */ 915, 924, 925, 932, 755, 936, 945, 963, 782, 969,
+ /* 220 */ 974, 816, 977, 64, 982, 983, 1016, 1022, 1024, 1031,
+ /* 230 */ 870, 831, 913, 966, 973, 981, 984, 755, 913, 913,
+ /* 240 */ 1000, 1041, 1063, 1015, 1010, 1011, 985, 1034, 1057, 1019,
+ /* 250 */ 1086, 1080, 1085, 1093, 1095, 1096, 1067, 1048, 1082, 1099,
+ /* 260 */ 1137, 1050, 1150, 1156, 1077, 1088, 1180, 1120, 1132, 1169,
+ /* 270 */ 1170, 1178, 1181, 1195, 1210, 1225, 1243, 1197, 1209, 1173,
+ /* 280 */ 1190, 1226, 1238, 1223, 1267, 1272, 1199, 1207, 1282, 1285,
+ /* 290 */ 1269, 1293, 1295, 1296, 1300, 1289, 1294, 1297, 1299, 1287,
+ /* 300 */ 1301, 1302, 1303, 1306, 1304, 1307, 1308, 1310, 1242, 1245,
+ /* 310 */ 1311, 1268, 1270, 1273, 1278, 1274, 1279, 1280, 1284, 1333,
+ /* 320 */ 1271, 1337, 1281, 1309, 1305, 1312, 1314, 1316, 1344, 1347,
+ /* 330 */ 1359, 1361, 1368, 1370, 1371, 1291, 1313, 1317, 1355, 1352,
+ /* 340 */ 1353, 1354, 1356, 1363, 1350, 1357, 1362, 1366, 1367, 1375,
+ /* 350 */ 1365,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1492, 1492, 1492, 1340, 1123, 1229, 1123, 1123, 1123, 1340,
- /* 10 */ 1340, 1340, 1123, 1259, 1259, 1391, 1154, 1123, 1123, 1123,
- /* 20 */ 1123, 1123, 1123, 1123, 1339, 1123, 1123, 1123, 1123, 1123,
- /* 30 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1265, 1123,
- /* 40 */ 1123, 1123, 1123, 1123, 1341, 1342, 1123, 1123, 1123, 1390,
- /* 50 */ 1392, 1275, 1274, 1273, 1272, 1373, 1246, 1270, 1263, 1267,
- /* 60 */ 1335, 1336, 1334, 1338, 1342, 1341, 1123, 1266, 1306, 1320,
- /* 70 */ 1305, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 80 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 90 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 100 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 110 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1314, 1319, 1325,
- /* 120 */ 1318, 1315, 1308, 1307, 1309, 1310, 1123, 1144, 1193, 1123,
- /* 130 */ 1123, 1123, 1123, 1409, 1408, 1123, 1123, 1154, 1311, 1312,
- /* 140 */ 1322, 1321, 1398, 1448, 1447, 1123, 1123, 1123, 1123, 1123,
- /* 150 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 160 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 170 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1154, 1150, 1300,
- /* 180 */ 1299, 1418, 1150, 1253, 1123, 1404, 1229, 1220, 1123, 1123,
- /* 190 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 200 */ 1123, 1395, 1393, 1123, 1355, 1123, 1123, 1123, 1123, 1123,
- /* 210 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 220 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 230 */ 1123, 1123, 1225, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 240 */ 1123, 1123, 1123, 1442, 1123, 1368, 1207, 1225, 1225, 1225,
- /* 250 */ 1225, 1227, 1208, 1206, 1219, 1154, 1130, 1484, 1269, 1248,
- /* 260 */ 1248, 1481, 1269, 1269, 1481, 1168, 1462, 1165, 1259, 1259,
- /* 270 */ 1259, 1248, 1337, 1226, 1219, 1123, 1484, 1234, 1234, 1483,
- /* 280 */ 1483, 1234, 1278, 1284, 1196, 1269, 1202, 1202, 1202, 1202,
- /* 290 */ 1234, 1141, 1269, 1269, 1278, 1284, 1196, 1196, 1269, 1234,
- /* 300 */ 1141, 1372, 1478, 1234, 1141, 1348, 1234, 1141, 1234, 1141,
- /* 310 */ 1348, 1194, 1194, 1194, 1183, 1348, 1194, 1168, 1194, 1183,
- /* 320 */ 1194, 1194, 1348, 1352, 1352, 1348, 1252, 1247, 1252, 1247,
- /* 330 */ 1252, 1247, 1252, 1247, 1234, 1253, 1417, 1123, 1264, 1253,
- /* 340 */ 1343, 1234, 1123, 1264, 1262, 1260, 1269, 1147, 1186, 1445,
- /* 350 */ 1445, 1441, 1441, 1441, 1489, 1489, 1404, 1457, 1154, 1154,
- /* 360 */ 1154, 1154, 1457, 1170, 1170, 1154, 1154, 1154, 1154, 1457,
- /* 370 */ 1123, 1123, 1123, 1123, 1123, 1123, 1452, 1123, 1357, 1238,
- /* 380 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 390 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 400 */ 1123, 1123, 1289, 1123, 1126, 1401, 1123, 1123, 1399, 1123,
- /* 410 */ 1123, 1123, 1123, 1123, 1123, 1239, 1123, 1123, 1123, 1123,
- /* 420 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 430 */ 1123, 1123, 1123, 1123, 1480, 1123, 1123, 1123, 1123, 1123,
- /* 440 */ 1123, 1371, 1370, 1123, 1123, 1236, 1123, 1123, 1123, 1123,
- /* 450 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 460 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 470 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 480 */ 1123, 1123, 1123, 1261, 1123, 1416, 1123, 1123, 1123, 1123,
- /* 490 */ 1123, 1123, 1123, 1430, 1254, 1123, 1123, 1471, 1123, 1123,
- /* 500 */ 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123, 1123,
- /* 510 */ 1123, 1123, 1466, 1210, 1291, 1123, 1290, 1294, 1123, 1135,
- /* 520 */ 1123,
+ /* 0 */ 1389, 1389, 1389, 1261, 1046, 1151, 1261, 1261, 1261, 1261,
+ /* 10 */ 1046, 1181, 1181, 1312, 1077, 1046, 1046, 1046, 1046, 1046,
+ /* 20 */ 1046, 1260, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 30 */ 1046, 1046, 1046, 1187, 1046, 1046, 1046, 1046, 1262, 1263,
+ /* 40 */ 1046, 1046, 1046, 1311, 1313, 1197, 1196, 1195, 1194, 1294,
+ /* 50 */ 1168, 1192, 1185, 1189, 1256, 1257, 1255, 1259, 1262, 1263,
+ /* 60 */ 1046, 1188, 1226, 1240, 1225, 1046, 1046, 1046, 1046, 1046,
+ /* 70 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 80 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 90 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 100 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 110 */ 1046, 1234, 1239, 1246, 1238, 1235, 1228, 1227, 1229, 1230,
+ /* 120 */ 1046, 1067, 1116, 1046, 1046, 1046, 1329, 1328, 1046, 1046,
+ /* 130 */ 1077, 1231, 1232, 1243, 1242, 1241, 1319, 1345, 1344, 1046,
+ /* 140 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 150 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 160 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1077,
+ /* 170 */ 1073, 1073, 1046, 1324, 1151, 1142, 1046, 1046, 1046, 1046,
+ /* 180 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1316, 1314, 1046,
+ /* 190 */ 1276, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 200 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 210 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1147, 1046,
+ /* 220 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1339,
+ /* 230 */ 1046, 1289, 1130, 1147, 1147, 1147, 1147, 1149, 1131, 1129,
+ /* 240 */ 1141, 1077, 1053, 1191, 1170, 1170, 1378, 1191, 1191, 1378,
+ /* 250 */ 1091, 1359, 1088, 1181, 1181, 1181, 1170, 1258, 1148, 1141,
+ /* 260 */ 1046, 1381, 1156, 1156, 1380, 1380, 1156, 1200, 1206, 1119,
+ /* 270 */ 1191, 1125, 1125, 1125, 1125, 1156, 1064, 1191, 1191, 1200,
+ /* 280 */ 1206, 1119, 1119, 1191, 1156, 1064, 1293, 1375, 1156, 1064,
+ /* 290 */ 1269, 1156, 1064, 1156, 1064, 1269, 1117, 1117, 1117, 1106,
+ /* 300 */ 1269, 1117, 1091, 1117, 1106, 1117, 1117, 1269, 1273, 1273,
+ /* 310 */ 1269, 1174, 1169, 1174, 1169, 1174, 1169, 1174, 1169, 1156,
+ /* 320 */ 1264, 1156, 1046, 1186, 1175, 1184, 1182, 1191, 1070, 1109,
+ /* 330 */ 1342, 1342, 1338, 1338, 1338, 1386, 1386, 1324, 1354, 1077,
+ /* 340 */ 1077, 1077, 1077, 1354, 1093, 1093, 1077, 1077, 1077, 1077,
+ /* 350 */ 1354, 1046, 1046, 1046, 1046, 1046, 1046, 1349, 1046, 1278,
+ /* 360 */ 1160, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 370 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 380 */ 1046, 1046, 1211, 1046, 1049, 1321, 1046, 1046, 1320, 1046,
+ /* 390 */ 1046, 1046, 1046, 1046, 1046, 1161, 1046, 1046, 1046, 1046,
+ /* 400 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 410 */ 1046, 1046, 1046, 1046, 1377, 1046, 1046, 1046, 1046, 1046,
+ /* 420 */ 1046, 1292, 1291, 1046, 1046, 1158, 1046, 1046, 1046, 1046,
+ /* 430 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 440 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 450 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 460 */ 1183, 1046, 1176, 1046, 1046, 1046, 1046, 1368, 1046, 1046,
+ /* 470 */ 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
+ /* 480 */ 1046, 1363, 1133, 1213, 1046, 1212, 1216, 1046, 1058, 1046,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -146923,18 +142313,11 @@ static const YYCODETYPE yyFallback[] = {
59, /* REPLACE => ID */
59, /* RESTRICT => ID */
59, /* ROW => ID */
- 59, /* ROWS => ID */
59, /* TRIGGER => ID */
59, /* VACUUM => ID */
59, /* VIEW => ID */
59, /* VIRTUAL => ID */
59, /* WITH => ID */
- 59, /* CURRENT => ID */
- 59, /* FOLLOWING => ID */
- 59, /* PARTITION => ID */
- 59, /* PRECEDING => ID */
- 59, /* RANGE => ID */
- 59, /* UNBOUNDED => ID */
59, /* REINDEX => ID */
59, /* RENAME => ID */
59, /* CTIME_KW => ID */
@@ -147101,207 +142484,185 @@ static const char *const yyTokenName[] = {
/* 73 */ "REPLACE",
/* 74 */ "RESTRICT",
/* 75 */ "ROW",
- /* 76 */ "ROWS",
- /* 77 */ "TRIGGER",
- /* 78 */ "VACUUM",
- /* 79 */ "VIEW",
- /* 80 */ "VIRTUAL",
- /* 81 */ "WITH",
- /* 82 */ "CURRENT",
- /* 83 */ "FOLLOWING",
- /* 84 */ "PARTITION",
- /* 85 */ "PRECEDING",
- /* 86 */ "RANGE",
- /* 87 */ "UNBOUNDED",
- /* 88 */ "REINDEX",
- /* 89 */ "RENAME",
- /* 90 */ "CTIME_KW",
- /* 91 */ "ANY",
- /* 92 */ "BITAND",
- /* 93 */ "BITOR",
- /* 94 */ "LSHIFT",
- /* 95 */ "RSHIFT",
- /* 96 */ "PLUS",
- /* 97 */ "MINUS",
- /* 98 */ "STAR",
- /* 99 */ "SLASH",
- /* 100 */ "REM",
- /* 101 */ "CONCAT",
- /* 102 */ "COLLATE",
- /* 103 */ "BITNOT",
- /* 104 */ "ON",
- /* 105 */ "INDEXED",
- /* 106 */ "STRING",
- /* 107 */ "JOIN_KW",
- /* 108 */ "CONSTRAINT",
- /* 109 */ "DEFAULT",
- /* 110 */ "NULL",
- /* 111 */ "PRIMARY",
- /* 112 */ "UNIQUE",
- /* 113 */ "CHECK",
- /* 114 */ "REFERENCES",
- /* 115 */ "AUTOINCR",
- /* 116 */ "INSERT",
- /* 117 */ "DELETE",
- /* 118 */ "UPDATE",
- /* 119 */ "SET",
- /* 120 */ "DEFERRABLE",
- /* 121 */ "FOREIGN",
- /* 122 */ "DROP",
- /* 123 */ "UNION",
- /* 124 */ "ALL",
- /* 125 */ "EXCEPT",
- /* 126 */ "INTERSECT",
- /* 127 */ "SELECT",
- /* 128 */ "VALUES",
- /* 129 */ "DISTINCT",
- /* 130 */ "DOT",
- /* 131 */ "FROM",
- /* 132 */ "JOIN",
- /* 133 */ "USING",
- /* 134 */ "ORDER",
- /* 135 */ "GROUP",
- /* 136 */ "HAVING",
- /* 137 */ "LIMIT",
- /* 138 */ "WHERE",
- /* 139 */ "INTO",
- /* 140 */ "NOTHING",
- /* 141 */ "FLOAT",
- /* 142 */ "BLOB",
- /* 143 */ "INTEGER",
- /* 144 */ "VARIABLE",
- /* 145 */ "CASE",
- /* 146 */ "WHEN",
- /* 147 */ "THEN",
- /* 148 */ "ELSE",
- /* 149 */ "INDEX",
- /* 150 */ "ALTER",
- /* 151 */ "ADD",
- /* 152 */ "WINDOW",
- /* 153 */ "OVER",
- /* 154 */ "FILTER",
- /* 155 */ "input",
- /* 156 */ "cmdlist",
- /* 157 */ "ecmd",
- /* 158 */ "cmdx",
- /* 159 */ "explain",
- /* 160 */ "cmd",
- /* 161 */ "transtype",
- /* 162 */ "trans_opt",
- /* 163 */ "nm",
- /* 164 */ "savepoint_opt",
- /* 165 */ "create_table",
- /* 166 */ "create_table_args",
- /* 167 */ "createkw",
- /* 168 */ "temp",
- /* 169 */ "ifnotexists",
- /* 170 */ "dbnm",
- /* 171 */ "columnlist",
- /* 172 */ "conslist_opt",
- /* 173 */ "table_options",
- /* 174 */ "select",
- /* 175 */ "columnname",
- /* 176 */ "carglist",
- /* 177 */ "typetoken",
- /* 178 */ "typename",
- /* 179 */ "signed",
- /* 180 */ "plus_num",
- /* 181 */ "minus_num",
- /* 182 */ "scanpt",
- /* 183 */ "ccons",
- /* 184 */ "term",
- /* 185 */ "expr",
- /* 186 */ "onconf",
- /* 187 */ "sortorder",
- /* 188 */ "autoinc",
- /* 189 */ "eidlist_opt",
- /* 190 */ "refargs",
- /* 191 */ "defer_subclause",
- /* 192 */ "refarg",
- /* 193 */ "refact",
- /* 194 */ "init_deferred_pred_opt",
- /* 195 */ "conslist",
- /* 196 */ "tconscomma",
- /* 197 */ "tcons",
- /* 198 */ "sortlist",
- /* 199 */ "eidlist",
- /* 200 */ "defer_subclause_opt",
- /* 201 */ "orconf",
- /* 202 */ "resolvetype",
- /* 203 */ "raisetype",
- /* 204 */ "ifexists",
- /* 205 */ "fullname",
- /* 206 */ "selectnowith",
- /* 207 */ "oneselect",
- /* 208 */ "wqlist",
- /* 209 */ "multiselect_op",
- /* 210 */ "distinct",
- /* 211 */ "selcollist",
- /* 212 */ "from",
- /* 213 */ "where_opt",
- /* 214 */ "groupby_opt",
- /* 215 */ "having_opt",
- /* 216 */ "orderby_opt",
- /* 217 */ "limit_opt",
- /* 218 */ "window_clause",
- /* 219 */ "values",
- /* 220 */ "nexprlist",
- /* 221 */ "sclp",
- /* 222 */ "as",
- /* 223 */ "seltablist",
- /* 224 */ "stl_prefix",
- /* 225 */ "joinop",
- /* 226 */ "indexed_opt",
- /* 227 */ "on_opt",
- /* 228 */ "using_opt",
- /* 229 */ "exprlist",
- /* 230 */ "xfullname",
- /* 231 */ "idlist",
- /* 232 */ "with",
- /* 233 */ "setlist",
- /* 234 */ "insert_cmd",
- /* 235 */ "idlist_opt",
- /* 236 */ "upsert",
- /* 237 */ "over_clause",
- /* 238 */ "likeop",
- /* 239 */ "between_op",
- /* 240 */ "in_op",
- /* 241 */ "paren_exprlist",
- /* 242 */ "case_operand",
- /* 243 */ "case_exprlist",
- /* 244 */ "case_else",
- /* 245 */ "uniqueflag",
- /* 246 */ "collate",
- /* 247 */ "nmnum",
- /* 248 */ "trigger_decl",
- /* 249 */ "trigger_cmd_list",
- /* 250 */ "trigger_time",
- /* 251 */ "trigger_event",
- /* 252 */ "foreach_clause",
- /* 253 */ "when_clause",
- /* 254 */ "trigger_cmd",
- /* 255 */ "trnm",
- /* 256 */ "tridxby",
- /* 257 */ "database_kw_opt",
- /* 258 */ "key_opt",
- /* 259 */ "add_column_fullname",
- /* 260 */ "kwcolumn_opt",
- /* 261 */ "create_vtab",
- /* 262 */ "vtabarglist",
- /* 263 */ "vtabarg",
- /* 264 */ "vtabargtoken",
- /* 265 */ "lp",
- /* 266 */ "anylist",
- /* 267 */ "windowdefn_list",
- /* 268 */ "windowdefn",
- /* 269 */ "window",
- /* 270 */ "frame_opt",
- /* 271 */ "part_opt",
- /* 272 */ "filter_opt",
- /* 273 */ "range_or_rows",
- /* 274 */ "frame_bound",
- /* 275 */ "frame_bound_s",
- /* 276 */ "frame_bound_e",
+ /* 76 */ "TRIGGER",
+ /* 77 */ "VACUUM",
+ /* 78 */ "VIEW",
+ /* 79 */ "VIRTUAL",
+ /* 80 */ "WITH",
+ /* 81 */ "REINDEX",
+ /* 82 */ "RENAME",
+ /* 83 */ "CTIME_KW",
+ /* 84 */ "ANY",
+ /* 85 */ "BITAND",
+ /* 86 */ "BITOR",
+ /* 87 */ "LSHIFT",
+ /* 88 */ "RSHIFT",
+ /* 89 */ "PLUS",
+ /* 90 */ "MINUS",
+ /* 91 */ "STAR",
+ /* 92 */ "SLASH",
+ /* 93 */ "REM",
+ /* 94 */ "CONCAT",
+ /* 95 */ "COLLATE",
+ /* 96 */ "BITNOT",
+ /* 97 */ "ON",
+ /* 98 */ "INDEXED",
+ /* 99 */ "STRING",
+ /* 100 */ "JOIN_KW",
+ /* 101 */ "CONSTRAINT",
+ /* 102 */ "DEFAULT",
+ /* 103 */ "NULL",
+ /* 104 */ "PRIMARY",
+ /* 105 */ "UNIQUE",
+ /* 106 */ "CHECK",
+ /* 107 */ "REFERENCES",
+ /* 108 */ "AUTOINCR",
+ /* 109 */ "INSERT",
+ /* 110 */ "DELETE",
+ /* 111 */ "UPDATE",
+ /* 112 */ "SET",
+ /* 113 */ "DEFERRABLE",
+ /* 114 */ "FOREIGN",
+ /* 115 */ "DROP",
+ /* 116 */ "UNION",
+ /* 117 */ "ALL",
+ /* 118 */ "EXCEPT",
+ /* 119 */ "INTERSECT",
+ /* 120 */ "SELECT",
+ /* 121 */ "VALUES",
+ /* 122 */ "DISTINCT",
+ /* 123 */ "DOT",
+ /* 124 */ "FROM",
+ /* 125 */ "JOIN",
+ /* 126 */ "USING",
+ /* 127 */ "ORDER",
+ /* 128 */ "GROUP",
+ /* 129 */ "HAVING",
+ /* 130 */ "LIMIT",
+ /* 131 */ "WHERE",
+ /* 132 */ "INTO",
+ /* 133 */ "NOTHING",
+ /* 134 */ "FLOAT",
+ /* 135 */ "BLOB",
+ /* 136 */ "INTEGER",
+ /* 137 */ "VARIABLE",
+ /* 138 */ "CASE",
+ /* 139 */ "WHEN",
+ /* 140 */ "THEN",
+ /* 141 */ "ELSE",
+ /* 142 */ "INDEX",
+ /* 143 */ "ALTER",
+ /* 144 */ "ADD",
+ /* 145 */ "input",
+ /* 146 */ "cmdlist",
+ /* 147 */ "ecmd",
+ /* 148 */ "cmdx",
+ /* 149 */ "explain",
+ /* 150 */ "cmd",
+ /* 151 */ "transtype",
+ /* 152 */ "trans_opt",
+ /* 153 */ "nm",
+ /* 154 */ "savepoint_opt",
+ /* 155 */ "create_table",
+ /* 156 */ "create_table_args",
+ /* 157 */ "createkw",
+ /* 158 */ "temp",
+ /* 159 */ "ifnotexists",
+ /* 160 */ "dbnm",
+ /* 161 */ "columnlist",
+ /* 162 */ "conslist_opt",
+ /* 163 */ "table_options",
+ /* 164 */ "select",
+ /* 165 */ "columnname",
+ /* 166 */ "carglist",
+ /* 167 */ "typetoken",
+ /* 168 */ "typename",
+ /* 169 */ "signed",
+ /* 170 */ "plus_num",
+ /* 171 */ "minus_num",
+ /* 172 */ "scanpt",
+ /* 173 */ "ccons",
+ /* 174 */ "term",
+ /* 175 */ "expr",
+ /* 176 */ "onconf",
+ /* 177 */ "sortorder",
+ /* 178 */ "autoinc",
+ /* 179 */ "eidlist_opt",
+ /* 180 */ "refargs",
+ /* 181 */ "defer_subclause",
+ /* 182 */ "refarg",
+ /* 183 */ "refact",
+ /* 184 */ "init_deferred_pred_opt",
+ /* 185 */ "conslist",
+ /* 186 */ "tconscomma",
+ /* 187 */ "tcons",
+ /* 188 */ "sortlist",
+ /* 189 */ "eidlist",
+ /* 190 */ "defer_subclause_opt",
+ /* 191 */ "orconf",
+ /* 192 */ "resolvetype",
+ /* 193 */ "raisetype",
+ /* 194 */ "ifexists",
+ /* 195 */ "fullname",
+ /* 196 */ "selectnowith",
+ /* 197 */ "oneselect",
+ /* 198 */ "wqlist",
+ /* 199 */ "multiselect_op",
+ /* 200 */ "distinct",
+ /* 201 */ "selcollist",
+ /* 202 */ "from",
+ /* 203 */ "where_opt",
+ /* 204 */ "groupby_opt",
+ /* 205 */ "having_opt",
+ /* 206 */ "orderby_opt",
+ /* 207 */ "limit_opt",
+ /* 208 */ "values",
+ /* 209 */ "nexprlist",
+ /* 210 */ "exprlist",
+ /* 211 */ "sclp",
+ /* 212 */ "as",
+ /* 213 */ "seltablist",
+ /* 214 */ "stl_prefix",
+ /* 215 */ "joinop",
+ /* 216 */ "indexed_opt",
+ /* 217 */ "on_opt",
+ /* 218 */ "using_opt",
+ /* 219 */ "xfullname",
+ /* 220 */ "idlist",
+ /* 221 */ "with",
+ /* 222 */ "setlist",
+ /* 223 */ "insert_cmd",
+ /* 224 */ "idlist_opt",
+ /* 225 */ "upsert",
+ /* 226 */ "likeop",
+ /* 227 */ "between_op",
+ /* 228 */ "in_op",
+ /* 229 */ "paren_exprlist",
+ /* 230 */ "case_operand",
+ /* 231 */ "case_exprlist",
+ /* 232 */ "case_else",
+ /* 233 */ "uniqueflag",
+ /* 234 */ "collate",
+ /* 235 */ "nmnum",
+ /* 236 */ "trigger_decl",
+ /* 237 */ "trigger_cmd_list",
+ /* 238 */ "trigger_time",
+ /* 239 */ "trigger_event",
+ /* 240 */ "foreach_clause",
+ /* 241 */ "when_clause",
+ /* 242 */ "trigger_cmd",
+ /* 243 */ "trnm",
+ /* 244 */ "tridxby",
+ /* 245 */ "database_kw_opt",
+ /* 246 */ "key_opt",
+ /* 247 */ "add_column_fullname",
+ /* 248 */ "kwcolumn_opt",
+ /* 249 */ "create_vtab",
+ /* 250 */ "vtabarglist",
+ /* 251 */ "vtabarg",
+ /* 252 */ "vtabargtoken",
+ /* 253 */ "lp",
+ /* 254 */ "anylist",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -147397,285 +142758,259 @@ static const char *const yyRuleName[] = {
/* 85 */ "multiselect_op ::= UNION ALL",
/* 86 */ "multiselect_op ::= EXCEPT|INTERSECT",
/* 87 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt",
- /* 88 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt",
- /* 89 */ "values ::= VALUES LP nexprlist RP",
- /* 90 */ "values ::= values COMMA LP nexprlist RP",
- /* 91 */ "distinct ::= DISTINCT",
- /* 92 */ "distinct ::= ALL",
- /* 93 */ "distinct ::=",
- /* 94 */ "sclp ::=",
- /* 95 */ "selcollist ::= sclp scanpt expr scanpt as",
- /* 96 */ "selcollist ::= sclp scanpt STAR",
- /* 97 */ "selcollist ::= sclp scanpt nm DOT STAR",
- /* 98 */ "as ::= AS nm",
- /* 99 */ "as ::=",
- /* 100 */ "from ::=",
- /* 101 */ "from ::= FROM seltablist",
- /* 102 */ "stl_prefix ::= seltablist joinop",
- /* 103 */ "stl_prefix ::=",
- /* 104 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
- /* 105 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt",
- /* 106 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
- /* 107 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
- /* 108 */ "dbnm ::=",
- /* 109 */ "dbnm ::= DOT nm",
- /* 110 */ "fullname ::= nm",
- /* 111 */ "fullname ::= nm DOT nm",
- /* 112 */ "xfullname ::= nm",
- /* 113 */ "xfullname ::= nm DOT nm",
- /* 114 */ "xfullname ::= nm DOT nm AS nm",
- /* 115 */ "xfullname ::= nm AS nm",
- /* 116 */ "joinop ::= COMMA|JOIN",
- /* 117 */ "joinop ::= JOIN_KW JOIN",
- /* 118 */ "joinop ::= JOIN_KW nm JOIN",
- /* 119 */ "joinop ::= JOIN_KW nm nm JOIN",
- /* 120 */ "on_opt ::= ON expr",
- /* 121 */ "on_opt ::=",
- /* 122 */ "indexed_opt ::=",
- /* 123 */ "indexed_opt ::= INDEXED BY nm",
- /* 124 */ "indexed_opt ::= NOT INDEXED",
- /* 125 */ "using_opt ::= USING LP idlist RP",
- /* 126 */ "using_opt ::=",
- /* 127 */ "orderby_opt ::=",
- /* 128 */ "orderby_opt ::= ORDER BY sortlist",
- /* 129 */ "sortlist ::= sortlist COMMA expr sortorder",
- /* 130 */ "sortlist ::= expr sortorder",
- /* 131 */ "sortorder ::= ASC",
- /* 132 */ "sortorder ::= DESC",
- /* 133 */ "sortorder ::=",
- /* 134 */ "groupby_opt ::=",
- /* 135 */ "groupby_opt ::= GROUP BY nexprlist",
- /* 136 */ "having_opt ::=",
- /* 137 */ "having_opt ::= HAVING expr",
- /* 138 */ "limit_opt ::=",
- /* 139 */ "limit_opt ::= LIMIT expr",
- /* 140 */ "limit_opt ::= LIMIT expr OFFSET expr",
- /* 141 */ "limit_opt ::= LIMIT expr COMMA expr",
- /* 142 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt",
- /* 143 */ "where_opt ::=",
- /* 144 */ "where_opt ::= WHERE expr",
- /* 145 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt",
- /* 146 */ "setlist ::= setlist COMMA nm EQ expr",
- /* 147 */ "setlist ::= setlist COMMA LP idlist RP EQ expr",
- /* 148 */ "setlist ::= nm EQ expr",
- /* 149 */ "setlist ::= LP idlist RP EQ expr",
- /* 150 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert",
- /* 151 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES",
- /* 152 */ "upsert ::=",
- /* 153 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt",
- /* 154 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING",
- /* 155 */ "upsert ::= ON CONFLICT DO NOTHING",
- /* 156 */ "insert_cmd ::= INSERT orconf",
- /* 157 */ "insert_cmd ::= REPLACE",
- /* 158 */ "idlist_opt ::=",
- /* 159 */ "idlist_opt ::= LP idlist RP",
- /* 160 */ "idlist ::= idlist COMMA nm",
- /* 161 */ "idlist ::= nm",
- /* 162 */ "expr ::= LP expr RP",
- /* 163 */ "expr ::= ID|INDEXED",
- /* 164 */ "expr ::= JOIN_KW",
- /* 165 */ "expr ::= nm DOT nm",
- /* 166 */ "expr ::= nm DOT nm DOT nm",
- /* 167 */ "term ::= NULL|FLOAT|BLOB",
- /* 168 */ "term ::= STRING",
- /* 169 */ "term ::= INTEGER",
- /* 170 */ "expr ::= VARIABLE",
- /* 171 */ "expr ::= expr COLLATE ID|STRING",
- /* 172 */ "expr ::= CAST LP expr AS typetoken RP",
- /* 173 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
- /* 174 */ "expr ::= ID|INDEXED LP STAR RP",
- /* 175 */ "expr ::= ID|INDEXED LP distinct exprlist RP over_clause",
- /* 176 */ "expr ::= ID|INDEXED LP STAR RP over_clause",
- /* 177 */ "term ::= CTIME_KW",
- /* 178 */ "expr ::= LP nexprlist COMMA expr RP",
- /* 179 */ "expr ::= expr AND expr",
- /* 180 */ "expr ::= expr OR expr",
- /* 181 */ "expr ::= expr LT|GT|GE|LE expr",
- /* 182 */ "expr ::= expr EQ|NE expr",
- /* 183 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
- /* 184 */ "expr ::= expr PLUS|MINUS expr",
- /* 185 */ "expr ::= expr STAR|SLASH|REM expr",
- /* 186 */ "expr ::= expr CONCAT expr",
- /* 187 */ "likeop ::= NOT LIKE_KW|MATCH",
- /* 188 */ "expr ::= expr likeop expr",
- /* 189 */ "expr ::= expr likeop expr ESCAPE expr",
- /* 190 */ "expr ::= expr ISNULL|NOTNULL",
- /* 191 */ "expr ::= expr NOT NULL",
- /* 192 */ "expr ::= expr IS expr",
- /* 193 */ "expr ::= expr IS NOT expr",
- /* 194 */ "expr ::= NOT expr",
- /* 195 */ "expr ::= BITNOT expr",
- /* 196 */ "expr ::= PLUS|MINUS expr",
- /* 197 */ "between_op ::= BETWEEN",
- /* 198 */ "between_op ::= NOT BETWEEN",
- /* 199 */ "expr ::= expr between_op expr AND expr",
- /* 200 */ "in_op ::= IN",
- /* 201 */ "in_op ::= NOT IN",
- /* 202 */ "expr ::= expr in_op LP exprlist RP",
- /* 203 */ "expr ::= LP select RP",
- /* 204 */ "expr ::= expr in_op LP select RP",
- /* 205 */ "expr ::= expr in_op nm dbnm paren_exprlist",
- /* 206 */ "expr ::= EXISTS LP select RP",
- /* 207 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 208 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 209 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 210 */ "case_else ::= ELSE expr",
- /* 211 */ "case_else ::=",
- /* 212 */ "case_operand ::= expr",
- /* 213 */ "case_operand ::=",
- /* 214 */ "exprlist ::=",
- /* 215 */ "nexprlist ::= nexprlist COMMA expr",
- /* 216 */ "nexprlist ::= expr",
- /* 217 */ "paren_exprlist ::=",
- /* 218 */ "paren_exprlist ::= LP exprlist RP",
- /* 219 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 220 */ "uniqueflag ::= UNIQUE",
- /* 221 */ "uniqueflag ::=",
- /* 222 */ "eidlist_opt ::=",
- /* 223 */ "eidlist_opt ::= LP eidlist RP",
- /* 224 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 225 */ "eidlist ::= nm collate sortorder",
- /* 226 */ "collate ::=",
- /* 227 */ "collate ::= COLLATE ID|STRING",
- /* 228 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 229 */ "cmd ::= VACUUM",
- /* 230 */ "cmd ::= VACUUM nm",
- /* 231 */ "cmd ::= PRAGMA nm dbnm",
- /* 232 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 233 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 234 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 235 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 236 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 237 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 238 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 239 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 240 */ "trigger_time ::= BEFORE|AFTER",
- /* 241 */ "trigger_time ::= INSTEAD OF",
- /* 242 */ "trigger_time ::=",
- /* 243 */ "trigger_event ::= DELETE|INSERT",
- /* 244 */ "trigger_event ::= UPDATE",
- /* 245 */ "trigger_event ::= UPDATE OF idlist",
- /* 246 */ "when_clause ::=",
- /* 247 */ "when_clause ::= WHEN expr",
- /* 248 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 249 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 250 */ "trnm ::= nm DOT nm",
- /* 251 */ "tridxby ::= INDEXED BY nm",
- /* 252 */ "tridxby ::= NOT INDEXED",
- /* 253 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt",
- /* 254 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
- /* 255 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
- /* 256 */ "trigger_cmd ::= scanpt select scanpt",
- /* 257 */ "expr ::= RAISE LP IGNORE RP",
- /* 258 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 259 */ "raisetype ::= ROLLBACK",
- /* 260 */ "raisetype ::= ABORT",
- /* 261 */ "raisetype ::= FAIL",
- /* 262 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 263 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 264 */ "cmd ::= DETACH database_kw_opt expr",
- /* 265 */ "key_opt ::=",
- /* 266 */ "key_opt ::= KEY expr",
- /* 267 */ "cmd ::= REINDEX",
- /* 268 */ "cmd ::= REINDEX nm dbnm",
- /* 269 */ "cmd ::= ANALYZE",
- /* 270 */ "cmd ::= ANALYZE nm dbnm",
- /* 271 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 272 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
- /* 273 */ "add_column_fullname ::= fullname",
- /* 274 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
- /* 275 */ "cmd ::= create_vtab",
- /* 276 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 277 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 278 */ "vtabarg ::=",
- /* 279 */ "vtabargtoken ::= ANY",
- /* 280 */ "vtabargtoken ::= lp anylist RP",
- /* 281 */ "lp ::= LP",
- /* 282 */ "with ::= WITH wqlist",
- /* 283 */ "with ::= WITH RECURSIVE wqlist",
- /* 284 */ "wqlist ::= nm eidlist_opt AS LP select RP",
- /* 285 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
- /* 286 */ "windowdefn_list ::= windowdefn",
- /* 287 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
- /* 288 */ "windowdefn ::= nm AS window",
- /* 289 */ "window ::= LP part_opt orderby_opt frame_opt RP",
- /* 290 */ "part_opt ::= PARTITION BY nexprlist",
- /* 291 */ "part_opt ::=",
- /* 292 */ "frame_opt ::=",
- /* 293 */ "frame_opt ::= range_or_rows frame_bound_s",
- /* 294 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e",
- /* 295 */ "range_or_rows ::= RANGE",
- /* 296 */ "range_or_rows ::= ROWS",
- /* 297 */ "frame_bound_s ::= frame_bound",
- /* 298 */ "frame_bound_s ::= UNBOUNDED PRECEDING",
- /* 299 */ "frame_bound_e ::= frame_bound",
- /* 300 */ "frame_bound_e ::= UNBOUNDED FOLLOWING",
- /* 301 */ "frame_bound ::= expr PRECEDING",
- /* 302 */ "frame_bound ::= CURRENT ROW",
- /* 303 */ "frame_bound ::= expr FOLLOWING",
- /* 304 */ "window_clause ::= WINDOW windowdefn_list",
- /* 305 */ "over_clause ::= filter_opt OVER window",
- /* 306 */ "over_clause ::= filter_opt OVER nm",
- /* 307 */ "filter_opt ::=",
- /* 308 */ "filter_opt ::= FILTER LP WHERE expr RP",
- /* 309 */ "input ::= cmdlist",
- /* 310 */ "cmdlist ::= cmdlist ecmd",
- /* 311 */ "cmdlist ::= ecmd",
- /* 312 */ "ecmd ::= SEMI",
- /* 313 */ "ecmd ::= cmdx SEMI",
- /* 314 */ "ecmd ::= explain cmdx",
- /* 315 */ "trans_opt ::=",
- /* 316 */ "trans_opt ::= TRANSACTION",
- /* 317 */ "trans_opt ::= TRANSACTION nm",
- /* 318 */ "savepoint_opt ::= SAVEPOINT",
- /* 319 */ "savepoint_opt ::=",
- /* 320 */ "cmd ::= create_table create_table_args",
- /* 321 */ "columnlist ::= columnlist COMMA columnname carglist",
- /* 322 */ "columnlist ::= columnname carglist",
- /* 323 */ "nm ::= ID|INDEXED",
- /* 324 */ "nm ::= STRING",
- /* 325 */ "nm ::= JOIN_KW",
- /* 326 */ "typetoken ::= typename",
- /* 327 */ "typename ::= ID|STRING",
- /* 328 */ "signed ::= plus_num",
- /* 329 */ "signed ::= minus_num",
- /* 330 */ "carglist ::= carglist ccons",
- /* 331 */ "carglist ::=",
- /* 332 */ "ccons ::= NULL onconf",
- /* 333 */ "conslist_opt ::= COMMA conslist",
- /* 334 */ "conslist ::= conslist tconscomma tcons",
- /* 335 */ "conslist ::= tcons",
- /* 336 */ "tconscomma ::=",
- /* 337 */ "defer_subclause_opt ::= defer_subclause",
- /* 338 */ "resolvetype ::= raisetype",
- /* 339 */ "selectnowith ::= oneselect",
- /* 340 */ "oneselect ::= values",
- /* 341 */ "sclp ::= selcollist COMMA",
- /* 342 */ "as ::= ID|STRING",
- /* 343 */ "expr ::= term",
- /* 344 */ "likeop ::= LIKE_KW|MATCH",
- /* 345 */ "exprlist ::= nexprlist",
- /* 346 */ "nmnum ::= plus_num",
- /* 347 */ "nmnum ::= nm",
- /* 348 */ "nmnum ::= ON",
- /* 349 */ "nmnum ::= DELETE",
- /* 350 */ "nmnum ::= DEFAULT",
- /* 351 */ "plus_num ::= INTEGER|FLOAT",
- /* 352 */ "foreach_clause ::=",
- /* 353 */ "foreach_clause ::= FOR EACH ROW",
- /* 354 */ "trnm ::= nm",
- /* 355 */ "tridxby ::=",
- /* 356 */ "database_kw_opt ::= DATABASE",
- /* 357 */ "database_kw_opt ::=",
- /* 358 */ "kwcolumn_opt ::=",
- /* 359 */ "kwcolumn_opt ::= COLUMNKW",
- /* 360 */ "vtabarglist ::= vtabarg",
- /* 361 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
- /* 362 */ "vtabarg ::= vtabarg vtabargtoken",
- /* 363 */ "anylist ::=",
- /* 364 */ "anylist ::= anylist LP anylist RP",
- /* 365 */ "anylist ::= anylist ANY",
- /* 366 */ "with ::=",
+ /* 88 */ "values ::= VALUES LP nexprlist RP",
+ /* 89 */ "values ::= values COMMA LP exprlist RP",
+ /* 90 */ "distinct ::= DISTINCT",
+ /* 91 */ "distinct ::= ALL",
+ /* 92 */ "distinct ::=",
+ /* 93 */ "sclp ::=",
+ /* 94 */ "selcollist ::= sclp scanpt expr scanpt as",
+ /* 95 */ "selcollist ::= sclp scanpt STAR",
+ /* 96 */ "selcollist ::= sclp scanpt nm DOT STAR",
+ /* 97 */ "as ::= AS nm",
+ /* 98 */ "as ::=",
+ /* 99 */ "from ::=",
+ /* 100 */ "from ::= FROM seltablist",
+ /* 101 */ "stl_prefix ::= seltablist joinop",
+ /* 102 */ "stl_prefix ::=",
+ /* 103 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
+ /* 104 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt",
+ /* 105 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
+ /* 106 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
+ /* 107 */ "dbnm ::=",
+ /* 108 */ "dbnm ::= DOT nm",
+ /* 109 */ "fullname ::= nm",
+ /* 110 */ "fullname ::= nm DOT nm",
+ /* 111 */ "xfullname ::= nm",
+ /* 112 */ "xfullname ::= nm DOT nm",
+ /* 113 */ "xfullname ::= nm DOT nm AS nm",
+ /* 114 */ "xfullname ::= nm AS nm",
+ /* 115 */ "joinop ::= COMMA|JOIN",
+ /* 116 */ "joinop ::= JOIN_KW JOIN",
+ /* 117 */ "joinop ::= JOIN_KW nm JOIN",
+ /* 118 */ "joinop ::= JOIN_KW nm nm JOIN",
+ /* 119 */ "on_opt ::= ON expr",
+ /* 120 */ "on_opt ::=",
+ /* 121 */ "indexed_opt ::=",
+ /* 122 */ "indexed_opt ::= INDEXED BY nm",
+ /* 123 */ "indexed_opt ::= NOT INDEXED",
+ /* 124 */ "using_opt ::= USING LP idlist RP",
+ /* 125 */ "using_opt ::=",
+ /* 126 */ "orderby_opt ::=",
+ /* 127 */ "orderby_opt ::= ORDER BY sortlist",
+ /* 128 */ "sortlist ::= sortlist COMMA expr sortorder",
+ /* 129 */ "sortlist ::= expr sortorder",
+ /* 130 */ "sortorder ::= ASC",
+ /* 131 */ "sortorder ::= DESC",
+ /* 132 */ "sortorder ::=",
+ /* 133 */ "groupby_opt ::=",
+ /* 134 */ "groupby_opt ::= GROUP BY nexprlist",
+ /* 135 */ "having_opt ::=",
+ /* 136 */ "having_opt ::= HAVING expr",
+ /* 137 */ "limit_opt ::=",
+ /* 138 */ "limit_opt ::= LIMIT expr",
+ /* 139 */ "limit_opt ::= LIMIT expr OFFSET expr",
+ /* 140 */ "limit_opt ::= LIMIT expr COMMA expr",
+ /* 141 */ "cmd ::= with DELETE FROM xfullname indexed_opt where_opt",
+ /* 142 */ "where_opt ::=",
+ /* 143 */ "where_opt ::= WHERE expr",
+ /* 144 */ "cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt",
+ /* 145 */ "setlist ::= setlist COMMA nm EQ expr",
+ /* 146 */ "setlist ::= setlist COMMA LP idlist RP EQ expr",
+ /* 147 */ "setlist ::= nm EQ expr",
+ /* 148 */ "setlist ::= LP idlist RP EQ expr",
+ /* 149 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert",
+ /* 150 */ "cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES",
+ /* 151 */ "upsert ::=",
+ /* 152 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt",
+ /* 153 */ "upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING",
+ /* 154 */ "upsert ::= ON CONFLICT DO NOTHING",
+ /* 155 */ "insert_cmd ::= INSERT orconf",
+ /* 156 */ "insert_cmd ::= REPLACE",
+ /* 157 */ "idlist_opt ::=",
+ /* 158 */ "idlist_opt ::= LP idlist RP",
+ /* 159 */ "idlist ::= idlist COMMA nm",
+ /* 160 */ "idlist ::= nm",
+ /* 161 */ "expr ::= LP expr RP",
+ /* 162 */ "expr ::= ID|INDEXED",
+ /* 163 */ "expr ::= JOIN_KW",
+ /* 164 */ "expr ::= nm DOT nm",
+ /* 165 */ "expr ::= nm DOT nm DOT nm",
+ /* 166 */ "term ::= NULL|FLOAT|BLOB",
+ /* 167 */ "term ::= STRING",
+ /* 168 */ "term ::= INTEGER",
+ /* 169 */ "expr ::= VARIABLE",
+ /* 170 */ "expr ::= expr COLLATE ID|STRING",
+ /* 171 */ "expr ::= CAST LP expr AS typetoken RP",
+ /* 172 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
+ /* 173 */ "expr ::= ID|INDEXED LP STAR RP",
+ /* 174 */ "term ::= CTIME_KW",
+ /* 175 */ "expr ::= LP nexprlist COMMA expr RP",
+ /* 176 */ "expr ::= expr AND expr",
+ /* 177 */ "expr ::= expr OR expr",
+ /* 178 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 179 */ "expr ::= expr EQ|NE expr",
+ /* 180 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 181 */ "expr ::= expr PLUS|MINUS expr",
+ /* 182 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 183 */ "expr ::= expr CONCAT expr",
+ /* 184 */ "likeop ::= NOT LIKE_KW|MATCH",
+ /* 185 */ "expr ::= expr likeop expr",
+ /* 186 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 187 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 188 */ "expr ::= expr NOT NULL",
+ /* 189 */ "expr ::= expr IS expr",
+ /* 190 */ "expr ::= expr IS NOT expr",
+ /* 191 */ "expr ::= NOT expr",
+ /* 192 */ "expr ::= BITNOT expr",
+ /* 193 */ "expr ::= MINUS expr",
+ /* 194 */ "expr ::= PLUS expr",
+ /* 195 */ "between_op ::= BETWEEN",
+ /* 196 */ "between_op ::= NOT BETWEEN",
+ /* 197 */ "expr ::= expr between_op expr AND expr",
+ /* 198 */ "in_op ::= IN",
+ /* 199 */ "in_op ::= NOT IN",
+ /* 200 */ "expr ::= expr in_op LP exprlist RP",
+ /* 201 */ "expr ::= LP select RP",
+ /* 202 */ "expr ::= expr in_op LP select RP",
+ /* 203 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 204 */ "expr ::= EXISTS LP select RP",
+ /* 205 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 206 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 207 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 208 */ "case_else ::= ELSE expr",
+ /* 209 */ "case_else ::=",
+ /* 210 */ "case_operand ::= expr",
+ /* 211 */ "case_operand ::=",
+ /* 212 */ "exprlist ::=",
+ /* 213 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 214 */ "nexprlist ::= expr",
+ /* 215 */ "paren_exprlist ::=",
+ /* 216 */ "paren_exprlist ::= LP exprlist RP",
+ /* 217 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 218 */ "uniqueflag ::= UNIQUE",
+ /* 219 */ "uniqueflag ::=",
+ /* 220 */ "eidlist_opt ::=",
+ /* 221 */ "eidlist_opt ::= LP eidlist RP",
+ /* 222 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 223 */ "eidlist ::= nm collate sortorder",
+ /* 224 */ "collate ::=",
+ /* 225 */ "collate ::= COLLATE ID|STRING",
+ /* 226 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 227 */ "cmd ::= VACUUM",
+ /* 228 */ "cmd ::= VACUUM nm",
+ /* 229 */ "cmd ::= PRAGMA nm dbnm",
+ /* 230 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 231 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 232 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 233 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 234 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 235 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 236 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 237 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 238 */ "trigger_time ::= BEFORE|AFTER",
+ /* 239 */ "trigger_time ::= INSTEAD OF",
+ /* 240 */ "trigger_time ::=",
+ /* 241 */ "trigger_event ::= DELETE|INSERT",
+ /* 242 */ "trigger_event ::= UPDATE",
+ /* 243 */ "trigger_event ::= UPDATE OF idlist",
+ /* 244 */ "when_clause ::=",
+ /* 245 */ "when_clause ::= WHEN expr",
+ /* 246 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 247 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 248 */ "trnm ::= nm DOT nm",
+ /* 249 */ "tridxby ::= INDEXED BY nm",
+ /* 250 */ "tridxby ::= NOT INDEXED",
+ /* 251 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt",
+ /* 252 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
+ /* 253 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
+ /* 254 */ "trigger_cmd ::= scanpt select scanpt",
+ /* 255 */ "expr ::= RAISE LP IGNORE RP",
+ /* 256 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 257 */ "raisetype ::= ROLLBACK",
+ /* 258 */ "raisetype ::= ABORT",
+ /* 259 */ "raisetype ::= FAIL",
+ /* 260 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 261 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 262 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 263 */ "key_opt ::=",
+ /* 264 */ "key_opt ::= KEY expr",
+ /* 265 */ "cmd ::= REINDEX",
+ /* 266 */ "cmd ::= REINDEX nm dbnm",
+ /* 267 */ "cmd ::= ANALYZE",
+ /* 268 */ "cmd ::= ANALYZE nm dbnm",
+ /* 269 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 270 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 271 */ "add_column_fullname ::= fullname",
+ /* 272 */ "cmd ::= create_vtab",
+ /* 273 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 274 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 275 */ "vtabarg ::=",
+ /* 276 */ "vtabargtoken ::= ANY",
+ /* 277 */ "vtabargtoken ::= lp anylist RP",
+ /* 278 */ "lp ::= LP",
+ /* 279 */ "with ::= WITH wqlist",
+ /* 280 */ "with ::= WITH RECURSIVE wqlist",
+ /* 281 */ "wqlist ::= nm eidlist_opt AS LP select RP",
+ /* 282 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
+ /* 283 */ "input ::= cmdlist",
+ /* 284 */ "cmdlist ::= cmdlist ecmd",
+ /* 285 */ "cmdlist ::= ecmd",
+ /* 286 */ "ecmd ::= SEMI",
+ /* 287 */ "ecmd ::= cmdx SEMI",
+ /* 288 */ "ecmd ::= explain cmdx",
+ /* 289 */ "trans_opt ::=",
+ /* 290 */ "trans_opt ::= TRANSACTION",
+ /* 291 */ "trans_opt ::= TRANSACTION nm",
+ /* 292 */ "savepoint_opt ::= SAVEPOINT",
+ /* 293 */ "savepoint_opt ::=",
+ /* 294 */ "cmd ::= create_table create_table_args",
+ /* 295 */ "columnlist ::= columnlist COMMA columnname carglist",
+ /* 296 */ "columnlist ::= columnname carglist",
+ /* 297 */ "nm ::= ID|INDEXED",
+ /* 298 */ "nm ::= STRING",
+ /* 299 */ "nm ::= JOIN_KW",
+ /* 300 */ "typetoken ::= typename",
+ /* 301 */ "typename ::= ID|STRING",
+ /* 302 */ "signed ::= plus_num",
+ /* 303 */ "signed ::= minus_num",
+ /* 304 */ "carglist ::= carglist ccons",
+ /* 305 */ "carglist ::=",
+ /* 306 */ "ccons ::= NULL onconf",
+ /* 307 */ "conslist_opt ::= COMMA conslist",
+ /* 308 */ "conslist ::= conslist tconscomma tcons",
+ /* 309 */ "conslist ::= tcons",
+ /* 310 */ "tconscomma ::=",
+ /* 311 */ "defer_subclause_opt ::= defer_subclause",
+ /* 312 */ "resolvetype ::= raisetype",
+ /* 313 */ "selectnowith ::= oneselect",
+ /* 314 */ "oneselect ::= values",
+ /* 315 */ "sclp ::= selcollist COMMA",
+ /* 316 */ "as ::= ID|STRING",
+ /* 317 */ "expr ::= term",
+ /* 318 */ "likeop ::= LIKE_KW|MATCH",
+ /* 319 */ "exprlist ::= nexprlist",
+ /* 320 */ "nmnum ::= plus_num",
+ /* 321 */ "nmnum ::= nm",
+ /* 322 */ "nmnum ::= ON",
+ /* 323 */ "nmnum ::= DELETE",
+ /* 324 */ "nmnum ::= DEFAULT",
+ /* 325 */ "plus_num ::= INTEGER|FLOAT",
+ /* 326 */ "foreach_clause ::=",
+ /* 327 */ "foreach_clause ::= FOR EACH ROW",
+ /* 328 */ "trnm ::= nm",
+ /* 329 */ "tridxby ::=",
+ /* 330 */ "database_kw_opt ::= DATABASE",
+ /* 331 */ "database_kw_opt ::=",
+ /* 332 */ "kwcolumn_opt ::=",
+ /* 333 */ "kwcolumn_opt ::= COLUMNKW",
+ /* 334 */ "vtabarglist ::= vtabarg",
+ /* 335 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
+ /* 336 */ "vtabarg ::= vtabarg vtabargtoken",
+ /* 337 */ "anylist ::=",
+ /* 338 */ "anylist ::= anylist LP anylist RP",
+ /* 339 */ "anylist ::= anylist ANY",
+ /* 340 */ "with ::=",
};
#endif /* NDEBUG */
@@ -147801,96 +143136,73 @@ static void yy_destructor(
** inside the C code.
*/
/********* Begin destructor definitions ***************************************/
- case 174: /* select */
- case 206: /* selectnowith */
- case 207: /* oneselect */
- case 219: /* values */
+ case 164: /* select */
+ case 196: /* selectnowith */
+ case 197: /* oneselect */
+ case 208: /* values */
{
-sqlite3SelectDelete(pParse->db, (yypminor->yy489));
+sqlite3SelectDelete(pParse->db, (yypminor->yy399));
}
break;
- case 184: /* term */
- case 185: /* expr */
- case 213: /* where_opt */
- case 215: /* having_opt */
- case 227: /* on_opt */
- case 242: /* case_operand */
- case 244: /* case_else */
- case 253: /* when_clause */
- case 258: /* key_opt */
- case 272: /* filter_opt */
+ case 174: /* term */
+ case 175: /* expr */
+ case 203: /* where_opt */
+ case 205: /* having_opt */
+ case 217: /* on_opt */
+ case 230: /* case_operand */
+ case 232: /* case_else */
+ case 241: /* when_clause */
+ case 246: /* key_opt */
{
-sqlite3ExprDelete(pParse->db, (yypminor->yy18));
+sqlite3ExprDelete(pParse->db, (yypminor->yy182));
}
break;
- case 189: /* eidlist_opt */
- case 198: /* sortlist */
- case 199: /* eidlist */
- case 211: /* selcollist */
- case 214: /* groupby_opt */
- case 216: /* orderby_opt */
- case 220: /* nexprlist */
- case 221: /* sclp */
- case 229: /* exprlist */
- case 233: /* setlist */
- case 241: /* paren_exprlist */
- case 243: /* case_exprlist */
- case 271: /* part_opt */
+ case 179: /* eidlist_opt */
+ case 188: /* sortlist */
+ case 189: /* eidlist */
+ case 201: /* selcollist */
+ case 204: /* groupby_opt */
+ case 206: /* orderby_opt */
+ case 209: /* nexprlist */
+ case 210: /* exprlist */
+ case 211: /* sclp */
+ case 222: /* setlist */
+ case 229: /* paren_exprlist */
+ case 231: /* case_exprlist */
{
-sqlite3ExprListDelete(pParse->db, (yypminor->yy420));
+sqlite3ExprListDelete(pParse->db, (yypminor->yy232));
}
break;
- case 205: /* fullname */
- case 212: /* from */
- case 223: /* seltablist */
- case 224: /* stl_prefix */
- case 230: /* xfullname */
+ case 195: /* fullname */
+ case 202: /* from */
+ case 213: /* seltablist */
+ case 214: /* stl_prefix */
+ case 219: /* xfullname */
{
-sqlite3SrcListDelete(pParse->db, (yypminor->yy135));
+sqlite3SrcListDelete(pParse->db, (yypminor->yy427));
}
break;
- case 208: /* wqlist */
+ case 198: /* wqlist */
{
-sqlite3WithDelete(pParse->db, (yypminor->yy449));
+sqlite3WithDelete(pParse->db, (yypminor->yy91));
}
break;
- case 218: /* window_clause */
- case 267: /* windowdefn_list */
+ case 218: /* using_opt */
+ case 220: /* idlist */
+ case 224: /* idlist_opt */
{
-sqlite3WindowListDelete(pParse->db, (yypminor->yy327));
+sqlite3IdListDelete(pParse->db, (yypminor->yy510));
}
break;
- case 228: /* using_opt */
- case 231: /* idlist */
- case 235: /* idlist_opt */
+ case 237: /* trigger_cmd_list */
+ case 242: /* trigger_cmd */
{
-sqlite3IdListDelete(pParse->db, (yypminor->yy48));
+sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy47));
}
break;
- case 237: /* over_clause */
- case 268: /* windowdefn */
- case 269: /* window */
- case 270: /* frame_opt */
+ case 239: /* trigger_event */
{
-sqlite3WindowDelete(pParse->db, (yypminor->yy327));
-}
- break;
- case 249: /* trigger_cmd_list */
- case 254: /* trigger_cmd */
-{
-sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy207));
-}
- break;
- case 251: /* trigger_event */
-{
-sqlite3IdListDelete(pParse->db, (yypminor->yy34).b);
-}
- break;
- case 274: /* frame_bound */
- case 275: /* frame_bound_s */
- case 276: /* frame_bound_e */
-{
-sqlite3ExprDelete(pParse->db, (yypminor->yy119).pExpr);
+sqlite3IdListDelete(pParse->db, (yypminor->yy300).b);
}
break;
/********* End destructor definitions *****************************************/
@@ -148016,11 +143328,11 @@ static YYACTIONTYPE yy_find_shift_action(
do{
i = yy_shift_ofst[stateno];
assert( i>=0 );
- /* assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD ); */
+ assert( i+YYNTOKEN<=(int)sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) );
assert( iLookAhead!=YYNOCODE );
assert( iLookAhead < YYNTOKEN );
i += iLookAhead;
- if( i>=YY_NLOOKAHEAD || yy_lookahead[i]!=iLookAhead ){
+ if( yy_lookahead[i]!=iLookAhead ){
#ifdef YYFALLBACK
YYCODETYPE iFallback; /* Fallback token */
      if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
       && (iFallback = yyFallback[iLookAhead])!=0 ){
#ifndef NDEBUG
@@ -148071,7 +143382,7 @@ static YYACTIONTYPE yy_find_shift_action(
** Find the appropriate action for a parser given the non-terminal
** look-ahead token iLookAhead.
*/
-static YYACTIONTYPE yy_find_reduce_action(
+static int yy_find_reduce_action(
YYACTIONTYPE stateno, /* Current state number */
YYCODETYPE iLookAhead /* The look-ahead token */
){
@@ -148189,373 +143500,347 @@ static const struct {
YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
signed char nrhs; /* Negative of the number of RHS symbols in the rule */
} yyRuleInfo[] = {
- { 159, -1 }, /* (0) explain ::= EXPLAIN */
- { 159, -3 }, /* (1) explain ::= EXPLAIN QUERY PLAN */
- { 158, -1 }, /* (2) cmdx ::= cmd */
- { 160, -3 }, /* (3) cmd ::= BEGIN transtype trans_opt */
- { 161, 0 }, /* (4) transtype ::= */
- { 161, -1 }, /* (5) transtype ::= DEFERRED */
- { 161, -1 }, /* (6) transtype ::= IMMEDIATE */
- { 161, -1 }, /* (7) transtype ::= EXCLUSIVE */
- { 160, -2 }, /* (8) cmd ::= COMMIT|END trans_opt */
- { 160, -2 }, /* (9) cmd ::= ROLLBACK trans_opt */
- { 160, -2 }, /* (10) cmd ::= SAVEPOINT nm */
- { 160, -3 }, /* (11) cmd ::= RELEASE savepoint_opt nm */
- { 160, -5 }, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
- { 165, -6 }, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
- { 167, -1 }, /* (14) createkw ::= CREATE */
- { 169, 0 }, /* (15) ifnotexists ::= */
- { 169, -3 }, /* (16) ifnotexists ::= IF NOT EXISTS */
- { 168, -1 }, /* (17) temp ::= TEMP */
- { 168, 0 }, /* (18) temp ::= */
- { 166, -5 }, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */
- { 166, -2 }, /* (20) create_table_args ::= AS select */
- { 173, 0 }, /* (21) table_options ::= */
- { 173, -2 }, /* (22) table_options ::= WITHOUT nm */
- { 175, -2 }, /* (23) columnname ::= nm typetoken */
- { 177, 0 }, /* (24) typetoken ::= */
- { 177, -4 }, /* (25) typetoken ::= typename LP signed RP */
- { 177, -6 }, /* (26) typetoken ::= typename LP signed COMMA signed RP */
- { 178, -2 }, /* (27) typename ::= typename ID|STRING */
- { 182, 0 }, /* (28) scanpt ::= */
- { 183, -2 }, /* (29) ccons ::= CONSTRAINT nm */
- { 183, -4 }, /* (30) ccons ::= DEFAULT scanpt term scanpt */
- { 183, -4 }, /* (31) ccons ::= DEFAULT LP expr RP */
- { 183, -4 }, /* (32) ccons ::= DEFAULT PLUS term scanpt */
- { 183, -4 }, /* (33) ccons ::= DEFAULT MINUS term scanpt */
- { 183, -3 }, /* (34) ccons ::= DEFAULT scanpt ID|INDEXED */
- { 183, -3 }, /* (35) ccons ::= NOT NULL onconf */
- { 183, -5 }, /* (36) ccons ::= PRIMARY KEY sortorder onconf autoinc */
- { 183, -2 }, /* (37) ccons ::= UNIQUE onconf */
- { 183, -4 }, /* (38) ccons ::= CHECK LP expr RP */
- { 183, -4 }, /* (39) ccons ::= REFERENCES nm eidlist_opt refargs */
- { 183, -1 }, /* (40) ccons ::= defer_subclause */
- { 183, -2 }, /* (41) ccons ::= COLLATE ID|STRING */
- { 188, 0 }, /* (42) autoinc ::= */
- { 188, -1 }, /* (43) autoinc ::= AUTOINCR */
- { 190, 0 }, /* (44) refargs ::= */
- { 190, -2 }, /* (45) refargs ::= refargs refarg */
- { 192, -2 }, /* (46) refarg ::= MATCH nm */
- { 192, -3 }, /* (47) refarg ::= ON INSERT refact */
- { 192, -3 }, /* (48) refarg ::= ON DELETE refact */
- { 192, -3 }, /* (49) refarg ::= ON UPDATE refact */
- { 193, -2 }, /* (50) refact ::= SET NULL */
- { 193, -2 }, /* (51) refact ::= SET DEFAULT */
- { 193, -1 }, /* (52) refact ::= CASCADE */
- { 193, -1 }, /* (53) refact ::= RESTRICT */
- { 193, -2 }, /* (54) refact ::= NO ACTION */
- { 191, -3 }, /* (55) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
- { 191, -2 }, /* (56) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
- { 194, 0 }, /* (57) init_deferred_pred_opt ::= */
- { 194, -2 }, /* (58) init_deferred_pred_opt ::= INITIALLY DEFERRED */
- { 194, -2 }, /* (59) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
- { 172, 0 }, /* (60) conslist_opt ::= */
- { 196, -1 }, /* (61) tconscomma ::= COMMA */
- { 197, -2 }, /* (62) tcons ::= CONSTRAINT nm */
- { 197, -7 }, /* (63) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
- { 197, -5 }, /* (64) tcons ::= UNIQUE LP sortlist RP onconf */
- { 197, -5 }, /* (65) tcons ::= CHECK LP expr RP onconf */
- { 197, -10 }, /* (66) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
- { 200, 0 }, /* (67) defer_subclause_opt ::= */
- { 186, 0 }, /* (68) onconf ::= */
- { 186, -3 }, /* (69) onconf ::= ON CONFLICT resolvetype */
- { 201, 0 }, /* (70) orconf ::= */
- { 201, -2 }, /* (71) orconf ::= OR resolvetype */
- { 202, -1 }, /* (72) resolvetype ::= IGNORE */
- { 202, -1 }, /* (73) resolvetype ::= REPLACE */
- { 160, -4 }, /* (74) cmd ::= DROP TABLE ifexists fullname */
- { 204, -2 }, /* (75) ifexists ::= IF EXISTS */
- { 204, 0 }, /* (76) ifexists ::= */
- { 160, -9 }, /* (77) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
- { 160, -4 }, /* (78) cmd ::= DROP VIEW ifexists fullname */
- { 160, -1 }, /* (79) cmd ::= select */
- { 174, -3 }, /* (80) select ::= WITH wqlist selectnowith */
- { 174, -4 }, /* (81) select ::= WITH RECURSIVE wqlist selectnowith */
- { 174, -1 }, /* (82) select ::= selectnowith */
- { 206, -3 }, /* (83) selectnowith ::= selectnowith multiselect_op oneselect */
- { 209, -1 }, /* (84) multiselect_op ::= UNION */
- { 209, -2 }, /* (85) multiselect_op ::= UNION ALL */
- { 209, -1 }, /* (86) multiselect_op ::= EXCEPT|INTERSECT */
- { 207, -9 }, /* (87) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
- { 207, -10 }, /* (88) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
- { 219, -4 }, /* (89) values ::= VALUES LP nexprlist RP */
- { 219, -5 }, /* (90) values ::= values COMMA LP nexprlist RP */
- { 210, -1 }, /* (91) distinct ::= DISTINCT */
- { 210, -1 }, /* (92) distinct ::= ALL */
- { 210, 0 }, /* (93) distinct ::= */
- { 221, 0 }, /* (94) sclp ::= */
- { 211, -5 }, /* (95) selcollist ::= sclp scanpt expr scanpt as */
- { 211, -3 }, /* (96) selcollist ::= sclp scanpt STAR */
- { 211, -5 }, /* (97) selcollist ::= sclp scanpt nm DOT STAR */
- { 222, -2 }, /* (98) as ::= AS nm */
- { 222, 0 }, /* (99) as ::= */
- { 212, 0 }, /* (100) from ::= */
- { 212, -2 }, /* (101) from ::= FROM seltablist */
- { 224, -2 }, /* (102) stl_prefix ::= seltablist joinop */
- { 224, 0 }, /* (103) stl_prefix ::= */
- { 223, -7 }, /* (104) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
- { 223, -9 }, /* (105) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
- { 223, -7 }, /* (106) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
- { 223, -7 }, /* (107) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
- { 170, 0 }, /* (108) dbnm ::= */
- { 170, -2 }, /* (109) dbnm ::= DOT nm */
- { 205, -1 }, /* (110) fullname ::= nm */
- { 205, -3 }, /* (111) fullname ::= nm DOT nm */
- { 230, -1 }, /* (112) xfullname ::= nm */
- { 230, -3 }, /* (113) xfullname ::= nm DOT nm */
- { 230, -5 }, /* (114) xfullname ::= nm DOT nm AS nm */
- { 230, -3 }, /* (115) xfullname ::= nm AS nm */
- { 225, -1 }, /* (116) joinop ::= COMMA|JOIN */
- { 225, -2 }, /* (117) joinop ::= JOIN_KW JOIN */
- { 225, -3 }, /* (118) joinop ::= JOIN_KW nm JOIN */
- { 225, -4 }, /* (119) joinop ::= JOIN_KW nm nm JOIN */
- { 227, -2 }, /* (120) on_opt ::= ON expr */
- { 227, 0 }, /* (121) on_opt ::= */
- { 226, 0 }, /* (122) indexed_opt ::= */
- { 226, -3 }, /* (123) indexed_opt ::= INDEXED BY nm */
- { 226, -2 }, /* (124) indexed_opt ::= NOT INDEXED */
- { 228, -4 }, /* (125) using_opt ::= USING LP idlist RP */
- { 228, 0 }, /* (126) using_opt ::= */
- { 216, 0 }, /* (127) orderby_opt ::= */
- { 216, -3 }, /* (128) orderby_opt ::= ORDER BY sortlist */
- { 198, -4 }, /* (129) sortlist ::= sortlist COMMA expr sortorder */
- { 198, -2 }, /* (130) sortlist ::= expr sortorder */
- { 187, -1 }, /* (131) sortorder ::= ASC */
- { 187, -1 }, /* (132) sortorder ::= DESC */
- { 187, 0 }, /* (133) sortorder ::= */
- { 214, 0 }, /* (134) groupby_opt ::= */
- { 214, -3 }, /* (135) groupby_opt ::= GROUP BY nexprlist */
- { 215, 0 }, /* (136) having_opt ::= */
- { 215, -2 }, /* (137) having_opt ::= HAVING expr */
- { 217, 0 }, /* (138) limit_opt ::= */
- { 217, -2 }, /* (139) limit_opt ::= LIMIT expr */
- { 217, -4 }, /* (140) limit_opt ::= LIMIT expr OFFSET expr */
- { 217, -4 }, /* (141) limit_opt ::= LIMIT expr COMMA expr */
- { 160, -6 }, /* (142) cmd ::= with DELETE FROM xfullname indexed_opt where_opt */
- { 213, 0 }, /* (143) where_opt ::= */
- { 213, -2 }, /* (144) where_opt ::= WHERE expr */
- { 160, -8 }, /* (145) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt */
- { 233, -5 }, /* (146) setlist ::= setlist COMMA nm EQ expr */
- { 233, -7 }, /* (147) setlist ::= setlist COMMA LP idlist RP EQ expr */
- { 233, -3 }, /* (148) setlist ::= nm EQ expr */
- { 233, -5 }, /* (149) setlist ::= LP idlist RP EQ expr */
- { 160, -7 }, /* (150) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
- { 160, -7 }, /* (151) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
- { 236, 0 }, /* (152) upsert ::= */
- { 236, -11 }, /* (153) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
- { 236, -8 }, /* (154) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
- { 236, -4 }, /* (155) upsert ::= ON CONFLICT DO NOTHING */
- { 234, -2 }, /* (156) insert_cmd ::= INSERT orconf */
- { 234, -1 }, /* (157) insert_cmd ::= REPLACE */
- { 235, 0 }, /* (158) idlist_opt ::= */
- { 235, -3 }, /* (159) idlist_opt ::= LP idlist RP */
- { 231, -3 }, /* (160) idlist ::= idlist COMMA nm */
- { 231, -1 }, /* (161) idlist ::= nm */
- { 185, -3 }, /* (162) expr ::= LP expr RP */
- { 185, -1 }, /* (163) expr ::= ID|INDEXED */
- { 185, -1 }, /* (164) expr ::= JOIN_KW */
- { 185, -3 }, /* (165) expr ::= nm DOT nm */
- { 185, -5 }, /* (166) expr ::= nm DOT nm DOT nm */
- { 184, -1 }, /* (167) term ::= NULL|FLOAT|BLOB */
- { 184, -1 }, /* (168) term ::= STRING */
- { 184, -1 }, /* (169) term ::= INTEGER */
- { 185, -1 }, /* (170) expr ::= VARIABLE */
- { 185, -3 }, /* (171) expr ::= expr COLLATE ID|STRING */
- { 185, -6 }, /* (172) expr ::= CAST LP expr AS typetoken RP */
- { 185, -5 }, /* (173) expr ::= ID|INDEXED LP distinct exprlist RP */
- { 185, -4 }, /* (174) expr ::= ID|INDEXED LP STAR RP */
- { 185, -6 }, /* (175) expr ::= ID|INDEXED LP distinct exprlist RP over_clause */
- { 185, -5 }, /* (176) expr ::= ID|INDEXED LP STAR RP over_clause */
- { 184, -1 }, /* (177) term ::= CTIME_KW */
- { 185, -5 }, /* (178) expr ::= LP nexprlist COMMA expr RP */
- { 185, -3 }, /* (179) expr ::= expr AND expr */
- { 185, -3 }, /* (180) expr ::= expr OR expr */
- { 185, -3 }, /* (181) expr ::= expr LT|GT|GE|LE expr */
- { 185, -3 }, /* (182) expr ::= expr EQ|NE expr */
- { 185, -3 }, /* (183) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
- { 185, -3 }, /* (184) expr ::= expr PLUS|MINUS expr */
- { 185, -3 }, /* (185) expr ::= expr STAR|SLASH|REM expr */
- { 185, -3 }, /* (186) expr ::= expr CONCAT expr */
- { 238, -2 }, /* (187) likeop ::= NOT LIKE_KW|MATCH */
- { 185, -3 }, /* (188) expr ::= expr likeop expr */
- { 185, -5 }, /* (189) expr ::= expr likeop expr ESCAPE expr */
- { 185, -2 }, /* (190) expr ::= expr ISNULL|NOTNULL */
- { 185, -3 }, /* (191) expr ::= expr NOT NULL */
- { 185, -3 }, /* (192) expr ::= expr IS expr */
- { 185, -4 }, /* (193) expr ::= expr IS NOT expr */
- { 185, -2 }, /* (194) expr ::= NOT expr */
- { 185, -2 }, /* (195) expr ::= BITNOT expr */
- { 185, -2 }, /* (196) expr ::= PLUS|MINUS expr */
- { 239, -1 }, /* (197) between_op ::= BETWEEN */
- { 239, -2 }, /* (198) between_op ::= NOT BETWEEN */
- { 185, -5 }, /* (199) expr ::= expr between_op expr AND expr */
- { 240, -1 }, /* (200) in_op ::= IN */
- { 240, -2 }, /* (201) in_op ::= NOT IN */
- { 185, -5 }, /* (202) expr ::= expr in_op LP exprlist RP */
- { 185, -3 }, /* (203) expr ::= LP select RP */
- { 185, -5 }, /* (204) expr ::= expr in_op LP select RP */
- { 185, -5 }, /* (205) expr ::= expr in_op nm dbnm paren_exprlist */
- { 185, -4 }, /* (206) expr ::= EXISTS LP select RP */
- { 185, -5 }, /* (207) expr ::= CASE case_operand case_exprlist case_else END */
- { 243, -5 }, /* (208) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- { 243, -4 }, /* (209) case_exprlist ::= WHEN expr THEN expr */
- { 244, -2 }, /* (210) case_else ::= ELSE expr */
- { 244, 0 }, /* (211) case_else ::= */
- { 242, -1 }, /* (212) case_operand ::= expr */
- { 242, 0 }, /* (213) case_operand ::= */
- { 229, 0 }, /* (214) exprlist ::= */
- { 220, -3 }, /* (215) nexprlist ::= nexprlist COMMA expr */
- { 220, -1 }, /* (216) nexprlist ::= expr */
- { 241, 0 }, /* (217) paren_exprlist ::= */
- { 241, -3 }, /* (218) paren_exprlist ::= LP exprlist RP */
- { 160, -12 }, /* (219) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- { 245, -1 }, /* (220) uniqueflag ::= UNIQUE */
- { 245, 0 }, /* (221) uniqueflag ::= */
- { 189, 0 }, /* (222) eidlist_opt ::= */
- { 189, -3 }, /* (223) eidlist_opt ::= LP eidlist RP */
- { 199, -5 }, /* (224) eidlist ::= eidlist COMMA nm collate sortorder */
- { 199, -3 }, /* (225) eidlist ::= nm collate sortorder */
- { 246, 0 }, /* (226) collate ::= */
- { 246, -2 }, /* (227) collate ::= COLLATE ID|STRING */
- { 160, -4 }, /* (228) cmd ::= DROP INDEX ifexists fullname */
- { 160, -1 }, /* (229) cmd ::= VACUUM */
- { 160, -2 }, /* (230) cmd ::= VACUUM nm */
- { 160, -3 }, /* (231) cmd ::= PRAGMA nm dbnm */
- { 160, -5 }, /* (232) cmd ::= PRAGMA nm dbnm EQ nmnum */
- { 160, -6 }, /* (233) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- { 160, -5 }, /* (234) cmd ::= PRAGMA nm dbnm EQ minus_num */
- { 160, -6 }, /* (235) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- { 180, -2 }, /* (236) plus_num ::= PLUS INTEGER|FLOAT */
- { 181, -2 }, /* (237) minus_num ::= MINUS INTEGER|FLOAT */
- { 160, -5 }, /* (238) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- { 248, -11 }, /* (239) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- { 250, -1 }, /* (240) trigger_time ::= BEFORE|AFTER */
- { 250, -2 }, /* (241) trigger_time ::= INSTEAD OF */
- { 250, 0 }, /* (242) trigger_time ::= */
- { 251, -1 }, /* (243) trigger_event ::= DELETE|INSERT */
- { 251, -1 }, /* (244) trigger_event ::= UPDATE */
- { 251, -3 }, /* (245) trigger_event ::= UPDATE OF idlist */
- { 253, 0 }, /* (246) when_clause ::= */
- { 253, -2 }, /* (247) when_clause ::= WHEN expr */
- { 249, -3 }, /* (248) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- { 249, -2 }, /* (249) trigger_cmd_list ::= trigger_cmd SEMI */
- { 255, -3 }, /* (250) trnm ::= nm DOT nm */
- { 256, -3 }, /* (251) tridxby ::= INDEXED BY nm */
- { 256, -2 }, /* (252) tridxby ::= NOT INDEXED */
- { 254, -8 }, /* (253) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt */
- { 254, -8 }, /* (254) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- { 254, -6 }, /* (255) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- { 254, -3 }, /* (256) trigger_cmd ::= scanpt select scanpt */
- { 185, -4 }, /* (257) expr ::= RAISE LP IGNORE RP */
- { 185, -6 }, /* (258) expr ::= RAISE LP raisetype COMMA nm RP */
- { 203, -1 }, /* (259) raisetype ::= ROLLBACK */
- { 203, -1 }, /* (260) raisetype ::= ABORT */
- { 203, -1 }, /* (261) raisetype ::= FAIL */
- { 160, -4 }, /* (262) cmd ::= DROP TRIGGER ifexists fullname */
- { 160, -6 }, /* (263) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- { 160, -3 }, /* (264) cmd ::= DETACH database_kw_opt expr */
- { 258, 0 }, /* (265) key_opt ::= */
- { 258, -2 }, /* (266) key_opt ::= KEY expr */
- { 160, -1 }, /* (267) cmd ::= REINDEX */
- { 160, -3 }, /* (268) cmd ::= REINDEX nm dbnm */
- { 160, -1 }, /* (269) cmd ::= ANALYZE */
- { 160, -3 }, /* (270) cmd ::= ANALYZE nm dbnm */
- { 160, -6 }, /* (271) cmd ::= ALTER TABLE fullname RENAME TO nm */
- { 160, -7 }, /* (272) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- { 259, -1 }, /* (273) add_column_fullname ::= fullname */
- { 160, -8 }, /* (274) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- { 160, -1 }, /* (275) cmd ::= create_vtab */
- { 160, -4 }, /* (276) cmd ::= create_vtab LP vtabarglist RP */
- { 261, -8 }, /* (277) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- { 263, 0 }, /* (278) vtabarg ::= */
- { 264, -1 }, /* (279) vtabargtoken ::= ANY */
- { 264, -3 }, /* (280) vtabargtoken ::= lp anylist RP */
- { 265, -1 }, /* (281) lp ::= LP */
- { 232, -2 }, /* (282) with ::= WITH wqlist */
- { 232, -3 }, /* (283) with ::= WITH RECURSIVE wqlist */
- { 208, -6 }, /* (284) wqlist ::= nm eidlist_opt AS LP select RP */
- { 208, -8 }, /* (285) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
- { 267, -1 }, /* (286) windowdefn_list ::= windowdefn */
- { 267, -3 }, /* (287) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- { 268, -3 }, /* (288) windowdefn ::= nm AS window */
- { 269, -5 }, /* (289) window ::= LP part_opt orderby_opt frame_opt RP */
- { 271, -3 }, /* (290) part_opt ::= PARTITION BY nexprlist */
- { 271, 0 }, /* (291) part_opt ::= */
- { 270, 0 }, /* (292) frame_opt ::= */
- { 270, -2 }, /* (293) frame_opt ::= range_or_rows frame_bound_s */
- { 270, -5 }, /* (294) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e */
- { 273, -1 }, /* (295) range_or_rows ::= RANGE */
- { 273, -1 }, /* (296) range_or_rows ::= ROWS */
- { 275, -1 }, /* (297) frame_bound_s ::= frame_bound */
- { 275, -2 }, /* (298) frame_bound_s ::= UNBOUNDED PRECEDING */
- { 276, -1 }, /* (299) frame_bound_e ::= frame_bound */
- { 276, -2 }, /* (300) frame_bound_e ::= UNBOUNDED FOLLOWING */
- { 274, -2 }, /* (301) frame_bound ::= expr PRECEDING */
- { 274, -2 }, /* (302) frame_bound ::= CURRENT ROW */
- { 274, -2 }, /* (303) frame_bound ::= expr FOLLOWING */
- { 218, -2 }, /* (304) window_clause ::= WINDOW windowdefn_list */
- { 237, -3 }, /* (305) over_clause ::= filter_opt OVER window */
- { 237, -3 }, /* (306) over_clause ::= filter_opt OVER nm */
- { 272, 0 }, /* (307) filter_opt ::= */
- { 272, -5 }, /* (308) filter_opt ::= FILTER LP WHERE expr RP */
- { 155, -1 }, /* (309) input ::= cmdlist */
- { 156, -2 }, /* (310) cmdlist ::= cmdlist ecmd */
- { 156, -1 }, /* (311) cmdlist ::= ecmd */
- { 157, -1 }, /* (312) ecmd ::= SEMI */
- { 157, -2 }, /* (313) ecmd ::= cmdx SEMI */
- { 157, -2 }, /* (314) ecmd ::= explain cmdx */
- { 162, 0 }, /* (315) trans_opt ::= */
- { 162, -1 }, /* (316) trans_opt ::= TRANSACTION */
- { 162, -2 }, /* (317) trans_opt ::= TRANSACTION nm */
- { 164, -1 }, /* (318) savepoint_opt ::= SAVEPOINT */
- { 164, 0 }, /* (319) savepoint_opt ::= */
- { 160, -2 }, /* (320) cmd ::= create_table create_table_args */
- { 171, -4 }, /* (321) columnlist ::= columnlist COMMA columnname carglist */
- { 171, -2 }, /* (322) columnlist ::= columnname carglist */
- { 163, -1 }, /* (323) nm ::= ID|INDEXED */
- { 163, -1 }, /* (324) nm ::= STRING */
- { 163, -1 }, /* (325) nm ::= JOIN_KW */
- { 177, -1 }, /* (326) typetoken ::= typename */
- { 178, -1 }, /* (327) typename ::= ID|STRING */
- { 179, -1 }, /* (328) signed ::= plus_num */
- { 179, -1 }, /* (329) signed ::= minus_num */
- { 176, -2 }, /* (330) carglist ::= carglist ccons */
- { 176, 0 }, /* (331) carglist ::= */
- { 183, -2 }, /* (332) ccons ::= NULL onconf */
- { 172, -2 }, /* (333) conslist_opt ::= COMMA conslist */
- { 195, -3 }, /* (334) conslist ::= conslist tconscomma tcons */
- { 195, -1 }, /* (335) conslist ::= tcons */
- { 196, 0 }, /* (336) tconscomma ::= */
- { 200, -1 }, /* (337) defer_subclause_opt ::= defer_subclause */
- { 202, -1 }, /* (338) resolvetype ::= raisetype */
- { 206, -1 }, /* (339) selectnowith ::= oneselect */
- { 207, -1 }, /* (340) oneselect ::= values */
- { 221, -2 }, /* (341) sclp ::= selcollist COMMA */
- { 222, -1 }, /* (342) as ::= ID|STRING */
- { 185, -1 }, /* (343) expr ::= term */
- { 238, -1 }, /* (344) likeop ::= LIKE_KW|MATCH */
- { 229, -1 }, /* (345) exprlist ::= nexprlist */
- { 247, -1 }, /* (346) nmnum ::= plus_num */
- { 247, -1 }, /* (347) nmnum ::= nm */
- { 247, -1 }, /* (348) nmnum ::= ON */
- { 247, -1 }, /* (349) nmnum ::= DELETE */
- { 247, -1 }, /* (350) nmnum ::= DEFAULT */
- { 180, -1 }, /* (351) plus_num ::= INTEGER|FLOAT */
- { 252, 0 }, /* (352) foreach_clause ::= */
- { 252, -3 }, /* (353) foreach_clause ::= FOR EACH ROW */
- { 255, -1 }, /* (354) trnm ::= nm */
- { 256, 0 }, /* (355) tridxby ::= */
- { 257, -1 }, /* (356) database_kw_opt ::= DATABASE */
- { 257, 0 }, /* (357) database_kw_opt ::= */
- { 260, 0 }, /* (358) kwcolumn_opt ::= */
- { 260, -1 }, /* (359) kwcolumn_opt ::= COLUMNKW */
- { 262, -1 }, /* (360) vtabarglist ::= vtabarg */
- { 262, -3 }, /* (361) vtabarglist ::= vtabarglist COMMA vtabarg */
- { 263, -2 }, /* (362) vtabarg ::= vtabarg vtabargtoken */
- { 266, 0 }, /* (363) anylist ::= */
- { 266, -4 }, /* (364) anylist ::= anylist LP anylist RP */
- { 266, -2 }, /* (365) anylist ::= anylist ANY */
- { 232, 0 }, /* (366) with ::= */
+ { 149, -1 }, /* (0) explain ::= EXPLAIN */
+ { 149, -3 }, /* (1) explain ::= EXPLAIN QUERY PLAN */
+ { 148, -1 }, /* (2) cmdx ::= cmd */
+ { 150, -3 }, /* (3) cmd ::= BEGIN transtype trans_opt */
+ { 151, 0 }, /* (4) transtype ::= */
+ { 151, -1 }, /* (5) transtype ::= DEFERRED */
+ { 151, -1 }, /* (6) transtype ::= IMMEDIATE */
+ { 151, -1 }, /* (7) transtype ::= EXCLUSIVE */
+ { 150, -2 }, /* (8) cmd ::= COMMIT|END trans_opt */
+ { 150, -2 }, /* (9) cmd ::= ROLLBACK trans_opt */
+ { 150, -2 }, /* (10) cmd ::= SAVEPOINT nm */
+ { 150, -3 }, /* (11) cmd ::= RELEASE savepoint_opt nm */
+ { 150, -5 }, /* (12) cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */
+ { 155, -6 }, /* (13) create_table ::= createkw temp TABLE ifnotexists nm dbnm */
+ { 157, -1 }, /* (14) createkw ::= CREATE */
+ { 159, 0 }, /* (15) ifnotexists ::= */
+ { 159, -3 }, /* (16) ifnotexists ::= IF NOT EXISTS */
+ { 158, -1 }, /* (17) temp ::= TEMP */
+ { 158, 0 }, /* (18) temp ::= */
+ { 156, -5 }, /* (19) create_table_args ::= LP columnlist conslist_opt RP table_options */
+ { 156, -2 }, /* (20) create_table_args ::= AS select */
+ { 163, 0 }, /* (21) table_options ::= */
+ { 163, -2 }, /* (22) table_options ::= WITHOUT nm */
+ { 165, -2 }, /* (23) columnname ::= nm typetoken */
+ { 167, 0 }, /* (24) typetoken ::= */
+ { 167, -4 }, /* (25) typetoken ::= typename LP signed RP */
+ { 167, -6 }, /* (26) typetoken ::= typename LP signed COMMA signed RP */
+ { 168, -2 }, /* (27) typename ::= typename ID|STRING */
+ { 172, 0 }, /* (28) scanpt ::= */
+ { 173, -2 }, /* (29) ccons ::= CONSTRAINT nm */
+ { 173, -4 }, /* (30) ccons ::= DEFAULT scanpt term scanpt */
+ { 173, -4 }, /* (31) ccons ::= DEFAULT LP expr RP */
+ { 173, -4 }, /* (32) ccons ::= DEFAULT PLUS term scanpt */
+ { 173, -4 }, /* (33) ccons ::= DEFAULT MINUS term scanpt */
+ { 173, -3 }, /* (34) ccons ::= DEFAULT scanpt ID|INDEXED */
+ { 173, -3 }, /* (35) ccons ::= NOT NULL onconf */
+ { 173, -5 }, /* (36) ccons ::= PRIMARY KEY sortorder onconf autoinc */
+ { 173, -2 }, /* (37) ccons ::= UNIQUE onconf */
+ { 173, -4 }, /* (38) ccons ::= CHECK LP expr RP */
+ { 173, -4 }, /* (39) ccons ::= REFERENCES nm eidlist_opt refargs */
+ { 173, -1 }, /* (40) ccons ::= defer_subclause */
+ { 173, -2 }, /* (41) ccons ::= COLLATE ID|STRING */
+ { 178, 0 }, /* (42) autoinc ::= */
+ { 178, -1 }, /* (43) autoinc ::= AUTOINCR */
+ { 180, 0 }, /* (44) refargs ::= */
+ { 180, -2 }, /* (45) refargs ::= refargs refarg */
+ { 182, -2 }, /* (46) refarg ::= MATCH nm */
+ { 182, -3 }, /* (47) refarg ::= ON INSERT refact */
+ { 182, -3 }, /* (48) refarg ::= ON DELETE refact */
+ { 182, -3 }, /* (49) refarg ::= ON UPDATE refact */
+ { 183, -2 }, /* (50) refact ::= SET NULL */
+ { 183, -2 }, /* (51) refact ::= SET DEFAULT */
+ { 183, -1 }, /* (52) refact ::= CASCADE */
+ { 183, -1 }, /* (53) refact ::= RESTRICT */
+ { 183, -2 }, /* (54) refact ::= NO ACTION */
+ { 181, -3 }, /* (55) defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
+ { 181, -2 }, /* (56) defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
+ { 184, 0 }, /* (57) init_deferred_pred_opt ::= */
+ { 184, -2 }, /* (58) init_deferred_pred_opt ::= INITIALLY DEFERRED */
+ { 184, -2 }, /* (59) init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
+ { 162, 0 }, /* (60) conslist_opt ::= */
+ { 186, -1 }, /* (61) tconscomma ::= COMMA */
+ { 187, -2 }, /* (62) tcons ::= CONSTRAINT nm */
+ { 187, -7 }, /* (63) tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
+ { 187, -5 }, /* (64) tcons ::= UNIQUE LP sortlist RP onconf */
+ { 187, -5 }, /* (65) tcons ::= CHECK LP expr RP onconf */
+ { 187, -10 }, /* (66) tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
+ { 190, 0 }, /* (67) defer_subclause_opt ::= */
+ { 176, 0 }, /* (68) onconf ::= */
+ { 176, -3 }, /* (69) onconf ::= ON CONFLICT resolvetype */
+ { 191, 0 }, /* (70) orconf ::= */
+ { 191, -2 }, /* (71) orconf ::= OR resolvetype */
+ { 192, -1 }, /* (72) resolvetype ::= IGNORE */
+ { 192, -1 }, /* (73) resolvetype ::= REPLACE */
+ { 150, -4 }, /* (74) cmd ::= DROP TABLE ifexists fullname */
+ { 194, -2 }, /* (75) ifexists ::= IF EXISTS */
+ { 194, 0 }, /* (76) ifexists ::= */
+ { 150, -9 }, /* (77) cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
+ { 150, -4 }, /* (78) cmd ::= DROP VIEW ifexists fullname */
+ { 150, -1 }, /* (79) cmd ::= select */
+ { 164, -3 }, /* (80) select ::= WITH wqlist selectnowith */
+ { 164, -4 }, /* (81) select ::= WITH RECURSIVE wqlist selectnowith */
+ { 164, -1 }, /* (82) select ::= selectnowith */
+ { 196, -3 }, /* (83) selectnowith ::= selectnowith multiselect_op oneselect */
+ { 199, -1 }, /* (84) multiselect_op ::= UNION */
+ { 199, -2 }, /* (85) multiselect_op ::= UNION ALL */
+ { 199, -1 }, /* (86) multiselect_op ::= EXCEPT|INTERSECT */
+ { 197, -9 }, /* (87) oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
+ { 208, -4 }, /* (88) values ::= VALUES LP nexprlist RP */
+ { 208, -5 }, /* (89) values ::= values COMMA LP exprlist RP */
+ { 200, -1 }, /* (90) distinct ::= DISTINCT */
+ { 200, -1 }, /* (91) distinct ::= ALL */
+ { 200, 0 }, /* (92) distinct ::= */
+ { 211, 0 }, /* (93) sclp ::= */
+ { 201, -5 }, /* (94) selcollist ::= sclp scanpt expr scanpt as */
+ { 201, -3 }, /* (95) selcollist ::= sclp scanpt STAR */
+ { 201, -5 }, /* (96) selcollist ::= sclp scanpt nm DOT STAR */
+ { 212, -2 }, /* (97) as ::= AS nm */
+ { 212, 0 }, /* (98) as ::= */
+ { 202, 0 }, /* (99) from ::= */
+ { 202, -2 }, /* (100) from ::= FROM seltablist */
+ { 214, -2 }, /* (101) stl_prefix ::= seltablist joinop */
+ { 214, 0 }, /* (102) stl_prefix ::= */
+ { 213, -7 }, /* (103) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+ { 213, -9 }, /* (104) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ { 213, -7 }, /* (105) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ { 213, -7 }, /* (106) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ { 160, 0 }, /* (107) dbnm ::= */
+ { 160, -2 }, /* (108) dbnm ::= DOT nm */
+ { 195, -1 }, /* (109) fullname ::= nm */
+ { 195, -3 }, /* (110) fullname ::= nm DOT nm */
+ { 219, -1 }, /* (111) xfullname ::= nm */
+ { 219, -3 }, /* (112) xfullname ::= nm DOT nm */
+ { 219, -5 }, /* (113) xfullname ::= nm DOT nm AS nm */
+ { 219, -3 }, /* (114) xfullname ::= nm AS nm */
+ { 215, -1 }, /* (115) joinop ::= COMMA|JOIN */
+ { 215, -2 }, /* (116) joinop ::= JOIN_KW JOIN */
+ { 215, -3 }, /* (117) joinop ::= JOIN_KW nm JOIN */
+ { 215, -4 }, /* (118) joinop ::= JOIN_KW nm nm JOIN */
+ { 217, -2 }, /* (119) on_opt ::= ON expr */
+ { 217, 0 }, /* (120) on_opt ::= */
+ { 216, 0 }, /* (121) indexed_opt ::= */
+ { 216, -3 }, /* (122) indexed_opt ::= INDEXED BY nm */
+ { 216, -2 }, /* (123) indexed_opt ::= NOT INDEXED */
+ { 218, -4 }, /* (124) using_opt ::= USING LP idlist RP */
+ { 218, 0 }, /* (125) using_opt ::= */
+ { 206, 0 }, /* (126) orderby_opt ::= */
+ { 206, -3 }, /* (127) orderby_opt ::= ORDER BY sortlist */
+ { 188, -4 }, /* (128) sortlist ::= sortlist COMMA expr sortorder */
+ { 188, -2 }, /* (129) sortlist ::= expr sortorder */
+ { 177, -1 }, /* (130) sortorder ::= ASC */
+ { 177, -1 }, /* (131) sortorder ::= DESC */
+ { 177, 0 }, /* (132) sortorder ::= */
+ { 204, 0 }, /* (133) groupby_opt ::= */
+ { 204, -3 }, /* (134) groupby_opt ::= GROUP BY nexprlist */
+ { 205, 0 }, /* (135) having_opt ::= */
+ { 205, -2 }, /* (136) having_opt ::= HAVING expr */
+ { 207, 0 }, /* (137) limit_opt ::= */
+ { 207, -2 }, /* (138) limit_opt ::= LIMIT expr */
+ { 207, -4 }, /* (139) limit_opt ::= LIMIT expr OFFSET expr */
+ { 207, -4 }, /* (140) limit_opt ::= LIMIT expr COMMA expr */
+ { 150, -6 }, /* (141) cmd ::= with DELETE FROM xfullname indexed_opt where_opt */
+ { 203, 0 }, /* (142) where_opt ::= */
+ { 203, -2 }, /* (143) where_opt ::= WHERE expr */
+ { 150, -8 }, /* (144) cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt */
+ { 222, -5 }, /* (145) setlist ::= setlist COMMA nm EQ expr */
+ { 222, -7 }, /* (146) setlist ::= setlist COMMA LP idlist RP EQ expr */
+ { 222, -3 }, /* (147) setlist ::= nm EQ expr */
+ { 222, -5 }, /* (148) setlist ::= LP idlist RP EQ expr */
+ { 150, -7 }, /* (149) cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+ { 150, -7 }, /* (150) cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
+ { 225, 0 }, /* (151) upsert ::= */
+ { 225, -11 }, /* (152) upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
+ { 225, -8 }, /* (153) upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
+ { 225, -4 }, /* (154) upsert ::= ON CONFLICT DO NOTHING */
+ { 223, -2 }, /* (155) insert_cmd ::= INSERT orconf */
+ { 223, -1 }, /* (156) insert_cmd ::= REPLACE */
+ { 224, 0 }, /* (157) idlist_opt ::= */
+ { 224, -3 }, /* (158) idlist_opt ::= LP idlist RP */
+ { 220, -3 }, /* (159) idlist ::= idlist COMMA nm */
+ { 220, -1 }, /* (160) idlist ::= nm */
+ { 175, -3 }, /* (161) expr ::= LP expr RP */
+ { 175, -1 }, /* (162) expr ::= ID|INDEXED */
+ { 175, -1 }, /* (163) expr ::= JOIN_KW */
+ { 175, -3 }, /* (164) expr ::= nm DOT nm */
+ { 175, -5 }, /* (165) expr ::= nm DOT nm DOT nm */
+ { 174, -1 }, /* (166) term ::= NULL|FLOAT|BLOB */
+ { 174, -1 }, /* (167) term ::= STRING */
+ { 174, -1 }, /* (168) term ::= INTEGER */
+ { 175, -1 }, /* (169) expr ::= VARIABLE */
+ { 175, -3 }, /* (170) expr ::= expr COLLATE ID|STRING */
+ { 175, -6 }, /* (171) expr ::= CAST LP expr AS typetoken RP */
+ { 175, -5 }, /* (172) expr ::= ID|INDEXED LP distinct exprlist RP */
+ { 175, -4 }, /* (173) expr ::= ID|INDEXED LP STAR RP */
+ { 174, -1 }, /* (174) term ::= CTIME_KW */
+ { 175, -5 }, /* (175) expr ::= LP nexprlist COMMA expr RP */
+ { 175, -3 }, /* (176) expr ::= expr AND expr */
+ { 175, -3 }, /* (177) expr ::= expr OR expr */
+ { 175, -3 }, /* (178) expr ::= expr LT|GT|GE|LE expr */
+ { 175, -3 }, /* (179) expr ::= expr EQ|NE expr */
+ { 175, -3 }, /* (180) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */
+ { 175, -3 }, /* (181) expr ::= expr PLUS|MINUS expr */
+ { 175, -3 }, /* (182) expr ::= expr STAR|SLASH|REM expr */
+ { 175, -3 }, /* (183) expr ::= expr CONCAT expr */
+ { 226, -2 }, /* (184) likeop ::= NOT LIKE_KW|MATCH */
+ { 175, -3 }, /* (185) expr ::= expr likeop expr */
+ { 175, -5 }, /* (186) expr ::= expr likeop expr ESCAPE expr */
+ { 175, -2 }, /* (187) expr ::= expr ISNULL|NOTNULL */
+ { 175, -3 }, /* (188) expr ::= expr NOT NULL */
+ { 175, -3 }, /* (189) expr ::= expr IS expr */
+ { 175, -4 }, /* (190) expr ::= expr IS NOT expr */
+ { 175, -2 }, /* (191) expr ::= NOT expr */
+ { 175, -2 }, /* (192) expr ::= BITNOT expr */
+ { 175, -2 }, /* (193) expr ::= MINUS expr */
+ { 175, -2 }, /* (194) expr ::= PLUS expr */
+ { 227, -1 }, /* (195) between_op ::= BETWEEN */
+ { 227, -2 }, /* (196) between_op ::= NOT BETWEEN */
+ { 175, -5 }, /* (197) expr ::= expr between_op expr AND expr */
+ { 228, -1 }, /* (198) in_op ::= IN */
+ { 228, -2 }, /* (199) in_op ::= NOT IN */
+ { 175, -5 }, /* (200) expr ::= expr in_op LP exprlist RP */
+ { 175, -3 }, /* (201) expr ::= LP select RP */
+ { 175, -5 }, /* (202) expr ::= expr in_op LP select RP */
+ { 175, -5 }, /* (203) expr ::= expr in_op nm dbnm paren_exprlist */
+ { 175, -4 }, /* (204) expr ::= EXISTS LP select RP */
+ { 175, -5 }, /* (205) expr ::= CASE case_operand case_exprlist case_else END */
+ { 231, -5 }, /* (206) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ { 231, -4 }, /* (207) case_exprlist ::= WHEN expr THEN expr */
+ { 232, -2 }, /* (208) case_else ::= ELSE expr */
+ { 232, 0 }, /* (209) case_else ::= */
+ { 230, -1 }, /* (210) case_operand ::= expr */
+ { 230, 0 }, /* (211) case_operand ::= */
+ { 210, 0 }, /* (212) exprlist ::= */
+ { 209, -3 }, /* (213) nexprlist ::= nexprlist COMMA expr */
+ { 209, -1 }, /* (214) nexprlist ::= expr */
+ { 229, 0 }, /* (215) paren_exprlist ::= */
+ { 229, -3 }, /* (216) paren_exprlist ::= LP exprlist RP */
+ { 150, -12 }, /* (217) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ { 233, -1 }, /* (218) uniqueflag ::= UNIQUE */
+ { 233, 0 }, /* (219) uniqueflag ::= */
+ { 179, 0 }, /* (220) eidlist_opt ::= */
+ { 179, -3 }, /* (221) eidlist_opt ::= LP eidlist RP */
+ { 189, -5 }, /* (222) eidlist ::= eidlist COMMA nm collate sortorder */
+ { 189, -3 }, /* (223) eidlist ::= nm collate sortorder */
+ { 234, 0 }, /* (224) collate ::= */
+ { 234, -2 }, /* (225) collate ::= COLLATE ID|STRING */
+ { 150, -4 }, /* (226) cmd ::= DROP INDEX ifexists fullname */
+ { 150, -1 }, /* (227) cmd ::= VACUUM */
+ { 150, -2 }, /* (228) cmd ::= VACUUM nm */
+ { 150, -3 }, /* (229) cmd ::= PRAGMA nm dbnm */
+ { 150, -5 }, /* (230) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ { 150, -6 }, /* (231) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ { 150, -5 }, /* (232) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ { 150, -6 }, /* (233) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ { 170, -2 }, /* (234) plus_num ::= PLUS INTEGER|FLOAT */
+ { 171, -2 }, /* (235) minus_num ::= MINUS INTEGER|FLOAT */
+ { 150, -5 }, /* (236) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ { 236, -11 }, /* (237) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ { 238, -1 }, /* (238) trigger_time ::= BEFORE|AFTER */
+ { 238, -2 }, /* (239) trigger_time ::= INSTEAD OF */
+ { 238, 0 }, /* (240) trigger_time ::= */
+ { 239, -1 }, /* (241) trigger_event ::= DELETE|INSERT */
+ { 239, -1 }, /* (242) trigger_event ::= UPDATE */
+ { 239, -3 }, /* (243) trigger_event ::= UPDATE OF idlist */
+ { 241, 0 }, /* (244) when_clause ::= */
+ { 241, -2 }, /* (245) when_clause ::= WHEN expr */
+ { 237, -3 }, /* (246) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ { 237, -2 }, /* (247) trigger_cmd_list ::= trigger_cmd SEMI */
+ { 243, -3 }, /* (248) trnm ::= nm DOT nm */
+ { 244, -3 }, /* (249) tridxby ::= INDEXED BY nm */
+ { 244, -2 }, /* (250) tridxby ::= NOT INDEXED */
+ { 242, -8 }, /* (251) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt */
+ { 242, -8 }, /* (252) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ { 242, -6 }, /* (253) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ { 242, -3 }, /* (254) trigger_cmd ::= scanpt select scanpt */
+ { 175, -4 }, /* (255) expr ::= RAISE LP IGNORE RP */
+ { 175, -6 }, /* (256) expr ::= RAISE LP raisetype COMMA nm RP */
+ { 193, -1 }, /* (257) raisetype ::= ROLLBACK */
+ { 193, -1 }, /* (258) raisetype ::= ABORT */
+ { 193, -1 }, /* (259) raisetype ::= FAIL */
+ { 150, -4 }, /* (260) cmd ::= DROP TRIGGER ifexists fullname */
+ { 150, -6 }, /* (261) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ { 150, -3 }, /* (262) cmd ::= DETACH database_kw_opt expr */
+ { 246, 0 }, /* (263) key_opt ::= */
+ { 246, -2 }, /* (264) key_opt ::= KEY expr */
+ { 150, -1 }, /* (265) cmd ::= REINDEX */
+ { 150, -3 }, /* (266) cmd ::= REINDEX nm dbnm */
+ { 150, -1 }, /* (267) cmd ::= ANALYZE */
+ { 150, -3 }, /* (268) cmd ::= ANALYZE nm dbnm */
+ { 150, -6 }, /* (269) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ { 150, -7 }, /* (270) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ { 247, -1 }, /* (271) add_column_fullname ::= fullname */
+ { 150, -1 }, /* (272) cmd ::= create_vtab */
+ { 150, -4 }, /* (273) cmd ::= create_vtab LP vtabarglist RP */
+ { 249, -8 }, /* (274) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ { 251, 0 }, /* (275) vtabarg ::= */
+ { 252, -1 }, /* (276) vtabargtoken ::= ANY */
+ { 252, -3 }, /* (277) vtabargtoken ::= lp anylist RP */
+ { 253, -1 }, /* (278) lp ::= LP */
+ { 221, -2 }, /* (279) with ::= WITH wqlist */
+ { 221, -3 }, /* (280) with ::= WITH RECURSIVE wqlist */
+ { 198, -6 }, /* (281) wqlist ::= nm eidlist_opt AS LP select RP */
+ { 198, -8 }, /* (282) wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+ { 145, -1 }, /* (283) input ::= cmdlist */
+ { 146, -2 }, /* (284) cmdlist ::= cmdlist ecmd */
+ { 146, -1 }, /* (285) cmdlist ::= ecmd */
+ { 147, -1 }, /* (286) ecmd ::= SEMI */
+ { 147, -2 }, /* (287) ecmd ::= cmdx SEMI */
+ { 147, -2 }, /* (288) ecmd ::= explain cmdx */
+ { 152, 0 }, /* (289) trans_opt ::= */
+ { 152, -1 }, /* (290) trans_opt ::= TRANSACTION */
+ { 152, -2 }, /* (291) trans_opt ::= TRANSACTION nm */
+ { 154, -1 }, /* (292) savepoint_opt ::= SAVEPOINT */
+ { 154, 0 }, /* (293) savepoint_opt ::= */
+ { 150, -2 }, /* (294) cmd ::= create_table create_table_args */
+ { 161, -4 }, /* (295) columnlist ::= columnlist COMMA columnname carglist */
+ { 161, -2 }, /* (296) columnlist ::= columnname carglist */
+ { 153, -1 }, /* (297) nm ::= ID|INDEXED */
+ { 153, -1 }, /* (298) nm ::= STRING */
+ { 153, -1 }, /* (299) nm ::= JOIN_KW */
+ { 167, -1 }, /* (300) typetoken ::= typename */
+ { 168, -1 }, /* (301) typename ::= ID|STRING */
+ { 169, -1 }, /* (302) signed ::= plus_num */
+ { 169, -1 }, /* (303) signed ::= minus_num */
+ { 166, -2 }, /* (304) carglist ::= carglist ccons */
+ { 166, 0 }, /* (305) carglist ::= */
+ { 173, -2 }, /* (306) ccons ::= NULL onconf */
+ { 162, -2 }, /* (307) conslist_opt ::= COMMA conslist */
+ { 185, -3 }, /* (308) conslist ::= conslist tconscomma tcons */
+ { 185, -1 }, /* (309) conslist ::= tcons */
+ { 186, 0 }, /* (310) tconscomma ::= */
+ { 190, -1 }, /* (311) defer_subclause_opt ::= defer_subclause */
+ { 192, -1 }, /* (312) resolvetype ::= raisetype */
+ { 196, -1 }, /* (313) selectnowith ::= oneselect */
+ { 197, -1 }, /* (314) oneselect ::= values */
+ { 211, -2 }, /* (315) sclp ::= selcollist COMMA */
+ { 212, -1 }, /* (316) as ::= ID|STRING */
+ { 175, -1 }, /* (317) expr ::= term */
+ { 226, -1 }, /* (318) likeop ::= LIKE_KW|MATCH */
+ { 210, -1 }, /* (319) exprlist ::= nexprlist */
+ { 235, -1 }, /* (320) nmnum ::= plus_num */
+ { 235, -1 }, /* (321) nmnum ::= nm */
+ { 235, -1 }, /* (322) nmnum ::= ON */
+ { 235, -1 }, /* (323) nmnum ::= DELETE */
+ { 235, -1 }, /* (324) nmnum ::= DEFAULT */
+ { 170, -1 }, /* (325) plus_num ::= INTEGER|FLOAT */
+ { 240, 0 }, /* (326) foreach_clause ::= */
+ { 240, -3 }, /* (327) foreach_clause ::= FOR EACH ROW */
+ { 243, -1 }, /* (328) trnm ::= nm */
+ { 244, 0 }, /* (329) tridxby ::= */
+ { 245, -1 }, /* (330) database_kw_opt ::= DATABASE */
+ { 245, 0 }, /* (331) database_kw_opt ::= */
+ { 248, 0 }, /* (332) kwcolumn_opt ::= */
+ { 248, -1 }, /* (333) kwcolumn_opt ::= COLUMNKW */
+ { 250, -1 }, /* (334) vtabarglist ::= vtabarg */
+ { 250, -3 }, /* (335) vtabarglist ::= vtabarglist COMMA vtabarg */
+ { 251, -2 }, /* (336) vtabarg ::= vtabarg vtabargtoken */
+ { 254, 0 }, /* (337) anylist ::= */
+ { 254, -4 }, /* (338) anylist ::= anylist LP anylist RP */
+ { 254, -2 }, /* (339) anylist ::= anylist ANY */
+ { 221, 0 }, /* (340) with ::= */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -148578,7 +143863,7 @@ static YYACTIONTYPE yy_reduce(
sqlite3ParserCTX_PDECL /* %extra_context */
){
int yygoto; /* The next state */
- YYACTIONTYPE yyact; /* The next action */
+ int yyact; /* The next action */
yyStackEntry *yymsp; /* The top of the parser's stack */
int yysize; /* Amount to pop the stack */
sqlite3ParserARG_FETCH
@@ -148652,15 +143937,15 @@ static YYACTIONTYPE yy_reduce(
{ sqlite3FinishCoding(pParse); }
break;
case 3: /* cmd ::= BEGIN transtype trans_opt */
-{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy70);}
+{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy502);}
break;
case 4: /* transtype ::= */
-{yymsp[1].minor.yy70 = TK_DEFERRED;}
+{yymsp[1].minor.yy502 = TK_DEFERRED;}
break;
case 5: /* transtype ::= DEFERRED */
case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6);
case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7);
-{yymsp[0].minor.yy70 = yymsp[0].major; /*A-overwrites-X*/}
+{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/}
break;
case 8: /* cmd ::= COMMIT|END trans_opt */
case 9: /* cmd ::= ROLLBACK trans_opt */ yytestcase(yyruleno==9);
@@ -148683,7 +143968,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 13: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */
{
- sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy70,0,0,yymsp[-2].minor.yy70);
+ sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy502,0,0,yymsp[-2].minor.yy502);
}
break;
case 14: /* createkw ::= CREATE */
@@ -148696,34 +143981,34 @@ static YYACTIONTYPE yy_reduce(
case 57: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==57);
case 67: /* defer_subclause_opt ::= */ yytestcase(yyruleno==67);
case 76: /* ifexists ::= */ yytestcase(yyruleno==76);
- case 93: /* distinct ::= */ yytestcase(yyruleno==93);
- case 226: /* collate ::= */ yytestcase(yyruleno==226);
-{yymsp[1].minor.yy70 = 0;}
+ case 92: /* distinct ::= */ yytestcase(yyruleno==92);
+ case 224: /* collate ::= */ yytestcase(yyruleno==224);
+{yymsp[1].minor.yy502 = 0;}
break;
case 16: /* ifnotexists ::= IF NOT EXISTS */
-{yymsp[-2].minor.yy70 = 1;}
+{yymsp[-2].minor.yy502 = 1;}
break;
case 17: /* temp ::= TEMP */
case 43: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==43);
-{yymsp[0].minor.yy70 = 1;}
+{yymsp[0].minor.yy502 = 1;}
break;
case 19: /* create_table_args ::= LP columnlist conslist_opt RP table_options */
{
- sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy70,0);
+ sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy502,0);
}
break;
case 20: /* create_table_args ::= AS select */
{
- sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy489);
- sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy489);
+ sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy399);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy399);
}
break;
case 22: /* table_options ::= WITHOUT nm */
{
if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){
- yymsp[-1].minor.yy70 = TF_WithoutRowid | TF_NoVisibleRowid;
+ yymsp[-1].minor.yy502 = TF_WithoutRowid | TF_NoVisibleRowid;
}else{
- yymsp[-1].minor.yy70 = 0;
+ yymsp[-1].minor.yy502 = 0;
sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z);
}
}
@@ -148733,7 +144018,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 24: /* typetoken ::= */
case 60: /* conslist_opt ::= */ yytestcase(yyruleno==60);
- case 99: /* as ::= */ yytestcase(yyruleno==99);
+ case 98: /* as ::= */ yytestcase(yyruleno==98);
{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;}
break;
case 25: /* typetoken ::= typename LP signed RP */
@@ -148752,7 +144037,7 @@ static YYACTIONTYPE yy_reduce(
case 28: /* scanpt ::= */
{
assert( yyLookahead!=YYNOCODE );
- yymsp[1].minor.yy392 = yyLookaheadToken.z;
+ yymsp[1].minor.yy36 = yyLookaheadToken.z;
}
break;
case 29: /* ccons ::= CONSTRAINT nm */
@@ -148760,18 +144045,18 @@ static YYACTIONTYPE yy_reduce(
{pParse->constraintName = yymsp[0].minor.yy0;}
break;
case 30: /* ccons ::= DEFAULT scanpt term scanpt */
-{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy18,yymsp[-2].minor.yy392,yymsp[0].minor.yy392);}
+{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy182,yymsp[-2].minor.yy36,yymsp[0].minor.yy36);}
break;
case 31: /* ccons ::= DEFAULT LP expr RP */
-{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy18,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);}
+{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy182,yymsp[-2].minor.yy0.z+1,yymsp[0].minor.yy0.z);}
break;
case 32: /* ccons ::= DEFAULT PLUS term scanpt */
-{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy18,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy392);}
+{sqlite3AddDefaultValue(pParse,yymsp[-1].minor.yy182,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy36);}
break;
case 33: /* ccons ::= DEFAULT MINUS term scanpt */
{
- Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[-1].minor.yy18, 0);
- sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy392);
+ Expr *p = sqlite3PExpr(pParse, TK_UMINUS, yymsp[-1].minor.yy182, 0);
+ sqlite3AddDefaultValue(pParse,p,yymsp[-2].minor.yy0.z,yymsp[0].minor.yy36);
}
break;
case 34: /* ccons ::= DEFAULT scanpt ID|INDEXED */
@@ -148781,174 +144066,174 @@ static YYACTIONTYPE yy_reduce(
sqlite3ExprIdToTrueFalse(p);
testcase( p->op==TK_TRUEFALSE && sqlite3ExprTruthValue(p) );
}
- sqlite3AddDefaultValue(pParse,p,yymsp[0].minor.yy0.z,yymsp[0].minor.yy0.z+yymsp[0].minor.yy0.n);
+ sqlite3AddDefaultValue(pParse,p,yymsp[0].minor.yy0.z,yymsp[0].minor.yy0.z+yymsp[0].minor.yy0.n);
}
break;
case 35: /* ccons ::= NOT NULL onconf */
-{sqlite3AddNotNull(pParse, yymsp[0].minor.yy70);}
+{sqlite3AddNotNull(pParse, yymsp[0].minor.yy502);}
break;
case 36: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */
-{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy70,yymsp[0].minor.yy70,yymsp[-2].minor.yy70);}
+{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy502,yymsp[0].minor.yy502,yymsp[-2].minor.yy502);}
break;
case 37: /* ccons ::= UNIQUE onconf */
-{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy70,0,0,0,0,
+{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy502,0,0,0,0,
SQLITE_IDXTYPE_UNIQUE);}
break;
case 38: /* ccons ::= CHECK LP expr RP */
-{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy18);}
+{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy182);}
break;
case 39: /* ccons ::= REFERENCES nm eidlist_opt refargs */
-{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy420,yymsp[0].minor.yy70);}
+{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy232,yymsp[0].minor.yy502);}
break;
case 40: /* ccons ::= defer_subclause */
-{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy70);}
+{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy502);}
break;
case 41: /* ccons ::= COLLATE ID|STRING */
{sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);}
break;
case 44: /* refargs ::= */
-{ yymsp[1].minor.yy70 = OE_None*0x0101; /* EV: R-19803-45884 */}
+{ yymsp[1].minor.yy502 = OE_None*0x0101; /* EV: R-19803-45884 */}
break;
case 45: /* refargs ::= refargs refarg */
-{ yymsp[-1].minor.yy70 = (yymsp[-1].minor.yy70 & ~yymsp[0].minor.yy111.mask) | yymsp[0].minor.yy111.value; }
+{ yymsp[-1].minor.yy502 = (yymsp[-1].minor.yy502 & ~yymsp[0].minor.yy107.mask) | yymsp[0].minor.yy107.value; }
break;
case 46: /* refarg ::= MATCH nm */
-{ yymsp[-1].minor.yy111.value = 0; yymsp[-1].minor.yy111.mask = 0x000000; }
+{ yymsp[-1].minor.yy107.value = 0; yymsp[-1].minor.yy107.mask = 0x000000; }
break;
case 47: /* refarg ::= ON INSERT refact */
-{ yymsp[-2].minor.yy111.value = 0; yymsp[-2].minor.yy111.mask = 0x000000; }
+{ yymsp[-2].minor.yy107.value = 0; yymsp[-2].minor.yy107.mask = 0x000000; }
break;
case 48: /* refarg ::= ON DELETE refact */
-{ yymsp[-2].minor.yy111.value = yymsp[0].minor.yy70; yymsp[-2].minor.yy111.mask = 0x0000ff; }
+{ yymsp[-2].minor.yy107.value = yymsp[0].minor.yy502; yymsp[-2].minor.yy107.mask = 0x0000ff; }
break;
case 49: /* refarg ::= ON UPDATE refact */
-{ yymsp[-2].minor.yy111.value = yymsp[0].minor.yy70<<8; yymsp[-2].minor.yy111.mask = 0x00ff00; }
+{ yymsp[-2].minor.yy107.value = yymsp[0].minor.yy502<<8; yymsp[-2].minor.yy107.mask = 0x00ff00; }
break;
case 50: /* refact ::= SET NULL */
-{ yymsp[-1].minor.yy70 = OE_SetNull; /* EV: R-33326-45252 */}
+{ yymsp[-1].minor.yy502 = OE_SetNull; /* EV: R-33326-45252 */}
break;
case 51: /* refact ::= SET DEFAULT */
-{ yymsp[-1].minor.yy70 = OE_SetDflt; /* EV: R-33326-45252 */}
+{ yymsp[-1].minor.yy502 = OE_SetDflt; /* EV: R-33326-45252 */}
break;
case 52: /* refact ::= CASCADE */
-{ yymsp[0].minor.yy70 = OE_Cascade; /* EV: R-33326-45252 */}
+{ yymsp[0].minor.yy502 = OE_Cascade; /* EV: R-33326-45252 */}
break;
case 53: /* refact ::= RESTRICT */
-{ yymsp[0].minor.yy70 = OE_Restrict; /* EV: R-33326-45252 */}
+{ yymsp[0].minor.yy502 = OE_Restrict; /* EV: R-33326-45252 */}
break;
case 54: /* refact ::= NO ACTION */
-{ yymsp[-1].minor.yy70 = OE_None; /* EV: R-33326-45252 */}
+{ yymsp[-1].minor.yy502 = OE_None; /* EV: R-33326-45252 */}
break;
case 55: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */
-{yymsp[-2].minor.yy70 = 0;}
+{yymsp[-2].minor.yy502 = 0;}
break;
case 56: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
case 71: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==71);
- case 156: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==156);
-{yymsp[-1].minor.yy70 = yymsp[0].minor.yy70;}
+ case 155: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==155);
+{yymsp[-1].minor.yy502 = yymsp[0].minor.yy502;}
break;
case 58: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
case 75: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==75);
- case 198: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==198);
- case 201: /* in_op ::= NOT IN */ yytestcase(yyruleno==201);
- case 227: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==227);
-{yymsp[-1].minor.yy70 = 1;}
+ case 196: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==196);
+ case 199: /* in_op ::= NOT IN */ yytestcase(yyruleno==199);
+ case 225: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==225);
+{yymsp[-1].minor.yy502 = 1;}
break;
case 59: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
-{yymsp[-1].minor.yy70 = 0;}
+{yymsp[-1].minor.yy502 = 0;}
break;
case 61: /* tconscomma ::= COMMA */
{pParse->constraintName.n = 0;}
break;
case 63: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */
-{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy420,yymsp[0].minor.yy70,yymsp[-2].minor.yy70,0);}
+{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy232,yymsp[0].minor.yy502,yymsp[-2].minor.yy502,0);}
break;
case 64: /* tcons ::= UNIQUE LP sortlist RP onconf */
-{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy420,yymsp[0].minor.yy70,0,0,0,0,
+{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy232,yymsp[0].minor.yy502,0,0,0,0,
SQLITE_IDXTYPE_UNIQUE);}
break;
case 65: /* tcons ::= CHECK LP expr RP onconf */
-{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy18);}
+{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy182);}
break;
case 66: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */
{
- sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy420, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy420, yymsp[-1].minor.yy70);
- sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy70);
+ sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy232, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy232, yymsp[-1].minor.yy502);
+ sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy502);
}
break;
case 68: /* onconf ::= */
case 70: /* orconf ::= */ yytestcase(yyruleno==70);
-{yymsp[1].minor.yy70 = OE_Default;}
+{yymsp[1].minor.yy502 = OE_Default;}
break;
case 69: /* onconf ::= ON CONFLICT resolvetype */
-{yymsp[-2].minor.yy70 = yymsp[0].minor.yy70;}
+{yymsp[-2].minor.yy502 = yymsp[0].minor.yy502;}
break;
case 72: /* resolvetype ::= IGNORE */
-{yymsp[0].minor.yy70 = OE_Ignore;}
+{yymsp[0].minor.yy502 = OE_Ignore;}
break;
case 73: /* resolvetype ::= REPLACE */
- case 157: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==157);
-{yymsp[0].minor.yy70 = OE_Replace;}
+ case 156: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==156);
+{yymsp[0].minor.yy502 = OE_Replace;}
break;
case 74: /* cmd ::= DROP TABLE ifexists fullname */
{
- sqlite3DropTable(pParse, yymsp[0].minor.yy135, 0, yymsp[-1].minor.yy70);
+ sqlite3DropTable(pParse, yymsp[0].minor.yy427, 0, yymsp[-1].minor.yy502);
}
break;
case 77: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */
{
- sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy420, yymsp[0].minor.yy489, yymsp[-7].minor.yy70, yymsp[-5].minor.yy70);
+ sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy232, yymsp[0].minor.yy399, yymsp[-7].minor.yy502, yymsp[-5].minor.yy502);
}
break;
case 78: /* cmd ::= DROP VIEW ifexists fullname */
{
- sqlite3DropTable(pParse, yymsp[0].minor.yy135, 1, yymsp[-1].minor.yy70);
+ sqlite3DropTable(pParse, yymsp[0].minor.yy427, 1, yymsp[-1].minor.yy502);
}
break;
case 79: /* cmd ::= select */
{
SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0};
- sqlite3Select(pParse, yymsp[0].minor.yy489, &dest);
- sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy489);
+ sqlite3Select(pParse, yymsp[0].minor.yy399, &dest);
+ sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy399);
}
break;
case 80: /* select ::= WITH wqlist selectnowith */
{
- Select *p = yymsp[0].minor.yy489;
+ Select *p = yymsp[0].minor.yy399;
if( p ){
- p->pWith = yymsp[-1].minor.yy449;
+ p->pWith = yymsp[-1].minor.yy91;
parserDoubleLinkSelect(pParse, p);
}else{
- sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy449);
+ sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy91);
}
- yymsp[-2].minor.yy489 = p;
+ yymsp[-2].minor.yy399 = p;
}
break;
case 81: /* select ::= WITH RECURSIVE wqlist selectnowith */
{
- Select *p = yymsp[0].minor.yy489;
+ Select *p = yymsp[0].minor.yy399;
if( p ){
- p->pWith = yymsp[-1].minor.yy449;
+ p->pWith = yymsp[-1].minor.yy91;
parserDoubleLinkSelect(pParse, p);
}else{
- sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy449);
+ sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy91);
}
- yymsp[-3].minor.yy489 = p;
+ yymsp[-3].minor.yy399 = p;
}
break;
case 82: /* select ::= selectnowith */
{
- Select *p = yymsp[0].minor.yy489;
+ Select *p = yymsp[0].minor.yy399;
if( p ){
parserDoubleLinkSelect(pParse, p);
}
- yymsp[0].minor.yy489 = p; /*A-overwrites-X*/
+ yymsp[0].minor.yy399 = p; /*A-overwrites-X*/
}
break;
case 83: /* selectnowith ::= selectnowith multiselect_op oneselect */
{
- Select *pRhs = yymsp[0].minor.yy489;
- Select *pLhs = yymsp[-2].minor.yy489;
+ Select *pRhs = yymsp[0].minor.yy399;
+ Select *pLhs = yymsp[-2].minor.yy399;
if( pRhs && pRhs->pPrior ){
SrcList *pFrom;
Token x;
@@ -148958,382 +144243,378 @@ static YYACTIONTYPE yy_reduce(
pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0);
}
if( pRhs ){
- pRhs->op = (u8)yymsp[-1].minor.yy70;
+ pRhs->op = (u8)yymsp[-1].minor.yy502;
pRhs->pPrior = pLhs;
if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue;
pRhs->selFlags &= ~SF_MultiValue;
- if( yymsp[-1].minor.yy70!=TK_ALL ) pParse->hasCompound = 1;
+ if( yymsp[-1].minor.yy502!=TK_ALL ) pParse->hasCompound = 1;
}else{
sqlite3SelectDelete(pParse->db, pLhs);
}
- yymsp[-2].minor.yy489 = pRhs;
+ yymsp[-2].minor.yy399 = pRhs;
}
break;
case 84: /* multiselect_op ::= UNION */
case 86: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==86);
-{yymsp[0].minor.yy70 = yymsp[0].major; /*A-overwrites-OP*/}
+{yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-OP*/}
break;
case 85: /* multiselect_op ::= UNION ALL */
-{yymsp[-1].minor.yy70 = TK_ALL;}
+{yymsp[-1].minor.yy502 = TK_ALL;}
break;
case 87: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */
{
- yymsp[-8].minor.yy489 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy420,yymsp[-5].minor.yy135,yymsp[-4].minor.yy18,yymsp[-3].minor.yy420,yymsp[-2].minor.yy18,yymsp[-1].minor.yy420,yymsp[-7].minor.yy70,yymsp[0].minor.yy18);
-}
- break;
- case 88: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt window_clause orderby_opt limit_opt */
-{
- yymsp[-9].minor.yy489 = sqlite3SelectNew(pParse,yymsp[-7].minor.yy420,yymsp[-6].minor.yy135,yymsp[-5].minor.yy18,yymsp[-4].minor.yy420,yymsp[-3].minor.yy18,yymsp[-1].minor.yy420,yymsp[-8].minor.yy70,yymsp[0].minor.yy18);
- if( yymsp[-9].minor.yy489 ){
- yymsp[-9].minor.yy489->pWinDefn = yymsp[-2].minor.yy327;
- }else{
- sqlite3WindowListDelete(pParse->db, yymsp[-2].minor.yy327);
+#if SELECTTRACE_ENABLED
+ Token s = yymsp[-8].minor.yy0; /*A-overwrites-S*/
+#endif
+ yymsp[-8].minor.yy399 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy232,yymsp[-5].minor.yy427,yymsp[-4].minor.yy182,yymsp[-3].minor.yy232,yymsp[-2].minor.yy182,yymsp[-1].minor.yy232,yymsp[-7].minor.yy502,yymsp[0].minor.yy182);
+#if SELECTTRACE_ENABLED
+ /* Populate the Select.zSelName[] string that is used to help with
+ ** query planner debugging, to differentiate between multiple Select
+ ** objects in a complex query.
+ **
+ ** If the SELECT keyword is immediately followed by a C-style comment
+ ** then extract the first few alphanumeric characters from within that
+ ** comment to be the zSelName value. Otherwise, the label is #N where
+ ** is an integer that is incremented with each SELECT statement seen.
+ */
+ if( yymsp[-8].minor.yy399!=0 ){
+ const char *z = s.z+6;
+ int i;
+ sqlite3_snprintf(sizeof(yymsp[-8].minor.yy399->zSelName), yymsp[-8].minor.yy399->zSelName,"#%d",++pParse->nSelect);
+ while( z[0]==' ' ) z++;
+ if( z[0]=='/' && z[1]=='*' ){
+ z += 2;
+ while( z[0]==' ' ) z++;
+ for(i=0; sqlite3Isalnum(z[i]); i++){}
+ sqlite3_snprintf(sizeof(yymsp[-8].minor.yy399->zSelName), yymsp[-8].minor.yy399->zSelName, "%.*s", i, z);
+ }
}
+#endif /* SELECTRACE_ENABLED */
}
break;
- case 89: /* values ::= VALUES LP nexprlist RP */
+ case 88: /* values ::= VALUES LP nexprlist RP */
{
- yymsp[-3].minor.yy489 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy420,0,0,0,0,0,SF_Values,0);
+ yymsp[-3].minor.yy399 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy232,0,0,0,0,0,SF_Values,0);
}
break;
- case 90: /* values ::= values COMMA LP nexprlist RP */
+ case 89: /* values ::= values COMMA LP exprlist RP */
{
- Select *pRight, *pLeft = yymsp[-4].minor.yy489;
- pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy420,0,0,0,0,0,SF_Values|SF_MultiValue,0);
+ Select *pRight, *pLeft = yymsp[-4].minor.yy399;
+ pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy232,0,0,0,0,0,SF_Values|SF_MultiValue,0);
if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue;
if( pRight ){
pRight->op = TK_ALL;
pRight->pPrior = pLeft;
- yymsp[-4].minor.yy489 = pRight;
+ yymsp[-4].minor.yy399 = pRight;
}else{
- yymsp[-4].minor.yy489 = pLeft;
+ yymsp[-4].minor.yy399 = pLeft;
}
}
break;
- case 91: /* distinct ::= DISTINCT */
-{yymsp[0].minor.yy70 = SF_Distinct;}
+ case 90: /* distinct ::= DISTINCT */
+{yymsp[0].minor.yy502 = SF_Distinct;}
break;
- case 92: /* distinct ::= ALL */
-{yymsp[0].minor.yy70 = SF_All;}
+ case 91: /* distinct ::= ALL */
+{yymsp[0].minor.yy502 = SF_All;}
break;
- case 94: /* sclp ::= */
- case 127: /* orderby_opt ::= */ yytestcase(yyruleno==127);
- case 134: /* groupby_opt ::= */ yytestcase(yyruleno==134);
- case 214: /* exprlist ::= */ yytestcase(yyruleno==214);
- case 217: /* paren_exprlist ::= */ yytestcase(yyruleno==217);
- case 222: /* eidlist_opt ::= */ yytestcase(yyruleno==222);
-{yymsp[1].minor.yy420 = 0;}
+ case 93: /* sclp ::= */
+ case 126: /* orderby_opt ::= */ yytestcase(yyruleno==126);
+ case 133: /* groupby_opt ::= */ yytestcase(yyruleno==133);
+ case 212: /* exprlist ::= */ yytestcase(yyruleno==212);
+ case 215: /* paren_exprlist ::= */ yytestcase(yyruleno==215);
+ case 220: /* eidlist_opt ::= */ yytestcase(yyruleno==220);
+{yymsp[1].minor.yy232 = 0;}
break;
- case 95: /* selcollist ::= sclp scanpt expr scanpt as */
+ case 94: /* selcollist ::= sclp scanpt expr scanpt as */
{
- yymsp[-4].minor.yy420 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy420, yymsp[-2].minor.yy18);
- if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy420, &yymsp[0].minor.yy0, 1);
- sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy420,yymsp[-3].minor.yy392,yymsp[-1].minor.yy392);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy232, yymsp[-2].minor.yy182);
+ if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy232, &yymsp[0].minor.yy0, 1);
+ sqlite3ExprListSetSpan(pParse,yymsp[-4].minor.yy232,yymsp[-3].minor.yy36,yymsp[-1].minor.yy36);
}
break;
- case 96: /* selcollist ::= sclp scanpt STAR */
+ case 95: /* selcollist ::= sclp scanpt STAR */
{
Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0);
- yymsp[-2].minor.yy420 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy420, p);
+ yymsp[-2].minor.yy232 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy232, p);
}
break;
- case 97: /* selcollist ::= sclp scanpt nm DOT STAR */
+ case 96: /* selcollist ::= sclp scanpt nm DOT STAR */
{
Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
- yymsp[-4].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy420, pDot);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy232, pDot);
}
break;
- case 98: /* as ::= AS nm */
- case 109: /* dbnm ::= DOT nm */ yytestcase(yyruleno==109);
- case 236: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==236);
- case 237: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==237);
+ case 97: /* as ::= AS nm */
+ case 108: /* dbnm ::= DOT nm */ yytestcase(yyruleno==108);
+ case 234: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==234);
+ case 235: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==235);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
break;
- case 100: /* from ::= */
-{yymsp[1].minor.yy135 = sqlite3DbMallocZero(pParse->db, sizeof(*yymsp[1].minor.yy135));}
+ case 99: /* from ::= */
+{yymsp[1].minor.yy427 = sqlite3DbMallocZero(pParse->db, sizeof(*yymsp[1].minor.yy427));}
break;
- case 101: /* from ::= FROM seltablist */
+ case 100: /* from ::= FROM seltablist */
{
- yymsp[-1].minor.yy135 = yymsp[0].minor.yy135;
- sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy135);
+ yymsp[-1].minor.yy427 = yymsp[0].minor.yy427;
+ sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy427);
}
break;
- case 102: /* stl_prefix ::= seltablist joinop */
+ case 101: /* stl_prefix ::= seltablist joinop */
{
- if( ALWAYS(yymsp[-1].minor.yy135 && yymsp[-1].minor.yy135->nSrc>0) ) yymsp[-1].minor.yy135->a[yymsp[-1].minor.yy135->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy70;
+ if( ALWAYS(yymsp[-1].minor.yy427 && yymsp[-1].minor.yy427->nSrc>0) ) yymsp[-1].minor.yy427->a[yymsp[-1].minor.yy427->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy502;
}
break;
- case 103: /* stl_prefix ::= */
-{yymsp[1].minor.yy135 = 0;}
+ case 102: /* stl_prefix ::= */
+{yymsp[1].minor.yy427 = 0;}
break;
- case 104: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+ case 103: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
{
- yymsp[-6].minor.yy135 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy135,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy18,yymsp[0].minor.yy48);
- sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy135, &yymsp[-2].minor.yy0);
+ yymsp[-6].minor.yy427 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy427,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy182,yymsp[0].minor.yy510);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy427, &yymsp[-2].minor.yy0);
}
break;
- case 105: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ case 104: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
{
- yymsp[-8].minor.yy135 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy135,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy18,yymsp[0].minor.yy48);
- sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy135, yymsp[-4].minor.yy420);
+ yymsp[-8].minor.yy427 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy427,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy182,yymsp[0].minor.yy510);
+ sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy427, yymsp[-4].minor.yy232);
}
break;
- case 106: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ case 105: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
{
- yymsp[-6].minor.yy135 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy135,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy489,yymsp[-1].minor.yy18,yymsp[0].minor.yy48);
+ yymsp[-6].minor.yy427 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy427,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy399,yymsp[-1].minor.yy182,yymsp[0].minor.yy510);
}
break;
- case 107: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ case 106: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
{
- if( yymsp[-6].minor.yy135==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy18==0 && yymsp[0].minor.yy48==0 ){
- yymsp[-6].minor.yy135 = yymsp[-4].minor.yy135;
- }else if( yymsp[-4].minor.yy135->nSrc==1 ){
- yymsp[-6].minor.yy135 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy135,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy18,yymsp[0].minor.yy48);
- if( yymsp[-6].minor.yy135 ){
- struct SrcList_item *pNew = &yymsp[-6].minor.yy135->a[yymsp[-6].minor.yy135->nSrc-1];
- struct SrcList_item *pOld = yymsp[-4].minor.yy135->a;
+ if( yymsp[-6].minor.yy427==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy182==0 && yymsp[0].minor.yy510==0 ){
+ yymsp[-6].minor.yy427 = yymsp[-4].minor.yy427;
+ }else if( yymsp[-4].minor.yy427->nSrc==1 ){
+ yymsp[-6].minor.yy427 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy427,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy182,yymsp[0].minor.yy510);
+ if( yymsp[-6].minor.yy427 ){
+ struct SrcList_item *pNew = &yymsp[-6].minor.yy427->a[yymsp[-6].minor.yy427->nSrc-1];
+ struct SrcList_item *pOld = yymsp[-4].minor.yy427->a;
pNew->zName = pOld->zName;
pNew->zDatabase = pOld->zDatabase;
pNew->pSelect = pOld->pSelect;
pOld->zName = pOld->zDatabase = 0;
pOld->pSelect = 0;
}
- sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy135);
+ sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy427);
}else{
Select *pSubquery;
- sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy135);
- pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy135,0,0,0,0,SF_NestedFrom,0);
- yymsp[-6].minor.yy135 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy135,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy18,yymsp[0].minor.yy48);
+ sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy427);
+ pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy427,0,0,0,0,SF_NestedFrom,0);
+ yymsp[-6].minor.yy427 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy427,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy182,yymsp[0].minor.yy510);
}
}
break;
- case 108: /* dbnm ::= */
- case 122: /* indexed_opt ::= */ yytestcase(yyruleno==122);
+ case 107: /* dbnm ::= */
+ case 121: /* indexed_opt ::= */ yytestcase(yyruleno==121);
{yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;}
break;
- case 110: /* fullname ::= nm */
+ case 109: /* fullname ::= nm */
+ case 111: /* xfullname ::= nm */ yytestcase(yyruleno==111);
+{yymsp[0].minor.yy427 = sqlite3SrcListAppend(pParse->db,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
+ break;
+ case 110: /* fullname ::= nm DOT nm */
+ case 112: /* xfullname ::= nm DOT nm */ yytestcase(yyruleno==112);
+{yymsp[-2].minor.yy427 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ break;
+ case 113: /* xfullname ::= nm DOT nm AS nm */
{
- yylhsminor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[0].minor.yy0,0);
- if( IN_RENAME_OBJECT && yylhsminor.yy135 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy135->a[0].zName, &yymsp[0].minor.yy0);
-}
- yymsp[0].minor.yy135 = yylhsminor.yy135;
- break;
- case 111: /* fullname ::= nm DOT nm */
-{
- yylhsminor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
- if( IN_RENAME_OBJECT && yylhsminor.yy135 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy135->a[0].zName, &yymsp[0].minor.yy0);
-}
- yymsp[-2].minor.yy135 = yylhsminor.yy135;
- break;
- case 112: /* xfullname ::= nm */
-{yymsp[0].minor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
- break;
- case 113: /* xfullname ::= nm DOT nm */
-{yymsp[-2].minor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
- break;
- case 114: /* xfullname ::= nm DOT nm AS nm */
-{
- yymsp[-4].minor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
- if( yymsp[-4].minor.yy135 ) yymsp[-4].minor.yy135->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+ yymsp[-4].minor.yy427 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
+ if( yymsp[-4].minor.yy427 ) yymsp[-4].minor.yy427->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
}
break;
- case 115: /* xfullname ::= nm AS nm */
+ case 114: /* xfullname ::= nm AS nm */
{
- yymsp[-2].minor.yy135 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
- if( yymsp[-2].minor.yy135 ) yymsp[-2].minor.yy135->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
+ yymsp[-2].minor.yy427 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
+ if( yymsp[-2].minor.yy427 ) yymsp[-2].minor.yy427->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
}
break;
- case 116: /* joinop ::= COMMA|JOIN */
-{ yymsp[0].minor.yy70 = JT_INNER; }
+ case 115: /* joinop ::= COMMA|JOIN */
+{ yymsp[0].minor.yy502 = JT_INNER; }
break;
- case 117: /* joinop ::= JOIN_KW JOIN */
-{yymsp[-1].minor.yy70 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/}
+ case 116: /* joinop ::= JOIN_KW JOIN */
+{yymsp[-1].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/}
break;
- case 118: /* joinop ::= JOIN_KW nm JOIN */
-{yymsp[-2].minor.yy70 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
+ case 117: /* joinop ::= JOIN_KW nm JOIN */
+{yymsp[-2].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
break;
- case 119: /* joinop ::= JOIN_KW nm nm JOIN */
-{yymsp[-3].minor.yy70 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
+ case 118: /* joinop ::= JOIN_KW nm nm JOIN */
+{yymsp[-3].minor.yy502 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
break;
- case 120: /* on_opt ::= ON expr */
- case 137: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==137);
- case 144: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==144);
- case 210: /* case_else ::= ELSE expr */ yytestcase(yyruleno==210);
-{yymsp[-1].minor.yy18 = yymsp[0].minor.yy18;}
+ case 119: /* on_opt ::= ON expr */
+ case 136: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==136);
+ case 143: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==143);
+ case 208: /* case_else ::= ELSE expr */ yytestcase(yyruleno==208);
+{yymsp[-1].minor.yy182 = yymsp[0].minor.yy182;}
break;
- case 121: /* on_opt ::= */
- case 136: /* having_opt ::= */ yytestcase(yyruleno==136);
- case 138: /* limit_opt ::= */ yytestcase(yyruleno==138);
- case 143: /* where_opt ::= */ yytestcase(yyruleno==143);
- case 211: /* case_else ::= */ yytestcase(yyruleno==211);
- case 213: /* case_operand ::= */ yytestcase(yyruleno==213);
-{yymsp[1].minor.yy18 = 0;}
+ case 120: /* on_opt ::= */
+ case 135: /* having_opt ::= */ yytestcase(yyruleno==135);
+ case 137: /* limit_opt ::= */ yytestcase(yyruleno==137);
+ case 142: /* where_opt ::= */ yytestcase(yyruleno==142);
+ case 209: /* case_else ::= */ yytestcase(yyruleno==209);
+ case 211: /* case_operand ::= */ yytestcase(yyruleno==211);
+{yymsp[1].minor.yy182 = 0;}
break;
- case 123: /* indexed_opt ::= INDEXED BY nm */
+ case 122: /* indexed_opt ::= INDEXED BY nm */
{yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;}
break;
- case 124: /* indexed_opt ::= NOT INDEXED */
+ case 123: /* indexed_opt ::= NOT INDEXED */
{yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;}
break;
- case 125: /* using_opt ::= USING LP idlist RP */
-{yymsp[-3].minor.yy48 = yymsp[-1].minor.yy48;}
+ case 124: /* using_opt ::= USING LP idlist RP */
+{yymsp[-3].minor.yy510 = yymsp[-1].minor.yy510;}
break;
- case 126: /* using_opt ::= */
- case 158: /* idlist_opt ::= */ yytestcase(yyruleno==158);
-{yymsp[1].minor.yy48 = 0;}
+ case 125: /* using_opt ::= */
+ case 157: /* idlist_opt ::= */ yytestcase(yyruleno==157);
+{yymsp[1].minor.yy510 = 0;}
break;
- case 128: /* orderby_opt ::= ORDER BY sortlist */
- case 135: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==135);
-{yymsp[-2].minor.yy420 = yymsp[0].minor.yy420;}
+ case 127: /* orderby_opt ::= ORDER BY sortlist */
+ case 134: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==134);
+{yymsp[-2].minor.yy232 = yymsp[0].minor.yy232;}
break;
- case 129: /* sortlist ::= sortlist COMMA expr sortorder */
+ case 128: /* sortlist ::= sortlist COMMA expr sortorder */
{
- yymsp[-3].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy420,yymsp[-1].minor.yy18);
- sqlite3ExprListSetSortOrder(yymsp[-3].minor.yy420,yymsp[0].minor.yy70);
+ yymsp[-3].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy232,yymsp[-1].minor.yy182);
+ sqlite3ExprListSetSortOrder(yymsp[-3].minor.yy232,yymsp[0].minor.yy502);
}
break;
- case 130: /* sortlist ::= expr sortorder */
+ case 129: /* sortlist ::= expr sortorder */
{
- yymsp[-1].minor.yy420 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy18); /*A-overwrites-Y*/
- sqlite3ExprListSetSortOrder(yymsp[-1].minor.yy420,yymsp[0].minor.yy70);
+ yymsp[-1].minor.yy232 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy182); /*A-overwrites-Y*/
+ sqlite3ExprListSetSortOrder(yymsp[-1].minor.yy232,yymsp[0].minor.yy502);
}
break;
- case 131: /* sortorder ::= ASC */
-{yymsp[0].minor.yy70 = SQLITE_SO_ASC;}
+ case 130: /* sortorder ::= ASC */
+{yymsp[0].minor.yy502 = SQLITE_SO_ASC;}
break;
- case 132: /* sortorder ::= DESC */
-{yymsp[0].minor.yy70 = SQLITE_SO_DESC;}
+ case 131: /* sortorder ::= DESC */
+{yymsp[0].minor.yy502 = SQLITE_SO_DESC;}
break;
- case 133: /* sortorder ::= */
-{yymsp[1].minor.yy70 = SQLITE_SO_UNDEFINED;}
+ case 132: /* sortorder ::= */
+{yymsp[1].minor.yy502 = SQLITE_SO_UNDEFINED;}
break;
- case 139: /* limit_opt ::= LIMIT expr */
-{yymsp[-1].minor.yy18 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy18,0);}
+ case 138: /* limit_opt ::= LIMIT expr */
+{yymsp[-1].minor.yy182 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy182,0);}
break;
- case 140: /* limit_opt ::= LIMIT expr OFFSET expr */
-{yymsp[-3].minor.yy18 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy18,yymsp[0].minor.yy18);}
+ case 139: /* limit_opt ::= LIMIT expr OFFSET expr */
+{yymsp[-3].minor.yy182 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[-2].minor.yy182,yymsp[0].minor.yy182);}
break;
- case 141: /* limit_opt ::= LIMIT expr COMMA expr */
-{yymsp[-3].minor.yy18 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy18,yymsp[-2].minor.yy18);}
+ case 140: /* limit_opt ::= LIMIT expr COMMA expr */
+{yymsp[-3].minor.yy182 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy182,yymsp[-2].minor.yy182);}
break;
- case 142: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt */
+ case 141: /* cmd ::= with DELETE FROM xfullname indexed_opt where_opt */
{
- sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy135, &yymsp[-1].minor.yy0);
- sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy135,yymsp[0].minor.yy18,0,0);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy427, &yymsp[-1].minor.yy0);
+ sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy427,yymsp[0].minor.yy182,0,0);
}
break;
- case 145: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt */
+ case 144: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist where_opt */
{
- sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy135, &yymsp[-3].minor.yy0);
- sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy420,"set list");
- sqlite3Update(pParse,yymsp[-4].minor.yy135,yymsp[-1].minor.yy420,yymsp[0].minor.yy18,yymsp[-5].minor.yy70,0,0,0);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy427, &yymsp[-3].minor.yy0);
+ sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy232,"set list");
+ sqlite3Update(pParse,yymsp[-4].minor.yy427,yymsp[-1].minor.yy232,yymsp[0].minor.yy182,yymsp[-5].minor.yy502,0,0,0);
}
break;
- case 146: /* setlist ::= setlist COMMA nm EQ expr */
+ case 145: /* setlist ::= setlist COMMA nm EQ expr */
{
- yymsp[-4].minor.yy420 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy420, yymsp[0].minor.yy18);
- sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy420, &yymsp[-2].minor.yy0, 1);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy232, yymsp[0].minor.yy182);
+ sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy232, &yymsp[-2].minor.yy0, 1);
}
break;
- case 147: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
+ case 146: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
{
- yymsp[-6].minor.yy420 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy420, yymsp[-3].minor.yy48, yymsp[0].minor.yy18);
+ yymsp[-6].minor.yy232 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy232, yymsp[-3].minor.yy510, yymsp[0].minor.yy182);
}
break;
- case 148: /* setlist ::= nm EQ expr */
+ case 147: /* setlist ::= nm EQ expr */
{
- yylhsminor.yy420 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy18);
- sqlite3ExprListSetName(pParse, yylhsminor.yy420, &yymsp[-2].minor.yy0, 1);
+ yylhsminor.yy232 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy182);
+ sqlite3ExprListSetName(pParse, yylhsminor.yy232, &yymsp[-2].minor.yy0, 1);
}
- yymsp[-2].minor.yy420 = yylhsminor.yy420;
+ yymsp[-2].minor.yy232 = yylhsminor.yy232;
break;
- case 149: /* setlist ::= LP idlist RP EQ expr */
+ case 148: /* setlist ::= LP idlist RP EQ expr */
{
- yymsp[-4].minor.yy420 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy48, yymsp[0].minor.yy18);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy510, yymsp[0].minor.yy182);
}
break;
- case 150: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
+ case 149: /* cmd ::= with insert_cmd INTO xfullname idlist_opt select upsert */
{
- sqlite3Insert(pParse, yymsp[-3].minor.yy135, yymsp[-1].minor.yy489, yymsp[-2].minor.yy48, yymsp[-5].minor.yy70, yymsp[0].minor.yy340);
+ sqlite3Insert(pParse, yymsp[-3].minor.yy427, yymsp[-1].minor.yy399, yymsp[-2].minor.yy510, yymsp[-5].minor.yy502, yymsp[0].minor.yy198);
}
break;
- case 151: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
+ case 150: /* cmd ::= with insert_cmd INTO xfullname idlist_opt DEFAULT VALUES */
{
- sqlite3Insert(pParse, yymsp[-3].minor.yy135, 0, yymsp[-2].minor.yy48, yymsp[-5].minor.yy70, 0);
+ sqlite3Insert(pParse, yymsp[-3].minor.yy427, 0, yymsp[-2].minor.yy510, yymsp[-5].minor.yy502, 0);
}
break;
- case 152: /* upsert ::= */
-{ yymsp[1].minor.yy340 = 0; }
+ case 151: /* upsert ::= */
+{ yymsp[1].minor.yy198 = 0; }
break;
- case 153: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
-{ yymsp[-10].minor.yy340 = sqlite3UpsertNew(pParse->db,yymsp[-7].minor.yy420,yymsp[-5].minor.yy18,yymsp[-1].minor.yy420,yymsp[0].minor.yy18);}
+ case 152: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO UPDATE SET setlist where_opt */
+{ yymsp[-10].minor.yy198 = sqlite3UpsertNew(pParse->db,yymsp[-7].minor.yy232,yymsp[-5].minor.yy182,yymsp[-1].minor.yy232,yymsp[0].minor.yy182);}
break;
- case 154: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
-{ yymsp[-7].minor.yy340 = sqlite3UpsertNew(pParse->db,yymsp[-4].minor.yy420,yymsp[-2].minor.yy18,0,0); }
+ case 153: /* upsert ::= ON CONFLICT LP sortlist RP where_opt DO NOTHING */
+{ yymsp[-7].minor.yy198 = sqlite3UpsertNew(pParse->db,yymsp[-4].minor.yy232,yymsp[-2].minor.yy182,0,0); }
break;
- case 155: /* upsert ::= ON CONFLICT DO NOTHING */
-{ yymsp[-3].minor.yy340 = sqlite3UpsertNew(pParse->db,0,0,0,0); }
+ case 154: /* upsert ::= ON CONFLICT DO NOTHING */
+{ yymsp[-3].minor.yy198 = sqlite3UpsertNew(pParse->db,0,0,0,0); }
break;
- case 159: /* idlist_opt ::= LP idlist RP */
-{yymsp[-2].minor.yy48 = yymsp[-1].minor.yy48;}
+ case 158: /* idlist_opt ::= LP idlist RP */
+{yymsp[-2].minor.yy510 = yymsp[-1].minor.yy510;}
break;
- case 160: /* idlist ::= idlist COMMA nm */
-{yymsp[-2].minor.yy48 = sqlite3IdListAppend(pParse,yymsp[-2].minor.yy48,&yymsp[0].minor.yy0);}
+ case 159: /* idlist ::= idlist COMMA nm */
+{yymsp[-2].minor.yy510 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy510,&yymsp[0].minor.yy0);}
break;
- case 161: /* idlist ::= nm */
-{yymsp[0].minor.yy48 = sqlite3IdListAppend(pParse,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
+ case 160: /* idlist ::= nm */
+{yymsp[0].minor.yy510 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
break;
- case 162: /* expr ::= LP expr RP */
-{yymsp[-2].minor.yy18 = yymsp[-1].minor.yy18;}
+ case 161: /* expr ::= LP expr RP */
+{yymsp[-2].minor.yy182 = yymsp[-1].minor.yy182;}
break;
- case 163: /* expr ::= ID|INDEXED */
- case 164: /* expr ::= JOIN_KW */ yytestcase(yyruleno==164);
-{yymsp[0].minor.yy18=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ case 162: /* expr ::= ID|INDEXED */
+ case 163: /* expr ::= JOIN_KW */ yytestcase(yyruleno==163);
+{yymsp[0].minor.yy182=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
break;
- case 165: /* expr ::= nm DOT nm */
+ case 164: /* expr ::= nm DOT nm */
{
Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[0].minor.yy0);
- sqlite3RenameTokenMap(pParse, (void*)temp1, &yymsp[-2].minor.yy0);
- }
- yylhsminor.yy18 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
+ yylhsminor.yy182 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
}
- yymsp[-2].minor.yy18 = yylhsminor.yy18;
+ yymsp[-2].minor.yy182 = yylhsminor.yy182;
break;
- case 166: /* expr ::= nm DOT nm DOT nm */
+ case 165: /* expr ::= nm DOT nm DOT nm */
{
Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1);
Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3);
- if( IN_RENAME_OBJECT ){
- sqlite3RenameTokenMap(pParse, (void*)temp3, &yymsp[0].minor.yy0);
- sqlite3RenameTokenMap(pParse, (void*)temp2, &yymsp[-2].minor.yy0);
- }
- yylhsminor.yy18 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
+ yylhsminor.yy182 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
}
- yymsp[-4].minor.yy18 = yylhsminor.yy18;
+ yymsp[-4].minor.yy182 = yylhsminor.yy182;
break;
- case 167: /* term ::= NULL|FLOAT|BLOB */
- case 168: /* term ::= STRING */ yytestcase(yyruleno==168);
-{yymsp[0].minor.yy18=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/}
+ case 166: /* term ::= NULL|FLOAT|BLOB */
+ case 167: /* term ::= STRING */ yytestcase(yyruleno==167);
+{yymsp[0].minor.yy182=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/}
break;
- case 169: /* term ::= INTEGER */
+ case 168: /* term ::= INTEGER */
{
- yylhsminor.yy18 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
+ yylhsminor.yy182 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
}
- yymsp[0].minor.yy18 = yylhsminor.yy18;
+ yymsp[0].minor.yy182 = yylhsminor.yy182;
break;
- case 170: /* expr ::= VARIABLE */
+ case 169: /* expr ::= VARIABLE */
{
if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){
u32 n = yymsp[0].minor.yy0.n;
- yymsp[0].minor.yy18 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0);
- sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy18, n);
+ yymsp[0].minor.yy182 = tokenExpr(pParse, TK_VARIABLE, yymsp[0].minor.yy0);
+ sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy182, n);
}else{
/* When doing a nested parse, one can include terms in an expression
** that look like this: #1 #2 ... These terms refer to registers
@@ -149342,154 +144623,146 @@ static YYACTIONTYPE yy_reduce(
assert( t.n>=2 );
if( pParse->nested==0 ){
sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t);
- yymsp[0].minor.yy18 = 0;
+ yymsp[0].minor.yy182 = 0;
}else{
- yymsp[0].minor.yy18 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
- if( yymsp[0].minor.yy18 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy18->iTable);
+ yymsp[0].minor.yy182 = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
+ if( yymsp[0].minor.yy182 ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy182->iTable);
}
}
}
break;
- case 171: /* expr ::= expr COLLATE ID|STRING */
+ case 170: /* expr ::= expr COLLATE ID|STRING */
{
- yymsp[-2].minor.yy18 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy18, &yymsp[0].minor.yy0, 1);
+ yymsp[-2].minor.yy182 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy182, &yymsp[0].minor.yy0, 1);
}
break;
- case 172: /* expr ::= CAST LP expr AS typetoken RP */
+ case 171: /* expr ::= CAST LP expr AS typetoken RP */
{
- yymsp[-5].minor.yy18 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
- sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy18, yymsp[-3].minor.yy18, 0);
+ yymsp[-5].minor.yy182 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
+ sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy182, yymsp[-3].minor.yy182, 0);
}
break;
- case 173: /* expr ::= ID|INDEXED LP distinct exprlist RP */
+ case 172: /* expr ::= ID|INDEXED LP distinct exprlist RP */
{
- yylhsminor.yy18 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy420, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy70);
+ if( yymsp[-1].minor.yy232 && yymsp[-1].minor.yy232->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
+ sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0);
+ }
+ yylhsminor.yy182 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy232, &yymsp[-4].minor.yy0);
+ if( yymsp[-2].minor.yy502==SF_Distinct && yylhsminor.yy182 ){
+ yylhsminor.yy182->flags |= EP_Distinct;
+ }
}
- yymsp[-4].minor.yy18 = yylhsminor.yy18;
+ yymsp[-4].minor.yy182 = yylhsminor.yy182;
break;
- case 174: /* expr ::= ID|INDEXED LP STAR RP */
+ case 173: /* expr ::= ID|INDEXED LP STAR RP */
{
- yylhsminor.yy18 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0);
+ yylhsminor.yy182 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0);
}
- yymsp[-3].minor.yy18 = yylhsminor.yy18;
+ yymsp[-3].minor.yy182 = yylhsminor.yy182;
break;
- case 175: /* expr ::= ID|INDEXED LP distinct exprlist RP over_clause */
+ case 174: /* term ::= CTIME_KW */
{
- yylhsminor.yy18 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy420, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy70);
- sqlite3WindowAttach(pParse, yylhsminor.yy18, yymsp[0].minor.yy327);
+ yylhsminor.yy182 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0);
}
- yymsp[-5].minor.yy18 = yylhsminor.yy18;
+ yymsp[0].minor.yy182 = yylhsminor.yy182;
break;
- case 176: /* expr ::= ID|INDEXED LP STAR RP over_clause */
+ case 175: /* expr ::= LP nexprlist COMMA expr RP */
{
- yylhsminor.yy18 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0);
- sqlite3WindowAttach(pParse, yylhsminor.yy18, yymsp[0].minor.yy327);
-}
- yymsp[-4].minor.yy18 = yylhsminor.yy18;
- break;
- case 177: /* term ::= CTIME_KW */
-{
- yylhsminor.yy18 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0);
-}
- yymsp[0].minor.yy18 = yylhsminor.yy18;
- break;
- case 178: /* expr ::= LP nexprlist COMMA expr RP */
-{
- ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy420, yymsp[-1].minor.yy18);
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
- if( yymsp[-4].minor.yy18 ){
- yymsp[-4].minor.yy18->x.pList = pList;
+ ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy232, yymsp[-1].minor.yy182);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
+ if( yymsp[-4].minor.yy182 ){
+ yymsp[-4].minor.yy182->x.pList = pList;
}else{
sqlite3ExprListDelete(pParse->db, pList);
}
}
break;
- case 179: /* expr ::= expr AND expr */
- case 180: /* expr ::= expr OR expr */ yytestcase(yyruleno==180);
- case 181: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==181);
- case 182: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==182);
- case 183: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==183);
- case 184: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==184);
- case 185: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==185);
- case 186: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==186);
-{yymsp[-2].minor.yy18=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy18,yymsp[0].minor.yy18);}
+ case 176: /* expr ::= expr AND expr */
+ case 177: /* expr ::= expr OR expr */ yytestcase(yyruleno==177);
+ case 178: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==178);
+ case 179: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==179);
+ case 180: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==180);
+ case 181: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==181);
+ case 182: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==182);
+ case 183: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==183);
+{yymsp[-2].minor.yy182=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy182,yymsp[0].minor.yy182);}
break;
- case 187: /* likeop ::= NOT LIKE_KW|MATCH */
+ case 184: /* likeop ::= NOT LIKE_KW|MATCH */
{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
break;
- case 188: /* expr ::= expr likeop expr */
+ case 185: /* expr ::= expr likeop expr */
{
ExprList *pList;
int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
yymsp[-1].minor.yy0.n &= 0x7fffffff;
- pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy18);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy18);
- yymsp[-2].minor.yy18 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);
- if( bNot ) yymsp[-2].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy18, 0);
- if( yymsp[-2].minor.yy18 ) yymsp[-2].minor.yy18->flags |= EP_InfixFunc;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy182);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy182);
+ yymsp[-2].minor.yy182 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0);
+ if( bNot ) yymsp[-2].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy182, 0);
+ if( yymsp[-2].minor.yy182 ) yymsp[-2].minor.yy182->flags |= EP_InfixFunc;
}
break;
- case 189: /* expr ::= expr likeop expr ESCAPE expr */
+ case 186: /* expr ::= expr likeop expr ESCAPE expr */
{
ExprList *pList;
int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
yymsp[-3].minor.yy0.n &= 0x7fffffff;
- pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy18);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy18);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy18);
- yymsp[-4].minor.yy18 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0, 0);
- if( bNot ) yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy18, 0);
- if( yymsp[-4].minor.yy18 ) yymsp[-4].minor.yy18->flags |= EP_InfixFunc;
+ pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy182);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy182);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy182);
+ yymsp[-4].minor.yy182 = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0);
+ if( bNot ) yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy182, 0);
+ if( yymsp[-4].minor.yy182 ) yymsp[-4].minor.yy182->flags |= EP_InfixFunc;
}
break;
- case 190: /* expr ::= expr ISNULL|NOTNULL */
-{yymsp[-1].minor.yy18 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy18,0);}
+ case 187: /* expr ::= expr ISNULL|NOTNULL */
+{yymsp[-1].minor.yy182 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy182,0);}
break;
- case 191: /* expr ::= expr NOT NULL */
-{yymsp[-2].minor.yy18 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy18,0);}
+ case 188: /* expr ::= expr NOT NULL */
+{yymsp[-2].minor.yy182 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy182,0);}
break;
- case 192: /* expr ::= expr IS expr */
+ case 189: /* expr ::= expr IS expr */
{
- yymsp[-2].minor.yy18 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy18,yymsp[0].minor.yy18);
- binaryToUnaryIfNull(pParse, yymsp[0].minor.yy18, yymsp[-2].minor.yy18, TK_ISNULL);
+ yymsp[-2].minor.yy182 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy182,yymsp[0].minor.yy182);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy182, yymsp[-2].minor.yy182, TK_ISNULL);
}
break;
- case 193: /* expr ::= expr IS NOT expr */
+ case 190: /* expr ::= expr IS NOT expr */
{
- yymsp[-3].minor.yy18 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy18,yymsp[0].minor.yy18);
- binaryToUnaryIfNull(pParse, yymsp[0].minor.yy18, yymsp[-3].minor.yy18, TK_NOTNULL);
+ yymsp[-3].minor.yy182 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy182,yymsp[0].minor.yy182);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy182, yymsp[-3].minor.yy182, TK_NOTNULL);
}
break;
- case 194: /* expr ::= NOT expr */
- case 195: /* expr ::= BITNOT expr */ yytestcase(yyruleno==195);
-{yymsp[-1].minor.yy18 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy18, 0);/*A-overwrites-B*/}
+ case 191: /* expr ::= NOT expr */
+ case 192: /* expr ::= BITNOT expr */ yytestcase(yyruleno==192);
+{yymsp[-1].minor.yy182 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy182, 0);/*A-overwrites-B*/}
break;
- case 196: /* expr ::= PLUS|MINUS expr */
+ case 193: /* expr ::= MINUS expr */
+{yymsp[-1].minor.yy182 = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy182, 0);}
+ break;
+ case 194: /* expr ::= PLUS expr */
+{yymsp[-1].minor.yy182 = sqlite3PExpr(pParse, TK_UPLUS, yymsp[0].minor.yy182, 0);}
+ break;
+ case 195: /* between_op ::= BETWEEN */
+ case 198: /* in_op ::= IN */ yytestcase(yyruleno==198);
+{yymsp[0].minor.yy502 = 0;}
+ break;
+ case 197: /* expr ::= expr between_op expr AND expr */
{
- yymsp[-1].minor.yy18 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy18, 0);
- /*A-overwrites-B*/
-}
- break;
- case 197: /* between_op ::= BETWEEN */
- case 200: /* in_op ::= IN */ yytestcase(yyruleno==200);
-{yymsp[0].minor.yy70 = 0;}
- break;
- case 199: /* expr ::= expr between_op expr AND expr */
-{
- ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy18);
- pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy18);
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy18, 0);
- if( yymsp[-4].minor.yy18 ){
- yymsp[-4].minor.yy18->x.pList = pList;
+ ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy182);
+ pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy182);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy182, 0);
+ if( yymsp[-4].minor.yy182 ){
+ yymsp[-4].minor.yy182->x.pList = pList;
}else{
sqlite3ExprListDelete(pParse->db, pList);
}
- if( yymsp[-3].minor.yy70 ) yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy18, 0);
+ if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy182, 0);
}
break;
- case 202: /* expr ::= expr in_op LP exprlist RP */
+ case 200: /* expr ::= expr in_op LP exprlist RP */
{
- if( yymsp[-1].minor.yy420==0 ){
+ if( yymsp[-1].minor.yy232==0 ){
/* Expressions of the form
**
** expr1 IN ()
@@ -149498,9 +144771,9 @@ static YYACTIONTYPE yy_reduce(
** simplify to constants 0 (false) and 1 (true), respectively,
** regardless of the value of expr1.
*/
- sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy18);
- yymsp[-4].minor.yy18 = sqlite3ExprAlloc(pParse->db, TK_INTEGER,&sqlite3IntTokens[yymsp[-3].minor.yy70],1);
- }else if( yymsp[-1].minor.yy420->nExpr==1 ){
+ sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy182);
+ yymsp[-4].minor.yy182 = sqlite3ExprAlloc(pParse->db, TK_INTEGER,&sqlite3IntTokens[yymsp[-3].minor.yy502],1);
+ }else if( yymsp[-1].minor.yy232->nExpr==1 ){
/* Expressions of the form:
**
** expr1 IN (?1)
@@ -149517,199 +144790,195 @@ static YYACTIONTYPE yy_reduce(
** affinity or the collating sequence to use for comparison. Otherwise,
** the semantics would be subtly different from IN or NOT IN.
*/
- Expr *pRHS = yymsp[-1].minor.yy420->a[0].pExpr;
- yymsp[-1].minor.yy420->a[0].pExpr = 0;
- sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy420);
+ Expr *pRHS = yymsp[-1].minor.yy232->a[0].pExpr;
+ yymsp[-1].minor.yy232->a[0].pExpr = 0;
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy232);
/* pRHS cannot be NULL because a malloc error would have been detected
** before now and control would have never reached this point */
if( ALWAYS(pRHS) ){
pRHS->flags &= ~EP_Collate;
pRHS->flags |= EP_Generic;
}
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, yymsp[-3].minor.yy70 ? TK_NE : TK_EQ, yymsp[-4].minor.yy18, pRHS);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, yymsp[-3].minor.yy502 ? TK_NE : TK_EQ, yymsp[-4].minor.yy182, pRHS);
}else{
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy18, 0);
- if( yymsp[-4].minor.yy18 ){
- yymsp[-4].minor.yy18->x.pList = yymsp[-1].minor.yy420;
- sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy18);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy182, 0);
+ if( yymsp[-4].minor.yy182 ){
+ yymsp[-4].minor.yy182->x.pList = yymsp[-1].minor.yy232;
+ sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy182);
}else{
- sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy420);
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy232);
}
- if( yymsp[-3].minor.yy70 ) yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy18, 0);
+ if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy182, 0);
}
}
break;
- case 203: /* expr ::= LP select RP */
+ case 201: /* expr ::= LP select RP */
{
- yymsp[-2].minor.yy18 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
- sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy18, yymsp[-1].minor.yy489);
+ yymsp[-2].minor.yy182 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy182, yymsp[-1].minor.yy399);
}
break;
- case 204: /* expr ::= expr in_op LP select RP */
+ case 202: /* expr ::= expr in_op LP select RP */
{
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy18, 0);
- sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy18, yymsp[-1].minor.yy489);
- if( yymsp[-3].minor.yy70 ) yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy18, 0);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy182, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy182, yymsp[-1].minor.yy399);
+ if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy182, 0);
}
break;
- case 205: /* expr ::= expr in_op nm dbnm paren_exprlist */
+ case 203: /* expr ::= expr in_op nm dbnm paren_exprlist */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
- if( yymsp[0].minor.yy420 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy420);
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy18, 0);
- sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy18, pSelect);
- if( yymsp[-3].minor.yy70 ) yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy18, 0);
+ if( yymsp[0].minor.yy232 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy232);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy182, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy182, pSelect);
+ if( yymsp[-3].minor.yy502 ) yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy182, 0);
}
break;
- case 206: /* expr ::= EXISTS LP select RP */
+ case 204: /* expr ::= EXISTS LP select RP */
{
Expr *p;
- p = yymsp[-3].minor.yy18 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
- sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy489);
+ p = yymsp[-3].minor.yy182 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
+ sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy399);
}
break;
- case 207: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 205: /* expr ::= CASE case_operand case_exprlist case_else END */
{
- yymsp[-4].minor.yy18 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy18, 0);
- if( yymsp[-4].minor.yy18 ){
- yymsp[-4].minor.yy18->x.pList = yymsp[-1].minor.yy18 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy420,yymsp[-1].minor.yy18) : yymsp[-2].minor.yy420;
- sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy18);
+ yymsp[-4].minor.yy182 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy182, 0);
+ if( yymsp[-4].minor.yy182 ){
+ yymsp[-4].minor.yy182->x.pList = yymsp[-1].minor.yy182 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy232,yymsp[-1].minor.yy182) : yymsp[-2].minor.yy232;
+ sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy182);
}else{
- sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy420);
- sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy18);
+ sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy232);
+ sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy182);
}
}
break;
- case 208: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 206: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
- yymsp[-4].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy420, yymsp[-2].minor.yy18);
- yymsp[-4].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy420, yymsp[0].minor.yy18);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy232, yymsp[-2].minor.yy182);
+ yymsp[-4].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy232, yymsp[0].minor.yy182);
}
break;
- case 209: /* case_exprlist ::= WHEN expr THEN expr */
+ case 207: /* case_exprlist ::= WHEN expr THEN expr */
{
- yymsp[-3].minor.yy420 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy18);
- yymsp[-3].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy420, yymsp[0].minor.yy18);
+ yymsp[-3].minor.yy232 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy182);
+ yymsp[-3].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy232, yymsp[0].minor.yy182);
}
break;
- case 212: /* case_operand ::= expr */
-{yymsp[0].minor.yy18 = yymsp[0].minor.yy18; /*A-overwrites-X*/}
+ case 210: /* case_operand ::= expr */
+{yymsp[0].minor.yy182 = yymsp[0].minor.yy182; /*A-overwrites-X*/}
break;
- case 215: /* nexprlist ::= nexprlist COMMA expr */
-{yymsp[-2].minor.yy420 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy420,yymsp[0].minor.yy18);}
+ case 213: /* nexprlist ::= nexprlist COMMA expr */
+{yymsp[-2].minor.yy232 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy232,yymsp[0].minor.yy182);}
break;
- case 216: /* nexprlist ::= expr */
-{yymsp[0].minor.yy420 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy18); /*A-overwrites-Y*/}
+ case 214: /* nexprlist ::= expr */
+{yymsp[0].minor.yy232 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy182); /*A-overwrites-Y*/}
break;
- case 218: /* paren_exprlist ::= LP exprlist RP */
- case 223: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==223);
-{yymsp[-2].minor.yy420 = yymsp[-1].minor.yy420;}
+ case 216: /* paren_exprlist ::= LP exprlist RP */
+ case 221: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==221);
+{yymsp[-2].minor.yy232 = yymsp[-1].minor.yy232;}
break;
- case 219: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 217: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
- sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy420, yymsp[-10].minor.yy70,
- &yymsp[-11].minor.yy0, yymsp[0].minor.yy18, SQLITE_SO_ASC, yymsp[-8].minor.yy70, SQLITE_IDXTYPE_APPDEF);
- if( IN_RENAME_OBJECT && pParse->pNewIndex ){
- sqlite3RenameTokenMap(pParse, pParse->pNewIndex->zName, &yymsp[-4].minor.yy0);
- }
+ sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy232, yymsp[-10].minor.yy502,
+ &yymsp[-11].minor.yy0, yymsp[0].minor.yy182, SQLITE_SO_ASC, yymsp[-8].minor.yy502, SQLITE_IDXTYPE_APPDEF);
}
break;
- case 220: /* uniqueflag ::= UNIQUE */
- case 260: /* raisetype ::= ABORT */ yytestcase(yyruleno==260);
-{yymsp[0].minor.yy70 = OE_Abort;}
+ case 218: /* uniqueflag ::= UNIQUE */
+ case 258: /* raisetype ::= ABORT */ yytestcase(yyruleno==258);
+{yymsp[0].minor.yy502 = OE_Abort;}
break;
- case 221: /* uniqueflag ::= */
-{yymsp[1].minor.yy70 = OE_None;}
+ case 219: /* uniqueflag ::= */
+{yymsp[1].minor.yy502 = OE_None;}
break;
- case 224: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 222: /* eidlist ::= eidlist COMMA nm collate sortorder */
{
- yymsp[-4].minor.yy420 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy420, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy70, yymsp[0].minor.yy70);
+ yymsp[-4].minor.yy232 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy232, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502);
}
break;
- case 225: /* eidlist ::= nm collate sortorder */
+ case 223: /* eidlist ::= nm collate sortorder */
{
- yymsp[-2].minor.yy420 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy70, yymsp[0].minor.yy70); /*A-overwrites-Y*/
+ yymsp[-2].minor.yy232 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy502, yymsp[0].minor.yy502); /*A-overwrites-Y*/
}
break;
- case 228: /* cmd ::= DROP INDEX ifexists fullname */
-{sqlite3DropIndex(pParse, yymsp[0].minor.yy135, yymsp[-1].minor.yy70);}
+ case 226: /* cmd ::= DROP INDEX ifexists fullname */
+{sqlite3DropIndex(pParse, yymsp[0].minor.yy427, yymsp[-1].minor.yy502);}
break;
- case 229: /* cmd ::= VACUUM */
+ case 227: /* cmd ::= VACUUM */
{sqlite3Vacuum(pParse,0);}
break;
- case 230: /* cmd ::= VACUUM nm */
+ case 228: /* cmd ::= VACUUM nm */
{sqlite3Vacuum(pParse,&yymsp[0].minor.yy0);}
break;
- case 231: /* cmd ::= PRAGMA nm dbnm */
+ case 229: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 232: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 230: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 233: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 231: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 234: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 232: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 235: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 233: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 238: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 236: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n;
- sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy207, &all);
+ sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy47, &all);
}
break;
- case 239: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 237: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
- sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy70, yymsp[-4].minor.yy34.a, yymsp[-4].minor.yy34.b, yymsp[-2].minor.yy135, yymsp[0].minor.yy18, yymsp[-10].minor.yy70, yymsp[-8].minor.yy70);
+ sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy502, yymsp[-4].minor.yy300.a, yymsp[-4].minor.yy300.b, yymsp[-2].minor.yy427, yymsp[0].minor.yy182, yymsp[-10].minor.yy502, yymsp[-8].minor.yy502);
yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
}
break;
- case 240: /* trigger_time ::= BEFORE|AFTER */
-{ yymsp[0].minor.yy70 = yymsp[0].major; /*A-overwrites-X*/ }
+ case 238: /* trigger_time ::= BEFORE|AFTER */
+{ yymsp[0].minor.yy502 = yymsp[0].major; /*A-overwrites-X*/ }
break;
- case 241: /* trigger_time ::= INSTEAD OF */
-{ yymsp[-1].minor.yy70 = TK_INSTEAD;}
+ case 239: /* trigger_time ::= INSTEAD OF */
+{ yymsp[-1].minor.yy502 = TK_INSTEAD;}
break;
- case 242: /* trigger_time ::= */
-{ yymsp[1].minor.yy70 = TK_BEFORE; }
+ case 240: /* trigger_time ::= */
+{ yymsp[1].minor.yy502 = TK_BEFORE; }
break;
- case 243: /* trigger_event ::= DELETE|INSERT */
- case 244: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==244);
-{yymsp[0].minor.yy34.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy34.b = 0;}
+ case 241: /* trigger_event ::= DELETE|INSERT */
+ case 242: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==242);
+{yymsp[0].minor.yy300.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy300.b = 0;}
break;
- case 245: /* trigger_event ::= UPDATE OF idlist */
-{yymsp[-2].minor.yy34.a = TK_UPDATE; yymsp[-2].minor.yy34.b = yymsp[0].minor.yy48;}
+ case 243: /* trigger_event ::= UPDATE OF idlist */
+{yymsp[-2].minor.yy300.a = TK_UPDATE; yymsp[-2].minor.yy300.b = yymsp[0].minor.yy510;}
break;
- case 246: /* when_clause ::= */
- case 265: /* key_opt ::= */ yytestcase(yyruleno==265);
- case 307: /* filter_opt ::= */ yytestcase(yyruleno==307);
-{ yymsp[1].minor.yy18 = 0; }
+ case 244: /* when_clause ::= */
+ case 263: /* key_opt ::= */ yytestcase(yyruleno==263);
+{ yymsp[1].minor.yy182 = 0; }
break;
- case 247: /* when_clause ::= WHEN expr */
- case 266: /* key_opt ::= KEY expr */ yytestcase(yyruleno==266);
-{ yymsp[-1].minor.yy18 = yymsp[0].minor.yy18; }
+ case 245: /* when_clause ::= WHEN expr */
+ case 264: /* key_opt ::= KEY expr */ yytestcase(yyruleno==264);
+{ yymsp[-1].minor.yy182 = yymsp[0].minor.yy182; }
break;
- case 248: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 246: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
- assert( yymsp[-2].minor.yy207!=0 );
- yymsp[-2].minor.yy207->pLast->pNext = yymsp[-1].minor.yy207;
- yymsp[-2].minor.yy207->pLast = yymsp[-1].minor.yy207;
+ assert( yymsp[-2].minor.yy47!=0 );
+ yymsp[-2].minor.yy47->pLast->pNext = yymsp[-1].minor.yy47;
+ yymsp[-2].minor.yy47->pLast = yymsp[-1].minor.yy47;
}
break;
- case 249: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 247: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
- assert( yymsp[-1].minor.yy207!=0 );
- yymsp[-1].minor.yy207->pLast = yymsp[-1].minor.yy207;
+ assert( yymsp[-1].minor.yy47!=0 );
+ yymsp[-1].minor.yy47->pLast = yymsp[-1].minor.yy47;
}
break;
- case 250: /* trnm ::= nm DOT nm */
+ case 248: /* trnm ::= nm DOT nm */
{
yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -149717,306 +144986,196 @@ static YYACTIONTYPE yy_reduce(
"statements within triggers");
}
break;
- case 251: /* tridxby ::= INDEXED BY nm */
+ case 249: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 252: /* tridxby ::= NOT INDEXED */
+ case 250: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 253: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt */
-{yylhsminor.yy207 = sqlite3TriggerUpdateStep(pParse, &yymsp[-5].minor.yy0, yymsp[-2].minor.yy420, yymsp[-1].minor.yy18, yymsp[-6].minor.yy70, yymsp[-7].minor.yy0.z, yymsp[0].minor.yy392);}
- yymsp[-7].minor.yy207 = yylhsminor.yy207;
+ case 251: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt scanpt */
+{yylhsminor.yy47 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-5].minor.yy0, yymsp[-2].minor.yy232, yymsp[-1].minor.yy182, yymsp[-6].minor.yy502, yymsp[-7].minor.yy0.z, yymsp[0].minor.yy36);}
+ yymsp[-7].minor.yy47 = yylhsminor.yy47;
break;
- case 254: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ case 252: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
{
- yylhsminor.yy207 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy48,yymsp[-2].minor.yy489,yymsp[-6].minor.yy70,yymsp[-1].minor.yy340,yymsp[-7].minor.yy392,yymsp[0].minor.yy392);/*yylhsminor.yy207-overwrites-yymsp[-6].minor.yy70*/
+ yylhsminor.yy47 = sqlite3TriggerInsertStep(pParse->db,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy510,yymsp[-2].minor.yy399,yymsp[-6].minor.yy502,yymsp[-1].minor.yy198,yymsp[-7].minor.yy36,yymsp[0].minor.yy36);/*yylhsminor.yy47-overwrites-yymsp[-6].minor.yy502*/
}
- yymsp[-7].minor.yy207 = yylhsminor.yy207;
+ yymsp[-7].minor.yy47 = yylhsminor.yy47;
break;
- case 255: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
-{yylhsminor.yy207 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy18, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy392);}
- yymsp[-5].minor.yy207 = yylhsminor.yy207;
+ case 253: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+{yylhsminor.yy47 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy182, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy36);}
+ yymsp[-5].minor.yy47 = yylhsminor.yy47;
break;
- case 256: /* trigger_cmd ::= scanpt select scanpt */
-{yylhsminor.yy207 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy489, yymsp[-2].minor.yy392, yymsp[0].minor.yy392); /*yylhsminor.yy207-overwrites-yymsp[-1].minor.yy489*/}
- yymsp[-2].minor.yy207 = yylhsminor.yy207;
+ case 254: /* trigger_cmd ::= scanpt select scanpt */
+{yylhsminor.yy47 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy399, yymsp[-2].minor.yy36, yymsp[0].minor.yy36); /*yylhsminor.yy47-overwrites-yymsp[-1].minor.yy399*/}
+ yymsp[-2].minor.yy47 = yylhsminor.yy47;
break;
- case 257: /* expr ::= RAISE LP IGNORE RP */
+ case 255: /* expr ::= RAISE LP IGNORE RP */
{
- yymsp[-3].minor.yy18 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
- if( yymsp[-3].minor.yy18 ){
- yymsp[-3].minor.yy18->affinity = OE_Ignore;
+ yymsp[-3].minor.yy182 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
+ if( yymsp[-3].minor.yy182 ){
+ yymsp[-3].minor.yy182->affinity = OE_Ignore;
}
}
break;
- case 258: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 256: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
- yymsp[-5].minor.yy18 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
- if( yymsp[-5].minor.yy18 ) {
- yymsp[-5].minor.yy18->affinity = (char)yymsp[-3].minor.yy70;
+ yymsp[-5].minor.yy182 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
+ if( yymsp[-5].minor.yy182 ) {
+ yymsp[-5].minor.yy182->affinity = (char)yymsp[-3].minor.yy502;
}
}
break;
- case 259: /* raisetype ::= ROLLBACK */
-{yymsp[0].minor.yy70 = OE_Rollback;}
+ case 257: /* raisetype ::= ROLLBACK */
+{yymsp[0].minor.yy502 = OE_Rollback;}
break;
- case 261: /* raisetype ::= FAIL */
-{yymsp[0].minor.yy70 = OE_Fail;}
+ case 259: /* raisetype ::= FAIL */
+{yymsp[0].minor.yy502 = OE_Fail;}
break;
- case 262: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 260: /* cmd ::= DROP TRIGGER ifexists fullname */
{
- sqlite3DropTrigger(pParse,yymsp[0].minor.yy135,yymsp[-1].minor.yy70);
+ sqlite3DropTrigger(pParse,yymsp[0].minor.yy427,yymsp[-1].minor.yy502);
}
break;
- case 263: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 261: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
- sqlite3Attach(pParse, yymsp[-3].minor.yy18, yymsp[-1].minor.yy18, yymsp[0].minor.yy18);
+ sqlite3Attach(pParse, yymsp[-3].minor.yy182, yymsp[-1].minor.yy182, yymsp[0].minor.yy182);
}
break;
- case 264: /* cmd ::= DETACH database_kw_opt expr */
+ case 262: /* cmd ::= DETACH database_kw_opt expr */
{
- sqlite3Detach(pParse, yymsp[0].minor.yy18);
+ sqlite3Detach(pParse, yymsp[0].minor.yy182);
}
break;
- case 267: /* cmd ::= REINDEX */
+ case 265: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 268: /* cmd ::= REINDEX nm dbnm */
+ case 266: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 269: /* cmd ::= ANALYZE */
+ case 267: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 270: /* cmd ::= ANALYZE nm dbnm */
+ case 268: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 271: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 269: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
- sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy135,&yymsp[0].minor.yy0);
+ sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy427,&yymsp[0].minor.yy0);
}
break;
- case 272: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ case 270: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
{
yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
}
break;
- case 273: /* add_column_fullname ::= fullname */
+ case 271: /* add_column_fullname ::= fullname */
{
disableLookaside(pParse);
- sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy135);
+ sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy427);
}
break;
- case 274: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
-{
- sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy135, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
-}
- break;
- case 275: /* cmd ::= create_vtab */
+ case 272: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 276: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 273: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 277: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 274: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
- sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy70);
+ sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy502);
}
break;
- case 278: /* vtabarg ::= */
+ case 275: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 279: /* vtabargtoken ::= ANY */
- case 280: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==280);
- case 281: /* lp ::= LP */ yytestcase(yyruleno==281);
+ case 276: /* vtabargtoken ::= ANY */
+ case 277: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==277);
+ case 278: /* lp ::= LP */ yytestcase(yyruleno==278);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 282: /* with ::= WITH wqlist */
- case 283: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==283);
-{ sqlite3WithPush(pParse, yymsp[0].minor.yy449, 1); }
+ case 279: /* with ::= WITH wqlist */
+ case 280: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==280);
+{ sqlite3WithPush(pParse, yymsp[0].minor.yy91, 1); }
break;
- case 284: /* wqlist ::= nm eidlist_opt AS LP select RP */
+ case 281: /* wqlist ::= nm eidlist_opt AS LP select RP */
{
- yymsp[-5].minor.yy449 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy420, yymsp[-1].minor.yy489); /*A-overwrites-X*/
+ yymsp[-5].minor.yy91 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy232, yymsp[-1].minor.yy399); /*A-overwrites-X*/
}
break;
- case 285: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+ case 282: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
{
- yymsp[-7].minor.yy449 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy449, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy420, yymsp[-1].minor.yy489);
+ yymsp[-7].minor.yy91 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy91, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy232, yymsp[-1].minor.yy399);
}
break;
- case 286: /* windowdefn_list ::= windowdefn */
-{ yylhsminor.yy327 = yymsp[0].minor.yy327; }
- yymsp[0].minor.yy327 = yylhsminor.yy327;
- break;
- case 287: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
-{
- assert( yymsp[0].minor.yy327!=0 );
- yymsp[0].minor.yy327->pNextWin = yymsp[-2].minor.yy327;
- yylhsminor.yy327 = yymsp[0].minor.yy327;
-}
- yymsp[-2].minor.yy327 = yylhsminor.yy327;
- break;
- case 288: /* windowdefn ::= nm AS window */
-{
- if( ALWAYS(yymsp[0].minor.yy327) ){
- yymsp[0].minor.yy327->zName = sqlite3DbStrNDup(pParse->db, yymsp[-2].minor.yy0.z, yymsp[-2].minor.yy0.n);
- }
- yylhsminor.yy327 = yymsp[0].minor.yy327;
-}
- yymsp[-2].minor.yy327 = yylhsminor.yy327;
- break;
- case 289: /* window ::= LP part_opt orderby_opt frame_opt RP */
-{
- yymsp[-4].minor.yy327 = yymsp[-1].minor.yy327;
- if( ALWAYS(yymsp[-4].minor.yy327) ){
- yymsp[-4].minor.yy327->pPartition = yymsp[-3].minor.yy420;
- yymsp[-4].minor.yy327->pOrderBy = yymsp[-2].minor.yy420;
- }
-}
- break;
- case 290: /* part_opt ::= PARTITION BY nexprlist */
-{ yymsp[-2].minor.yy420 = yymsp[0].minor.yy420; }
- break;
- case 291: /* part_opt ::= */
-{ yymsp[1].minor.yy420 = 0; }
- break;
- case 292: /* frame_opt ::= */
-{
- yymsp[1].minor.yy327 = sqlite3WindowAlloc(pParse, TK_RANGE, TK_UNBOUNDED, 0, TK_CURRENT, 0);
-}
- break;
- case 293: /* frame_opt ::= range_or_rows frame_bound_s */
-{
- yylhsminor.yy327 = sqlite3WindowAlloc(pParse, yymsp[-1].minor.yy70, yymsp[0].minor.yy119.eType, yymsp[0].minor.yy119.pExpr, TK_CURRENT, 0);
-}
- yymsp[-1].minor.yy327 = yylhsminor.yy327;
- break;
- case 294: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e */
-{
- yylhsminor.yy327 = sqlite3WindowAlloc(pParse, yymsp[-4].minor.yy70, yymsp[-2].minor.yy119.eType, yymsp[-2].minor.yy119.pExpr, yymsp[0].minor.yy119.eType, yymsp[0].minor.yy119.pExpr);
-}
- yymsp[-4].minor.yy327 = yylhsminor.yy327;
- break;
- case 295: /* range_or_rows ::= RANGE */
-{ yymsp[0].minor.yy70 = TK_RANGE; }
- break;
- case 296: /* range_or_rows ::= ROWS */
-{ yymsp[0].minor.yy70 = TK_ROWS; }
- break;
- case 297: /* frame_bound_s ::= frame_bound */
- case 299: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==299);
-{ yylhsminor.yy119 = yymsp[0].minor.yy119; }
- yymsp[0].minor.yy119 = yylhsminor.yy119;
- break;
- case 298: /* frame_bound_s ::= UNBOUNDED PRECEDING */
- case 300: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==300);
-{yymsp[-1].minor.yy119.eType = TK_UNBOUNDED; yymsp[-1].minor.yy119.pExpr = 0;}
- break;
- case 301: /* frame_bound ::= expr PRECEDING */
-{ yylhsminor.yy119.eType = TK_PRECEDING; yylhsminor.yy119.pExpr = yymsp[-1].minor.yy18; }
- yymsp[-1].minor.yy119 = yylhsminor.yy119;
- break;
- case 302: /* frame_bound ::= CURRENT ROW */
-{ yymsp[-1].minor.yy119.eType = TK_CURRENT ; yymsp[-1].minor.yy119.pExpr = 0; }
- break;
- case 303: /* frame_bound ::= expr FOLLOWING */
-{ yylhsminor.yy119.eType = TK_FOLLOWING; yylhsminor.yy119.pExpr = yymsp[-1].minor.yy18; }
- yymsp[-1].minor.yy119 = yylhsminor.yy119;
- break;
- case 304: /* window_clause ::= WINDOW windowdefn_list */
-{ yymsp[-1].minor.yy327 = yymsp[0].minor.yy327; }
- break;
- case 305: /* over_clause ::= filter_opt OVER window */
-{
- yylhsminor.yy327 = yymsp[0].minor.yy327;
- assert( yylhsminor.yy327!=0 );
- yylhsminor.yy327->pFilter = yymsp[-2].minor.yy18;
-}
- yymsp[-2].minor.yy327 = yylhsminor.yy327;
- break;
- case 306: /* over_clause ::= filter_opt OVER nm */
-{
- yylhsminor.yy327 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
- if( yylhsminor.yy327 ){
- yylhsminor.yy327->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n);
- yylhsminor.yy327->pFilter = yymsp[-2].minor.yy18;
- }else{
- sqlite3ExprDelete(pParse->db, yymsp[-2].minor.yy18);
- }
-}
- yymsp[-2].minor.yy327 = yylhsminor.yy327;
- break;
- case 308: /* filter_opt ::= FILTER LP WHERE expr RP */
-{ yymsp[-4].minor.yy18 = yymsp[-1].minor.yy18; }
- break;
default:
- /* (309) input ::= cmdlist */ yytestcase(yyruleno==309);
- /* (310) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==310);
- /* (311) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=311);
- /* (312) ecmd ::= SEMI */ yytestcase(yyruleno==312);
- /* (313) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==313);
- /* (314) ecmd ::= explain cmdx */ yytestcase(yyruleno==314);
- /* (315) trans_opt ::= */ yytestcase(yyruleno==315);
- /* (316) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==316);
- /* (317) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==317);
- /* (318) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==318);
- /* (319) savepoint_opt ::= */ yytestcase(yyruleno==319);
- /* (320) cmd ::= create_table create_table_args */ yytestcase(yyruleno==320);
- /* (321) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==321);
- /* (322) columnlist ::= columnname carglist */ yytestcase(yyruleno==322);
- /* (323) nm ::= ID|INDEXED */ yytestcase(yyruleno==323);
- /* (324) nm ::= STRING */ yytestcase(yyruleno==324);
- /* (325) nm ::= JOIN_KW */ yytestcase(yyruleno==325);
- /* (326) typetoken ::= typename */ yytestcase(yyruleno==326);
- /* (327) typename ::= ID|STRING */ yytestcase(yyruleno==327);
- /* (328) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=328);
- /* (329) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=329);
- /* (330) carglist ::= carglist ccons */ yytestcase(yyruleno==330);
- /* (331) carglist ::= */ yytestcase(yyruleno==331);
- /* (332) ccons ::= NULL onconf */ yytestcase(yyruleno==332);
- /* (333) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==333);
- /* (334) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==334);
- /* (335) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=335);
- /* (336) tconscomma ::= */ yytestcase(yyruleno==336);
- /* (337) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=337);
- /* (338) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=338);
- /* (339) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=339);
- /* (340) oneselect ::= values */ yytestcase(yyruleno==340);
- /* (341) sclp ::= selcollist COMMA */ yytestcase(yyruleno==341);
- /* (342) as ::= ID|STRING */ yytestcase(yyruleno==342);
- /* (343) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=343);
- /* (344) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==344);
- /* (345) exprlist ::= nexprlist */ yytestcase(yyruleno==345);
- /* (346) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=346);
- /* (347) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=347);
- /* (348) nmnum ::= ON */ yytestcase(yyruleno==348);
- /* (349) nmnum ::= DELETE */ yytestcase(yyruleno==349);
- /* (350) nmnum ::= DEFAULT */ yytestcase(yyruleno==350);
- /* (351) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==351);
- /* (352) foreach_clause ::= */ yytestcase(yyruleno==352);
- /* (353) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==353);
- /* (354) trnm ::= nm */ yytestcase(yyruleno==354);
- /* (355) tridxby ::= */ yytestcase(yyruleno==355);
- /* (356) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==356);
- /* (357) database_kw_opt ::= */ yytestcase(yyruleno==357);
- /* (358) kwcolumn_opt ::= */ yytestcase(yyruleno==358);
- /* (359) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==359);
- /* (360) vtabarglist ::= vtabarg */ yytestcase(yyruleno==360);
- /* (361) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==361);
- /* (362) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==362);
- /* (363) anylist ::= */ yytestcase(yyruleno==363);
- /* (364) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==364);
- /* (365) anylist ::= anylist ANY */ yytestcase(yyruleno==365);
- /* (366) with ::= */ yytestcase(yyruleno==366);
+ /* (283) input ::= cmdlist */ yytestcase(yyruleno==283);
+ /* (284) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==284);
+ /* (285) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=285);
+ /* (286) ecmd ::= SEMI */ yytestcase(yyruleno==286);
+ /* (287) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==287);
+ /* (288) ecmd ::= explain cmdx */ yytestcase(yyruleno==288);
+ /* (289) trans_opt ::= */ yytestcase(yyruleno==289);
+ /* (290) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==290);
+ /* (291) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==291);
+ /* (292) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==292);
+ /* (293) savepoint_opt ::= */ yytestcase(yyruleno==293);
+ /* (294) cmd ::= create_table create_table_args */ yytestcase(yyruleno==294);
+ /* (295) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==295);
+ /* (296) columnlist ::= columnname carglist */ yytestcase(yyruleno==296);
+ /* (297) nm ::= ID|INDEXED */ yytestcase(yyruleno==297);
+ /* (298) nm ::= STRING */ yytestcase(yyruleno==298);
+ /* (299) nm ::= JOIN_KW */ yytestcase(yyruleno==299);
+ /* (300) typetoken ::= typename */ yytestcase(yyruleno==300);
+ /* (301) typename ::= ID|STRING */ yytestcase(yyruleno==301);
+ /* (302) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=302);
+ /* (303) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=303);
+ /* (304) carglist ::= carglist ccons */ yytestcase(yyruleno==304);
+ /* (305) carglist ::= */ yytestcase(yyruleno==305);
+ /* (306) ccons ::= NULL onconf */ yytestcase(yyruleno==306);
+ /* (307) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==307);
+ /* (308) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==308);
+ /* (309) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=309);
+ /* (310) tconscomma ::= */ yytestcase(yyruleno==310);
+ /* (311) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=311);
+ /* (312) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=312);
+ /* (313) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=313);
+ /* (314) oneselect ::= values */ yytestcase(yyruleno==314);
+ /* (315) sclp ::= selcollist COMMA */ yytestcase(yyruleno==315);
+ /* (316) as ::= ID|STRING */ yytestcase(yyruleno==316);
+ /* (317) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=317);
+ /* (318) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==318);
+ /* (319) exprlist ::= nexprlist */ yytestcase(yyruleno==319);
+ /* (320) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=320);
+ /* (321) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=321);
+ /* (322) nmnum ::= ON */ yytestcase(yyruleno==322);
+ /* (323) nmnum ::= DELETE */ yytestcase(yyruleno==323);
+ /* (324) nmnum ::= DEFAULT */ yytestcase(yyruleno==324);
+ /* (325) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==325);
+ /* (326) foreach_clause ::= */ yytestcase(yyruleno==326);
+ /* (327) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==327);
+ /* (328) trnm ::= nm */ yytestcase(yyruleno==328);
+ /* (329) tridxby ::= */ yytestcase(yyruleno==329);
+ /* (330) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==330);
+ /* (331) database_kw_opt ::= */ yytestcase(yyruleno==331);
+ /* (332) kwcolumn_opt ::= */ yytestcase(yyruleno==332);
+ /* (333) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==333);
+ /* (334) vtabarglist ::= vtabarg */ yytestcase(yyruleno==334);
+ /* (335) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==335);
+ /* (336) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==336);
+ /* (337) anylist ::= */ yytestcase(yyruleno==337);
+ /* (338) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==338);
+ /* (339) anylist ::= anylist ANY */ yytestcase(yyruleno==339);
+ /* (340) with ::= */ yytestcase(yyruleno==340);
break;
/********** End reduce actions ************************************************/
};
@@ -150170,12 +145329,12 @@ SQLITE_PRIVATE void sqlite3Parser(
do{
assert( yyact==yypParser->yytos->stateno );
- yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
+ yyact = yy_find_shift_action(yymajor,yyact);
if( yyact >= YY_MIN_REDUCE ){
yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,
yyminor sqlite3ParserCTX_PARAM);
}else if( yyact <= YY_MAX_SHIFTREDUCE ){
- yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
+ yy_shift(yypParser,yyact,yymajor,yyminor);
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt--;
#endif
@@ -150303,21 +145462,6 @@ SQLITE_PRIVATE void sqlite3Parser(
return;
}
-/*
-** Return the fallback token corresponding to canonical token iToken, or
-** 0 if iToken has no fallback.
-*/
-SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){
-#ifdef YYFALLBACK
- if( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) ){
- return yyFallback[iToken];
- }
-#else
- (void)iToken;
-#endif
- return 0;
-}
-
/************** End of parse.c ***********************************************/
/************** Begin file tokenize.c ****************************************/
/*
@@ -150376,12 +145520,11 @@ SQLITE_PRIVATE int sqlite3ParserFallback(int iToken){
#define CC_TILDA 25 /* '~' */
#define CC_DOT 26 /* '.' */
#define CC_ILLEGAL 27 /* Illegal character */
-#define CC_NUL 28 /* 0x00 */
static const unsigned char aiClass[] = {
#ifdef SQLITE_ASCII
/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */
-/* 0x */ 28, 27, 27, 27, 27, 27, 27, 27, 27, 7, 7, 27, 7, 7, 27, 27,
+/* 0x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 7, 7, 27, 7, 7, 27, 27,
/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
/* 2x */ 7, 15, 8, 5, 4, 22, 24, 8, 17, 18, 21, 20, 23, 11, 26, 16,
/* 3x */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 19, 12, 14, 13, 6,
@@ -150480,20 +145623,19 @@ const unsigned char ebcdicToAscii[] = {
** is substantially reduced. This is important for embedded applications
** on platforms with limited memory.
*/
-/* Hash score: 208 */
-/* zKWText[] encodes 923 bytes of keyword text in 614 bytes */
+/* Hash score: 185 */
+/* zKWText[] encodes 845 bytes of keyword text in 561 bytes */
/* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */
/* ABLEFTHENDEFERRABLELSEXCEPTRANSACTIONATURALTERAISEXCLUSIVE */
/* XISTSAVEPOINTERSECTRIGGEREFERENCESCONSTRAINTOFFSETEMPORARY */
-/* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERANGEBETWEEN */
-/* OTHINGLOBYCASCADELETECASECOLLATECREATECURRENT_DATEDETACH */
-/* IMMEDIATEJOINSERTLIKEMATCHPLANALYZEPRAGMABORTVALUESVIRTUALIMIT */
-/* WHENOTNULLWHERECURSIVEAFTERENAMEANDEFAULTAUTOINCREMENTCAST */
-/* COLUMNCOMMITCONFLICTCROSSCURRENT_TIMESTAMPARTITIONDEFERRED */
-/* ISTINCTDROPRECEDINGFAILFILTEREPLACEFOLLOWINGFROMFULLIFISNULL */
-/* ORDERESTRICTOVERIGHTROLLBACKROWSUNBOUNDEDUNIONUSINGVACUUMVIEW */
-/* INDOWINITIALLYPRIMARY */
-static const char zKWText[613] = {
+/* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERECURSIVE */
+/* BETWEENOTHINGLOBYCASCADELETECASECOLLATECREATECURRENT_DATE */
+/* DETACHIMMEDIATEJOINSERTLIKEMATCHPLANALYZEPRAGMABORTVALUES */
+/* VIRTUALIMITWHENOTNULLWHERENAMEAFTEREPLACEANDEFAULT */
+/* AUTOINCREMENTCASTCOLUMNCOMMITCONFLICTCROSSCURRENT_TIMESTAMP */
+/* RIMARYDEFERREDISTINCTDORDERESTRICTDROPFAILFROMFULLIFISNULL */
+/* RIGHTROLLBACKROWUNIONUSINGVACUUMVIEWINITIALLY */
+static const char zKWText[560] = {
'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H',
'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G',
'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A',
@@ -150506,90 +145648,84 @@ static const char zKWText[613] = {
'O','F','F','S','E','T','E','M','P','O','R','A','R','Y','U','N','I','Q',
'U','E','R','Y','W','I','T','H','O','U','T','E','R','E','L','E','A','S',
'E','A','T','T','A','C','H','A','V','I','N','G','R','O','U','P','D','A',
- 'T','E','B','E','G','I','N','N','E','R','A','N','G','E','B','E','T','W',
- 'E','E','N','O','T','H','I','N','G','L','O','B','Y','C','A','S','C','A',
- 'D','E','L','E','T','E','C','A','S','E','C','O','L','L','A','T','E','C',
- 'R','E','A','T','E','C','U','R','R','E','N','T','_','D','A','T','E','D',
- 'E','T','A','C','H','I','M','M','E','D','I','A','T','E','J','O','I','N',
- 'S','E','R','T','L','I','K','E','M','A','T','C','H','P','L','A','N','A',
- 'L','Y','Z','E','P','R','A','G','M','A','B','O','R','T','V','A','L','U',
- 'E','S','V','I','R','T','U','A','L','I','M','I','T','W','H','E','N','O',
- 'T','N','U','L','L','W','H','E','R','E','C','U','R','S','I','V','E','A',
- 'F','T','E','R','E','N','A','M','E','A','N','D','E','F','A','U','L','T',
- 'A','U','T','O','I','N','C','R','E','M','E','N','T','C','A','S','T','C',
- 'O','L','U','M','N','C','O','M','M','I','T','C','O','N','F','L','I','C',
- 'T','C','R','O','S','S','C','U','R','R','E','N','T','_','T','I','M','E',
- 'S','T','A','M','P','A','R','T','I','T','I','O','N','D','E','F','E','R',
- 'R','E','D','I','S','T','I','N','C','T','D','R','O','P','R','E','C','E',
- 'D','I','N','G','F','A','I','L','F','I','L','T','E','R','E','P','L','A',
- 'C','E','F','O','L','L','O','W','I','N','G','F','R','O','M','F','U','L',
- 'L','I','F','I','S','N','U','L','L','O','R','D','E','R','E','S','T','R',
- 'I','C','T','O','V','E','R','I','G','H','T','R','O','L','L','B','A','C',
- 'K','R','O','W','S','U','N','B','O','U','N','D','E','D','U','N','I','O',
- 'N','U','S','I','N','G','V','A','C','U','U','M','V','I','E','W','I','N',
- 'D','O','W','I','N','I','T','I','A','L','L','Y','P','R','I','M','A','R',
- 'Y',
+ 'T','E','B','E','G','I','N','N','E','R','E','C','U','R','S','I','V','E',
+ 'B','E','T','W','E','E','N','O','T','H','I','N','G','L','O','B','Y','C',
+ 'A','S','C','A','D','E','L','E','T','E','C','A','S','E','C','O','L','L',
+ 'A','T','E','C','R','E','A','T','E','C','U','R','R','E','N','T','_','D',
+ 'A','T','E','D','E','T','A','C','H','I','M','M','E','D','I','A','T','E',
+ 'J','O','I','N','S','E','R','T','L','I','K','E','M','A','T','C','H','P',
+ 'L','A','N','A','L','Y','Z','E','P','R','A','G','M','A','B','O','R','T',
+ 'V','A','L','U','E','S','V','I','R','T','U','A','L','I','M','I','T','W',
+ 'H','E','N','O','T','N','U','L','L','W','H','E','R','E','N','A','M','E',
+ 'A','F','T','E','R','E','P','L','A','C','E','A','N','D','E','F','A','U',
+ 'L','T','A','U','T','O','I','N','C','R','E','M','E','N','T','C','A','S',
+ 'T','C','O','L','U','M','N','C','O','M','M','I','T','C','O','N','F','L',
+ 'I','C','T','C','R','O','S','S','C','U','R','R','E','N','T','_','T','I',
+ 'M','E','S','T','A','M','P','R','I','M','A','R','Y','D','E','F','E','R',
+ 'R','E','D','I','S','T','I','N','C','T','D','O','R','D','E','R','E','S',
+ 'T','R','I','C','T','D','R','O','P','F','A','I','L','F','R','O','M','F',
+ 'U','L','L','I','F','I','S','N','U','L','L','R','I','G','H','T','R','O',
+ 'L','L','B','A','C','K','R','O','W','U','N','I','O','N','U','S','I','N',
+ 'G','V','A','C','U','U','M','V','I','E','W','I','N','I','T','I','A','L',
+ 'L','Y',
};
/* aKWHash[i] is the hash value for the i-th keyword */
static const unsigned char aKWHash[127] = {
- 74, 109, 124, 72, 106, 45, 0, 0, 81, 0, 76, 61, 0,
- 42, 12, 77, 15, 0, 123, 84, 54, 118, 125, 19, 0, 0,
- 130, 0, 128, 121, 0, 22, 96, 0, 9, 0, 0, 115, 69,
- 0, 67, 6, 0, 48, 93, 136, 0, 126, 104, 0, 0, 44,
- 0, 107, 24, 0, 17, 0, 131, 53, 23, 0, 5, 62, 132,
- 99, 0, 0, 135, 110, 60, 134, 57, 113, 55, 0, 94, 0,
- 103, 26, 0, 102, 0, 0, 0, 98, 95, 100, 105, 117, 14,
- 39, 116, 0, 80, 0, 133, 114, 92, 59, 0, 129, 79, 119,
- 86, 46, 83, 0, 0, 97, 40, 122, 120, 0, 127, 0, 0,
- 29, 0, 89, 87, 88, 0, 20, 85, 111, 56,
+ 74, 108, 119, 72, 0, 45, 0, 0, 81, 0, 76, 61, 0,
+ 42, 12, 77, 15, 0, 118, 84, 54, 116, 0, 19, 0, 0,
+ 123, 0, 121, 111, 0, 22, 96, 0, 9, 0, 0, 68, 69,
+ 0, 67, 6, 0, 48, 93, 105, 0, 120, 104, 0, 0, 44,
+ 0, 106, 24, 0, 17, 0, 124, 53, 23, 0, 5, 62, 25,
+ 99, 0, 0, 126, 112, 60, 125, 57, 28, 55, 0, 94, 0,
+ 103, 26, 0, 102, 0, 0, 0, 98, 95, 100, 91, 115, 14,
+ 39, 114, 0, 80, 0, 109, 92, 90, 32, 0, 122, 79, 117,
+ 86, 46, 83, 0, 0, 97, 40, 59, 110, 0, 36, 0, 0,
+ 29, 0, 89, 87, 88, 0, 20, 85, 0, 56,
};
/* aKWNext[] forms the hash collision chain. If aKWHash[i]==0
** then the i-th keyword has no more hash collisions. Otherwise,
** the next keyword with the same hash is aKWHash[i]-1. */
-static const unsigned char aKWNext[136] = {
+static const unsigned char aKWNext[126] = {
0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0,
0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 33, 0, 21, 0, 0, 0, 0, 0, 50,
- 0, 43, 3, 47, 0, 0, 32, 0, 0, 0, 0, 0, 0,
+ 0, 43, 3, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 64, 0, 0, 65, 0, 41, 0, 38, 0, 0, 0,
- 0, 0, 49, 75, 0, 0, 30, 0, 58, 0, 0, 0, 31,
- 63, 16, 34, 10, 0, 0, 0, 0, 0, 0, 0, 11, 70,
- 91, 0, 0, 8, 0, 108, 0, 101, 28, 52, 68, 0, 112,
- 0, 73, 51, 0, 90, 27, 37, 0, 71, 36, 82, 0, 35,
- 66, 25, 18, 0, 0, 78,
+ 0, 0, 49, 75, 0, 0, 30, 0, 58, 0, 0, 63, 31,
+ 52, 16, 34, 10, 0, 0, 0, 0, 0, 0, 0, 11, 70,
+ 78, 0, 8, 0, 18, 51, 0, 107, 101, 0, 113, 0, 73,
+ 27, 37, 71, 82, 0, 35, 66, 0, 0,
};
/* aKWLen[i] is the length (in bytes) of the i-th keyword */
-static const unsigned char aKWLen[136] = {
+static const unsigned char aKWLen[126] = {
7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6,
7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 6,
11, 6, 2, 7, 5, 5, 9, 6, 9, 9, 7, 10, 10,
4, 6, 2, 3, 9, 4, 2, 6, 5, 7, 4, 5, 7,
- 6, 6, 5, 6, 5, 5, 5, 7, 7, 4, 2, 7, 3,
+ 6, 6, 5, 6, 5, 5, 9, 7, 7, 4, 2, 7, 3,
6, 4, 7, 6, 12, 6, 9, 4, 6, 4, 5, 4, 7,
- 6, 5, 6, 7, 5, 4, 7, 3, 2, 4, 5, 9, 5,
- 6, 3, 7, 13, 2, 2, 4, 6, 6, 8, 5, 17, 12,
- 7, 9, 8, 8, 2, 4, 9, 4, 6, 7, 9, 4, 4,
- 2, 6, 5, 8, 4, 5, 8, 4, 3, 9, 5, 5, 6,
- 4, 6, 2, 9, 3, 7,
+ 6, 5, 6, 7, 5, 4, 7, 3, 2, 4, 5, 6, 5,
+ 7, 3, 7, 13, 2, 2, 4, 6, 6, 8, 5, 17, 12,
+ 7, 8, 8, 2, 2, 5, 8, 4, 4, 4, 4, 2, 6,
+ 5, 8, 3, 5, 5, 6, 4, 9, 3,
};
/* aKWOffset[i] is the index into zKWText[] of the start of
** the text for the i-th keyword. */
-static const unsigned short int aKWOffset[136] = {
+static const unsigned short int aKWOffset[126] = {
0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33,
36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81,
86, 91, 95, 96, 101, 105, 109, 117, 122, 128, 136, 142, 152,
159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 184, 188, 192,
- 199, 204, 209, 212, 218, 221, 225, 230, 236, 242, 245, 247, 248,
- 252, 258, 262, 269, 275, 287, 293, 302, 304, 310, 314, 319, 321,
- 328, 333, 338, 344, 350, 355, 358, 358, 358, 361, 365, 368, 377,
- 381, 387, 389, 396, 398, 400, 409, 413, 419, 425, 433, 438, 438,
- 438, 454, 463, 470, 471, 478, 481, 490, 494, 499, 506, 515, 519,
- 523, 525, 531, 535, 543, 546, 551, 559, 559, 563, 572, 577, 582,
- 588, 591, 594, 597, 602, 606,
+ 199, 204, 209, 212, 218, 221, 225, 234, 240, 246, 249, 251, 252,
+ 256, 262, 266, 273, 279, 291, 297, 306, 308, 314, 318, 323, 325,
+ 332, 337, 342, 348, 354, 359, 362, 362, 362, 365, 369, 372, 378,
+ 382, 389, 391, 398, 400, 402, 411, 415, 421, 427, 435, 440, 440,
+ 456, 463, 470, 471, 478, 479, 483, 491, 495, 499, 503, 507, 509,
+ 515, 520, 528, 531, 536, 541, 547, 551, 556,
};
/* aKWCode[i] is the parser symbol code for the i-th keyword */
-static const unsigned char aKWCode[136] = {
+static const unsigned char aKWCode[126] = {
TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE,
TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN,
TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD,
@@ -150601,23 +145737,21 @@ static const unsigned char aKWCode[136] = {
TK_OFFSET, TK_OF, TK_SET, TK_TEMP, TK_TEMP,
TK_OR, TK_UNIQUE, TK_QUERY, TK_WITHOUT, TK_WITH,
TK_JOIN_KW, TK_RELEASE, TK_ATTACH, TK_HAVING, TK_GROUP,
- TK_UPDATE, TK_BEGIN, TK_JOIN_KW, TK_RANGE, TK_BETWEEN,
+ TK_UPDATE, TK_BEGIN, TK_JOIN_KW, TK_RECURSIVE, TK_BETWEEN,
TK_NOTHING, TK_LIKE_KW, TK_BY, TK_CASCADE, TK_ASC,
TK_DELETE, TK_CASE, TK_COLLATE, TK_CREATE, TK_CTIME_KW,
TK_DETACH, TK_IMMEDIATE, TK_JOIN, TK_INSERT, TK_LIKE_KW,
TK_MATCH, TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT,
TK_VALUES, TK_VIRTUAL, TK_LIMIT, TK_WHEN, TK_NOTNULL,
- TK_NOT, TK_NO, TK_NULL, TK_WHERE, TK_RECURSIVE,
- TK_AFTER, TK_RENAME, TK_AND, TK_DEFAULT, TK_AUTOINCR,
+ TK_NOT, TK_NO, TK_NULL, TK_WHERE, TK_RENAME,
+ TK_AFTER, TK_REPLACE, TK_AND, TK_DEFAULT, TK_AUTOINCR,
TK_TO, TK_IN, TK_CAST, TK_COLUMNKW, TK_COMMIT,
- TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW, TK_CURRENT,
- TK_PARTITION, TK_DEFERRED, TK_DISTINCT, TK_IS, TK_DROP,
- TK_PRECEDING, TK_FAIL, TK_FILTER, TK_REPLACE, TK_FOLLOWING,
- TK_FROM, TK_JOIN_KW, TK_IF, TK_ISNULL, TK_ORDER,
- TK_RESTRICT, TK_OVER, TK_JOIN_KW, TK_ROLLBACK, TK_ROWS,
- TK_ROW, TK_UNBOUNDED, TK_UNION, TK_USING, TK_VACUUM,
- TK_VIEW, TK_WINDOW, TK_DO, TK_INITIALLY, TK_ALL,
- TK_PRIMARY,
+ TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW, TK_PRIMARY,
+ TK_DEFERRED, TK_DISTINCT, TK_IS, TK_DO, TK_ORDER,
+ TK_RESTRICT, TK_DROP, TK_FAIL, TK_FROM, TK_JOIN_KW,
+ TK_IF, TK_ISNULL, TK_JOIN_KW, TK_ROLLBACK, TK_ROW,
+ TK_UNION, TK_USING, TK_VACUUM, TK_VIEW, TK_INITIALLY,
+ TK_ALL,
};
/* Check to see if z[0..n-1] is a keyword. If it is, write the
** parser symbol code for that keyword into *pType. Always
@@ -150696,7 +145830,7 @@ static int keywordCode(const char *z, int n, int *pType){
testcase( i==55 ); /* UPDATE */
testcase( i==56 ); /* BEGIN */
testcase( i==57 ); /* INNER */
- testcase( i==58 ); /* RANGE */
+ testcase( i==58 ); /* RECURSIVE */
testcase( i==59 ); /* BETWEEN */
testcase( i==60 ); /* NOTHING */
testcase( i==61 ); /* GLOB */
@@ -150727,9 +145861,9 @@ static int keywordCode(const char *z, int n, int *pType){
testcase( i==86 ); /* NO */
testcase( i==87 ); /* NULL */
testcase( i==88 ); /* WHERE */
- testcase( i==89 ); /* RECURSIVE */
+ testcase( i==89 ); /* RENAME */
testcase( i==90 ); /* AFTER */
- testcase( i==91 ); /* RENAME */
+ testcase( i==91 ); /* REPLACE */
testcase( i==92 ); /* AND */
testcase( i==93 ); /* DEFAULT */
testcase( i==94 ); /* AUTOINCREMENT */
@@ -150742,38 +145876,28 @@ static int keywordCode(const char *z, int n, int *pType){
testcase( i==101 ); /* CROSS */
testcase( i==102 ); /* CURRENT_TIMESTAMP */
testcase( i==103 ); /* CURRENT_TIME */
- testcase( i==104 ); /* CURRENT */
- testcase( i==105 ); /* PARTITION */
- testcase( i==106 ); /* DEFERRED */
- testcase( i==107 ); /* DISTINCT */
- testcase( i==108 ); /* IS */
- testcase( i==109 ); /* DROP */
- testcase( i==110 ); /* PRECEDING */
- testcase( i==111 ); /* FAIL */
- testcase( i==112 ); /* FILTER */
- testcase( i==113 ); /* REPLACE */
- testcase( i==114 ); /* FOLLOWING */
- testcase( i==115 ); /* FROM */
- testcase( i==116 ); /* FULL */
- testcase( i==117 ); /* IF */
- testcase( i==118 ); /* ISNULL */
- testcase( i==119 ); /* ORDER */
- testcase( i==120 ); /* RESTRICT */
- testcase( i==121 ); /* OVER */
- testcase( i==122 ); /* RIGHT */
- testcase( i==123 ); /* ROLLBACK */
- testcase( i==124 ); /* ROWS */
- testcase( i==125 ); /* ROW */
- testcase( i==126 ); /* UNBOUNDED */
- testcase( i==127 ); /* UNION */
- testcase( i==128 ); /* USING */
- testcase( i==129 ); /* VACUUM */
- testcase( i==130 ); /* VIEW */
- testcase( i==131 ); /* WINDOW */
- testcase( i==132 ); /* DO */
- testcase( i==133 ); /* INITIALLY */
- testcase( i==134 ); /* ALL */
- testcase( i==135 ); /* PRIMARY */
+ testcase( i==104 ); /* PRIMARY */
+ testcase( i==105 ); /* DEFERRED */
+ testcase( i==106 ); /* DISTINCT */
+ testcase( i==107 ); /* IS */
+ testcase( i==108 ); /* DO */
+ testcase( i==109 ); /* ORDER */
+ testcase( i==110 ); /* RESTRICT */
+ testcase( i==111 ); /* DROP */
+ testcase( i==112 ); /* FAIL */
+ testcase( i==113 ); /* FROM */
+ testcase( i==114 ); /* FULL */
+ testcase( i==115 ); /* IF */
+ testcase( i==116 ); /* ISNULL */
+ testcase( i==117 ); /* RIGHT */
+ testcase( i==118 ); /* ROLLBACK */
+ testcase( i==119 ); /* ROW */
+ testcase( i==120 ); /* UNION */
+ testcase( i==121 ); /* USING */
+ testcase( i==122 ); /* VACUUM */
+ testcase( i==123 ); /* VIEW */
+ testcase( i==124 ); /* INITIALLY */
+ testcase( i==125 ); /* ALL */
*pType = aKWCode[i];
break;
}
@@ -150785,7 +145909,7 @@ SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
keywordCode((char*)z, n, &id);
return id;
}
-#define SQLITE_N_KEYWORD 136
+#define SQLITE_N_KEYWORD 126
SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){
if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR;
*pzName = zKWText + aKWOffset[i];
@@ -150839,85 +145963,11 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[] = {
#define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
#endif
-/* Make the IdChar function accessible from ctime.c and alter.c */
+/* Make the IdChar function accessible from ctime.c */
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
SQLITE_PRIVATE int sqlite3IsIdChar(u8 c){ return IdChar(c); }
+#endif
-#ifndef SQLITE_OMIT_WINDOWFUNC
-/*
-** Return the id of the next token in string (*pz). Before returning, set
-** (*pz) to point to the byte following the parsed token.
-*/
-static int getToken(const unsigned char **pz){
- const unsigned char *z = *pz;
- int t; /* Token type to return */
- do {
- z += sqlite3GetToken(z, &t);
- }while( t==TK_SPACE );
- if( t==TK_ID
- || t==TK_STRING
- || t==TK_JOIN_KW
- || t==TK_WINDOW
- || t==TK_OVER
- || sqlite3ParserFallback(t)==TK_ID
- ){
- t = TK_ID;
- }
- *pz = z;
- return t;
-}
-
-/*
-** The following three functions are called immediately after the tokenizer
-** reads the keywords WINDOW, OVER and FILTER, respectively, to determine
-** whether the token should be treated as a keyword or an SQL identifier.
-** This cannot be handled by the usual lemon %fallback method, due to
-** the ambiguity in some constructions. e.g.
-**
-** SELECT sum(x) OVER ...
-**
-** In the above, "OVER" might be a keyword, or it might be an alias for the
-** sum(x) expression. If a "%fallback ID OVER" directive were added to
-** grammar, then SQLite would always treat "OVER" as an alias, making it
-** impossible to call a window-function without a FILTER clause.
-**
-** WINDOW is treated as a keyword if:
-**
-** * the following token is an identifier, or a keyword that can fallback
-** to being an identifier, and
-** * the token after than one is TK_AS.
-**
-** OVER is a keyword if:
-**
-** * the previous token was TK_RP, and
-** * the next token is either TK_LP or an identifier.
-**
-** FILTER is a keyword if:
-**
-** * the previous token was TK_RP, and
-** * the next token is TK_LP.
-*/
-static int analyzeWindowKeyword(const unsigned char *z){
- int t;
- t = getToken(&z);
- if( t!=TK_ID ) return TK_ID;
- t = getToken(&z);
- if( t!=TK_AS ) return TK_ID;
- return TK_WINDOW;
-}
-static int analyzeOverKeyword(const unsigned char *z, int lastToken){
- if( lastToken==TK_RP ){
- int t = getToken(&z);
- if( t==TK_LP || t==TK_ID ) return TK_OVER;
- }
- return TK_ID;
-}
-static int analyzeFilterKeyword(const unsigned char *z, int lastToken){
- if( lastToken==TK_RP && getToken(&z)==TK_LP ){
- return TK_FILTER;
- }
- return TK_ID;
-}
-#endif /* SQLITE_OMIT_WINDOWFUNC */
/*
** Return the length (in bytes) of the token that begins at z[0].
@@ -151186,10 +146236,6 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){
i = 1;
break;
}
- case CC_NUL: {
- *tokenType = TK_ILLEGAL;
- return 0;
- }
default: {
*tokenType = TK_ILLEGAL;
return 1;
@@ -151243,64 +146289,47 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
assert( pParse->nVar==0 );
assert( pParse->pVList==0 );
while( 1 ){
- n = sqlite3GetToken((u8*)zSql, &tokenType);
- mxSqlLen -= n;
- if( mxSqlLen<0 ){
- pParse->rc = SQLITE_TOOBIG;
- break;
+ if( zSql[0]!=0 ){
+ n = sqlite3GetToken((u8*)zSql, &tokenType);
+ mxSqlLen -= n;
+ if( mxSqlLen<0 ){
+ pParse->rc = SQLITE_TOOBIG;
+ break;
+ }
+ }else{
+ /* Upon reaching the end of input, call the parser two more times
+ ** with tokens TK_SEMI and 0, in that order. */
+ if( lastTokenParsed==TK_SEMI ){
+ tokenType = 0;
+ }else if( lastTokenParsed==0 ){
+ break;
+ }else{
+ tokenType = TK_SEMI;
+ }
+ n = 0;
}
-#ifndef SQLITE_OMIT_WINDOWFUNC
- if( tokenType>=TK_WINDOW ){
- assert( tokenType==TK_SPACE || tokenType==TK_OVER || tokenType==TK_FILTER
- || tokenType==TK_ILLEGAL || tokenType==TK_WINDOW
- );
-#else
if( tokenType>=TK_SPACE ){
assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL );
-#endif /* SQLITE_OMIT_WINDOWFUNC */
if( db->u1.isInterrupted ){
pParse->rc = SQLITE_INTERRUPT;
break;
}
- if( tokenType==TK_SPACE ){
- zSql += n;
- continue;
- }
- if( zSql[0]==0 ){
- /* Upon reaching the end of input, call the parser two more times
- ** with tokens TK_SEMI and 0, in that order. */
- if( lastTokenParsed==TK_SEMI ){
- tokenType = 0;
- }else if( lastTokenParsed==0 ){
- break;
- }else{
- tokenType = TK_SEMI;
- }
- n = 0;
-#ifndef SQLITE_OMIT_WINDOWFUNC
- }else if( tokenType==TK_WINDOW ){
- assert( n==6 );
- tokenType = analyzeWindowKeyword((const u8*)&zSql[6]);
- }else if( tokenType==TK_OVER ){
- assert( n==4 );
- tokenType = analyzeOverKeyword((const u8*)&zSql[4], lastTokenParsed);
- }else if( tokenType==TK_FILTER ){
- assert( n==6 );
- tokenType = analyzeFilterKeyword((const u8*)&zSql[6], lastTokenParsed);
-#endif /* SQLITE_OMIT_WINDOWFUNC */
- }else{
+ if( tokenType==TK_ILLEGAL ){
sqlite3ErrorMsg(pParse, "unrecognized token: \"%.*s\"", n, zSql);
break;
}
+ zSql += n;
+ }else{
+ pParse->sLastToken.z = zSql;
+ pParse->sLastToken.n = n;
+ sqlite3Parser(pEngine, tokenType, pParse->sLastToken);
+ lastTokenParsed = tokenType;
+ zSql += n;
+ if( pParse->rc!=SQLITE_OK || db->mallocFailed ) break;
}
- pParse->sLastToken.z = zSql;
- pParse->sLastToken.n = n;
- sqlite3Parser(pEngine, tokenType, pParse->sLastToken);
- lastTokenParsed = tokenType;
- zSql += n;
- if( pParse->rc!=SQLITE_OK || db->mallocFailed ) break;
}
assert( nErr==0 );
+ pParse->zTail = zSql;
#ifdef YYTRACKMAXSTACKDEPTH
sqlite3_mutex_enter(sqlite3MallocMutex());
sqlite3StatusHighwater(SQLITE_STATUS_PARSER_STACK,
@@ -151322,12 +146351,10 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
assert( pzErrMsg!=0 );
if( pParse->zErrMsg ){
*pzErrMsg = pParse->zErrMsg;
- sqlite3_log(pParse->rc, "%s in \"%s\"",
- *pzErrMsg, pParse->zTail);
+ sqlite3_log(pParse->rc, "%s", *pzErrMsg);
pParse->zErrMsg = 0;
nErr++;
}
- pParse->zTail = zSql;
if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){
sqlite3VdbeDelete(pParse->pVdbe);
pParse->pVdbe = 0;
@@ -151343,18 +146370,16 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
sqlite3_free(pParse->apVtabLock);
#endif
- if( !IN_SPECIAL_PARSE ){
+ if( !IN_DECLARE_VTAB ){
/* If the pParse->declareVtab flag is set, do not delete any table
** structure built up in pParse->pNewTable. The calling code (see vtab.c)
** will take responsibility for freeing the Table structure.
*/
sqlite3DeleteTable(db, pParse->pNewTable);
}
- if( !IN_RENAME_OBJECT ){
- sqlite3DeleteTrigger(db, pParse->pNewTrigger);
- }
if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree);
+ sqlite3DeleteTrigger(db, pParse->pNewTrigger);
sqlite3DbFree(db, pParse->pVList);
while( pParse->pAinc ){
AutoincInfo *p = pParse->pAinc;
@@ -152611,7 +147636,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
db->flags &= ~aFlagOp[i].mask;
}
if( oldFlags!=db->flags ){
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
}
if( pRes ){
*pRes = (db->flags & aFlagOp[i].mask)!=0;
@@ -152672,15 +147697,6 @@ static int binCollFunc(
return rc;
}
-/*
-** Return true if CollSeq is the default built-in BINARY.
-*/
-SQLITE_PRIVATE int sqlite3IsBinary(const CollSeq *p){
- assert( p==0 || p->xCmp!=binCollFunc || p->pUser!=0
- || strcmp(p->zName,"BINARY")==0 );
- return p==0 || (p->xCmp==binCollFunc && p->pUser==0);
-}
-
/*
** Another built-in collating sequence: NOCASE.
**
@@ -152802,7 +147818,7 @@ static void disconnectAllVtab(sqlite3 *db){
sqlite3BtreeEnterAll(db);
  for(i=0; i<db->nDb; i++){
Schema *pSchema = db->aDb[i].pSchema;
- if( pSchema ){
+ if( db->aDb[i].pSchema ){
for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
Table *pTab = (Table *)sqliteHashData(p);
if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab);
@@ -153062,8 +148078,8 @@ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
sqlite3VtabRollback(db);
sqlite3EndBenignMalloc();
- if( schemaChange ){
- sqlite3ExpirePreparedStatements(db, 0);
+ if( (db->mDbFlags&DBFLAG_SchemaChange)!=0 && db->init.busy==0 ){
+ sqlite3ExpirePreparedStatements(db);
sqlite3ResetAllSchemasOfConnection(db);
}
sqlite3BtreeLeaveAll(db);
@@ -153091,7 +148107,6 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
switch( rc ){
case SQLITE_OK: zName = "SQLITE_OK"; break;
case SQLITE_ERROR: zName = "SQLITE_ERROR"; break;
- case SQLITE_ERROR_SNAPSHOT: zName = "SQLITE_ERROR_SNAPSHOT"; break;
case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break;
case SQLITE_PERM: zName = "SQLITE_PERM"; break;
case SQLITE_ABORT: zName = "SQLITE_ABORT"; break;
@@ -153455,8 +148470,6 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
void (*xStep)(sqlite3_context*,int,sqlite3_value **),
void (*xFinal)(sqlite3_context*),
- void (*xValue)(sqlite3_context*),
- void (*xInverse)(sqlite3_context*,int,sqlite3_value **),
FuncDestructor *pDestructor
){
FuncDef *p;
@@ -153464,14 +148477,12 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
int extraFlags;
assert( sqlite3_mutex_held(db->mutex) );
- assert( xValue==0 || xSFunc==0 );
- if( zFunctionName==0 /* Must have a valid name */
- || (xSFunc!=0 && xFinal!=0) /* Not both xSFunc and xFinal */
- || ((xFinal==0)!=(xStep==0)) /* Both or neither of xFinal and xStep */
- || ((xValue==0)!=(xInverse==0)) /* Both or neither of xValue, xInverse */
- || (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG)
- || (255<(nName = sqlite3Strlen30( zFunctionName)))
- ){
+ if( zFunctionName==0 ||
+ (xSFunc && (xFinal || xStep)) ||
+ (!xSFunc && (xFinal && !xStep)) ||
+ (!xSFunc && (!xFinal && xStep)) ||
+ (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) ||
+ (255<(nName = sqlite3Strlen30( zFunctionName))) ){
return SQLITE_MISUSE_BKPT;
}
@@ -153492,10 +148503,10 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
}else if( enc==SQLITE_ANY ){
int rc;
rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8|extraFlags,
- pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
+ pUserData, xSFunc, xStep, xFinal, pDestructor);
if( rc==SQLITE_OK ){
rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE|extraFlags,
- pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
+ pUserData, xSFunc, xStep, xFinal, pDestructor);
}
if( rc!=SQLITE_OK ){
return rc;
@@ -153512,14 +148523,14 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
** operation to continue but invalidate all precompiled statements.
*/
p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 0);
- if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==(u32)enc && p->nArg==nArg ){
+ if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==enc && p->nArg==nArg ){
if( db->nVdbeActive ){
sqlite3ErrorWithMsg(db, SQLITE_BUSY,
"unable to delete/modify user-function due to active statements");
assert( !db->mallocFailed );
return SQLITE_BUSY;
}else{
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
}
}
@@ -153541,32 +148552,38 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
testcase( p->funcFlags & SQLITE_DETERMINISTIC );
p->xSFunc = xSFunc ? xSFunc : xStep;
p->xFinalize = xFinal;
- p->xValue = xValue;
- p->xInverse = xInverse;
p->pUserData = pUserData;
p->nArg = (u16)nArg;
return SQLITE_OK;
}
/*
-** Worker function used by utf-8 APIs that create new functions:
-**
-** sqlite3_create_function()
-** sqlite3_create_function_v2()
-** sqlite3_create_window_function()
+** Create new user functions.
*/
-static int createFunctionApi(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunc,
int nArg,
int enc,
void *p,
- void (*xSFunc)(sqlite3_context*,int,sqlite3_value**),
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
+ void (*xFinal)(sqlite3_context*)
+){
+ return sqlite3_create_function_v2(db, zFunc, nArg, enc, p, xSFunc, xStep,
+ xFinal, 0);
+}
+
+SQLITE_API int sqlite3_create_function_v2(
+ sqlite3 *db,
+ const char *zFunc,
+ int nArg,
+ int enc,
+ void *p,
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
+ void (*xStep)(sqlite3_context*,int,sqlite3_value **),
void (*xFinal)(sqlite3_context*),
- void (*xValue)(sqlite3_context*),
- void (*xInverse)(sqlite3_context*,int,sqlite3_value**),
- void(*xDestroy)(void*)
+ void (*xDestroy)(void *)
){
int rc = SQLITE_ERROR;
FuncDestructor *pArg = 0;
@@ -153588,9 +148605,7 @@ static int createFunctionApi(
pArg->xDestroy = xDestroy;
pArg->pUserData = p;
}
- rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p,
- xSFunc, xStep, xFinal, xValue, xInverse, pArg
- );
+ rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p, xSFunc, xStep, xFinal, pArg);
if( pArg && pArg->nRef==0 ){
assert( rc!=SQLITE_OK );
xDestroy(p);
@@ -153603,52 +148618,6 @@ static int createFunctionApi(
return rc;
}
-/*
-** Create new user functions.
-*/
-SQLITE_API int sqlite3_create_function(
- sqlite3 *db,
- const char *zFunc,
- int nArg,
- int enc,
- void *p,
- void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
- void (*xStep)(sqlite3_context*,int,sqlite3_value **),
- void (*xFinal)(sqlite3_context*)
-){
- return createFunctionApi(db, zFunc, nArg, enc, p, xSFunc, xStep,
- xFinal, 0, 0, 0);
-}
-SQLITE_API int sqlite3_create_function_v2(
- sqlite3 *db,
- const char *zFunc,
- int nArg,
- int enc,
- void *p,
- void (*xSFunc)(sqlite3_context*,int,sqlite3_value **),
- void (*xStep)(sqlite3_context*,int,sqlite3_value **),
- void (*xFinal)(sqlite3_context*),
- void (*xDestroy)(void *)
-){
- return createFunctionApi(db, zFunc, nArg, enc, p, xSFunc, xStep,
- xFinal, 0, 0, xDestroy);
-}
-SQLITE_API int sqlite3_create_window_function(
- sqlite3 *db,
- const char *zFunc,
- int nArg,
- int enc,
- void *p,
- void (*xStep)(sqlite3_context*,int,sqlite3_value **),
- void (*xFinal)(sqlite3_context*),
- void (*xValue)(sqlite3_context*),
- void (*xInverse)(sqlite3_context*,int,sqlite3_value **),
- void (*xDestroy)(void *)
-){
- return createFunctionApi(db, zFunc, nArg, enc, p, 0, xStep,
- xFinal, xValue, xInverse, xDestroy);
-}
-
#ifndef SQLITE_OMIT_UTF16
SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
@@ -153669,7 +148638,7 @@ SQLITE_API int sqlite3_create_function16(
sqlite3_mutex_enter(db->mutex);
assert( !db->mallocFailed );
zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE);
- rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xSFunc,xStep,xFinal,0,0,0);
+ rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xSFunc,xStep,xFinal,0);
sqlite3DbFree(db, zFunc8);
rc = sqlite3ApiExit(db, rc);
sqlite3_mutex_leave(db->mutex);
@@ -154294,7 +149263,7 @@ static int createCollation(
"unable to delete/modify collation sequence due to active statements");
return SQLITE_BUSY;
}
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
/* If collation sequence pColl was created directly by a call to
** sqlite3_create_collation, and not generated by synthCollSeq(),
@@ -154783,7 +149752,6 @@ static int openDatabase(
db->nDb = 2;
db->magic = SQLITE_MAGIC_BUSY;
db->aDb = db->aDbStatic;
- db->lookaside.bDisable = 1;
assert( sizeof(db->aLimit)==sizeof(aHardLimit) );
memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit));
@@ -155484,9 +150452,6 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo
}else if( op==SQLITE_FCNTL_JOURNAL_POINTER ){
*(sqlite3_file**)pArg = sqlite3PagerJrnlFile(pPager);
rc = SQLITE_OK;
- }else if( op==SQLITE_FCNTL_DATA_VERSION ){
- *(unsigned int*)pArg = sqlite3PagerDataVersion(pPager);
- rc = SQLITE_OK;
}else{
rc = sqlite3OsFileControl(fd, op, pArg);
}
@@ -155750,8 +150715,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
*/
case SQLITE_TESTCTRL_VDBE_COVERAGE: {
#ifdef SQLITE_VDBE_COVERAGE
- typedef void (*branch_callback)(void*,unsigned int,
- unsigned char,unsigned char);
+ typedef void (*branch_callback)(void*,int,u8,u8);
sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback);
sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*);
#endif
@@ -155938,7 +150902,7 @@ SQLITE_API int sqlite3_snapshot_get(
if( iDb==0 || iDb>1 ){
Btree *pBt = db->aDb[iDb].pBt;
if( 0==sqlite3BtreeIsInTrans(pBt) ){
- rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
if( rc==SQLITE_OK ){
rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot);
}
@@ -155973,29 +150937,11 @@ SQLITE_API int sqlite3_snapshot_open(
iDb = sqlite3FindDbName(db, zDb);
if( iDb==0 || iDb>1 ){
Btree *pBt = db->aDb[iDb].pBt;
- if( sqlite3BtreeIsInTrans(pBt)==0 ){
- Pager *pPager = sqlite3BtreePager(pBt);
- int bUnlock = 0;
- if( sqlite3BtreeIsInReadTrans(pBt) ){
- if( db->nVdbeActive==0 ){
- rc = sqlite3PagerSnapshotCheck(pPager, pSnapshot);
- if( rc==SQLITE_OK ){
- bUnlock = 1;
- rc = sqlite3BtreeCommit(pBt);
- }
- }
- }else{
- rc = SQLITE_OK;
- }
+ if( 0==sqlite3BtreeIsInReadTrans(pBt) ){
+ rc = sqlite3PagerSnapshotOpen(sqlite3BtreePager(pBt), pSnapshot);
if( rc==SQLITE_OK ){
- rc = sqlite3PagerSnapshotOpen(pPager, pSnapshot);
- }
- if( rc==SQLITE_OK ){
- rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
- sqlite3PagerSnapshotOpen(pPager, 0);
- }
- if( bUnlock ){
- sqlite3PagerSnapshotUnlock(pPager);
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
+ sqlite3PagerSnapshotOpen(sqlite3BtreePager(pBt), 0);
}
}
}
@@ -156026,7 +150972,7 @@ SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){
if( iDb==0 || iDb>1 ){
Btree *pBt = db->aDb[iDb].pBt;
if( 0==sqlite3BtreeIsInReadTrans(pBt) ){
- rc = sqlite3BtreeBeginTrans(pBt, 0, 0);
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
if( rc==SQLITE_OK ){
rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt));
sqlite3BtreeCommit(pBt);
@@ -161149,7 +156095,7 @@ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){
int rc = SQLITE_OK;
UNUSED_PARAMETER(iSavepoint);
assert( ((Fts3Table *)pVtab)->inTransaction );
- assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint );
+ assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint );
TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint );
if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){
rc = fts3SyncMethod(pVtab);
@@ -175572,2526 +170518,6 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){
#endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */
/************** End of fts3_unicode2.c ***************************************/
-/************** Begin file json1.c *******************************************/
-/*
-** 2015-08-12
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This SQLite extension implements JSON functions. The interface is
-** modeled after MySQL JSON functions:
-**
-** https://dev.mysql.com/doc/refman/5.7/en/json.html
-**
-** For the time being, all JSON is stored as pure text. (We might add
-** a JSONB type in the future which stores a binary encoding of JSON in
-** a BLOB, but there is no support for JSONB in the current implementation.
-** This implementation parses JSON text at 250 MB/s, so it is hard to see
-** how JSONB might improve on that.)
-*/
-#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1)
-#if !defined(SQLITEINT_H)
-/* #include "sqlite3ext.h" */
-#endif
-SQLITE_EXTENSION_INIT1
-/* #include <assert.h> */
-/* #include <string.h> */
-/* #include <stdlib.h> */
-/* #include <stdarg.h> */
-
-/* Mark a function parameter as unused, to suppress nuisance compiler
-** warnings. */
-#ifndef UNUSED_PARAM
-# define UNUSED_PARAM(X) (void)(X)
-#endif
-
-#ifndef LARGEST_INT64
-# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32))
-# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64)
-#endif
-
-/*
-** Versions of isspace(), isalnum() and isdigit() to which it is safe
-** to pass signed char values.
-*/
-#ifdef sqlite3Isdigit
- /* Use the SQLite core versions if this routine is part of the
- ** SQLite amalgamation */
-# define safe_isdigit(x) sqlite3Isdigit(x)
-# define safe_isalnum(x) sqlite3Isalnum(x)
-# define safe_isxdigit(x) sqlite3Isxdigit(x)
-#else
- /* Use the standard library for separate compilation */
-#include <ctype.h>  /* amalgamator: keep */
-# define safe_isdigit(x) isdigit((unsigned char)(x))
-# define safe_isalnum(x) isalnum((unsigned char)(x))
-# define safe_isxdigit(x) isxdigit((unsigned char)(x))
-#endif
-
-/*
-** Growing our own isspace() routine this way is twice as fast as
-** the library isspace() function, resulting in a 7% overall performance
-** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
-*/
-static const char jsonIsSpace[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-#define safe_isspace(x) (jsonIsSpace[(unsigned char)x])
-
-#ifndef SQLITE_AMALGAMATION
- /* Unsigned integer types. These are already defined in the sqliteInt.h,
- ** but the definitions need to be repeated for separate compilation. */
- typedef sqlite3_uint64 u64;
- typedef unsigned int u32;
- typedef unsigned short int u16;
- typedef unsigned char u8;
-#endif
-
-/* Objects */
-typedef struct JsonString JsonString;
-typedef struct JsonNode JsonNode;
-typedef struct JsonParse JsonParse;
-
-/* An instance of this object represents a JSON string
-** under construction. Really, this is a generic string accumulator
-** that can be and is used to create strings other than JSON.
-*/
-struct JsonString {
- sqlite3_context *pCtx; /* Function context - put error messages here */
- char *zBuf; /* Append JSON content here */
- u64 nAlloc; /* Bytes of storage available in zBuf[] */
- u64 nUsed; /* Bytes of zBuf[] currently used */
- u8 bStatic; /* True if zBuf is static space */
- u8 bErr; /* True if an error has been encountered */
- char zSpace[100]; /* Initial static space */
-};
-
-/* JSON type values
-*/
-#define JSON_NULL 0
-#define JSON_TRUE 1
-#define JSON_FALSE 2
-#define JSON_INT 3
-#define JSON_REAL 4
-#define JSON_STRING 5
-#define JSON_ARRAY 6
-#define JSON_OBJECT 7
-
-/* The "subtype" set for JSON values */
-#define JSON_SUBTYPE 74 /* Ascii for "J" */
-
-/*
-** Names of the various JSON types:
-*/
-static const char * const jsonType[] = {
- "null", "true", "false", "integer", "real", "text", "array", "object"
-};
-
-/* Bit values for the JsonNode.jnFlag field
-*/
-#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
-#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
-#define JNODE_REMOVE 0x04 /* Do not output */
-#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */
-#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */
-#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */
-#define JNODE_LABEL 0x40 /* Is a label of an object */
-
-
-/* A single node of parsed JSON
-*/
-struct JsonNode {
- u8 eType; /* One of the JSON_ type values */
- u8 jnFlags; /* JNODE flags */
- u32 n; /* Bytes of content, or number of sub-nodes */
- union {
- const char *zJContent; /* Content for INT, REAL, and STRING */
- u32 iAppend; /* More terms for ARRAY and OBJECT */
- u32 iKey; /* Key for ARRAY objects in json_tree() */
- u32 iReplace; /* Replacement content for JNODE_REPLACE */
- JsonNode *pPatch; /* Node chain of patch for JNODE_PATCH */
- } u;
-};
-
-/* A completely parsed JSON string
-*/
-struct JsonParse {
- u32 nNode; /* Number of slots of aNode[] used */
- u32 nAlloc; /* Number of slots of aNode[] allocated */
- JsonNode *aNode; /* Array of nodes containing the parse */
- const char *zJson; /* Original JSON string */
- u32 *aUp; /* Index of parent of each node */
- u8 oom; /* Set to true if out of memory */
- u8 nErr; /* Number of errors seen */
- u16 iDepth; /* Nesting depth */
- int nJson; /* Length of the zJson string in bytes */
- u32 iHold; /* Replace cache line with the lowest iHold value */
-};
-
-/*
-** Maximum nesting depth of JSON for this implementation.
-**
-** This limit is needed to avoid a stack overflow in the recursive
-** descent parser. A depth of 2000 is far deeper than any sane JSON
-** should go.
-*/
-#define JSON_MAX_DEPTH 2000
-
-/**************************************************************************
-** Utility routines for dealing with JsonString objects
-**************************************************************************/
-
-/* Set the JsonString object to an empty string
-*/
-static void jsonZero(JsonString *p){
- p->zBuf = p->zSpace;
- p->nAlloc = sizeof(p->zSpace);
- p->nUsed = 0;
- p->bStatic = 1;
-}
-
-/* Initialize the JsonString object
-*/
-static void jsonInit(JsonString *p, sqlite3_context *pCtx){
- p->pCtx = pCtx;
- p->bErr = 0;
- jsonZero(p);
-}
-
-
-/* Free all allocated memory and reset the JsonString object back to its
-** initial state.
-*/
-static void jsonReset(JsonString *p){
- if( !p->bStatic ) sqlite3_free(p->zBuf);
- jsonZero(p);
-}
-
-
-/* Report an out-of-memory (OOM) condition
-*/
-static void jsonOom(JsonString *p){
- p->bErr = 1;
- sqlite3_result_error_nomem(p->pCtx);
- jsonReset(p);
-}
-
-/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
-** Return zero on success. Return non-zero on an OOM error
-*/
-static int jsonGrow(JsonString *p, u32 N){
-  u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
- char *zNew;
- if( p->bStatic ){
- if( p->bErr ) return 1;
- zNew = sqlite3_malloc64(nTotal);
- if( zNew==0 ){
- jsonOom(p);
- return SQLITE_NOMEM;
- }
- memcpy(zNew, p->zBuf, (size_t)p->nUsed);
- p->zBuf = zNew;
- p->bStatic = 0;
- }else{
- zNew = sqlite3_realloc64(p->zBuf, nTotal);
- if( zNew==0 ){
- jsonOom(p);
- return SQLITE_NOMEM;
- }
- p->zBuf = zNew;
- }
- p->nAlloc = nTotal;
- return SQLITE_OK;
-}
-
-/* Append N bytes from zIn onto the end of the JsonString string.
-*/
-static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
- if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
- memcpy(p->zBuf+p->nUsed, zIn, N);
- p->nUsed += N;
-}
-
-/* Append formatted text (not to exceed N bytes) to the JsonString.
-*/
-static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
- va_list ap;
- if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
- va_start(ap, zFormat);
- sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
- va_end(ap);
- p->nUsed += (int)strlen(p->zBuf+p->nUsed);
-}
-
-/* Append a single character
-*/
-static void jsonAppendChar(JsonString *p, char c){
- if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
- p->zBuf[p->nUsed++] = c;
-}
-
-/* Append a comma separator to the output buffer, if the previous
-** character is not '[' or '{'.
-*/
-static void jsonAppendSeparator(JsonString *p){
- char c;
- if( p->nUsed==0 ) return;
- c = p->zBuf[p->nUsed-1];
- if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
-}
-
-/* Append the N-byte string in zIn to the end of the JsonString string
-** under construction. Enclose the string in "..." and escape
-** any double-quotes or backslash characters contained within the
-** string.
-*/
-static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
- u32 i;
- if( (N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0 ) return;
- p->zBuf[p->nUsed++] = '"';
-  for(i=0; i<N; i++){
-    unsigned char c = ((unsigned const char*)zIn)[i];
-    if( c=='"' || c=='\\' ){
-      json_simple_escape:
-      if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
- p->zBuf[p->nUsed++] = '\\';
- }else if( c<=0x1f ){
- static const char aSpecial[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- assert( sizeof(aSpecial)==32 );
- assert( aSpecial['\b']=='b' );
- assert( aSpecial['\f']=='f' );
- assert( aSpecial['\n']=='n' );
- assert( aSpecial['\r']=='r' );
- assert( aSpecial['\t']=='t' );
- if( aSpecial[c] ){
- c = aSpecial[c];
- goto json_simple_escape;
- }
- if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
- p->zBuf[p->nUsed++] = '\\';
- p->zBuf[p->nUsed++] = 'u';
- p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = '0';
- p->zBuf[p->nUsed++] = '0' + (c>>4);
- c = "0123456789abcdef"[c&0xf];
- }
- p->zBuf[p->nUsed++] = c;
- }
- p->zBuf[p->nUsed++] = '"';
-  assert( p->nUsed<p->nAlloc );
-}
-
-/*
-** Append a function parameter value to the JSON string under
-** construction.
-*/
-static void jsonAppendValue(
- JsonString *p, /* Append to this JSON string */
- sqlite3_value *pValue /* Value to append */
-){
- switch( sqlite3_value_type(pValue) ){
- case SQLITE_NULL: {
- jsonAppendRaw(p, "null", 4);
- break;
- }
- case SQLITE_INTEGER:
- case SQLITE_FLOAT: {
- const char *z = (const char*)sqlite3_value_text(pValue);
- u32 n = (u32)sqlite3_value_bytes(pValue);
- jsonAppendRaw(p, z, n);
- break;
- }
- case SQLITE_TEXT: {
- const char *z = (const char*)sqlite3_value_text(pValue);
- u32 n = (u32)sqlite3_value_bytes(pValue);
- if( sqlite3_value_subtype(pValue)==JSON_SUBTYPE ){
- jsonAppendRaw(p, z, n);
- }else{
- jsonAppendString(p, z, n);
- }
- break;
- }
- default: {
- if( p->bErr==0 ){
- sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
- p->bErr = 2;
- jsonReset(p);
- }
- break;
- }
- }
-}
-
-
-/* Make the JSON in p the result of the SQL function.
-*/
-static void jsonResult(JsonString *p){
- if( p->bErr==0 ){
- sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
- p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
- SQLITE_UTF8);
- jsonZero(p);
- }
- assert( p->bStatic );
-}
-
-/**************************************************************************
-** Utility routines for dealing with JsonNode and JsonParse objects
-**************************************************************************/
-
-/*
-** Return the number of consecutive JsonNode slots need to represent
-** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
-** OBJECT types, the number might be larger.
-**
-** Appended elements are not counted. The value returned is the number
-** by which the JsonNode counter should increment in order to go to the
-** next peer value.
-*/
-static u32 jsonNodeSize(JsonNode *pNode){
- return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
-}
-
-/*
-** Reclaim all memory allocated by a JsonParse object. But do not
-** delete the JsonParse object itself.
-*/
-static void jsonParseReset(JsonParse *pParse){
- sqlite3_free(pParse->aNode);
- pParse->aNode = 0;
- pParse->nNode = 0;
- pParse->nAlloc = 0;
- sqlite3_free(pParse->aUp);
- pParse->aUp = 0;
-}
-
-/*
-** Free a JsonParse object that was obtained from sqlite3_malloc().
-*/
-static void jsonParseFree(JsonParse *pParse){
- jsonParseReset(pParse);
- sqlite3_free(pParse);
-}
-
-/*
-** Convert the JsonNode pNode into a pure JSON string and
-** append to pOut. Subsubstructure is also included. Return
-** the number of JsonNode objects that are encoded.
-*/
-static void jsonRenderNode(
- JsonNode *pNode, /* The node to render */
- JsonString *pOut, /* Write JSON here */
- sqlite3_value **aReplace /* Replacement values */
-){
- if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){
- if( pNode->jnFlags & JNODE_REPLACE ){
- jsonAppendValue(pOut, aReplace[pNode->u.iReplace]);
- return;
- }
- pNode = pNode->u.pPatch;
- }
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- jsonAppendRaw(pOut, "null", 4);
- break;
- }
- case JSON_TRUE: {
- jsonAppendRaw(pOut, "true", 4);
- break;
- }
- case JSON_FALSE: {
- jsonAppendRaw(pOut, "false", 5);
- break;
- }
- case JSON_STRING: {
- if( pNode->jnFlags & JNODE_RAW ){
- jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
- break;
- }
- /* Fall through into the next case */
- }
- case JSON_REAL:
- case JSON_INT: {
- jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
- break;
- }
- case JSON_ARRAY: {
- u32 j = 1;
- jsonAppendChar(pOut, '[');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- }
- j += jsonNodeSize(&pNode[j]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, ']');
- break;
- }
- case JSON_OBJECT: {
- u32 j = 1;
- jsonAppendChar(pOut, '{');
- for(;;){
- while( j<=pNode->n ){
- if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
- jsonAppendSeparator(pOut);
- jsonRenderNode(&pNode[j], pOut, aReplace);
- jsonAppendChar(pOut, ':');
- jsonRenderNode(&pNode[j+1], pOut, aReplace);
- }
- j += 1 + jsonNodeSize(&pNode[j+1]);
- }
- if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
- pNode = &pNode[pNode->u.iAppend];
- j = 1;
- }
- jsonAppendChar(pOut, '}');
- break;
- }
- }
-}
-
-/*
-** Return a JsonNode and all its descendents as a JSON string.
-*/
-static void jsonReturnJson(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- JsonString s;
- jsonInit(&s, pCtx);
- jsonRenderNode(pNode, &s, aReplace);
- jsonResult(&s);
- sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
-}
-
-/*
-** Make the JsonNode the return value of the function.
-*/
-static void jsonReturn(
- JsonNode *pNode, /* Node to return */
- sqlite3_context *pCtx, /* Return value for this function */
- sqlite3_value **aReplace /* Array of replacement values */
-){
- switch( pNode->eType ){
- default: {
- assert( pNode->eType==JSON_NULL );
- sqlite3_result_null(pCtx);
- break;
- }
- case JSON_TRUE: {
- sqlite3_result_int(pCtx, 1);
- break;
- }
- case JSON_FALSE: {
- sqlite3_result_int(pCtx, 0);
- break;
- }
- case JSON_INT: {
- sqlite3_int64 i = 0;
- const char *z = pNode->u.zJContent;
- if( z[0]=='-' ){ z++; }
- while( z[0]>='0' && z[0]<='9' ){
- unsigned v = *(z++) - '0';
- if( i>=LARGEST_INT64/10 ){
- if( i>LARGEST_INT64/10 ) goto int_as_real;
- if( z[0]>='0' && z[0]<='9' ) goto int_as_real;
- if( v==9 ) goto int_as_real;
- if( v==8 ){
- if( pNode->u.zJContent[0]=='-' ){
- sqlite3_result_int64(pCtx, SMALLEST_INT64);
- goto int_done;
- }else{
- goto int_as_real;
- }
- }
- }
- i = i*10 + v;
- }
- if( pNode->u.zJContent[0]=='-' ){ i = -i; }
- sqlite3_result_int64(pCtx, i);
- int_done:
- break;
- int_as_real: /* fall through to real */;
- }
- case JSON_REAL: {
- double r;
-#ifdef SQLITE_AMALGAMATION
- const char *z = pNode->u.zJContent;
- sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
-#else
- r = strtod(pNode->u.zJContent, 0);
-#endif
- sqlite3_result_double(pCtx, r);
- break;
- }
- case JSON_STRING: {
-#if 0 /* Never happens because JNODE_RAW is only set by json_set(),
- ** json_insert() and json_replace() and those routines do not
- ** call jsonReturn() */
- if( pNode->jnFlags & JNODE_RAW ){
- sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
- SQLITE_TRANSIENT);
- }else
-#endif
- assert( (pNode->jnFlags & JNODE_RAW)==0 );
- if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
- /* JSON formatted without any backslash-escapes */
- sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
- SQLITE_TRANSIENT);
- }else{
- /* Translate JSON formatted string into raw text */
- u32 i;
- u32 n = pNode->n;
- const char *z = pNode->u.zJContent;
- char *zOut;
- u32 j;
- zOut = sqlite3_malloc( n+1 );
- if( zOut==0 ){
- sqlite3_result_error_nomem(pCtx);
- break;
- }
- for(i=1, j=0; i<n-1; i++){
- char c = z[i];
- if( c!='\\' ){
- zOut[j++] = c;
- }else{
- c = z[++i];
- if( c=='u' ){
- u32 v = 0, k;
- for(k=0; k<4; i++, k++){
- assert( i<n-2 );
- c = z[i+1];
- assert( safe_isxdigit(c) );
- if( c<='9' ) v = v*16 + c - '0';
- else if( c<='F' ) v = v*16 + c - 'A' + 10;
- else v = v*16 + c - 'a' + 10;
- }
- if( v==0 ) break;
- if( v<=0x7f ){
- zOut[j++] = (char)v;
- }else if( v<=0x7ff ){
- zOut[j++] = (char)(0xc0 | (v>>6));
- zOut[j++] = 0x80 | (v&0x3f);
- }else{
- zOut[j++] = (char)(0xe0 | (v>>12));
- zOut[j++] = 0x80 | ((v>>6)&0x3f);
- zOut[j++] = 0x80 | (v&0x3f);
- }
- }else{
- if( c=='b' ){
- c = '\b';
- }else if( c=='f' ){
- c = '\f';
- }else if( c=='n' ){
- c = '\n';
- }else if( c=='r' ){
- c = '\r';
- }else if( c=='t' ){
- c = '\t';
- }
- zOut[j++] = c;
- }
- }
- }
- zOut[j] = 0;
- sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
- }
- break;
- }
- case JSON_ARRAY:
- case JSON_OBJECT: {
- jsonReturnJson(pNode, pCtx, aReplace);
- break;
- }
- }
-}
-
-/* Forward reference */
-static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define JSON_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define JSON_NOINLINE __declspec(noinline)
-#else
-# define JSON_NOINLINE
-#endif
-
-
-static JSON_NOINLINE int jsonParseAddNodeExpand(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- u32 nNew;
- JsonNode *pNew;
- assert( pParse->nNode>=pParse->nAlloc );
- if( pParse->oom ) return -1;
- nNew = pParse->nAlloc*2 + 10;
- pNew = sqlite3_realloc(pParse->aNode, sizeof(JsonNode)*nNew);
- if( pNew==0 ){
- pParse->oom = 1;
- return -1;
- }
- pParse->nAlloc = nNew;
- pParse->aNode = pNew;
- assert( pParse->nNodenAlloc );
- return jsonParseAddNode(pParse, eType, n, zContent);
-}
-
-/*
-** Create a new JsonNode instance based on the arguments and append that
-** instance to the JsonParse. Return the index in pParse->aNode[] of the
-** new node, or -1 if a memory allocation fails.
-*/
-static int jsonParseAddNode(
- JsonParse *pParse, /* Append the node to this object */
- u32 eType, /* Node type */
- u32 n, /* Content size or sub-node count */
- const char *zContent /* Content */
-){
- JsonNode *p;
- if( pParse->nNode>=pParse->nAlloc ){
- return jsonParseAddNodeExpand(pParse, eType, n, zContent);
- }
- p = &pParse->aNode[pParse->nNode];
- p->eType = (u8)eType;
- p->jnFlags = 0;
- p->n = n;
- p->u.zJContent = zContent;
- return pParse->nNode++;
-}
-
-/*
-** Return true if z[] begins with 4 (or more) hexadecimal digits
-*/
-static int jsonIs4Hex(const char *z){
- int i;
- for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0;
- return 1;
-}
-
-/*
-** Parse a single JSON value which begins at pParse->zJson[i]. Return the
-** index of the first character past the end of the value parsed.
-**
-** Return negative for a syntax error. Special cases: return -2 if the
-** first non-whitespace character is '}' and return -3 if the first
-** non-whitespace character is ']'.
-*/
-static int jsonParseValue(JsonParse *pParse, u32 i){
- char c;
- u32 j;
- int iThis;
- int x;
- JsonNode *pNode;
- const char *z = pParse->zJson;
- while( safe_isspace(z[i]) ){ i++; }
- if( (c = z[i])=='{' ){
- /* Parse object */
- iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- if( iThis<0 ) return -1;
- for(j=i+1;;j++){
- while( safe_isspace(z[j]) ){ j++; }
- if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
- x = jsonParseValue(pParse, j);
- if( x<0 ){
- pParse->iDepth--;
- if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1;
- return -1;
- }
- if( pParse->oom ) return -1;
- pNode = &pParse->aNode[pParse->nNode-1];
- if( pNode->eType!=JSON_STRING ) return -1;
- pNode->jnFlags |= JNODE_LABEL;
- j = x;
- while( safe_isspace(z[j]) ){ j++; }
- if( z[j]!=':' ) return -1;
- j++;
- x = jsonParseValue(pParse, j);
- pParse->iDepth--;
- if( x<0 ) return -1;
- j = x;
- while( safe_isspace(z[j]) ){ j++; }
- c = z[j];
- if( c==',' ) continue;
- if( c!='}' ) return -1;
- break;
- }
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
- return j+1;
- }else if( c=='[' ){
- /* Parse array */
- iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- if( iThis<0 ) return -1;
- for(j=i+1;;j++){
- while( safe_isspace(z[j]) ){ j++; }
- if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
- x = jsonParseValue(pParse, j);
- pParse->iDepth--;
- if( x<0 ){
- if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1;
- return -1;
- }
- j = x;
- while( safe_isspace(z[j]) ){ j++; }
- c = z[j];
- if( c==',' ) continue;
- if( c!=']' ) return -1;
- break;
- }
- pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
- return j+1;
- }else if( c=='"' ){
- /* Parse string */
- u8 jnFlags = 0;
- j = i+1;
- for(;;){
- c = z[j];
- if( (c & ~0x1f)==0 ){
- /* Control characters are not allowed in strings */
- return -1;
- }
- if( c=='\\' ){
- c = z[++j];
- if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
- || c=='n' || c=='r' || c=='t'
- || (c=='u' && jsonIs4Hex(z+j+1)) ){
- jnFlags = JNODE_ESCAPE;
- }else{
- return -1;
- }
- }else if( c=='"' ){
- break;
- }
- j++;
- }
- jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]);
- if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags;
- return j+1;
- }else if( c=='n'
- && strncmp(z+i,"null",4)==0
- && !safe_isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return i+4;
- }else if( c=='t'
- && strncmp(z+i,"true",4)==0
- && !safe_isalnum(z[i+4]) ){
- jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
- return i+4;
- }else if( c=='f'
- && strncmp(z+i,"false",5)==0
- && !safe_isalnum(z[i+5]) ){
- jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
- return i+5;
- }else if( c=='-' || (c>='0' && c<='9') ){
- /* Parse number */
- u8 seenDP = 0;
- u8 seenE = 0;
- assert( '-' < '0' );
- if( c<='0' ){
- j = c=='-' ? i+1 : i;
- if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1;
- }
- j = i+1;
- for(;; j++){
- c = z[j];
- if( c>='0' && c<='9' ) continue;
- if( c=='.' ){
- if( z[j-1]=='-' ) return -1;
- if( seenDP ) return -1;
- seenDP = 1;
- continue;
- }
- if( c=='e' || c=='E' ){
- if( z[j-1]<'0' ) return -1;
- if( seenE ) return -1;
- seenDP = seenE = 1;
- c = z[j+1];
- if( c=='+' || c=='-' ){
- j++;
- c = z[j+1];
- }
- if( c<'0' || c>'9' ) return -1;
- continue;
- }
- break;
- }
- if( z[j-1]<'0' ) return -1;
- jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT,
- j - i, &z[i]);
- return j;
- }else if( c=='}' ){
- return -2; /* End of {...} */
- }else if( c==']' ){
- return -3; /* End of [...] */
- }else if( c==0 ){
- return 0; /* End of file */
- }else{
- return -1; /* Syntax error */
- }
-}
-
-/*
-** Parse a complete JSON string. Return 0 on success or non-zero if there
-** are any errors. If an error occurs, free all memory associated with
-** pParse.
-**
-** pParse is uninitialized when this routine is called.
-*/
-static int jsonParse(
- JsonParse *pParse, /* Initialize and fill this JsonParse object */
- sqlite3_context *pCtx, /* Report errors here */
- const char *zJson /* Input JSON text to be parsed */
-){
- int i;
- memset(pParse, 0, sizeof(*pParse));
- if( zJson==0 ) return 1;
- pParse->zJson = zJson;
- i = jsonParseValue(pParse, 0);
- if( pParse->oom ) i = -1;
- if( i>0 ){
- assert( pParse->iDepth==0 );
- while( safe_isspace(zJson[i]) ) i++;
- if( zJson[i] ) i = -1;
- }
- if( i<=0 ){
- if( pCtx!=0 ){
- if( pParse->oom ){
- sqlite3_result_error_nomem(pCtx);
- }else{
- sqlite3_result_error(pCtx, "malformed JSON", -1);
- }
- }
- jsonParseReset(pParse);
- return 1;
- }
- return 0;
-}
-
-/* Mark node i of pParse as being a child of iParent. Call recursively
-** to fill in all the descendants of node i.
-*/
-static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
- JsonNode *pNode = &pParse->aNode[i];
- u32 j;
- pParse->aUp[i] = iParent;
- switch( pNode->eType ){
- case JSON_ARRAY: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
- jsonParseFillInParentage(pParse, i+j, i);
- }
- break;
- }
- case JSON_OBJECT: {
- for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
- pParse->aUp[i+j] = i;
- jsonParseFillInParentage(pParse, i+j+1, i);
- }
- break;
- }
- default: {
- break;
- }
- }
-}
-
-/*
-** Compute the parentage of all nodes in a completed parse.
-*/
-static int jsonParseFindParents(JsonParse *pParse){
- u32 *aUp;
- assert( pParse->aUp==0 );
- aUp = pParse->aUp = sqlite3_malloc( sizeof(u32)*pParse->nNode );
- if( aUp==0 ){
- pParse->oom = 1;
- return SQLITE_NOMEM;
- }
- jsonParseFillInParentage(pParse, 0, 0);
- return SQLITE_OK;
-}
-
-/*
-** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
-*/
-#define JSON_CACHE_ID (-429938) /* First cache entry */
-#define JSON_CACHE_SZ 4 /* Max number of cache entries */
-
-/*
-** Obtain a complete parse of the JSON found in the first argument
-** of the argv array. Use the sqlite3_get_auxdata() cache for this
-** parse if it is available. If the cache is not available or if it
-** is no longer valid, parse the JSON again and return the new parse,
-** and also register the new parse so that it will be available for
-** future sqlite3_get_auxdata() calls.
-*/
-static JsonParse *jsonParseCached(
- sqlite3_context *pCtx,
- sqlite3_value **argv,
- sqlite3_context *pErrCtx
-){
- const char *zJson = (const char*)sqlite3_value_text(argv[0]);
- int nJson = sqlite3_value_bytes(argv[0]);
- JsonParse *p;
- JsonParse *pMatch = 0;
- int iKey;
- int iMinKey = 0;
- u32 iMinHold = 0xffffffff;
- u32 iMaxHold = 0;
- if( zJson==0 ) return 0;
- for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){
- p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);
- if( p==0 ){
- iMinKey = iKey;
- break;
- }
- if( pMatch==0
- && p->nJson==nJson
- && memcmp(p->zJson,zJson,nJson)==0
- ){
- p->nErr = 0;
- pMatch = p;
- }else if( p->iHold<iMinHold ){
- iMinHold = p->iHold;
- iMinKey = iKey;
- }
- if( p->iHold>iMaxHold ){
- iMaxHold = p->iHold;
- }
- }
- if( pMatch ){
- pMatch->nErr = 0;
- pMatch->iHold = iMaxHold+1;
- return pMatch;
- }
- p = sqlite3_malloc( sizeof(*p) + nJson + 1 );
- if( p==0 ){
- sqlite3_result_error_nomem(pCtx);
- return 0;
- }
- memset(p, 0, sizeof(*p));
- p->zJson = (char*)&p[1];
- memcpy((char*)p->zJson, zJson, nJson+1);
- if( jsonParse(p, pErrCtx, p->zJson) ){
- sqlite3_free(p);
- return 0;
- }
- p->nJson = nJson;
- p->iHold = iMaxHold+1;
- sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p,
- (void(*)(void*))jsonParseFree);
- return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
-}
-
-/*
-** Compare the OBJECT label at pNode against zKey,nKey. Return true on
-** a match.
-*/
-static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){
- if( pNode->jnFlags & JNODE_RAW ){
- if( pNode->n!=nKey ) return 0;
- return strncmp(pNode->u.zJContent, zKey, nKey)==0;
- }else{
- if( pNode->n!=nKey+2 ) return 0;
- return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
- }
-}
-
-/* forward declaration */
-static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
-
-/*
-** Search along zPath to find the node specified. Return a pointer
-** to that node, or NULL if zPath is malformed or if there is no such
-** node.
-**
-** If pApnd!=0, then try to append new nodes to complete zPath if it is
-** possible to do so and if no existing node corresponds to zPath. If
-** new nodes are appended *pApnd is set to 1.
-*/
-static JsonNode *jsonLookupStep(
- JsonParse *pParse, /* The JSON to search */
- u32 iRoot, /* Begin the search at this node */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- const char **pzErr /* Make *pzErr point to any syntax error in zPath */
-){
- u32 i, j, nKey;
- const char *zKey;
- JsonNode *pRoot = &pParse->aNode[iRoot];
- if( zPath[0]==0 ) return pRoot;
- if( zPath[0]=='.' ){
- if( pRoot->eType!=JSON_OBJECT ) return 0;
- zPath++;
- if( zPath[0]=='"' ){
- zKey = zPath + 1;
- for(i=1; zPath[i] && zPath[i]!='"'; i++){}
- nKey = i-1;
- if( zPath[i] ){
- i++;
- }else{
- *pzErr = zPath;
- return 0;
- }
- }else{
- zKey = zPath;
- for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
- nKey = i;
- }
- if( nKey==0 ){
- *pzErr = zPath;
- return 0;
- }
- j = 1;
- for(;;){
- while( j<=pRoot->n ){
- if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
- return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
- }
- j++;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( pApnd ){
- u32 iStart, iLabel;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- iLabel = jsonParseAddNode(pParse, JSON_STRING, i, zPath);
- zPath += i;
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
- }
- return pNode;
- }
- }else if( zPath[0]=='[' && safe_isdigit(zPath[1]) ){
- if( pRoot->eType!=JSON_ARRAY ) return 0;
- i = 0;
- j = 1;
- while( safe_isdigit(zPath[j]) ){
- i = i*10 + zPath[j] - '0';
- j++;
- }
- if( zPath[j]!=']' ){
- *pzErr = zPath;
- return 0;
- }
- zPath += j + 1;
- j = 1;
- for(;;){
- while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
- if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
- j += jsonNodeSize(&pRoot[j]);
- }
- if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
- iRoot += pRoot->u.iAppend;
- pRoot = &pParse->aNode[iRoot];
- j = 1;
- }
- if( j<=pRoot->n ){
- return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
- }
- if( i==0 && pApnd ){
- u32 iStart;
- JsonNode *pNode;
- iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
- pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
- if( pParse->oom ) return 0;
- if( pNode ){
- pRoot = &pParse->aNode[iRoot];
- pRoot->u.iAppend = iStart - iRoot;
- pRoot->jnFlags |= JNODE_APPEND;
- }
- return pNode;
- }
- }else{
- *pzErr = zPath;
- }
- return 0;
-}
-
-/*
-** Append content to pParse that will complete zPath. Return a pointer
-** to the inserted node, or return NULL if the append fails.
-*/
-static JsonNode *jsonLookupAppend(
- JsonParse *pParse, /* Append content to the JSON parse */
- const char *zPath, /* Description of content to append */
- int *pApnd, /* Set this flag to 1 */
- const char **pzErr /* Make this point to any syntax error */
-){
- *pApnd = 1;
- if( zPath[0]==0 ){
- jsonParseAddNode(pParse, JSON_NULL, 0, 0);
- return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
- }
- if( zPath[0]=='.' ){
- jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
- }else if( strncmp(zPath,"[0]",3)==0 ){
- jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
- }else{
- return 0;
- }
- if( pParse->oom ) return 0;
- return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
-}
-
-/*
-** Return the text of a syntax error message on a JSON path. Space is
-** obtained from sqlite3_malloc().
-*/
-static char *jsonPathSyntaxError(const char *zErr){
- return sqlite3_mprintf("JSON path error near '%q'", zErr);
-}
-
-/*
-** Do a node lookup using zPath. Return a pointer to the node on success.
-** Return NULL if not found or if there is an error.
-**
-** On an error, write an error message into pCtx and increment the
-** pParse->nErr counter.
-**
-** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
-** nodes are appended.
-*/
-static JsonNode *jsonLookup(
- JsonParse *pParse, /* The JSON to search */
- const char *zPath, /* The path to search */
- int *pApnd, /* Append nodes to complete path if not NULL */
- sqlite3_context *pCtx /* Report errors here, if not NULL */
-){
- const char *zErr = 0;
- JsonNode *pNode = 0;
- char *zMsg;
-
- if( zPath==0 ) return 0;
- if( zPath[0]!='$' ){
- zErr = zPath;
- goto lookup_err;
- }
- zPath++;
- pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
- if( zErr==0 ) return pNode;
-
-lookup_err:
- pParse->nErr++;
- assert( zErr!=0 && pCtx!=0 );
- zMsg = jsonPathSyntaxError(zErr);
- if( zMsg ){
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
- }else{
- sqlite3_result_error_nomem(pCtx);
- }
- return 0;
-}
-
-
-/*
-** Report the wrong number of arguments for json_insert(), json_replace()
-** or json_set().
-*/
-static void jsonWrongNumArgs(
- sqlite3_context *pCtx,
- const char *zFuncName
-){
- char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
- zFuncName);
- sqlite3_result_error(pCtx, zMsg, -1);
- sqlite3_free(zMsg);
-}
-
-/*
-** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
-*/
-static void jsonRemoveAllNulls(JsonNode *pNode){
- int i, n;
- assert( pNode->eType==JSON_OBJECT );
- n = pNode->n;
- for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
- switch( pNode[i].eType ){
- case JSON_NULL:
- pNode[i].jnFlags |= JNODE_REMOVE;
- break;
- case JSON_OBJECT:
- jsonRemoveAllNulls(&pNode[i]);
- break;
- }
- }
-}
-
-
-/****************************************************************************
-** SQL functions used for testing and debugging
-****************************************************************************/
-
-#ifdef SQLITE_DEBUG
-/*
-** The json_parse(JSON) function returns a string which describes
-** a parse of the JSON provided. Or it returns NULL if JSON is not
-** well-formed.
-*/
-static void jsonParseFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonString s; /* Output string - not real JSON */
- JsonParse x; /* The parse */
- u32 i;
-
- assert( argc==1 );
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- jsonParseFindParents(&x);
- jsonInit(&s, ctx);
- for(i=0; inNode );
- if( argc==2 ){
- const char *zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
- }else{
- pNode = p->aNode;
- }
- if( pNode==0 ){
- return;
- }
- if( pNode->eType==JSON_ARRAY ){
- assert( (pNode->jnFlags & JNODE_APPEND)==0 );
- for(i=1; i<=pNode->n; n++){
- i += jsonNodeSize(&pNode[i]);
- }
- }
- sqlite3_result_int64(ctx, n);
-}
-
-/*
-** json_extract(JSON, PATH, ...)
-**
-** Return the element described by PATH. Return NULL if there is no
-** PATH element. If there are multiple PATHs, then return a JSON array
-** with the result from each path. Throw an error if the JSON or any PATH
-** is malformed.
-*/
-static void jsonExtractFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse *p; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- JsonString jx;
- int i;
-
- if( argc<2 ) return;
- p = jsonParseCached(ctx, argv, ctx);
- if( p==0 ) return;
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '[');
- for(i=1; i<argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(p, zPath, 0, ctx);
- if( p->nErr ) break;
- if( argc>2 ){
- jsonAppendSeparator(&jx);
- if( pNode ){
- jsonRenderNode(pNode, &jx, 0);
- }else{
- jsonAppendRaw(&jx, "null", 4);
- }
- }else if( pNode ){
- jsonReturn(pNode, ctx, 0);
- }
- }
- if( argc>2 && i==argc ){
- jsonAppendChar(&jx, ']');
- jsonResult(&jx);
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
- }
- jsonReset(&jx);
-}
-
-/* This is the RFC 7396 MergePatch algorithm.
-*/
-static JsonNode *jsonMergePatch(
- JsonParse *pParse, /* The JSON parser that contains the TARGET */
- u32 iTarget, /* Node of the TARGET in pParse */
- JsonNode *pPatch /* The PATCH */
-){
- u32 i, j;
- u32 iRoot;
- JsonNode *pTarget;
- if( pPatch->eType!=JSON_OBJECT ){
- return pPatch;
- }
- assert( iTarget>=0 && iTarget<pParse->nNode );
- pTarget = &pParse->aNode[iTarget];
- assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
- if( pTarget->eType!=JSON_OBJECT ){
- jsonRemoveAllNulls(pPatch);
- return pPatch;
- }
- iRoot = iTarget;
- for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){
- u32 nKey;
- const char *zKey;
- assert( pPatch[i].eType==JSON_STRING );
- assert( pPatch[i].jnFlags & JNODE_LABEL );
- nKey = pPatch[i].n;
- zKey = pPatch[i].u.zJContent;
- assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
- for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){
- assert( pTarget[j].eType==JSON_STRING );
- assert( pTarget[j].jnFlags & JNODE_LABEL );
- assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
- if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){
- if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break;
- if( pPatch[i+1].eType==JSON_NULL ){
- pTarget[j+1].jnFlags |= JNODE_REMOVE;
- }else{
- JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
- if( pNew==0 ) return 0;
- pTarget = &pParse->aNode[iTarget];
- if( pNew!=&pTarget[j+1] ){
- pTarget[j+1].u.pPatch = pNew;
- pTarget[j+1].jnFlags |= JNODE_PATCH;
- }
- }
- break;
- }
- }
- if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
- int iStart, iPatch;
- iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
- jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
- iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
- if( pParse->oom ) return 0;
- jsonRemoveAllNulls(pPatch);
- pTarget = &pParse->aNode[iTarget];
- pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
- pParse->aNode[iRoot].u.iAppend = iStart - iRoot;
- iRoot = iStart;
- pParse->aNode[iPatch].jnFlags |= JNODE_PATCH;
- pParse->aNode[iPatch].u.pPatch = &pPatch[i+1];
- }
- }
- return pTarget;
-}
-
-/*
-** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON
-** object that is the result of running the RFC 7396 MergePatch() algorithm
-** on the two arguments.
-*/
-static void jsonPatchFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The JSON that is being patched */
- JsonParse y; /* The patch */
- JsonNode *pResult; /* The result of the merge */
-
- UNUSED_PARAM(argc);
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){
- jsonParseReset(&x);
- return;
- }
- pResult = jsonMergePatch(&x, 0, y.aNode);
- assert( pResult!=0 || x.oom );
- if( pResult ){
- jsonReturnJson(pResult, ctx, 0);
- }else{
- sqlite3_result_error_nomem(ctx);
- }
- jsonParseReset(&x);
- jsonParseReset(&y);
-}
-
-
-/*
-** Implementation of the json_object(NAME,VALUE,...) function. Return a JSON
-** object that contains all name/value given in arguments. Or if any name
-** is not a string or if any value is a BLOB, throw an error.
-*/
-static void jsonObjectFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- int i;
- JsonString jx;
- const char *z;
- u32 n;
-
- if( argc&1 ){
- sqlite3_result_error(ctx, "json_object() requires an even number "
- "of arguments", -1);
- return;
- }
- jsonInit(&jx, ctx);
- jsonAppendChar(&jx, '{');
- for(i=0; i<argc; i+=2){
- if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
- sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
- jsonReset(&jx);
- return;
- }
- z = (const char*)sqlite3_value_text(argv[i]);
- n = (u32)sqlite3_value_bytes(argv[i]);
- jsonAppendSeparator(&jx);
- jsonAppendString(&jx, z, n);
- jsonAppendChar(&jx, ':');
- jsonAppendValue(&jx, argv[i+1]);
- }
- jsonAppendChar(&jx, '}');
- jsonResult(&jx);
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
-}
-
-
-/*
-** json_remove(JSON, PATH, ...)
-**
-** Remove the named elements from JSON and return the result. malformed
-** JSON or PATH arguments result in an error.
-*/
-static void jsonRemoveFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
- if( argc<1 ) return;
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i++){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- if( zPath==0 ) goto remove_done;
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto remove_done;
- if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
- }
- if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
- jsonReturnJson(x.aNode, ctx, 0);
- }
-remove_done:
- jsonParseReset(&x);
-}
-
-/*
-** json_replace(JSON, PATH, VALUE, ...)
-**
-** Replace the value at PATH with VALUE. If PATH does not already exist,
-** this routine is a no-op. If JSON or PATH is malformed, throw an error.
-*/
-static void jsonReplaceFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
-
- if( argc<1 ) return;
- if( (argc&1)==0 ) {
- jsonWrongNumArgs(ctx, "replace");
- return;
- }
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- pNode = jsonLookup(&x, zPath, 0, ctx);
- if( x.nErr ) goto replace_err;
- if( pNode ){
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-replace_err:
- jsonParseReset(&x);
-}
-
-/*
-** json_set(JSON, PATH, VALUE, ...)
-**
-** Set the value at PATH to VALUE. Create the PATH if it does not already
-** exist. Overwrite existing values that do exist.
-** If JSON or PATH is malformed, throw an error.
-**
-** json_insert(JSON, PATH, VALUE, ...)
-**
-** Create PATH and initialize it to VALUE. If PATH already exists, this
-** routine is a no-op. If JSON or PATH is malformed, throw an error.
-*/
-static void jsonSetFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse x; /* The parse */
- JsonNode *pNode;
- const char *zPath;
- u32 i;
- int bApnd;
- int bIsSet = *(int*)sqlite3_user_data(ctx);
-
- if( argc<1 ) return;
- if( (argc&1)==0 ) {
- jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
- return;
- }
- if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
- assert( x.nNode );
- for(i=1; i<(u32)argc; i+=2){
- zPath = (const char*)sqlite3_value_text(argv[i]);
- bApnd = 0;
- pNode = jsonLookup(&x, zPath, &bApnd, ctx);
- if( x.oom ){
- sqlite3_result_error_nomem(ctx);
- goto jsonSetDone;
- }else if( x.nErr ){
- goto jsonSetDone;
- }else if( pNode && (bApnd || bIsSet) ){
- pNode->jnFlags |= (u8)JNODE_REPLACE;
- pNode->u.iReplace = i + 1;
- }
- }
- if( x.aNode[0].jnFlags & JNODE_REPLACE ){
- sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
- }else{
- jsonReturnJson(x.aNode, ctx, argv);
- }
-jsonSetDone:
- jsonParseReset(&x);
-}
-
-/*
-** json_type(JSON)
-** json_type(JSON, PATH)
-**
-** Return the top-level "type" of a JSON string. Throw an error if
-** either the JSON or PATH inputs are not well-formed.
-*/
-static void jsonTypeFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse *p; /* The parse */
- const char *zPath;
- JsonNode *pNode;
-
- p = jsonParseCached(ctx, argv, ctx);
- if( p==0 ) return;
- if( argc==2 ){
- zPath = (const char*)sqlite3_value_text(argv[1]);
- pNode = jsonLookup(p, zPath, 0, ctx);
- }else{
- pNode = p->aNode;
- }
- if( pNode ){
- sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
- }
-}
-
-/*
-** json_valid(JSON)
-**
-** Return 1 if JSON is a well-formed JSON string according to RFC-7159.
-** Return 0 otherwise.
-*/
-static void jsonValidFunc(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonParse *p; /* The parse */
- UNUSED_PARAM(argc);
- p = jsonParseCached(ctx, argv, 0);
- sqlite3_result_int(ctx, p!=0);
-}
-
-
-/****************************************************************************
-** Aggregate SQL function implementations
-****************************************************************************/
-/*
-** json_group_array(VALUE)
-**
-** Return a JSON array composed of all values in the aggregate.
-*/
-static void jsonArrayStep(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonString *pStr;
- UNUSED_PARAM(argc);
- pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
- if( pStr ){
- if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
- jsonAppendChar(pStr, '[');
- }else{
- jsonAppendChar(pStr, ',');
- pStr->pCtx = ctx;
- }
- jsonAppendValue(pStr, argv[0]);
- }
-}
-static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){
- JsonString *pStr;
- pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
- if( pStr ){
- pStr->pCtx = ctx;
- jsonAppendChar(pStr, ']');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
- }else if( isFinal ){
- sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
- pStr->bStatic = 1;
- }else{
- sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
- }
- }else{
- sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
- }
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
-}
-static void jsonArrayValue(sqlite3_context *ctx){
- jsonArrayCompute(ctx, 0);
-}
-static void jsonArrayFinal(sqlite3_context *ctx){
- jsonArrayCompute(ctx, 1);
-}
-
-#ifndef SQLITE_OMIT_WINDOWFUNC
-/*
-** This method works for both json_group_array() and json_group_object().
-** It works by removing the first element of the group by searching forward
-** to the first comma (",") that is not within a string and deleting all
-** text through that comma.
-*/
-static void jsonGroupInverse(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- int i;
- int inStr = 0;
- char *z;
- JsonString *pStr;
- UNUSED_PARAM(argc);
- UNUSED_PARAM(argv);
- pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
-#ifdef NEVER
- /* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will
- ** always have been called to initalize it */
- if( NEVER(!pStr) ) return;
-#endif
- z = pStr->zBuf;
- for(i=1; z[i]!=',' || inStr; i++){
- assert( i<pStr->nUsed );
- if( z[i]=='"' ){
- inStr = !inStr;
- }else if( z[i]=='\\' ){
- i++;
- }
- }
- pStr->nUsed -= i;
- memmove(&z[1], &z[i+1], (size_t)pStr->nUsed-1);
-}
-#else
-# define jsonGroupInverse 0
-#endif
-
-
-/*
-** json_group_obj(NAME,VALUE)
-**
-** Return a JSON object composed of all names and values in the aggregate.
-*/
-static void jsonObjectStep(
- sqlite3_context *ctx,
- int argc,
- sqlite3_value **argv
-){
- JsonString *pStr;
- const char *z;
- u32 n;
- UNUSED_PARAM(argc);
- pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
- if( pStr ){
- if( pStr->zBuf==0 ){
- jsonInit(pStr, ctx);
- jsonAppendChar(pStr, '{');
- }else{
- jsonAppendChar(pStr, ',');
- pStr->pCtx = ctx;
- }
- z = (const char*)sqlite3_value_text(argv[0]);
- n = (u32)sqlite3_value_bytes(argv[0]);
- jsonAppendString(pStr, z, n);
- jsonAppendChar(pStr, ':');
- jsonAppendValue(pStr, argv[1]);
- }
-}
-static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){
- JsonString *pStr;
- pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
- if( pStr ){
- jsonAppendChar(pStr, '}');
- if( pStr->bErr ){
- if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
- assert( pStr->bStatic );
- }else if( isFinal ){
- sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed,
- pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
- pStr->bStatic = 1;
- }else{
- sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);
- pStr->nUsed--;
- }
- }else{
- sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
- }
- sqlite3_result_subtype(ctx, JSON_SUBTYPE);
-}
-static void jsonObjectValue(sqlite3_context *ctx){
- jsonObjectCompute(ctx, 0);
-}
-static void jsonObjectFinal(sqlite3_context *ctx){
- jsonObjectCompute(ctx, 1);
-}
-
-
-
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-/****************************************************************************
-** The json_each virtual table
-****************************************************************************/
-typedef struct JsonEachCursor JsonEachCursor;
-struct JsonEachCursor {
- sqlite3_vtab_cursor base; /* Base class - must be first */
- u32 iRowid; /* The rowid */
- u32 iBegin; /* The first node of the scan */
- u32 i; /* Index in sParse.aNode[] of current row */
- u32 iEnd; /* EOF when i equals or exceeds this value */
- u8 eType; /* Type of top-level element */
- u8 bRecursive; /* True for json_tree(). False for json_each() */
- char *zJson; /* Input JSON */
- char *zRoot; /* Path by which to filter zJson */
- JsonParse sParse; /* Parse of the input JSON */
-};
-
-/* Constructor for the json_each virtual table */
-static int jsonEachConnect(
- sqlite3 *db,
- void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVtab,
- char **pzErr
-){
- sqlite3_vtab *pNew;
- int rc;
-
-/* Column numbers */
-#define JEACH_KEY 0
-#define JEACH_VALUE 1
-#define JEACH_TYPE 2
-#define JEACH_ATOM 3
-#define JEACH_ID 4
-#define JEACH_PARENT 5
-#define JEACH_FULLKEY 6
-#define JEACH_PATH 7
-#define JEACH_JSON 8
-#define JEACH_ROOT 9
-
- UNUSED_PARAM(pzErr);
- UNUSED_PARAM(argv);
- UNUSED_PARAM(argc);
- UNUSED_PARAM(pAux);
- rc = sqlite3_declare_vtab(db,
- "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
- "json HIDDEN,root HIDDEN)");
- if( rc==SQLITE_OK ){
- pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
- if( pNew==0 ) return SQLITE_NOMEM;
- memset(pNew, 0, sizeof(*pNew));
- }
- return rc;
-}
-
-/* destructor for json_each virtual table */
-static int jsonEachDisconnect(sqlite3_vtab *pVtab){
- sqlite3_free(pVtab);
- return SQLITE_OK;
-}
-
-/* constructor for a JsonEachCursor object for json_each(). */
-static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
- JsonEachCursor *pCur;
-
- UNUSED_PARAM(p);
- pCur = sqlite3_malloc( sizeof(*pCur) );
- if( pCur==0 ) return SQLITE_NOMEM;
- memset(pCur, 0, sizeof(*pCur));
- *ppCursor = &pCur->base;
- return SQLITE_OK;
-}
-
-/* constructor for a JsonEachCursor object for json_tree(). */
-static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
- int rc = jsonEachOpenEach(p, ppCursor);
- if( rc==SQLITE_OK ){
- JsonEachCursor *pCur = (JsonEachCursor*)*ppCursor;
- pCur->bRecursive = 1;
- }
- return rc;
-}
-
-/* Reset a JsonEachCursor back to its original state. Free any memory
-** held. */
-static void jsonEachCursorReset(JsonEachCursor *p){
- sqlite3_free(p->zJson);
- sqlite3_free(p->zRoot);
- jsonParseReset(&p->sParse);
- p->iRowid = 0;
- p->i = 0;
- p->iEnd = 0;
- p->eType = 0;
- p->zJson = 0;
- p->zRoot = 0;
-}
-
-/* Destructor for a jsonEachCursor object */
-static int jsonEachClose(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- jsonEachCursorReset(p);
- sqlite3_free(cur);
- return SQLITE_OK;
-}
-
-/* Return TRUE if the jsonEachCursor object has been advanced off the end
-** of the JSON object */
-static int jsonEachEof(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- return p->i >= p->iEnd;
-}
-
-/* Advance the cursor to the next element for json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- if( p->bRecursive ){
- if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
- p->i++;
- p->iRowid++;
- if( p->i<p->iEnd ){
- u32 iUp = p->sParse.aUp[p->i];
- JsonNode *pUp = &p->sParse.aNode[iUp];
- p->eType = pUp->eType;
- if( pUp->eType==JSON_ARRAY ){
- if( iUp==p->i-1 ){
- pUp->u.iKey = 0;
- }else{
- pUp->u.iKey++;
- }
- }
- }
- }else{
- switch( p->eType ){
- case JSON_ARRAY: {
- p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
- p->iRowid++;
- break;
- }
- case JSON_OBJECT: {
- p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
- p->iRowid++;
- break;
- }
- default: {
- p->i = p->iEnd;
- break;
- }
- }
- }
- return SQLITE_OK;
-}
-
-/* Append the name of the path for element i to pStr
-*/
-static void jsonEachComputePath(
- JsonEachCursor *p, /* The cursor */
- JsonString *pStr, /* Write the path here */
- u32 i /* Path to this element */
-){
- JsonNode *pNode, *pUp;
- u32 iUp;
- if( i==0 ){
- jsonAppendChar(pStr, '$');
- return;
- }
- iUp = p->sParse.aUp[i];
- jsonEachComputePath(p, pStr, iUp);
- pNode = &p->sParse.aNode[i];
- pUp = &p->sParse.aNode[iUp];
- if( pUp->eType==JSON_ARRAY ){
- jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
- }else{
- assert( pUp->eType==JSON_OBJECT );
- if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
- assert( pNode->eType==JSON_STRING );
- assert( pNode->jnFlags & JNODE_LABEL );
- jsonPrintf(pNode->n+1, pStr, ".%.*s", pNode->n-2, pNode->u.zJContent+1);
- }
-}
-
-/* Return the value of a column */
-static int jsonEachColumn(
- sqlite3_vtab_cursor *cur, /* The cursor */
- sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
- int i /* Which column to return */
-){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- JsonNode *pThis = &p->sParse.aNode[p->i];
- switch( i ){
- case JEACH_KEY: {
- if( p->i==0 ) break;
- if( p->eType==JSON_OBJECT ){
- jsonReturn(pThis, ctx, 0);
- }else if( p->eType==JSON_ARRAY ){
- u32 iKey;
- if( p->bRecursive ){
- if( p->iRowid==0 ) break;
- iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
- }else{
- iKey = p->iRowid;
- }
- sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
- }
- break;
- }
- case JEACH_VALUE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- jsonReturn(pThis, ctx, 0);
- break;
- }
- case JEACH_TYPE: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
- break;
- }
- case JEACH_ATOM: {
- if( pThis->jnFlags & JNODE_LABEL ) pThis++;
- if( pThis->eType>=JSON_ARRAY ) break;
- jsonReturn(pThis, ctx, 0);
- break;
- }
- case JEACH_ID: {
- sqlite3_result_int64(ctx,
- (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
- break;
- }
- case JEACH_PARENT: {
- if( p->i>p->iBegin && p->bRecursive ){
- sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
- }
- break;
- }
- case JEACH_FULLKEY: {
- JsonString x;
- jsonInit(&x, ctx);
- if( p->bRecursive ){
- jsonEachComputePath(p, &x, p->i);
- }else{
- if( p->zRoot ){
- jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
- }else{
- jsonAppendChar(&x, '$');
- }
- if( p->eType==JSON_ARRAY ){
- jsonPrintf(30, &x, "[%d]", p->iRowid);
- }else if( p->eType==JSON_OBJECT ){
- jsonPrintf(pThis->n, &x, ".%.*s", pThis->n-2, pThis->u.zJContent+1);
- }
- }
- jsonResult(&x);
- break;
- }
- case JEACH_PATH: {
- if( p->bRecursive ){
- JsonString x;
- jsonInit(&x, ctx);
- jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
- jsonResult(&x);
- break;
- }
- /* For json_each() path and root are the same so fall through
- ** into the root case */
- }
- default: {
- const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
- sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
- break;
- }
- case JEACH_JSON: {
- assert( i==JEACH_JSON );
- sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
- break;
- }
- }
- return SQLITE_OK;
-}
-
-/* Return the current rowid value */
-static int jsonEachRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- *pRowid = p->iRowid;
- return SQLITE_OK;
-}
-
-/* The query strategy is to look for an equality constraint on the json
-** column. Without such a constraint, the table cannot operate. idxNum is
-** 1 if the constraint is found, 3 if the constraint and zRoot are found,
-** and 0 otherwise.
-*/
-static int jsonEachBestIndex(
- sqlite3_vtab *tab,
- sqlite3_index_info *pIdxInfo
-){
- int i;
- int jsonIdx = -1;
- int rootIdx = -1;
- const struct sqlite3_index_constraint *pConstraint;
-
- UNUSED_PARAM(tab);
- pConstraint = pIdxInfo->aConstraint;
- for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
- if( pConstraint->usable==0 ) continue;
- if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
- switch( pConstraint->iColumn ){
- case JEACH_JSON: jsonIdx = i; break;
- case JEACH_ROOT: rootIdx = i; break;
- default: /* no-op */ break;
- }
- }
- if( jsonIdx<0 ){
- pIdxInfo->idxNum = 0;
- pIdxInfo->estimatedCost = 1e99;
- }else{
- pIdxInfo->estimatedCost = 1.0;
- pIdxInfo->aConstraintUsage[jsonIdx].argvIndex = 1;
- pIdxInfo->aConstraintUsage[jsonIdx].omit = 1;
- if( rootIdx<0 ){
- pIdxInfo->idxNum = 1;
- }else{
- pIdxInfo->aConstraintUsage[rootIdx].argvIndex = 2;
- pIdxInfo->aConstraintUsage[rootIdx].omit = 1;
- pIdxInfo->idxNum = 3;
- }
- }
- return SQLITE_OK;
-}
-
-/* Start a search on a new JSON string */
-static int jsonEachFilter(
- sqlite3_vtab_cursor *cur,
- int idxNum, const char *idxStr,
- int argc, sqlite3_value **argv
-){
- JsonEachCursor *p = (JsonEachCursor*)cur;
- const char *z;
- const char *zRoot = 0;
- sqlite3_int64 n;
-
- UNUSED_PARAM(idxStr);
- UNUSED_PARAM(argc);
- jsonEachCursorReset(p);
- if( idxNum==0 ) return SQLITE_OK;
- z = (const char*)sqlite3_value_text(argv[0]);
- if( z==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[0]);
- p->zJson = sqlite3_malloc64( n+1 );
- if( p->zJson==0 ) return SQLITE_NOMEM;
- memcpy(p->zJson, z, (size_t)n+1);
- if( jsonParse(&p->sParse, 0, p->zJson) ){
- int rc = SQLITE_NOMEM;
- if( p->sParse.oom==0 ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
- if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
- }
- jsonEachCursorReset(p);
- return rc;
- }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
- jsonEachCursorReset(p);
- return SQLITE_NOMEM;
- }else{
- JsonNode *pNode = 0;
- if( idxNum==3 ){
- const char *zErr = 0;
- zRoot = (const char*)sqlite3_value_text(argv[1]);
- if( zRoot==0 ) return SQLITE_OK;
- n = sqlite3_value_bytes(argv[1]);
- p->zRoot = sqlite3_malloc64( n+1 );
- if( p->zRoot==0 ) return SQLITE_NOMEM;
- memcpy(p->zRoot, zRoot, (size_t)n+1);
- if( zRoot[0]!='$' ){
- zErr = zRoot;
- }else{
- pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
- }
- if( zErr ){
- sqlite3_free(cur->pVtab->zErrMsg);
- cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
- jsonEachCursorReset(p);
- return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
- }else if( pNode==0 ){
- return SQLITE_OK;
- }
- }else{
- pNode = p->sParse.aNode;
- }
- p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
- p->eType = pNode->eType;
- if( p->eType>=JSON_ARRAY ){
- pNode->u.iKey = 0;
- p->iEnd = p->i + pNode->n + 1;
- if( p->bRecursive ){
- p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
- if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
- p->i--;
- }
- }else{
- p->i++;
- }
- }else{
- p->iEnd = p->i+1;
- }
- }
- return SQLITE_OK;
-}
-
-/* The methods of the json_each virtual table */
-static sqlite3_module jsonEachModule = {
- 0, /* iVersion */
- 0, /* xCreate */
- jsonEachConnect, /* xConnect */
- jsonEachBestIndex, /* xBestIndex */
- jsonEachDisconnect, /* xDisconnect */
- 0, /* xDestroy */
- jsonEachOpenEach, /* xOpen - open a cursor */
- jsonEachClose, /* xClose - close a cursor */
- jsonEachFilter, /* xFilter - configure scan constraints */
- jsonEachNext, /* xNext - advance a cursor */
- jsonEachEof, /* xEof - check for end of scan */
- jsonEachColumn, /* xColumn - read data */
- jsonEachRowid, /* xRowid - read data */
- 0, /* xUpdate */
- 0, /* xBegin */
- 0, /* xSync */
- 0, /* xCommit */
- 0, /* xRollback */
- 0, /* xFindMethod */
- 0, /* xRename */
- 0, /* xSavepoint */
- 0, /* xRelease */
- 0 /* xRollbackTo */
-};
-
-/* The methods of the json_tree virtual table. */
-static sqlite3_module jsonTreeModule = {
- 0, /* iVersion */
- 0, /* xCreate */
- jsonEachConnect, /* xConnect */
- jsonEachBestIndex, /* xBestIndex */
- jsonEachDisconnect, /* xDisconnect */
- 0, /* xDestroy */
- jsonEachOpenTree, /* xOpen - open a cursor */
- jsonEachClose, /* xClose - close a cursor */
- jsonEachFilter, /* xFilter - configure scan constraints */
- jsonEachNext, /* xNext - advance a cursor */
- jsonEachEof, /* xEof - check for end of scan */
- jsonEachColumn, /* xColumn - read data */
- jsonEachRowid, /* xRowid - read data */
- 0, /* xUpdate */
- 0, /* xBegin */
- 0, /* xSync */
- 0, /* xCommit */
- 0, /* xRollback */
- 0, /* xFindMethod */
- 0, /* xRename */
- 0, /* xSavepoint */
- 0, /* xRelease */
- 0 /* xRollbackTo */
-};
-#endif /* SQLITE_OMIT_VIRTUALTABLE */
-
-/****************************************************************************
-** The following routines are the only publically visible identifiers in this
-** file. Call the following routines in order to register the various SQL
-** functions and the virtual table implemented by this file.
-****************************************************************************/
-
-SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){
- int rc = SQLITE_OK;
- unsigned int i;
- static const struct {
- const char *zName;
- int nArg;
- int flag;
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
- } aFunc[] = {
- { "json", 1, 0, jsonRemoveFunc },
- { "json_array", -1, 0, jsonArrayFunc },
- { "json_array_length", 1, 0, jsonArrayLengthFunc },
- { "json_array_length", 2, 0, jsonArrayLengthFunc },
- { "json_extract", -1, 0, jsonExtractFunc },
- { "json_insert", -1, 0, jsonSetFunc },
- { "json_object", -1, 0, jsonObjectFunc },
- { "json_patch", 2, 0, jsonPatchFunc },
- { "json_quote", 1, 0, jsonQuoteFunc },
- { "json_remove", -1, 0, jsonRemoveFunc },
- { "json_replace", -1, 0, jsonReplaceFunc },
- { "json_set", -1, 1, jsonSetFunc },
- { "json_type", 1, 0, jsonTypeFunc },
- { "json_type", 2, 0, jsonTypeFunc },
- { "json_valid", 1, 0, jsonValidFunc },
-
-#if SQLITE_DEBUG
- /* DEBUG and TESTING functions */
- { "json_parse", 1, 0, jsonParseFunc },
- { "json_test1", 1, 0, jsonTest1Func },
-#endif
- };
- static const struct {
- const char *zName;
- int nArg;
- void (*xStep)(sqlite3_context*,int,sqlite3_value**);
- void (*xFinal)(sqlite3_context*);
- void (*xValue)(sqlite3_context*);
- } aAgg[] = {
- { "json_group_array", 1,
- jsonArrayStep, jsonArrayFinal, jsonArrayValue },
- { "json_group_object", 2,
- jsonObjectStep, jsonObjectFinal, jsonObjectValue },
- };
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- static const struct {
- const char *zName;
- sqlite3_module *pModule;
- } aMod[] = {
- { "json_each", &jsonEachModule },
- { "json_tree", &jsonTreeModule },
- };
-#endif
- for(i=0; ipWriteRowid, 1);
sqlite3_bind_null(pRtree->pWriteRowid, 2);
@@ -181276,7 +173701,7 @@ static int rtreeUpdate(
/* Figure out the rowid of the new row. */
if( bHaveRowid==0 ){
- rc = rtreeNewRowid(pRtree, &cell.iRowid);
+ rc = newRowid(pRtree, &cell.iRowid);
}
*pRowid = cell.iRowid;
@@ -181368,7 +173793,7 @@ static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){
*/
static int rtreeSavepoint(sqlite3_vtab *pVtab, int iSavepoint){
Rtree *pRtree = (Rtree *)pVtab;
- u8 iwt = pRtree->inWrTrans;
+ int iwt = pRtree->inWrTrans;
UNUSED_PARAMETER(iSavepoint);
pRtree->inWrTrans = 0;
nodeBlobReset(pRtree);
@@ -181549,11 +173974,7 @@ static int rtreeSqlInit(
sqlite3_str_appendf(p, "UPDATE \"%w\".\"%w_rowid\"SET ", zDb, zPrefix);
for(ii=0; ii<pRtree->nAux; ii++){
if( ii ) sqlite3_str_append(p, ",", 1);
- if( ii<pRtree->nAuxNotNull ){
- sqlite3_str_appendf(p,"a%d=coalesce(?%d,a%d)",ii,ii+2,ii);
- }else{
- sqlite3_str_appendf(p,"a%d=?%d",ii,ii+2);
- }
+ sqlite3_str_appendf(p,"a%d=?%d",ii,ii+2);
}
sqlite3_str_appendf(p, " WHERE rowid=?1");
zSql = sqlite3_str_finish(p);
@@ -182322,1673 +174743,6 @@ static void rtreecheck(
}
}
-/* Conditionally include the geopoly code */
-#ifdef SQLITE_ENABLE_GEOPOLY
-/************** Include geopoly.c in the middle of rtree.c *******************/
-/************** Begin file geopoly.c *****************************************/
-/*
-** 2018-05-25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file implements an alternative R-Tree virtual table that
-** uses polygons to express the boundaries of 2-dimensional objects.
-**
-** This file is #include-ed onto the end of "rtree.c" so that it has
-** access to all of the R-Tree internals.
-*/
-/* #include */
-
-/* Enable -DGEOPOLY_ENABLE_DEBUG for debugging facilities */
-#ifdef GEOPOLY_ENABLE_DEBUG
- static int geo_debug = 0;
-# define GEODEBUG(X) if(geo_debug)printf X
-#else
-# define GEODEBUG(X)
-#endif
-
-#ifndef JSON_NULL /* The following stuff repeats things found in json1 */
-/*
-** Versions of isspace(), isalnum() and isdigit() to which it is safe
-** to pass signed char values.
-*/
-#ifdef sqlite3Isdigit
- /* Use the SQLite core versions if this routine is part of the
- ** SQLite amalgamation */
-# define safe_isdigit(x) sqlite3Isdigit(x)
-# define safe_isalnum(x) sqlite3Isalnum(x)
-# define safe_isxdigit(x) sqlite3Isxdigit(x)
-#else
- /* Use the standard library for separate compilation */
-#include <ctype.h> /* amalgamator: keep */
-# define safe_isdigit(x) isdigit((unsigned char)(x))
-# define safe_isalnum(x) isalnum((unsigned char)(x))
-# define safe_isxdigit(x) isxdigit((unsigned char)(x))
-#endif
-
-/*
-** Growing our own isspace() routine this way is twice as fast as
-** the library isspace() function.
-*/
-static const char geopolyIsSpace[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-#define safe_isspace(x) (geopolyIsSpace[(unsigned char)x])
-#endif /* JSON NULL - back to original code */
-
-/* Compiler and version */
-#ifndef GCC_VERSION
-#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
-# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
-#else
-# define GCC_VERSION 0
-#endif
-#endif
-#ifndef MSVC_VERSION
-#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
-# define MSVC_VERSION _MSC_VER
-#else
-# define MSVC_VERSION 0
-#endif
-#endif
-
-/* Datatype for coordinates
-*/
-typedef float GeoCoord;
-
-/*
-** Internal representation of a polygon.
-**
-** The polygon consists of a sequence of vertexes. There is a line
-** segment between each pair of vertexes, and one final segment from
-** the last vertex back to the first. (This differs from the GeoJSON
-** standard in which the final vertex is a repeat of the first.)
-**
-** The polygon follows the right-hand rule. The area to the right of
-** each segment is "outside" and the area to the left is "inside".
-**
-** The on-disk representation consists of a 4-byte header followed by
-** the values. The 4-byte header is:
-**
-** encoding (1 byte) 0=big-endian, 1=little-endian
-** nvertex (3 bytes) Number of vertexes as a big-endian integer
-*/
-typedef struct GeoPoly GeoPoly;
-struct GeoPoly {
- int nVertex; /* Number of vertexes */
- unsigned char hdr[4]; /* Header for on-disk representation */
- GeoCoord a[2]; /* 2*nVertex values. X (longitude) first, then Y */
-};
-
-/*
-** State of a parse of a GeoJSON input.
-*/
-typedef struct GeoParse GeoParse;
-struct GeoParse {
- const unsigned char *z; /* Unparsed input */
- int nVertex; /* Number of vertexes in a[] */
- int nAlloc; /* Space allocated to a[] */
- int nErr; /* Number of errors encountered */
- GeoCoord *a; /* Array of vertexes. From sqlite3_malloc64() */
-};
-
-/* Do a 4-byte byte swap */
-static void geopolySwab32(unsigned char *a){
- unsigned char t = a[0];
- a[0] = a[3];
- a[3] = t;
- t = a[1];
- a[1] = a[2];
- a[2] = t;
-}
-
-/* Skip whitespace. Return the next non-whitespace character. */
-static char geopolySkipSpace(GeoParse *p){
- while( p->z[0] && safe_isspace(p->z[0]) ) p->z++;
- return p->z[0];
-}
-
-/* Parse out a number. Write the value into *pVal if pVal!=0.
-** return non-zero on success and zero if the next token is not a number.
-*/
-static int geopolyParseNumber(GeoParse *p, GeoCoord *pVal){
- char c = geopolySkipSpace(p);
- const unsigned char *z = p->z;
- int j = 0;
- int seenDP = 0;
- int seenE = 0;
- if( c=='-' ){
- j = 1;
- c = z[j];
- }
- if( c=='0' && z[j+1]>='0' && z[j+1]<='9' ) return 0;
- for(;; j++){
- c = z[j];
- if( c>='0' && c<='9' ) continue;
- if( c=='.' ){
- if( z[j-1]=='-' ) return 0;
- if( seenDP ) return 0;
- seenDP = 1;
- continue;
- }
- if( c=='e' || c=='E' ){
- if( z[j-1]<'0' ) return 0;
- if( seenE ) return -1;
- seenDP = seenE = 1;
- c = z[j+1];
- if( c=='+' || c=='-' ){
- j++;
- c = z[j+1];
- }
- if( c<'0' || c>'9' ) return 0;
- continue;
- }
- break;
- }
- if( z[j-1]<'0' ) return 0;
- if( pVal ) *pVal = (GeoCoord)atof((const char*)p->z);
- p->z += j;
- return 1;
-}
-
-/*
-** If the input is a well-formed JSON array of coordinates with at least
-** four coordinates and where each coordinate is itself a two-value array,
-** then convert the JSON into a GeoPoly object and return a pointer to
-** that object.
-**
-** If any error occurs, return NULL.
-*/
-static GeoPoly *geopolyParseJson(const unsigned char *z, int *pRc){
- GeoParse s;
- int rc = SQLITE_OK;
- memset(&s, 0, sizeof(s));
- s.z = z;
- if( geopolySkipSpace(&s)=='[' ){
- s.z++;
- while( geopolySkipSpace(&s)=='[' ){
- int ii = 0;
- char c;
- s.z++;
- if( s.nVertex>=s.nAlloc ){
- GeoCoord *aNew;
- s.nAlloc = s.nAlloc*2 + 16;
- aNew = sqlite3_realloc64(s.a, s.nAlloc*sizeof(GeoCoord)*2 );
- if( aNew==0 ){
- rc = SQLITE_NOMEM;
- s.nErr++;
- break;
- }
- s.a = aNew;
- }
- while( geopolyParseNumber(&s, ii<=1 ? &s.a[s.nVertex*2+ii] : 0) ){
- ii++;
- if( ii==2 ) s.nVertex++;
- c = geopolySkipSpace(&s);
- s.z++;
- if( c==',' ) continue;
- if( c==']' && ii>=2 ) break;
- s.nErr++;
- rc = SQLITE_ERROR;
- goto parse_json_err;
- }
- if( geopolySkipSpace(&s)==',' ){
- s.z++;
- continue;
- }
- break;
- }
- if( geopolySkipSpace(&s)==']'
- && s.nVertex>=4
- && s.a[0]==s.a[s.nVertex*2-2]
- && s.a[1]==s.a[s.nVertex*2-1]
- && (s.z++, geopolySkipSpace(&s)==0)
- ){
- int nByte;
- GeoPoly *pOut;
- int x = 1;
- s.nVertex--; /* Remove the redundant vertex at the end */
- nByte = sizeof(GeoPoly) * s.nVertex*2*sizeof(GeoCoord);
- pOut = sqlite3_malloc64( nByte );
- x = 1;
- if( pOut==0 ) goto parse_json_err;
- pOut->nVertex = s.nVertex;
- memcpy(pOut->a, s.a, s.nVertex*2*sizeof(GeoCoord));
- pOut->hdr[0] = *(unsigned char*)&x;
- pOut->hdr[1] = (s.nVertex>>16)&0xff;
- pOut->hdr[2] = (s.nVertex>>8)&0xff;
- pOut->hdr[3] = s.nVertex&0xff;
- sqlite3_free(s.a);
- if( pRc ) *pRc = SQLITE_OK;
- return pOut;
- }else{
- s.nErr++;
- rc = SQLITE_ERROR;
- }
- }
-parse_json_err:
- if( pRc ) *pRc = rc;
- sqlite3_free(s.a);
- return 0;
-}
-
-/*
-** Given a function parameter, try to interpret it as a polygon, either
-** in the binary format or JSON text. Compute a GeoPoly object and
-** return a pointer to that object. Or if the input is not a well-formed
-** polygon, put an error message in sqlite3_context and return NULL.
-*/
-static GeoPoly *geopolyFuncParam(
- sqlite3_context *pCtx, /* Context for error messages */
- sqlite3_value *pVal, /* The value to decode */
- int *pRc /* Write error here */
-){
- GeoPoly *p = 0;
- int nByte;
- if( sqlite3_value_type(pVal)==SQLITE_BLOB
- && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord))
- ){
- const unsigned char *a = sqlite3_value_blob(pVal);
- int nVertex;
- nVertex = (a[1]<<16) + (a[2]<<8) + a[3];
- if( (a[0]==0 || a[0]==1)
- && (nVertex*2*sizeof(GeoCoord) + 4)==(unsigned int)nByte
- ){
- p = sqlite3_malloc64( sizeof(*p) + (nVertex-1)*2*sizeof(GeoCoord) );
- if( p==0 ){
- if( pRc ) *pRc = SQLITE_NOMEM;
- if( pCtx ) sqlite3_result_error_nomem(pCtx);
- }else{
- int x = 1;
- p->nVertex = nVertex;
- memcpy(p->hdr, a, nByte);
- if( a[0] != *(unsigned char*)&x ){
- int ii;
-          for(ii=0; ii<nVertex*2; ii++){
-            geopolySwab32((unsigned char*)&p->a[ii]);
- }
- p->hdr[0] ^= 1;
- }
- }
- }
- if( pRc ) *pRc = SQLITE_OK;
- return p;
- }else if( sqlite3_value_type(pVal)==SQLITE_TEXT ){
- const unsigned char *zJson = sqlite3_value_text(pVal);
- if( zJson==0 ){
- if( pRc ) *pRc = SQLITE_NOMEM;
- return 0;
- }
- return geopolyParseJson(zJson, pRc);
- }else{
- if( pRc ) *pRc = SQLITE_ERROR;
- return 0;
- }
-}
-
-/*
-** Implementation of the geopoly_blob(X) function.
-**
-** If the input is a well-formed Geopoly BLOB or JSON string
-** then return the BLOB representation of the polygon. Otherwise
-** return NULL.
-*/
-static void geopolyBlobFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
- if( p ){
- sqlite3_result_blob(context, p->hdr,
- 4+8*p->nVertex, SQLITE_TRANSIENT);
- sqlite3_free(p);
- }
-}
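The 4+8*p->nVertex byte count used when returning the BLOB follows directly from the layout described earlier: a 4-byte header plus two coordinates per vertex. A tiny sanity-check sketch, assuming GeoCoord is a 4-byte float as in this module:

    #include <assert.h>

    typedef float GeoCoord;   /* assumption: mirrors the typedef used by this module */

    int main(void){
      /* A quadrilateral blob: 4 header bytes + 4 vertexes * 2 coords * 4 bytes each. */
      int nVertex = 4;
      int nBlob = 4 + nVertex*2*(int)sizeof(GeoCoord);
      assert( nBlob==4 + 8*nVertex );   /* same formula as the result_blob() call above */
      return 0;
    }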
-
-/*
-** SQL function: geopoly_json(X)
-**
-** Interpret X as a polygon and render it as a JSON array
-** of coordinates. Or, if X is not a valid polygon, return NULL.
-*/
-static void geopolyJsonFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
- if( p ){
- sqlite3 *db = sqlite3_context_db_handle(context);
- sqlite3_str *x = sqlite3_str_new(db);
- int i;
- sqlite3_str_append(x, "[", 1);
-    for(i=0; i<p->nVertex; i++){
- sqlite3_str_appendf(x, "[%!g,%!g],", p->a[i*2], p->a[i*2+1]);
- }
- sqlite3_str_appendf(x, "[%!g,%!g]]", p->a[0], p->a[1]);
- sqlite3_result_text(context, sqlite3_str_finish(x), -1, sqlite3_free);
- sqlite3_free(p);
- }
-}
-
-/*
-** SQL function: geopoly_svg(X, ....)
-**
-** Interpret X as a polygon and render it as a SVG <polyline>.
-** Additional arguments are added as attributes to the <polyline>.
-*/
-static void geopolySvgFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
- if( p ){
- sqlite3 *db = sqlite3_context_db_handle(context);
- sqlite3_str *x = sqlite3_str_new(db);
- int i;
- char cSep = '\'';
- sqlite3_str_appendf(x, "a[i*2], p->a[i*2+1]);
- cSep = ' ';
- }
- sqlite3_str_appendf(x, " %g,%g'", p->a[0], p->a[1]);
- for(i=1; i");
- sqlite3_result_text(context, sqlite3_str_finish(x), -1, sqlite3_free);
- sqlite3_free(p);
- }
-}
-
-/*
-** SQL Function: geopoly_xform(poly, A, B, C, D, E, F)
-**
-** Transform and/or translate a polygon as follows:
-**
-** x1 = A*x0 + B*y0 + E
-** y1 = C*x0 + D*y0 + F
-**
-** For a translation:
-**
-** geopoly_xform(poly, 1, 0, 0, 1, x-offset, y-offset)
-**
-** Rotate by R around the point (0,0):
-**
-** geopoly_xform(poly, cos(R), sin(R), -sin(R), cos(R), 0, 0)
-*/
-static void geopolyXformFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
- double A = sqlite3_value_double(argv[1]);
- double B = sqlite3_value_double(argv[2]);
- double C = sqlite3_value_double(argv[3]);
- double D = sqlite3_value_double(argv[4]);
- double E = sqlite3_value_double(argv[5]);
- double F = sqlite3_value_double(argv[6]);
- GeoCoord x1, y1, x0, y0;
- int ii;
- if( p ){
-    for(ii=0; ii<p->nVertex; ii++){
- x0 = p->a[ii*2];
- y0 = p->a[ii*2+1];
- x1 = (GeoCoord)(A*x0 + B*y0 + E);
- y1 = (GeoCoord)(C*x0 + D*y0 + F);
- p->a[ii*2] = x1;
- p->a[ii*2+1] = y1;
- }
- sqlite3_result_blob(context, p->hdr,
- 4+8*p->nVertex, SQLITE_TRANSIENT);
- sqlite3_free(p);
- }
-}
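As a quick check of the transform documented above, the rotation form with A=cos(R), B=sin(R), C=-sin(R), D=cos(R) sends the point (1,0) to approximately (0,-1) when R is 90 degrees. A standalone sketch of the same affine map, outside of SQLite:

    #include <math.h>
    #include <stdio.h>

    int main(void){
      /* Same map as geopoly_xform(): x1 = A*x0 + B*y0 + E, y1 = C*x0 + D*y0 + F */
      double R = acos(-1.0)/2.0;                     /* 90 degrees in radians */
      double A = cos(R), B = sin(R), C = -sin(R), D = cos(R), E = 0.0, F = 0.0;
      double x0 = 1.0, y0 = 0.0;
      printf("(%g, %g)\n", A*x0 + B*y0 + E, C*x0 + D*y0 + F);  /* roughly (0, -1) */
      return 0;
    }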
-
-/*
-** Implementation of the geopoly_area(X) function.
-**
-** If the input is a well-formed Geopoly BLOB then return the area
-** enclosed by the polygon. If the polygon circulates clockwise instead
-** of counterclockwise (as it should) then return the negative of the
-** enclosed area. Otherwise return NULL.
-*/
-static void geopolyAreaFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
- if( p ){
- double rArea = 0.0;
- int ii;
-    for(ii=0; ii<p->nVertex-1; ii++){
- rArea += (p->a[ii*2] - p->a[ii*2+2]) /* (x0 - x1) */
- * (p->a[ii*2+1] + p->a[ii*2+3]) /* (y0 + y1) */
- * 0.5;
- }
- rArea += (p->a[ii*2] - p->a[0]) /* (xN - x0) */
- * (p->a[ii*2+1] + p->a[1]) /* (yN + y0) */
- * 0.5;
- sqlite3_result_double(context, rArea);
- sqlite3_free(p);
- }
-}
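The loop above is the trapezoid form of the shoelace formula; applied to a counterclockwise unit square it yields +1, matching the sign convention in the comment. A standalone sketch over a plain coordinate array (illustrative only, not SQLite code):

    #include <stdio.h>

    /* Same trapezoid summation as geopoly_area(), over an x,y array;
    ** counterclockwise rings give a positive area, clockwise a negative one. */
    static double signed_area(const double *a, int nVertex){
      double r = 0.0;
      int ii;
      for(ii=0; ii<nVertex-1; ii++){
        r += (a[ii*2] - a[ii*2+2])*(a[ii*2+1] + a[ii*2+3])*0.5;
      }
      r += (a[ii*2] - a[0])*(a[ii*2+1] + a[1])*0.5;
      return r;
    }

    int main(void){
      const double ccwSquare[] = { 0,0, 1,0, 1,1, 0,1 };  /* unit square, CCW */
      printf("%g\n", signed_area(ccwSquare, 4));          /* prints 1 */
      return 0;
    }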
-
-/*
-** If pPoly is a polygon, compute its bounding box. Then:
-**
-** (1) if aCoord!=0 store the bounding box in aCoord, returning NULL
-** (2) otherwise, compute a GeoPoly for the bounding box and return the
-** new GeoPoly
-**
-** If pPoly is NULL but aCoord is not NULL, then compute a new GeoPoly from
-** the bounding box in aCoord and return a pointer to that GeoPoly.
-*/
-static GeoPoly *geopolyBBox(
- sqlite3_context *context, /* For recording the error */
- sqlite3_value *pPoly, /* The polygon */
- RtreeCoord *aCoord, /* Results here */
- int *pRc /* Error code here */
-){
- GeoPoly *pOut = 0;
- GeoPoly *p;
- float mnX, mxX, mnY, mxY;
- if( pPoly==0 && aCoord!=0 ){
- p = 0;
- mnX = aCoord[0].f;
- mxX = aCoord[1].f;
- mnY = aCoord[2].f;
- mxY = aCoord[3].f;
- goto geopolyBboxFill;
- }else{
- p = geopolyFuncParam(context, pPoly, pRc);
- }
- if( p ){
- int ii;
- mnX = mxX = p->a[0];
- mnY = mxY = p->a[1];
-    for(ii=1; ii<p->nVertex; ii++){
-      double r = p->a[ii*2];
-      if( r<mnX ) mnX = (float)r;
-      else if( r>mxX ) mxX = (float)r;
-      r = p->a[ii*2+1];
-      if( r<mnY ) mnY = (float)r;
-      else if( r>mxY ) mxY = (float)r;
- }
- if( pRc ) *pRc = SQLITE_OK;
- if( aCoord==0 ){
- geopolyBboxFill:
- pOut = sqlite3_realloc(p, sizeof(GeoPoly)+sizeof(GeoCoord)*6);
- if( pOut==0 ){
- sqlite3_free(p);
- if( context ) sqlite3_result_error_nomem(context);
- if( pRc ) *pRc = SQLITE_NOMEM;
- return 0;
- }
- pOut->nVertex = 4;
- ii = 1;
-    pOut->hdr[0] = *(unsigned char*)&ii;
- pOut->hdr[1] = 0;
- pOut->hdr[2] = 0;
- pOut->hdr[3] = 4;
- pOut->a[0] = mnX;
- pOut->a[1] = mnY;
- pOut->a[2] = mxX;
- pOut->a[3] = mnY;
- pOut->a[4] = mxX;
- pOut->a[5] = mxY;
- pOut->a[6] = mnX;
- pOut->a[7] = mxY;
- }else{
- sqlite3_free(p);
- aCoord[0].f = mnX;
- aCoord[1].f = mxX;
- aCoord[2].f = mnY;
- aCoord[3].f = mxY;
- }
- }
- return pOut;
-}
-
-/*
-** Implementation of the geopoly_bbox(X) SQL function.
-*/
-static void geopolyBBoxFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p = geopolyBBox(context, argv[0], 0, 0);
- if( p ){
- sqlite3_result_blob(context, p->hdr,
- 4+8*p->nVertex, SQLITE_TRANSIENT);
- sqlite3_free(p);
- }
-}
-
-/*
-** State vector for the geopoly_group_bbox() aggregate function.
-*/
-typedef struct GeoBBox GeoBBox;
-struct GeoBBox {
- int isInit;
- RtreeCoord a[4];
-};
-
-
-/*
-** Implementation of the geopoly_group_bbox(X) aggregate SQL function.
-*/
-static void geopolyBBoxStep(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- RtreeCoord a[4];
- int rc = SQLITE_OK;
- (void)geopolyBBox(context, argv[0], a, &rc);
- if( rc==SQLITE_OK ){
- GeoBBox *pBBox;
- pBBox = (GeoBBox*)sqlite3_aggregate_context(context, sizeof(*pBBox));
- if( pBBox==0 ) return;
- if( pBBox->isInit==0 ){
- pBBox->isInit = 1;
- memcpy(pBBox->a, a, sizeof(RtreeCoord)*4);
- }else{
- if( a[0].f < pBBox->a[0].f ) pBBox->a[0] = a[0];
- if( a[1].f > pBBox->a[1].f ) pBBox->a[1] = a[1];
- if( a[2].f < pBBox->a[2].f ) pBBox->a[2] = a[2];
- if( a[3].f > pBBox->a[3].f ) pBBox->a[3] = a[3];
- }
- }
-}
-static void geopolyBBoxFinal(
- sqlite3_context *context
-){
- GeoPoly *p;
- GeoBBox *pBBox;
- pBBox = (GeoBBox*)sqlite3_aggregate_context(context, 0);
- if( pBBox==0 ) return;
- p = geopolyBBox(context, 0, pBBox->a, 0);
- if( p ){
- sqlite3_result_blob(context, p->hdr,
- 4+8*p->nVertex, SQLITE_TRANSIENT);
- sqlite3_free(p);
- }
-}
-
-
-/*
-** Determine if point (x0,y0) is beneath line segment (x1,y1)->(x2,y2).
-** Returns:
-**
-**    +2  x0,y0 is on the line segment
-**
-** +1 x0,y0 is beneath line segment
-**
-** 0 x0,y0 is not on or beneath the line segment or the line segment
-** is vertical and x0,y0 is not on the line segment
-**
-** The left-most coordinate min(x1,x2) is not considered to be part of
-** the line segment for the purposes of this analysis.
-*/
-static int pointBeneathLine(
- double x0, double y0,
- double x1, double y1,
- double x2, double y2
-){
- double y;
- if( x0==x1 && y0==y1 ) return 2;
-  if( x1<x2 ){
-    if( x0<=x1 || x0>x2 ) return 0;
- }else if( x1>x2 ){
- if( x0<=x2 || x0>x1 ) return 0;
- }else{
- /* Vertical line segment */
- if( x0!=x1 ) return 0;
-    if( y0<y1 && y0<y2 ) return 0;
-    if( y0>y1 && y0>y2 ) return 0;
- return 2;
- }
- y = y1 + (y2-y1)*(x0-x1)/(x2-x1);
- if( y0==y ) return 2;
-  if( y0<y ) return 1;
-  return 0;
-}
-
-/*
-** SQL function:    geopoly_contains_point(P,X,Y)
-**
-** Return +2 if point X,Y is within polygon P.
-** Return +1 if point X,Y is on the polygon boundary.
-** Return 0 if point X,Y is outside the polygon
-*/
-static void geopolyContainsPointFunc(
-  sqlite3_context *context,
-  int argc,
-  sqlite3_value **argv
-){
-  GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
-  double x0 = sqlite3_value_double(argv[1]);
-  double y0 = sqlite3_value_double(argv[2]);
-  int v = 0;
-  int cnt = 0;
-  int ii;
-  if( p1==0 ) return;
-  for(ii=0; ii<p1->nVertex-1; ii++){
- v = pointBeneathLine(x0,y0,p1->a[ii*2],p1->a[ii*2+1],
- p1->a[ii*2+2],p1->a[ii*2+3]);
- if( v==2 ) break;
- cnt += v;
- }
- if( v!=2 ){
- v = pointBeneathLine(x0,y0,p1->a[ii*2],p1->a[ii*2+1],
- p1->a[0],p1->a[1]);
- }
- if( v==2 ){
- sqlite3_result_int(context, 1);
- }else if( ((v+cnt)&1)==0 ){
- sqlite3_result_int(context, 0);
- }else{
- sqlite3_result_int(context, 2);
- }
- sqlite3_free(p1);
-}
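The function above classifies a point by counting crossings of a ray and checking parity. A compact even/odd test of the same flavor over a plain x,y array, purely for illustration (boundary points are handled less carefully here than in the code above):

    #include <stdio.h>

    /* Classic crossing-number point-in-polygon test over nVertex x,y pairs. */
    static int point_in_polygon(double x, double y, const double *a, int n){
      int i, j, inside = 0;
      for(i=0, j=n-1; i<n; j=i++){
        double xi = a[i*2], yi = a[i*2+1];
        double xj = a[j*2], yj = a[j*2+1];
        if( ((yi>y) != (yj>y))
         && (x < (xj-xi)*(y-yi)/(yj-yi) + xi) ){
          inside = !inside;          /* each crossing flips the parity */
        }
      }
      return inside;
    }

    int main(void){
      const double square[] = { 0,0, 1,0, 1,1, 0,1 };
      int a = point_in_polygon(0.5, 0.5, square, 4);   /* 1: inside  */
      int b = point_in_polygon(2.0, 0.5, square, 4);   /* 0: outside */
      printf("%d %d\n", a, b);
      return 0;
    }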
-
-/* Forward declaration */
-static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2);
-
-/*
-** SQL function: geopoly_within(P1,P2)
-**
-** Return +2 if P1 and P2 are the same polygon
-** Return +1 if P2 is contained within P1
-** Return 0 if any part of P2 is on the outside of P1
-**
-*/
-static void geopolyWithinFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
- GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
- if( p1 && p2 ){
- int x = geopolyOverlap(p1, p2);
- if( x<0 ){
- sqlite3_result_error_nomem(context);
- }else{
- sqlite3_result_int(context, x==2 ? 1 : x==4 ? 2 : 0);
- }
- }
- sqlite3_free(p1);
- sqlite3_free(p2);
-}
-
-/* Objects used by the overlap algorithm. */
-typedef struct GeoEvent GeoEvent;
-typedef struct GeoSegment GeoSegment;
-typedef struct GeoOverlap GeoOverlap;
-struct GeoEvent {
- double x; /* X coordinate at which event occurs */
- int eType; /* 0 for ADD, 1 for REMOVE */
- GeoSegment *pSeg; /* The segment to be added or removed */
- GeoEvent *pNext; /* Next event in the sorted list */
-};
-struct GeoSegment {
- double C, B; /* y = C*x + B */
- double y; /* Current y value */
- float y0; /* Initial y value */
- unsigned char side; /* 1 for p1, 2 for p2 */
- unsigned int idx; /* Which segment within the side */
- GeoSegment *pNext; /* Next segment in a list sorted by y */
-};
-struct GeoOverlap {
- GeoEvent *aEvent; /* Array of all events */
- GeoSegment *aSegment; /* Array of all segments */
- int nEvent; /* Number of events */
- int nSegment; /* Number of segments */
-};
-
-/*
-** Add a single segment and its associated events.
-*/
-static void geopolyAddOneSegment(
- GeoOverlap *p,
- GeoCoord x0,
- GeoCoord y0,
- GeoCoord x1,
- GeoCoord y1,
- unsigned char side,
- unsigned int idx
-){
- GeoSegment *pSeg;
- GeoEvent *pEvent;
- if( x0==x1 ) return; /* Ignore vertical segments */
- if( x0>x1 ){
- GeoCoord t = x0;
- x0 = x1;
- x1 = t;
- t = y0;
- y0 = y1;
- y1 = t;
- }
- pSeg = p->aSegment + p->nSegment;
- p->nSegment++;
- pSeg->C = (y1-y0)/(x1-x0);
- pSeg->B = y1 - x1*pSeg->C;
- pSeg->y0 = y0;
- pSeg->side = side;
- pSeg->idx = idx;
- pEvent = p->aEvent + p->nEvent;
- p->nEvent++;
- pEvent->x = x0;
- pEvent->eType = 0;
- pEvent->pSeg = pSeg;
- pEvent = p->aEvent + p->nEvent;
- p->nEvent++;
- pEvent->x = x1;
- pEvent->eType = 1;
- pEvent->pSeg = pSeg;
-}
-
-
-
-/*
-** Insert all segments and events for polygon pPoly.
-*/
-static void geopolyAddSegments(
- GeoOverlap *p, /* Add segments to this Overlap object */
- GeoPoly *pPoly, /* Take all segments from this polygon */
- unsigned char side /* The side of pPoly */
-){
- unsigned int i;
- GeoCoord *x;
- for(i=0; i<(unsigned)pPoly->nVertex-1; i++){
- x = pPoly->a + (i*2);
- geopolyAddOneSegment(p, x[0], x[1], x[2], x[3], side, i);
- }
- x = pPoly->a + (i*2);
- geopolyAddOneSegment(p, x[0], x[1], pPoly->a[0], pPoly->a[1], side, i);
-}
-
-/*
-** Merge two lists of sorted events by X coordinate
-*/
-static GeoEvent *geopolyEventMerge(GeoEvent *pLeft, GeoEvent *pRight){
- GeoEvent head, *pLast;
- head.pNext = 0;
- pLast = &head;
- while( pRight && pLeft ){
- if( pRight->x <= pLeft->x ){
- pLast->pNext = pRight;
- pLast = pRight;
- pRight = pRight->pNext;
- }else{
- pLast->pNext = pLeft;
- pLast = pLeft;
- pLeft = pLeft->pNext;
- }
- }
- pLast->pNext = pRight ? pRight : pLeft;
- return head.pNext;
-}
-
-/*
-** Sort an array of nEvent event objects into a list.
-*/
-static GeoEvent *geopolySortEventsByX(GeoEvent *aEvent, int nEvent){
- int mx = 0;
- int i, j;
- GeoEvent *p;
- GeoEvent *a[50];
-  for(i=0; i<nEvent; i++){
-    p = &aEvent[i];
-    p->pNext = 0;
-    for(j=0; j<mx && a[j]; j++){
-      p = geopolyEventMerge(a[j], p);
-      a[j] = 0;
-    }
-    a[j] = p;
-    if( j>=mx ) mx = j+1;
-  }
-  p = 0;
-  for(i=0; i<mx; i++){
-    p = geopolyEventMerge(a[i], p);
-  }
-  return p;
-}
-
-/*
-** Merge two lists of sorted segments by Y, and then by C.
-*/
-static GeoSegment *geopolySegmentMerge(GeoSegment *pLeft, GeoSegment *pRight){
-  GeoSegment head, *pLast;
-  head.pNext = 0;
-  pLast = &head;
-  while( pRight && pLeft ){
-    double r = pRight->y - pLeft->y;
- if( r==0.0 ) r = pRight->C - pLeft->C;
- if( r<0.0 ){
- pLast->pNext = pRight;
- pLast = pRight;
- pRight = pRight->pNext;
- }else{
- pLast->pNext = pLeft;
- pLast = pLeft;
- pLeft = pLeft->pNext;
- }
- }
- pLast->pNext = pRight ? pRight : pLeft;
- return head.pNext;
-}
-
-/*
-** Sort a list of GeoSegments in order of increasing Y and in the event of
-** a tie, increasing C (slope).
-*/
-static GeoSegment *geopolySortSegmentsByYAndC(GeoSegment *pList){
- int mx = 0;
- int i;
- GeoSegment *p;
- GeoSegment *a[50];
- while( pList ){
- p = pList;
- pList = pList->pNext;
- p->pNext = 0;
-    for(i=0; i<mx && a[i]; i++){
-      p = geopolySegmentMerge(a[i], p);
-      a[i] = 0;
-    }
-    a[i] = p;
-    if( i>=mx ) mx = i+1;
- }
- p = 0;
-  for(i=0; i<mx; i++){
-    p = geopolySegmentMerge(a[i], p);
-  }
-  return p;
-}
-
-/*
-** Determine the overlap between two polygons
-*/
-static int geopolyOverlap(GeoPoly *p1, GeoPoly *p2){
-  int nVertex = p1->nVertex + p2->nVertex + 2;
- GeoOverlap *p;
- int nByte;
- GeoEvent *pThisEvent;
- double rX;
- int rc = 0;
- int needSort = 0;
- GeoSegment *pActive = 0;
- GeoSegment *pSeg;
- unsigned char aOverlap[4];
-
- nByte = sizeof(GeoEvent)*nVertex*2
- + sizeof(GeoSegment)*nVertex
- + sizeof(GeoOverlap);
- p = sqlite3_malloc( nByte );
- if( p==0 ) return -1;
- p->aEvent = (GeoEvent*)&p[1];
- p->aSegment = (GeoSegment*)&p->aEvent[nVertex*2];
- p->nEvent = p->nSegment = 0;
- geopolyAddSegments(p, p1, 1);
- geopolyAddSegments(p, p2, 2);
- pThisEvent = geopolySortEventsByX(p->aEvent, p->nEvent);
- rX = pThisEvent->x==0.0 ? -1.0 : 0.0;
- memset(aOverlap, 0, sizeof(aOverlap));
- while( pThisEvent ){
- if( pThisEvent->x!=rX ){
- GeoSegment *pPrev = 0;
- int iMask = 0;
- GEODEBUG(("Distinct X: %g\n", pThisEvent->x));
- rX = pThisEvent->x;
- if( needSort ){
- GEODEBUG(("SORT\n"));
- pActive = geopolySortSegmentsByYAndC(pActive);
- needSort = 0;
- }
- for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
- if( pPrev ){
- if( pPrev->y!=pSeg->y ){
- GEODEBUG(("MASK: %d\n", iMask));
- aOverlap[iMask] = 1;
- }
- }
- iMask ^= pSeg->side;
- pPrev = pSeg;
- }
- pPrev = 0;
- for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
- double y = pSeg->C*rX + pSeg->B;
- GEODEBUG(("Segment %d.%d %g->%g\n", pSeg->side, pSeg->idx, pSeg->y, y));
- pSeg->y = y;
- if( pPrev ){
- if( pPrev->y>pSeg->y && pPrev->side!=pSeg->side ){
- rc = 1;
- GEODEBUG(("Crossing: %d.%d and %d.%d\n",
- pPrev->side, pPrev->idx,
- pSeg->side, pSeg->idx));
- goto geopolyOverlapDone;
- }else if( pPrev->y!=pSeg->y ){
- GEODEBUG(("MASK: %d\n", iMask));
- aOverlap[iMask] = 1;
- }
- }
- iMask ^= pSeg->side;
- pPrev = pSeg;
- }
- }
- GEODEBUG(("%s %d.%d C=%g B=%g\n",
- pThisEvent->eType ? "RM " : "ADD",
- pThisEvent->pSeg->side, pThisEvent->pSeg->idx,
- pThisEvent->pSeg->C,
- pThisEvent->pSeg->B));
- if( pThisEvent->eType==0 ){
- /* Add a segment */
- pSeg = pThisEvent->pSeg;
- pSeg->y = pSeg->y0;
- pSeg->pNext = pActive;
- pActive = pSeg;
- needSort = 1;
- }else{
- /* Remove a segment */
- if( pActive==pThisEvent->pSeg ){
- pActive = pActive->pNext;
- }else{
- for(pSeg=pActive; pSeg; pSeg=pSeg->pNext){
- if( pSeg->pNext==pThisEvent->pSeg ){
- pSeg->pNext = pSeg->pNext->pNext;
- break;
- }
- }
- }
- }
- pThisEvent = pThisEvent->pNext;
- }
- if( aOverlap[3]==0 ){
- rc = 0;
- }else if( aOverlap[1]!=0 && aOverlap[2]==0 ){
- rc = 3;
- }else if( aOverlap[1]==0 && aOverlap[2]!=0 ){
- rc = 2;
- }else if( aOverlap[1]==0 && aOverlap[2]==0 ){
- rc = 4;
- }else{
- rc = 1;
- }
-
-geopolyOverlapDone:
- sqlite3_free(p);
- return rc;
-}
-
-/*
-** SQL function: geopoly_overlap(P1,P2)
-**
-** Determine whether or not P1 and P2 overlap. Return value:
-**
-** 0 The two polygons are disjoint
-** 1 They overlap
-** 2 P1 is completely contained within P2
-** 3 P2 is completely contained within P1
-** 4 P1 and P2 are the same polygon
-** NULL Either P1 or P2 or both are not valid polygons
-*/
-static void geopolyOverlapFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
- GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
- if( p1 && p2 ){
- int x = geopolyOverlap(p1, p2);
- if( x<0 ){
- sqlite3_result_error_nomem(context);
- }else{
- sqlite3_result_int(context, x);
- }
- }
- sqlite3_free(p1);
- sqlite3_free(p2);
-}
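A purely illustrative mapping of the return codes documented in the comment above; the descriptive strings are ours, not SQLite's:

    #include <stdio.h>

    /* Translate a geopoly_overlap() result code into a human-readable label. */
    static const char *overlap_describe(int x){
      switch( x ){
        case 0:  return "disjoint";
        case 1:  return "overlapping";
        case 2:  return "P1 contained within P2";
        case 3:  return "P2 contained within P1";
        case 4:  return "same polygon";
        default: return "not valid polygons";
      }
    }

    int main(void){
      printf("%s\n", overlap_describe(4));   /* prints: same polygon */
      return 0;
    }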
-
-/*
-** Enable or disable debugging output
-*/
-static void geopolyDebugFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
-#ifdef GEOPOLY_ENABLE_DEBUG
- geo_debug = sqlite3_value_int(argv[0]);
-#endif
-}
-
-/*
-** This function is the implementation of both the xConnect and xCreate
-** methods of the geopoly virtual table.
-**
-** argv[0] -> module name
-** argv[1] -> database name
-** argv[2] -> table name
-** argv[...] -> column names...
-*/
-static int geopolyInit(
- sqlite3 *db, /* Database connection */
- void *pAux, /* One of the RTREE_COORD_* constants */
- int argc, const char *const*argv, /* Parameters to CREATE TABLE statement */
- sqlite3_vtab **ppVtab, /* OUT: New virtual table */
- char **pzErr, /* OUT: Error message, if any */
- int isCreate /* True for xCreate, false for xConnect */
-){
- int rc = SQLITE_OK;
- Rtree *pRtree;
- int nDb; /* Length of string argv[1] */
- int nName; /* Length of string argv[2] */
- sqlite3_str *pSql;
- char *zSql;
- int ii;
-
- sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
-
- /* Allocate the sqlite3_vtab structure */
- nDb = (int)strlen(argv[1]);
- nName = (int)strlen(argv[2]);
- pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2);
- if( !pRtree ){
- return SQLITE_NOMEM;
- }
- memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
- pRtree->nBusy = 1;
- pRtree->base.pModule = &rtreeModule;
- pRtree->zDb = (char *)&pRtree[1];
- pRtree->zName = &pRtree->zDb[nDb+1];
- pRtree->eCoordType = RTREE_COORD_REAL32;
- pRtree->nDim = 2;
- pRtree->nDim2 = 4;
- memcpy(pRtree->zDb, argv[1], nDb);
- memcpy(pRtree->zName, argv[2], nName);
-
-
- /* Create/Connect to the underlying relational database schema. If
- ** that is successful, call sqlite3_declare_vtab() to configure
- ** the r-tree table schema.
- */
- pSql = sqlite3_str_new(db);
- sqlite3_str_appendf(pSql, "CREATE TABLE x(_shape");
- pRtree->nAux = 1; /* Add one for _shape */
- pRtree->nAuxNotNull = 1; /* The _shape column is always not-null */
-  for(ii=3; ii<argc; ii++){
-    pRtree->nAux++;
- sqlite3_str_appendf(pSql, ",%s", argv[ii]);
- }
- sqlite3_str_appendf(pSql, ");");
- zSql = sqlite3_str_finish(pSql);
- if( !zSql ){
- rc = SQLITE_NOMEM;
- }else if( SQLITE_OK!=(rc = sqlite3_declare_vtab(db, zSql)) ){
- *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
- }
- sqlite3_free(zSql);
- if( rc ) goto geopolyInit_fail;
- pRtree->nBytesPerCell = 8 + pRtree->nDim2*4;
-
- /* Figure out the node size to use. */
- rc = getNodeSize(db, pRtree, isCreate, pzErr);
- if( rc ) goto geopolyInit_fail;
- rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate);
- if( rc ){
- *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
- goto geopolyInit_fail;
- }
-
- *ppVtab = (sqlite3_vtab *)pRtree;
- return SQLITE_OK;
-
-geopolyInit_fail:
- if( rc==SQLITE_OK ) rc = SQLITE_ERROR;
- assert( *ppVtab==0 );
- assert( pRtree->nBusy==1 );
- rtreeRelease(pRtree);
- return rc;
-}
-
-
-/*
-** GEOPOLY virtual table module xCreate method.
-*/
-static int geopolyCreate(
- sqlite3 *db,
- void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVtab,
- char **pzErr
-){
- return geopolyInit(db, pAux, argc, argv, ppVtab, pzErr, 1);
-}
-
-/*
-** GEOPOLY virtual table module xConnect method.
-*/
-static int geopolyConnect(
- sqlite3 *db,
- void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVtab,
- char **pzErr
-){
- return geopolyInit(db, pAux, argc, argv, ppVtab, pzErr, 0);
-}
-
-
-/*
-** GEOPOLY virtual table module xFilter method.
-**
-** Query plans:
-**
-** 1 rowid lookup
-** 2 search for objects overlapping the same bounding box
-** that contains polygon argv[0]
-** 3 search for objects overlapping the same bounding box
-** that contains polygon argv[0]
-** 4 full table scan
-*/
-static int geopolyFilter(
- sqlite3_vtab_cursor *pVtabCursor, /* The cursor to initialize */
- int idxNum, /* Query plan */
- const char *idxStr, /* Not Used */
- int argc, sqlite3_value **argv /* Parameters to the query plan */
-){
- Rtree *pRtree = (Rtree *)pVtabCursor->pVtab;
- RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
- RtreeNode *pRoot = 0;
- int rc = SQLITE_OK;
- int iCell = 0;
- sqlite3_stmt *pStmt;
-
- rtreeReference(pRtree);
-
- /* Reset the cursor to the same state as rtreeOpen() leaves it in. */
- freeCursorConstraints(pCsr);
- sqlite3_free(pCsr->aPoint);
- pStmt = pCsr->pReadAux;
- memset(pCsr, 0, sizeof(RtreeCursor));
- pCsr->base.pVtab = (sqlite3_vtab*)pRtree;
- pCsr->pReadAux = pStmt;
-
- pCsr->iStrategy = idxNum;
- if( idxNum==1 ){
- /* Special case - lookup by rowid. */
- RtreeNode *pLeaf; /* Leaf on which the required cell resides */
- RtreeSearchPoint *p; /* Search point for the leaf */
- i64 iRowid = sqlite3_value_int64(argv[0]);
- i64 iNode = 0;
- rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
- if( rc==SQLITE_OK && pLeaf!=0 ){
- p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0);
- assert( p!=0 ); /* Always returns pCsr->sPoint */
- pCsr->aNode[0] = pLeaf;
- p->id = iNode;
- p->eWithin = PARTLY_WITHIN;
- rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
- p->iCell = (u8)iCell;
- RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
- }else{
- pCsr->atEOF = 1;
- }
- }else{
- /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array
- ** with the configured constraints.
- */
- rc = nodeAcquire(pRtree, 1, 0, &pRoot);
- if( rc==SQLITE_OK && idxNum<=3 ){
- RtreeCoord bbox[4];
- RtreeConstraint *p;
- assert( argc==1 );
- geopolyBBox(0, argv[0], bbox, &rc);
- if( rc ){
- goto geopoly_filter_end;
- }
- pCsr->aConstraint = p = sqlite3_malloc(sizeof(RtreeConstraint)*4);
- pCsr->nConstraint = 4;
- if( p==0 ){
- rc = SQLITE_NOMEM;
- }else{
- memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*4);
- memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1));
- if( idxNum==2 ){
- /* Overlap query */
- p->op = 'B';
- p->iCoord = 0;
- p->u.rValue = bbox[1].f;
- p++;
- p->op = 'D';
- p->iCoord = 1;
- p->u.rValue = bbox[0].f;
- p++;
- p->op = 'B';
- p->iCoord = 2;
- p->u.rValue = bbox[3].f;
- p++;
- p->op = 'D';
- p->iCoord = 3;
- p->u.rValue = bbox[2].f;
- }else{
- /* Within query */
- p->op = 'D';
- p->iCoord = 0;
- p->u.rValue = bbox[0].f;
- p++;
- p->op = 'B';
- p->iCoord = 1;
- p->u.rValue = bbox[1].f;
- p++;
- p->op = 'D';
- p->iCoord = 2;
- p->u.rValue = bbox[2].f;
- p++;
- p->op = 'B';
- p->iCoord = 3;
- p->u.rValue = bbox[3].f;
- }
- }
- }
- if( rc==SQLITE_OK ){
- RtreeSearchPoint *pNew;
- pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1));
- if( pNew==0 ){
- rc = SQLITE_NOMEM;
- goto geopoly_filter_end;
- }
- pNew->id = 1;
- pNew->iCell = 0;
- pNew->eWithin = PARTLY_WITHIN;
- assert( pCsr->bPoint==1 );
- pCsr->aNode[0] = pRoot;
- pRoot = 0;
- RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:");
- rc = rtreeStepToLeaf(pCsr);
- }
- }
-
-geopoly_filter_end:
- nodeRelease(pRtree, pRoot);
- rtreeRelease(pRtree);
- return rc;
-}
-
-/*
-** Rtree virtual table module xBestIndex method. There are three
-** table scan strategies to choose from (in order from most to
-** least desirable):
-**
-** idxNum idxStr Strategy
-** ------------------------------------------------
-** 1 "rowid" Direct lookup by rowid.
-** 2 "rtree" R-tree overlap query using geopoly_overlap()
-** 3 "rtree" R-tree within query using geopoly_within()
-** 4 "fullscan" full-table scan.
-** ------------------------------------------------
-*/
-static int geopolyBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
- int ii;
- int iRowidTerm = -1;
- int iFuncTerm = -1;
- int idxNum = 0;
-
-  for(ii=0; ii<pIdxInfo->nConstraint; ii++){
- struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
- if( !p->usable ) continue;
- if( p->iColumn<0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){
- iRowidTerm = ii;
- break;
- }
- if( p->iColumn==0 && p->op>=SQLITE_INDEX_CONSTRAINT_FUNCTION ){
- /* p->op==SQLITE_INDEX_CONSTRAINT_FUNCTION for geopoly_overlap()
-      ** p->op==(SQLITE_INDEX_CONSTRAINT_FUNCTION+1) for geopoly_within().
- ** See geopolyFindFunction() */
- iFuncTerm = ii;
- idxNum = p->op - SQLITE_INDEX_CONSTRAINT_FUNCTION + 2;
- }
- }
-
- if( iRowidTerm>=0 ){
- pIdxInfo->idxNum = 1;
- pIdxInfo->idxStr = "rowid";
- pIdxInfo->aConstraintUsage[iRowidTerm].argvIndex = 1;
- pIdxInfo->aConstraintUsage[iRowidTerm].omit = 1;
- pIdxInfo->estimatedCost = 30.0;
- pIdxInfo->estimatedRows = 1;
- pIdxInfo->idxFlags = SQLITE_INDEX_SCAN_UNIQUE;
- return SQLITE_OK;
- }
- if( iFuncTerm>=0 ){
- pIdxInfo->idxNum = idxNum;
- pIdxInfo->idxStr = "rtree";
- pIdxInfo->aConstraintUsage[iFuncTerm].argvIndex = 1;
- pIdxInfo->aConstraintUsage[iFuncTerm].omit = 0;
- pIdxInfo->estimatedCost = 300.0;
- pIdxInfo->estimatedRows = 10;
- return SQLITE_OK;
- }
- pIdxInfo->idxNum = 4;
- pIdxInfo->idxStr = "fullscan";
- pIdxInfo->estimatedCost = 3000000.0;
- pIdxInfo->estimatedRows = 100000;
- return SQLITE_OK;
-}
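A query shaped like the sketch below is what the two "rtree" strategies are aimed at: a geopoly_overlap() (or geopoly_within()) constraint on the _shape column is what lets xBestIndex choose idxNum 2 or 3 instead of the full-table scan. The table name and parameter here are illustrative only:

    #include <stdio.h>

    /* An example query against a hypothetical geopoly virtual table. */
    static const char *zExampleQuery =
      "SELECT rowid FROM example_geo WHERE geopoly_overlap(_shape, :poly)";

    int main(void){
      printf("%s\n", zExampleQuery);
      return 0;
    }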
-
-
-/*
-** GEOPOLY virtual table module xColumn method.
-*/
-static int geopolyColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
- Rtree *pRtree = (Rtree *)cur->pVtab;
- RtreeCursor *pCsr = (RtreeCursor *)cur;
- RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
- int rc = SQLITE_OK;
- RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
-
- if( rc ) return rc;
- if( p==0 ) return SQLITE_OK;
- if( i==0 && sqlite3_vtab_nochange(ctx) ) return SQLITE_OK;
- if( i<=pRtree->nAux ){
- if( !pCsr->bAuxValid ){
- if( pCsr->pReadAux==0 ){
- rc = sqlite3_prepare_v3(pRtree->db, pRtree->zReadAuxSql, -1, 0,
- &pCsr->pReadAux, 0);
- if( rc ) return rc;
- }
- sqlite3_bind_int64(pCsr->pReadAux, 1,
- nodeGetRowid(pRtree, pNode, p->iCell));
- rc = sqlite3_step(pCsr->pReadAux);
- if( rc==SQLITE_ROW ){
- pCsr->bAuxValid = 1;
- }else{
- sqlite3_reset(pCsr->pReadAux);
- if( rc==SQLITE_DONE ) rc = SQLITE_OK;
- return rc;
- }
- }
- sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pReadAux, i+2));
- }
- return SQLITE_OK;
-}
-
-
-/*
-** The xUpdate method for GEOPOLY module virtual tables.
-**
-** For DELETE:
-**
-** argv[0] = the rowid to be deleted
-**
-** For INSERT:
-**
-** argv[0] = SQL NULL
-** argv[1] = rowid to insert, or an SQL NULL to select automatically
-** argv[2] = _shape column
-** argv[3] = first application-defined column....
-**
-** For UPDATE:
-**
-** argv[0] = rowid to modify. Never NULL
-** argv[1] = rowid after the change. Never NULL
-** argv[2] = new value for _shape
-** argv[3] = new value for first application-defined column....
-*/
-static int geopolyUpdate(
- sqlite3_vtab *pVtab,
- int nData,
- sqlite3_value **aData,
- sqlite_int64 *pRowid
-){
- Rtree *pRtree = (Rtree *)pVtab;
- int rc = SQLITE_OK;
- RtreeCell cell; /* New cell to insert if nData>1 */
- i64 oldRowid; /* The old rowid */
- int oldRowidValid; /* True if oldRowid is valid */
- i64 newRowid; /* The new rowid */
- int newRowidValid; /* True if newRowid is valid */
- int coordChange = 0; /* Change in coordinates */
-
- if( pRtree->nNodeRef ){
- /* Unable to write to the btree while another cursor is reading from it,
- ** since the write might do a rebalance which would disrupt the read
- ** cursor. */
- return SQLITE_LOCKED_VTAB;
- }
- rtreeReference(pRtree);
- assert(nData>=1);
-
- oldRowidValid = sqlite3_value_type(aData[0])!=SQLITE_NULL;;
- oldRowid = oldRowidValid ? sqlite3_value_int64(aData[0]) : 0;
- newRowidValid = nData>1 && sqlite3_value_type(aData[1])!=SQLITE_NULL;
- newRowid = newRowidValid ? sqlite3_value_int64(aData[1]) : 0;
- cell.iRowid = newRowid;
-
- if( nData>1 /* not a DELETE */
- && (!oldRowidValid /* INSERT */
- || !sqlite3_value_nochange(aData[2]) /* UPDATE _shape */
- || oldRowid!=newRowid) /* Rowid change */
- ){
- geopolyBBox(0, aData[2], cell.aCoord, &rc);
- if( rc ){
- if( rc==SQLITE_ERROR ){
- pVtab->zErrMsg =
- sqlite3_mprintf("_shape does not contain a valid polygon");
- }
- goto geopoly_update_end;
- }
- coordChange = 1;
-
- /* If a rowid value was supplied, check if it is already present in
- ** the table. If so, the constraint has failed. */
- if( newRowidValid && (!oldRowidValid || oldRowid!=newRowid) ){
- int steprc;
- sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid);
- steprc = sqlite3_step(pRtree->pReadRowid);
- rc = sqlite3_reset(pRtree->pReadRowid);
- if( SQLITE_ROW==steprc ){
- if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){
- rc = rtreeDeleteRowid(pRtree, cell.iRowid);
- }else{
- rc = rtreeConstraintError(pRtree, 0);
- }
- }
- }
- }
-
- /* If aData[0] is not an SQL NULL value, it is the rowid of a
- ** record to delete from the r-tree table. The following block does
- ** just that.
- */
- if( rc==SQLITE_OK && (nData==1 || (coordChange && oldRowidValid)) ){
- rc = rtreeDeleteRowid(pRtree, oldRowid);
- }
-
- /* If the aData[] array contains more than one element, elements
- ** (aData[2]..aData[argc-1]) contain a new record to insert into
- ** the r-tree structure.
- */
- if( rc==SQLITE_OK && nData>1 && coordChange ){
- /* Insert the new record into the r-tree */
- RtreeNode *pLeaf = 0;
- if( !newRowidValid ){
- rc = rtreeNewRowid(pRtree, &cell.iRowid);
- }
- *pRowid = cell.iRowid;
- if( rc==SQLITE_OK ){
- rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);
- }
- if( rc==SQLITE_OK ){
- int rc2;
- pRtree->iReinsertHeight = -1;
- rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0);
- rc2 = nodeRelease(pRtree, pLeaf);
- if( rc==SQLITE_OK ){
- rc = rc2;
- }
- }
- }
-
- /* Change the data */
- if( rc==SQLITE_OK && nData>1 ){
- sqlite3_stmt *pUp = pRtree->pWriteAux;
- int jj;
- int nChange = 0;
- sqlite3_bind_int64(pUp, 1, cell.iRowid);
- assert( pRtree->nAux>=1 );
- if( sqlite3_value_nochange(aData[2]) ){
- sqlite3_bind_null(pUp, 2);
- }else{
- sqlite3_bind_value(pUp, 2, aData[2]);
- nChange = 1;
- }
-    for(jj=1; jj<pRtree->nAux; jj++){
- nChange++;
- sqlite3_bind_value(pUp, jj+2, aData[jj+2]);
- }
- if( nChange ){
- sqlite3_step(pUp);
- rc = sqlite3_reset(pUp);
- }
- }
-
-geopoly_update_end:
- rtreeRelease(pRtree);
- return rc;
-}
-
-/*
-** Report that geopoly_overlap() is an overloaded function suitable
-** for use in xBestIndex.
-*/
-static int geopolyFindFunction(
- sqlite3_vtab *pVtab,
- int nArg,
- const char *zName,
- void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
- void **ppArg
-){
- if( sqlite3_stricmp(zName, "geopoly_overlap")==0 ){
- *pxFunc = geopolyOverlapFunc;
- *ppArg = 0;
- return SQLITE_INDEX_CONSTRAINT_FUNCTION;
- }
- if( sqlite3_stricmp(zName, "geopoly_within")==0 ){
- *pxFunc = geopolyWithinFunc;
- *ppArg = 0;
- return SQLITE_INDEX_CONSTRAINT_FUNCTION+1;
- }
- return 0;
-}
-
-
-static sqlite3_module geopolyModule = {
- 2, /* iVersion */
- geopolyCreate, /* xCreate - create a table */
- geopolyConnect, /* xConnect - connect to an existing table */
- geopolyBestIndex, /* xBestIndex - Determine search strategy */
- rtreeDisconnect, /* xDisconnect - Disconnect from a table */
- rtreeDestroy, /* xDestroy - Drop a table */
- rtreeOpen, /* xOpen - open a cursor */
- rtreeClose, /* xClose - close a cursor */
- geopolyFilter, /* xFilter - configure scan constraints */
- rtreeNext, /* xNext - advance a cursor */
- rtreeEof, /* xEof */
- geopolyColumn, /* xColumn - read data */
- rtreeRowid, /* xRowid - read data */
- geopolyUpdate, /* xUpdate - write data */
- rtreeBeginTransaction, /* xBegin - begin transaction */
- rtreeEndTransaction, /* xSync - sync transaction */
- rtreeEndTransaction, /* xCommit - commit transaction */
- rtreeEndTransaction, /* xRollback - rollback transaction */
- geopolyFindFunction, /* xFindFunction - function overloading */
- rtreeRename, /* xRename - rename the table */
- rtreeSavepoint, /* xSavepoint */
- 0, /* xRelease */
- 0, /* xRollbackTo */
-};
-
-static int sqlite3_geopoly_init(sqlite3 *db){
- int rc = SQLITE_OK;
- static const struct {
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
- int nArg;
- const char *zName;
- } aFunc[] = {
- { geopolyAreaFunc, 1, "geopoly_area" },
- { geopolyBlobFunc, 1, "geopoly_blob" },
- { geopolyJsonFunc, 1, "geopoly_json" },
- { geopolySvgFunc, -1, "geopoly_svg" },
- { geopolyWithinFunc, 2, "geopoly_within" },
- { geopolyContainsPointFunc, 3, "geopoly_contains_point" },
- { geopolyOverlapFunc, 2, "geopoly_overlap" },
- { geopolyDebugFunc, 1, "geopoly_debug" },
- { geopolyBBoxFunc, 1, "geopoly_bbox" },
- { geopolyXformFunc, 7, "geopoly_xform" },
- };
- static const struct {
- void (*xStep)(sqlite3_context*,int,sqlite3_value**);
- void (*xFinal)(sqlite3_context*);
- const char *zName;
- } aAgg[] = {
- { geopolyBBoxStep, geopolyBBoxFinal, "geopoly_group_bbox" },
- };
- int i;
 for(i=0; i<db->nDb; i++){
Btree *pBt = db->aDb[i].pBt;
- if( pBt ) sqlite3BtreeBeginTrans(pBt, 1, 0);
+ if( pBt ) sqlite3BtreeBeginTrans(pBt, 1);
}
return SQLITE_OK;
}
@@ -196985,6 +187734,2436 @@ SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p){
#endif /* SQLITE_ENABLE_SESSION && SQLITE_ENABLE_PREUPDATE_HOOK */
/************** End of sqlite3session.c **************************************/
+/************** Begin file json1.c *******************************************/
+/*
+** 2015-08-12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This SQLite extension implements JSON functions. The interface is
+** modeled after MySQL JSON functions:
+**
+** https://dev.mysql.com/doc/refman/5.7/en/json.html
+**
+** For the time being, all JSON is stored as pure text. (We might add
+** a JSONB type in the future which stores a binary encoding of JSON in
+** a BLOB, but there is no support for JSONB in the current implementation.
+** This implementation parses JSON text at 250 MB/s, so it is hard to see
+** how JSONB might improve on that.)
+*/
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1)
+#if !defined(SQLITEINT_H)
+/* #include "sqlite3ext.h" */
+#endif
+SQLITE_EXTENSION_INIT1
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include <stdlib.h> */
+/* #include <stdarg.h> */
+
+/* Mark a function parameter as unused, to suppress nuisance compiler
+** warnings. */
+#ifndef UNUSED_PARAM
+# define UNUSED_PARAM(X) (void)(X)
+#endif
+
+#ifndef LARGEST_INT64
+# define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32))
+# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64)
+#endif
+
+/*
+** Versions of isspace(), isalnum() and isdigit() to which it is safe
+** to pass signed char values.
+*/
+#ifdef sqlite3Isdigit
+ /* Use the SQLite core versions if this routine is part of the
+ ** SQLite amalgamation */
+# define safe_isdigit(x) sqlite3Isdigit(x)
+# define safe_isalnum(x) sqlite3Isalnum(x)
+# define safe_isxdigit(x) sqlite3Isxdigit(x)
+#else
+ /* Use the standard library for separate compilation */
+#include <ctype.h>  /* amalgamator: keep */
+# define safe_isdigit(x) isdigit((unsigned char)(x))
+# define safe_isalnum(x) isalnum((unsigned char)(x))
+# define safe_isxdigit(x) isxdigit((unsigned char)(x))
+#endif
+
+/*
+** Growing our own isspace() routine this way is twice as fast as
+** the library isspace() function, resulting in a 7% overall performance
+** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os).
+*/
+static const char jsonIsSpace[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+#define safe_isspace(x) (jsonIsSpace[(unsigned char)x])
+
+#ifndef SQLITE_AMALGAMATION
+ /* Unsigned integer types. These are already defined in the sqliteInt.h,
+ ** but the definitions need to be repeated for separate compilation. */
+ typedef sqlite3_uint64 u64;
+ typedef unsigned int u32;
+ typedef unsigned short int u16;
+ typedef unsigned char u8;
+#endif
+
+/* Objects */
+typedef struct JsonString JsonString;
+typedef struct JsonNode JsonNode;
+typedef struct JsonParse JsonParse;
+
+/* An instance of this object represents a JSON string
+** under construction. Really, this is a generic string accumulator
+** that can be and is used to create strings other than JSON.
+*/
+struct JsonString {
+ sqlite3_context *pCtx; /* Function context - put error messages here */
+ char *zBuf; /* Append JSON content here */
+ u64 nAlloc; /* Bytes of storage available in zBuf[] */
+ u64 nUsed; /* Bytes of zBuf[] currently used */
+ u8 bStatic; /* True if zBuf is static space */
+ u8 bErr; /* True if an error has been encountered */
+ char zSpace[100]; /* Initial static space */
+};
+
+/* JSON type values
+*/
+#define JSON_NULL 0
+#define JSON_TRUE 1
+#define JSON_FALSE 2
+#define JSON_INT 3
+#define JSON_REAL 4
+#define JSON_STRING 5
+#define JSON_ARRAY 6
+#define JSON_OBJECT 7
+
+/* The "subtype" set for JSON values */
+#define JSON_SUBTYPE 74 /* Ascii for "J" */
+
+/*
+** Names of the various JSON types:
+*/
+static const char * const jsonType[] = {
+ "null", "true", "false", "integer", "real", "text", "array", "object"
+};
+
+/* Bit values for the JsonNode.jnFlag field
+*/
+#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */
+#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */
+#define JNODE_REMOVE 0x04 /* Do not output */
+#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */
+#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */
+#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */
+#define JNODE_LABEL 0x40 /* Is a label of an object */
+
+
+/* A single node of parsed JSON
+*/
+struct JsonNode {
+ u8 eType; /* One of the JSON_ type values */
+ u8 jnFlags; /* JNODE flags */
+ u32 n; /* Bytes of content, or number of sub-nodes */
+ union {
+ const char *zJContent; /* Content for INT, REAL, and STRING */
+ u32 iAppend; /* More terms for ARRAY and OBJECT */
+ u32 iKey; /* Key for ARRAY objects in json_tree() */
+ u32 iReplace; /* Replacement content for JNODE_REPLACE */
+ JsonNode *pPatch; /* Node chain of patch for JNODE_PATCH */
+ } u;
+};
+
+/* A completely parsed JSON string
+*/
+struct JsonParse {
+ u32 nNode; /* Number of slots of aNode[] used */
+ u32 nAlloc; /* Number of slots of aNode[] allocated */
+ JsonNode *aNode; /* Array of nodes containing the parse */
+ const char *zJson; /* Original JSON string */
+ u32 *aUp; /* Index of parent of each node */
+ u8 oom; /* Set to true if out of memory */
+ u8 nErr; /* Number of errors seen */
+ u16 iDepth; /* Nesting depth */
+ int nJson; /* Length of the zJson string in bytes */
+};
+
+/*
+** Maximum nesting depth of JSON for this implementation.
+**
+** This limit is needed to avoid a stack overflow in the recursive
+** descent parser. A depth of 2000 is far deeper than any sane JSON
+** should go.
+*/
+#define JSON_MAX_DEPTH 2000
+
+/**************************************************************************
+** Utility routines for dealing with JsonString objects
+**************************************************************************/
+
+/* Set the JsonString object to an empty string
+*/
+static void jsonZero(JsonString *p){
+ p->zBuf = p->zSpace;
+ p->nAlloc = sizeof(p->zSpace);
+ p->nUsed = 0;
+ p->bStatic = 1;
+}
+
+/* Initialize the JsonString object
+*/
+static void jsonInit(JsonString *p, sqlite3_context *pCtx){
+ p->pCtx = pCtx;
+ p->bErr = 0;
+ jsonZero(p);
+}
+
+
+/* Free all allocated memory and reset the JsonString object back to its
+** initial state.
+*/
+static void jsonReset(JsonString *p){
+ if( !p->bStatic ) sqlite3_free(p->zBuf);
+ jsonZero(p);
+}
+
+
+/* Report an out-of-memory (OOM) condition
+*/
+static void jsonOom(JsonString *p){
+ p->bErr = 1;
+ sqlite3_result_error_nomem(p->pCtx);
+ jsonReset(p);
+}
+
+/* Enlarge pJson->zBuf so that it can hold at least N more bytes.
+** Return zero on success. Return non-zero on an OOM error
+*/
+static int jsonGrow(JsonString *p, u32 N){
+  u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;
+ char *zNew;
+ if( p->bStatic ){
+ if( p->bErr ) return 1;
+ zNew = sqlite3_malloc64(nTotal);
+ if( zNew==0 ){
+ jsonOom(p);
+ return SQLITE_NOMEM;
+ }
+ memcpy(zNew, p->zBuf, (size_t)p->nUsed);
+ p->zBuf = zNew;
+ p->bStatic = 0;
+ }else{
+ zNew = sqlite3_realloc64(p->zBuf, nTotal);
+ if( zNew==0 ){
+ jsonOom(p);
+ return SQLITE_NOMEM;
+ }
+ p->zBuf = zNew;
+ }
+ p->nAlloc = nTotal;
+ return SQLITE_OK;
+}
+
+/* Append N bytes from zIn onto the end of the JsonString string.
+*/
+static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){
+ if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return;
+ memcpy(p->zBuf+p->nUsed, zIn, N);
+ p->nUsed += N;
+}
+
+/* Append formatted text (not to exceed N bytes) to the JsonString.
+*/
+static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){
+ va_list ap;
+ if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return;
+ va_start(ap, zFormat);
+ sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap);
+ va_end(ap);
+ p->nUsed += (int)strlen(p->zBuf+p->nUsed);
+}
+
+/* Append a single character
+*/
+static void jsonAppendChar(JsonString *p, char c){
+ if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return;
+ p->zBuf[p->nUsed++] = c;
+}
+
+/* Append a comma separator to the output buffer, if the previous
+** character is not '[' or '{'.
+*/
+static void jsonAppendSeparator(JsonString *p){
+ char c;
+ if( p->nUsed==0 ) return;
+ c = p->zBuf[p->nUsed-1];
+ if( c!='[' && c!='{' ) jsonAppendChar(p, ',');
+}
+
+/* Append the N-byte string in zIn to the end of the JsonString string
+** under construction. Enclose the string in "..." and escape
+** any double-quotes or backslash characters contained within the
+** string.
+*/
+static void jsonAppendString(JsonString *p, const char *zIn, u32 N){
+ u32 i;
+ if( (N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0 ) return;
+ p->zBuf[p->nUsed++] = '"';
+  for(i=0; i<N; i++){
+    unsigned char c = ((unsigned const char*)zIn)[i];
+    if( c=='"' || c=='\\' ){
+      json_simple_escape:
+      if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return;
+ p->zBuf[p->nUsed++] = '\\';
+ }else if( c<=0x1f ){
+ static const char aSpecial[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ assert( sizeof(aSpecial)==32 );
+ assert( aSpecial['\b']=='b' );
+ assert( aSpecial['\f']=='f' );
+ assert( aSpecial['\n']=='n' );
+ assert( aSpecial['\r']=='r' );
+ assert( aSpecial['\t']=='t' );
+ if( aSpecial[c] ){
+ c = aSpecial[c];
+ goto json_simple_escape;
+ }
+ if( (p->nUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return;
+ p->zBuf[p->nUsed++] = '\\';
+ p->zBuf[p->nUsed++] = 'u';
+ p->zBuf[p->nUsed++] = '0';
+ p->zBuf[p->nUsed++] = '0';
+ p->zBuf[p->nUsed++] = '0' + (c>>4);
+ c = "0123456789abcdef"[c&0xf];
+ }
+ p->zBuf[p->nUsed++] = c;
+ }
+ p->zBuf[p->nUsed++] = '"';
+  assert( p->nUsed<p->nAlloc );
+}
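Applying the escaping rules above by hand to one input: double-quotes and backslashes get a simple backslash escape, the common control characters use their short forms, and any other byte below 0x20 becomes a \u00XX sequence. A tiny sketch that just prints the expected rendering of the text say "hi" followed by a tab and a 0x01 byte:

    #include <stdio.h>

    int main(void){
      /* Input bytes:  say "hi"<TAB><0x01>    */
      /* JSON output:  "say \"hi\"\t\u0001"   */
      printf("%s\n", "\"say \\\"hi\\\"\\t\\u0001\"");
      return 0;
    }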
+
+/*
+** Append a function parameter value to the JSON string under
+** construction.
+*/
+static void jsonAppendValue(
+ JsonString *p, /* Append to this JSON string */
+ sqlite3_value *pValue /* Value to append */
+){
+ switch( sqlite3_value_type(pValue) ){
+ case SQLITE_NULL: {
+ jsonAppendRaw(p, "null", 4);
+ break;
+ }
+ case SQLITE_INTEGER:
+ case SQLITE_FLOAT: {
+ const char *z = (const char*)sqlite3_value_text(pValue);
+ u32 n = (u32)sqlite3_value_bytes(pValue);
+ jsonAppendRaw(p, z, n);
+ break;
+ }
+ case SQLITE_TEXT: {
+ const char *z = (const char*)sqlite3_value_text(pValue);
+ u32 n = (u32)sqlite3_value_bytes(pValue);
+ if( sqlite3_value_subtype(pValue)==JSON_SUBTYPE ){
+ jsonAppendRaw(p, z, n);
+ }else{
+ jsonAppendString(p, z, n);
+ }
+ break;
+ }
+ default: {
+ if( p->bErr==0 ){
+ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1);
+ p->bErr = 2;
+ jsonReset(p);
+ }
+ break;
+ }
+ }
+}
+
+
+/* Make the JSON in p the result of the SQL function.
+*/
+static void jsonResult(JsonString *p){
+ if( p->bErr==0 ){
+ sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed,
+ p->bStatic ? SQLITE_TRANSIENT : sqlite3_free,
+ SQLITE_UTF8);
+ jsonZero(p);
+ }
+ assert( p->bStatic );
+}
+
+/**************************************************************************
+** Utility routines for dealing with JsonNode and JsonParse objects
+**************************************************************************/
+
+/*
+** Return the number of consecutive JsonNode slots needed to represent
+** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and
+** OBJECT types, the number might be larger.
+**
+** Appended elements are not counted. The value returned is the number
+** by which the JsonNode counter should increment in order to go to the
+** next peer value.
+*/
+static u32 jsonNodeSize(JsonNode *pNode){
+ return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1;
+}
+
+/*
+** Reclaim all memory allocated by a JsonParse object. But do not
+** delete the JsonParse object itself.
+*/
+static void jsonParseReset(JsonParse *pParse){
+ sqlite3_free(pParse->aNode);
+ pParse->aNode = 0;
+ pParse->nNode = 0;
+ pParse->nAlloc = 0;
+ sqlite3_free(pParse->aUp);
+ pParse->aUp = 0;
+}
+
+/*
+** Free a JsonParse object that was obtained from sqlite3_malloc().
+*/
+static void jsonParseFree(JsonParse *pParse){
+ jsonParseReset(pParse);
+ sqlite3_free(pParse);
+}
+
+/*
+** Convert the JsonNode pNode into a pure JSON string and
+** append to pOut.  Substructure is also included.  Return
+** the number of JsonNode objects that are encoded.
+*/
+static void jsonRenderNode(
+ JsonNode *pNode, /* The node to render */
+ JsonString *pOut, /* Write JSON here */
+ sqlite3_value **aReplace /* Replacement values */
+){
+ if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){
+ if( pNode->jnFlags & JNODE_REPLACE ){
+ jsonAppendValue(pOut, aReplace[pNode->u.iReplace]);
+ return;
+ }
+ pNode = pNode->u.pPatch;
+ }
+ switch( pNode->eType ){
+ default: {
+ assert( pNode->eType==JSON_NULL );
+ jsonAppendRaw(pOut, "null", 4);
+ break;
+ }
+ case JSON_TRUE: {
+ jsonAppendRaw(pOut, "true", 4);
+ break;
+ }
+ case JSON_FALSE: {
+ jsonAppendRaw(pOut, "false", 5);
+ break;
+ }
+ case JSON_STRING: {
+ if( pNode->jnFlags & JNODE_RAW ){
+ jsonAppendString(pOut, pNode->u.zJContent, pNode->n);
+ break;
+ }
+ /* Fall through into the next case */
+ }
+ case JSON_REAL:
+ case JSON_INT: {
+ jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n);
+ break;
+ }
+ case JSON_ARRAY: {
+ u32 j = 1;
+ jsonAppendChar(pOut, '[');
+ for(;;){
+ while( j<=pNode->n ){
+ if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){
+ jsonAppendSeparator(pOut);
+ jsonRenderNode(&pNode[j], pOut, aReplace);
+ }
+ j += jsonNodeSize(&pNode[j]);
+ }
+ if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
+ pNode = &pNode[pNode->u.iAppend];
+ j = 1;
+ }
+ jsonAppendChar(pOut, ']');
+ break;
+ }
+ case JSON_OBJECT: {
+ u32 j = 1;
+ jsonAppendChar(pOut, '{');
+ for(;;){
+ while( j<=pNode->n ){
+ if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){
+ jsonAppendSeparator(pOut);
+ jsonRenderNode(&pNode[j], pOut, aReplace);
+ jsonAppendChar(pOut, ':');
+ jsonRenderNode(&pNode[j+1], pOut, aReplace);
+ }
+ j += 1 + jsonNodeSize(&pNode[j+1]);
+ }
+ if( (pNode->jnFlags & JNODE_APPEND)==0 ) break;
+ pNode = &pNode[pNode->u.iAppend];
+ j = 1;
+ }
+ jsonAppendChar(pOut, '}');
+ break;
+ }
+ }
+}
+
+/*
+** Return a JsonNode and all its descendents as a JSON string.
+*/
+static void jsonReturnJson(
+ JsonNode *pNode, /* Node to return */
+ sqlite3_context *pCtx, /* Return value for this function */
+ sqlite3_value **aReplace /* Array of replacement values */
+){
+ JsonString s;
+ jsonInit(&s, pCtx);
+ jsonRenderNode(pNode, &s, aReplace);
+ jsonResult(&s);
+ sqlite3_result_subtype(pCtx, JSON_SUBTYPE);
+}
+
+/*
+** Make the JsonNode the return value of the function.
+*/
+static void jsonReturn(
+ JsonNode *pNode, /* Node to return */
+ sqlite3_context *pCtx, /* Return value for this function */
+ sqlite3_value **aReplace /* Array of replacement values */
+){
+ switch( pNode->eType ){
+ default: {
+ assert( pNode->eType==JSON_NULL );
+ sqlite3_result_null(pCtx);
+ break;
+ }
+ case JSON_TRUE: {
+ sqlite3_result_int(pCtx, 1);
+ break;
+ }
+ case JSON_FALSE: {
+ sqlite3_result_int(pCtx, 0);
+ break;
+ }
+ case JSON_INT: {
+ sqlite3_int64 i = 0;
+ const char *z = pNode->u.zJContent;
+ if( z[0]=='-' ){ z++; }
+ while( z[0]>='0' && z[0]<='9' ){
+ unsigned v = *(z++) - '0';
+ if( i>=LARGEST_INT64/10 ){
+ if( i>LARGEST_INT64/10 ) goto int_as_real;
+ if( z[0]>='0' && z[0]<='9' ) goto int_as_real;
+ if( v==9 ) goto int_as_real;
+ if( v==8 ){
+ if( pNode->u.zJContent[0]=='-' ){
+ sqlite3_result_int64(pCtx, SMALLEST_INT64);
+ goto int_done;
+ }else{
+ goto int_as_real;
+ }
+ }
+ }
+ i = i*10 + v;
+ }
+ if( pNode->u.zJContent[0]=='-' ){ i = -i; }
+ sqlite3_result_int64(pCtx, i);
+ int_done:
+ break;
+ int_as_real: /* fall through to real */;
+ }
+ case JSON_REAL: {
+ double r;
+#ifdef SQLITE_AMALGAMATION
+ const char *z = pNode->u.zJContent;
+ sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8);
+#else
+ r = strtod(pNode->u.zJContent, 0);
+#endif
+ sqlite3_result_double(pCtx, r);
+ break;
+ }
+ case JSON_STRING: {
+#if 0 /* Never happens because JNODE_RAW is only set by json_set(),
+ ** json_insert() and json_replace() and those routines do not
+ ** call jsonReturn() */
+ if( pNode->jnFlags & JNODE_RAW ){
+ sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n,
+ SQLITE_TRANSIENT);
+ }else
+#endif
+ assert( (pNode->jnFlags & JNODE_RAW)==0 );
+ if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){
+ /* JSON formatted without any backslash-escapes */
+ sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,
+ SQLITE_TRANSIENT);
+ }else{
+ /* Translate JSON formatted string into raw text */
+ u32 i;
+ u32 n = pNode->n;
+ const char *z = pNode->u.zJContent;
+ char *zOut;
+ u32 j;
+ zOut = sqlite3_malloc( n+1 );
+ if( zOut==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ break;
+ }
+        for(i=1, j=0; i<n-1; i++){
+          char c = z[i];
+          if( c!='\\' ){
+            zOut[j++] = c;
+          }else{
+            c = z[++i];
+            if( c=='u' ){
+              u32 v = 0, k;
+              for(k=0; k<4; i++, k++){
+                assert( i<n-2 );
+                c = z[i+1];
+                assert( safe_isxdigit(c) );
+                if( c<='9' ) v = v*16 + c - '0';
+                else if( c<='F' ) v = v*16 + c - 'A' + 10;
+                else v = v*16 + c - 'a' + 10;
+              }
+              if( v==0 ) break;
+              if( v<=0x7f ){
+                zOut[j++] = (char)v;
+              }else if( v<=0x7ff ){
+                zOut[j++] = (char)(0xc0 | (v>>6));
+ zOut[j++] = 0x80 | (v&0x3f);
+ }else{
+ zOut[j++] = (char)(0xe0 | (v>>12));
+ zOut[j++] = 0x80 | ((v>>6)&0x3f);
+ zOut[j++] = 0x80 | (v&0x3f);
+ }
+ }else{
+ if( c=='b' ){
+ c = '\b';
+ }else if( c=='f' ){
+ c = '\f';
+ }else if( c=='n' ){
+ c = '\n';
+ }else if( c=='r' ){
+ c = '\r';
+ }else if( c=='t' ){
+ c = '\t';
+ }
+ zOut[j++] = c;
+ }
+ }
+ }
+ zOut[j] = 0;
+ sqlite3_result_text(pCtx, zOut, j, sqlite3_free);
+ }
+ break;
+ }
+ case JSON_ARRAY:
+ case JSON_OBJECT: {
+ jsonReturnJson(pNode, pCtx, aReplace);
+ break;
+ }
+ }
+}
+
+/* Forward reference */
+static int jsonParseAddNode(JsonParse*,u32,u32,const char*);
+
+/*
+** A macro to hint to the compiler that a function should not be
+** inlined.
+*/
+#if defined(__GNUC__)
+# define JSON_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER) && _MSC_VER>=1310
+# define JSON_NOINLINE __declspec(noinline)
+#else
+# define JSON_NOINLINE
+#endif
+
+
+static JSON_NOINLINE int jsonParseAddNodeExpand(
+ JsonParse *pParse, /* Append the node to this object */
+ u32 eType, /* Node type */
+ u32 n, /* Content size or sub-node count */
+ const char *zContent /* Content */
+){
+ u32 nNew;
+ JsonNode *pNew;
+ assert( pParse->nNode>=pParse->nAlloc );
+ if( pParse->oom ) return -1;
+ nNew = pParse->nAlloc*2 + 10;
+ pNew = sqlite3_realloc(pParse->aNode, sizeof(JsonNode)*nNew);
+ if( pNew==0 ){
+ pParse->oom = 1;
+ return -1;
+ }
+ pParse->nAlloc = nNew;
+ pParse->aNode = pNew;
+  assert( pParse->nNode<pParse->nAlloc );
+ return jsonParseAddNode(pParse, eType, n, zContent);
+}
+
+/*
+** Create a new JsonNode instance based on the arguments and append that
+** instance to the JsonParse. Return the index in pParse->aNode[] of the
+** new node, or -1 if a memory allocation fails.
+*/
+static int jsonParseAddNode(
+ JsonParse *pParse, /* Append the node to this object */
+ u32 eType, /* Node type */
+ u32 n, /* Content size or sub-node count */
+ const char *zContent /* Content */
+){
+ JsonNode *p;
+ if( pParse->nNode>=pParse->nAlloc ){
+ return jsonParseAddNodeExpand(pParse, eType, n, zContent);
+ }
+ p = &pParse->aNode[pParse->nNode];
+ p->eType = (u8)eType;
+ p->jnFlags = 0;
+ p->n = n;
+ p->u.zJContent = zContent;
+ return pParse->nNode++;
+}
+
+/*
+** Return true if z[] begins with 4 (or more) hexadecimal digits
+*/
+static int jsonIs4Hex(const char *z){
+ int i;
+ for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0;
+ return 1;
+}
+
+/*
+** Parse a single JSON value which begins at pParse->zJson[i]. Return the
+** index of the first character past the end of the value parsed.
+**
+** Return negative for a syntax error. Special cases: return -2 if the
+** first non-whitespace character is '}' and return -3 if the first
+** non-whitespace character is ']'.
+*/
+static int jsonParseValue(JsonParse *pParse, u32 i){
+ char c;
+ u32 j;
+ int iThis;
+ int x;
+ JsonNode *pNode;
+ const char *z = pParse->zJson;
+ while( safe_isspace(z[i]) ){ i++; }
+ if( (c = z[i])=='{' ){
+ /* Parse object */
+ iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
+ if( iThis<0 ) return -1;
+ for(j=i+1;;j++){
+ while( safe_isspace(z[j]) ){ j++; }
+ if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
+ x = jsonParseValue(pParse, j);
+ if( x<0 ){
+ pParse->iDepth--;
+ if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1;
+ return -1;
+ }
+ if( pParse->oom ) return -1;
+ pNode = &pParse->aNode[pParse->nNode-1];
+ if( pNode->eType!=JSON_STRING ) return -1;
+ pNode->jnFlags |= JNODE_LABEL;
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ if( z[j]!=':' ) return -1;
+ j++;
+ x = jsonParseValue(pParse, j);
+ pParse->iDepth--;
+ if( x<0 ) return -1;
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ c = z[j];
+ if( c==',' ) continue;
+ if( c!='}' ) return -1;
+ break;
+ }
+ pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ return j+1;
+ }else if( c=='[' ){
+ /* Parse array */
+ iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
+ if( iThis<0 ) return -1;
+ for(j=i+1;;j++){
+ while( safe_isspace(z[j]) ){ j++; }
+ if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1;
+ x = jsonParseValue(pParse, j);
+ pParse->iDepth--;
+ if( x<0 ){
+ if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1;
+ return -1;
+ }
+ j = x;
+ while( safe_isspace(z[j]) ){ j++; }
+ c = z[j];
+ if( c==',' ) continue;
+ if( c!=']' ) return -1;
+ break;
+ }
+ pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1;
+ return j+1;
+ }else if( c=='"' ){
+ /* Parse string */
+ u8 jnFlags = 0;
+ j = i+1;
+ for(;;){
+ c = z[j];
+ if( (c & ~0x1f)==0 ){
+ /* Control characters are not allowed in strings */
+ return -1;
+ }
+ if( c=='\\' ){
+ c = z[++j];
+ if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
+ || c=='n' || c=='r' || c=='t'
+ || (c=='u' && jsonIs4Hex(z+j+1)) ){
+ jnFlags = JNODE_ESCAPE;
+ }else{
+ return -1;
+ }
+ }else if( c=='"' ){
+ break;
+ }
+ j++;
+ }
+ jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]);
+ if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags;
+ return j+1;
+ }else if( c=='n'
+ && strncmp(z+i,"null",4)==0
+ && !safe_isalnum(z[i+4]) ){
+ jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ return i+4;
+ }else if( c=='t'
+ && strncmp(z+i,"true",4)==0
+ && !safe_isalnum(z[i+4]) ){
+ jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ return i+4;
+ }else if( c=='f'
+ && strncmp(z+i,"false",5)==0
+ && !safe_isalnum(z[i+5]) ){
+ jsonParseAddNode(pParse, JSON_FALSE, 0, 0);
+ return i+5;
+ }else if( c=='-' || (c>='0' && c<='9') ){
+ /* Parse number */
+ u8 seenDP = 0;
+ u8 seenE = 0;
+ assert( '-' < '0' );
+ if( c<='0' ){
+ j = c=='-' ? i+1 : i;
+ if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1;
+ }
+ j = i+1;
+ for(;; j++){
+ c = z[j];
+ if( c>='0' && c<='9' ) continue;
+ if( c=='.' ){
+ if( z[j-1]=='-' ) return -1;
+ if( seenDP ) return -1;
+ seenDP = 1;
+ continue;
+ }
+ if( c=='e' || c=='E' ){
+ if( z[j-1]<'0' ) return -1;
+ if( seenE ) return -1;
+ seenDP = seenE = 1;
+ c = z[j+1];
+ if( c=='+' || c=='-' ){
+ j++;
+ c = z[j+1];
+ }
+ if( c<'0' || c>'9' ) return -1;
+ continue;
+ }
+ break;
+ }
+ if( z[j-1]<'0' ) return -1;
+ jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT,
+ j - i, &z[i]);
+ return j;
+ }else if( c=='}' ){
+ return -2; /* End of {...} */
+ }else if( c==']' ){
+ return -3; /* End of [...] */
+ }else if( c==0 ){
+ return 0; /* End of file */
+ }else{
+ return -1; /* Syntax error */
+ }
+}
+
+/*
+** Parse a complete JSON string. Return 0 on success or non-zero if there
+** are any errors. If an error occurs, free all memory associated with
+** pParse.
+**
+** pParse is uninitialized when this routine is called.
+*/
+static int jsonParse(
+ JsonParse *pParse, /* Initialize and fill this JsonParse object */
+ sqlite3_context *pCtx, /* Report errors here */
+ const char *zJson /* Input JSON text to be parsed */
+){
+ int i;
+ memset(pParse, 0, sizeof(*pParse));
+ if( zJson==0 ) return 1;
+ pParse->zJson = zJson;
+ i = jsonParseValue(pParse, 0);
+ if( pParse->oom ) i = -1;
+ if( i>0 ){
+ assert( pParse->iDepth==0 );
+ while( safe_isspace(zJson[i]) ) i++;
+ if( zJson[i] ) i = -1;
+ }
+ if( i<=0 ){
+ if( pCtx!=0 ){
+ if( pParse->oom ){
+ sqlite3_result_error_nomem(pCtx);
+ }else{
+ sqlite3_result_error(pCtx, "malformed JSON", -1);
+ }
+ }
+ jsonParseReset(pParse);
+ return 1;
+ }
+ return 0;
+}
+
+/* Mark node i of pParse as being a child of iParent. Call recursively
+** to fill in all the descendants of node i.
+*/
+static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){
+ JsonNode *pNode = &pParse->aNode[i];
+ u32 j;
+ pParse->aUp[i] = iParent;
+ switch( pNode->eType ){
+ case JSON_ARRAY: {
+ for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){
+ jsonParseFillInParentage(pParse, i+j, i);
+ }
+ break;
+ }
+ case JSON_OBJECT: {
+ for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){
+ pParse->aUp[i+j] = i;
+ jsonParseFillInParentage(pParse, i+j+1, i);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+}
+
+/*
+** Compute the parentage of all nodes in a completed parse.
+*/
+static int jsonParseFindParents(JsonParse *pParse){
+ u32 *aUp;
+ assert( pParse->aUp==0 );
+ aUp = pParse->aUp = sqlite3_malloc( sizeof(u32)*pParse->nNode );
+ if( aUp==0 ){
+ pParse->oom = 1;
+ return SQLITE_NOMEM;
+ }
+ jsonParseFillInParentage(pParse, 0, 0);
+ return SQLITE_OK;
+}
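+
+/* Continuing the {"a":[1,2]} illustration above: after
+** jsonParseFindParents() runs, aUp[] reads {0, 0, 0, 2, 2}.  The root
+** points at itself, the label and the array hang off the object at index 0,
+** and the two integers hang off the array at index 2.
+*/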
+
+/*
+** Magic number used for the JSON parse cache in sqlite3_get_auxdata()
+*/
+#define JSON_CACHE_ID (-429938)
+
+/*
+** Obtain a complete parse of the JSON found in the first argument
+** of the argv array. Use the sqlite3_get_auxdata() cache for this
+** parse if it is available. If the cache is not available or if it
+** is no longer valid, parse the JSON again and return the new parse,
+** and also register the new parse so that it will be available for
+** future sqlite3_get_auxdata() calls.
+*/
+static JsonParse *jsonParseCached(
+ sqlite3_context *pCtx,
+ sqlite3_value **argv
+){
+ const char *zJson = (const char*)sqlite3_value_text(argv[0]);
+ int nJson = sqlite3_value_bytes(argv[0]);
+ JsonParse *p;
+ if( zJson==0 ) return 0;
+ p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID);
+ if( p && p->nJson==nJson && memcmp(p->zJson,zJson,nJson)==0 ){
+ p->nErr = 0;
+ return p; /* The cached entry matches, so return it */
+ }
+ p = sqlite3_malloc( sizeof(*p) + nJson + 1 );
+ if( p==0 ){
+ sqlite3_result_error_nomem(pCtx);
+ return 0;
+ }
+ memset(p, 0, sizeof(*p));
+ p->zJson = (char*)&p[1];
+ memcpy((char*)p->zJson, zJson, nJson+1);
+ if( jsonParse(p, pCtx, p->zJson) ){
+ sqlite3_free(p);
+ return 0;
+ }
+ p->nJson = nJson;
+ sqlite3_set_auxdata(pCtx, JSON_CACHE_ID, p, (void(*)(void*))jsonParseFree);
+ return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID);
+}
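+
+/* What the cache buys: if the previously registered parse was built from
+** byte-for-byte identical JSON text, it is returned as-is; otherwise the
+** argument is re-parsed and the new parse replaces the cached one.  So a
+** query over a hypothetical settings(cfg) table such as
+**
+**        SELECT json_extract(cfg,'$.host') FROM settings;
+**
+** typically re-parses cfg only on rows whose text differs from the value
+** the same call site saw on the previous row.
+*/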
+
+/*
+** Compare the OBJECT label at pNode against zKey,nKey. Return true on
+** a match.
+*/
+static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){
+ if( pNode->jnFlags & JNODE_RAW ){
+ if( pNode->n!=nKey ) return 0;
+ return strncmp(pNode->u.zJContent, zKey, nKey)==0;
+ }else{
+ if( pNode->n!=nKey+2 ) return 0;
+ return strncmp(pNode->u.zJContent+1, zKey, nKey)==0;
+ }
+}
+
+/* forward declaration */
+static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);
+
+/*
+** Search along zPath to find the node specified. Return a pointer
+** to that node, or NULL if zPath is malformed or if there is no such
+** node.
+**
+** If pApnd!=0, then try to append new nodes to complete zPath if it is
+** possible to do so and if no existing node corresponds to zPath. If
+** new nodes are appended *pApnd is set to 1.
+*/
+static JsonNode *jsonLookupStep(
+ JsonParse *pParse, /* The JSON to search */
+ u32 iRoot, /* Begin the search at this node */
+ const char *zPath, /* The path to search */
+ int *pApnd, /* Append nodes to complete path if not NULL */
+ const char **pzErr /* Make *pzErr point to any syntax error in zPath */
+){
+ u32 i, j, nKey;
+ const char *zKey;
+ JsonNode *pRoot = &pParse->aNode[iRoot];
+ if( zPath[0]==0 ) return pRoot;
+ if( zPath[0]=='.' ){
+ if( pRoot->eType!=JSON_OBJECT ) return 0;
+ zPath++;
+ if( zPath[0]=='"' ){
+ zKey = zPath + 1;
+ for(i=1; zPath[i] && zPath[i]!='"'; i++){}
+ nKey = i-1;
+ if( zPath[i] ){
+ i++;
+ }else{
+ *pzErr = zPath;
+ return 0;
+ }
+ }else{
+ zKey = zPath;
+ for(i=0; zPath[i] && zPath[i]!='.' && zPath[i]!='['; i++){}
+ nKey = i;
+ }
+ if( nKey==0 ){
+ *pzErr = zPath;
+ return 0;
+ }
+ j = 1;
+ for(;;){
+ while( j<=pRoot->n ){
+ if( jsonLabelCompare(pRoot+j, zKey, nKey) ){
+ return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr);
+ }
+ j++;
+ j += jsonNodeSize(&pRoot[j]);
+ }
+ if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
+ iRoot += pRoot->u.iAppend;
+ pRoot = &pParse->aNode[iRoot];
+ j = 1;
+ }
+ if( pApnd ){
+ u32 iStart, iLabel;
+ JsonNode *pNode;
+ iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
+ iLabel = jsonParseAddNode(pParse, JSON_STRING, i, zPath);
+ zPath += i;
+ pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
+ if( pParse->oom ) return 0;
+ if( pNode ){
+ pRoot = &pParse->aNode[iRoot];
+ pRoot->u.iAppend = iStart - iRoot;
+ pRoot->jnFlags |= JNODE_APPEND;
+ pParse->aNode[iLabel].jnFlags |= JNODE_RAW;
+ }
+ return pNode;
+ }
+ }else if( zPath[0]=='[' && safe_isdigit(zPath[1]) ){
+ if( pRoot->eType!=JSON_ARRAY ) return 0;
+ i = 0;
+ j = 1;
+ while( safe_isdigit(zPath[j]) ){
+ i = i*10 + zPath[j] - '0';
+ j++;
+ }
+ if( zPath[j]!=']' ){
+ *pzErr = zPath;
+ return 0;
+ }
+ zPath += j + 1;
+ j = 1;
+ for(;;){
+ while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){
+ if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--;
+ j += jsonNodeSize(&pRoot[j]);
+ }
+ if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break;
+ iRoot += pRoot->u.iAppend;
+ pRoot = &pParse->aNode[iRoot];
+ j = 1;
+ }
+ if( j<=pRoot->n ){
+ return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr);
+ }
+ if( i==0 && pApnd ){
+ u32 iStart;
+ JsonNode *pNode;
+ iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0);
+ pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr);
+ if( pParse->oom ) return 0;
+ if( pNode ){
+ pRoot = &pParse->aNode[iRoot];
+ pRoot->u.iAppend = iStart - iRoot;
+ pRoot->jnFlags |= JNODE_APPEND;
+ }
+ return pNode;
+ }
+ }else{
+ *pzErr = zPath;
+ }
+ return 0;
+}
+
+/*
+** Append content to pParse that will complete zPath. Return a pointer
+** to the inserted node, or return NULL if the append fails.
+*/
+static JsonNode *jsonLookupAppend(
+ JsonParse *pParse, /* Append content to the JSON parse */
+ const char *zPath, /* Description of content to append */
+ int *pApnd, /* Set this flag to 1 */
+ const char **pzErr /* Make this point to any syntax error */
+){
+ *pApnd = 1;
+ if( zPath[0]==0 ){
+ jsonParseAddNode(pParse, JSON_NULL, 0, 0);
+ return pParse->oom ? 0 : &pParse->aNode[pParse->nNode-1];
+ }
+ if( zPath[0]=='.' ){
+ jsonParseAddNode(pParse, JSON_OBJECT, 0, 0);
+ }else if( strncmp(zPath,"[0]",3)==0 ){
+ jsonParseAddNode(pParse, JSON_ARRAY, 0, 0);
+ }else{
+ return 0;
+ }
+ if( pParse->oom ) return 0;
+ return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr);
+}
+
+/*
+** Return the text of a syntax error message on a JSON path. Space is
+** obtained from sqlite3_malloc().
+*/
+static char *jsonPathSyntaxError(const char *zErr){
+ return sqlite3_mprintf("JSON path error near '%q'", zErr);
+}
+
+/*
+** Do a node lookup using zPath. Return a pointer to the node on success.
+** Return NULL if not found or if there is an error.
+**
+** On an error, write an error message into pCtx and increment the
+** pParse->nErr counter.
+**
+** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if
+** nodes are appended.
+*/
+static JsonNode *jsonLookup(
+ JsonParse *pParse, /* The JSON to search */
+ const char *zPath, /* The path to search */
+ int *pApnd, /* Append nodes to complete path if not NULL */
+ sqlite3_context *pCtx /* Report errors here, if not NULL */
+){
+ const char *zErr = 0;
+ JsonNode *pNode = 0;
+ char *zMsg;
+
+ if( zPath==0 ) return 0;
+ if( zPath[0]!='$' ){
+ zErr = zPath;
+ goto lookup_err;
+ }
+ zPath++;
+ pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr);
+ if( zErr==0 ) return pNode;
+
+lookup_err:
+ pParse->nErr++;
+ assert( zErr!=0 && pCtx!=0 );
+ zMsg = jsonPathSyntaxError(zErr);
+ if( zMsg ){
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+ }else{
+ sqlite3_result_error_nomem(pCtx);
+ }
+ return 0;
+}
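+
+/* Examples of the path syntax accepted above.  Every path begins with '$'
+** (the root); ".name" or ."quoted name" descends into an object label and
+** "[N]" picks the N-th (zero-based) array element:
+**
+**        json_extract('{"a":{"b":[10,20]}}', '$.a.b[1]')   ->  20
+**        json_extract('{"a b":7}', '$."a b"')              ->  7
+**        json_extract('[1,2,3]', '$[2]')                   ->  3
+**
+** A path that does not start with '$' or that is otherwise malformed raises
+** the "JSON path error" message; a well-formed path that matches nothing
+** simply yields NULL.
+*/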
+
+
+/*
+** Report the wrong number of arguments for json_insert(), json_replace()
+** or json_set().
+*/
+static void jsonWrongNumArgs(
+ sqlite3_context *pCtx,
+ const char *zFuncName
+){
+ char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments",
+ zFuncName);
+ sqlite3_result_error(pCtx, zMsg, -1);
+ sqlite3_free(zMsg);
+}
+
+/*
+** Mark all NULL entries in the Object passed in as JNODE_REMOVE.
+*/
+static void jsonRemoveAllNulls(JsonNode *pNode){
+ int i, n;
+ assert( pNode->eType==JSON_OBJECT );
+ n = pNode->n;
+ for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){
+ switch( pNode[i].eType ){
+ case JSON_NULL:
+ pNode[i].jnFlags |= JNODE_REMOVE;
+ break;
+ case JSON_OBJECT:
+ jsonRemoveAllNulls(&pNode[i]);
+ break;
+ }
+ }
+}
+
+
+/****************************************************************************
+** SQL functions used for testing and debugging
+****************************************************************************/
+
+#ifdef SQLITE_DEBUG
+/*
+** The json_parse(JSON) function returns a string which describes
+** a parse of the JSON provided. Or it returns NULL if JSON is not
+** well-formed.
+*/
+static void jsonParseFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString s; /* Output string - not real JSON */
+ JsonParse x; /* The parse */
+ u32 i;
+
+ assert( argc==1 );
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ jsonParseFindParents(&x);
+ jsonInit(&s, ctx);
+  for(i=0; i<x.nNode; i++){
+    const char *zType;
+    if( x.aNode[i].jnFlags & JNODE_LABEL ){
+      assert( x.aNode[i].eType==JSON_STRING );
+      zType = "label";
+    }else{
+      zType = jsonType[x.aNode[i].eType];
+    }
+    jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d",
+               i, zType, x.aNode[i].n, x.aUp[i]);
+    if( x.aNode[i].u.zJContent!=0 ){
+      jsonAppendRaw(&s, " ", 1);
+      jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n);
+    }
+    jsonAppendRaw(&s, "\n", 1);
+  }
+  jsonParseReset(&x);
+  jsonResult(&s);
+}
+
+/*
+** The json_test1(JSON) function return true (1) if the input is JSON
+** text generated by another json function.  It returns (0) if the input
+** is not known to be JSON.
+*/
+static void jsonTest1Func(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  UNUSED_PARAM(argc);
+  sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE);
+}
+#endif /* SQLITE_DEBUG */
+
+/****************************************************************************
+** Scalar SQL function implementations
+****************************************************************************/
+
+/*
+** Implementation of the json_QUOTE(VALUE) function.  Return a JSON value
+** corresponding to the SQL value input.  Mostly this means putting
+** double-quotes around strings and returning the unquoted "null", "true",
+** and "false" keywords.
+*/
+static void jsonQuoteFunc(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  JsonString jx;
+  UNUSED_PARAM(argc);
+
+  jsonInit(&jx, ctx);
+  jsonAppendValue(&jx, argv[0]);
+  jsonResult(&jx);
+  sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+/*
+** Implementation of the json_array(VALUE,...) function.  Return a JSON
+** array that contains all values given in arguments.  Or if any argument
+** is a BLOB, throw an error.
+*/
+static void jsonArrayFunc(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  int i;
+  JsonString jx;
+
+  jsonInit(&jx, ctx);
+  jsonAppendChar(&jx, '[');
+  for(i=0; i<argc; i++){
+    jsonAppendSeparator(&jx);
+    jsonAppendValue(&jx, argv[i]);
+  }
+  jsonAppendChar(&jx, ']');
+  jsonResult(&jx);
+  sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+
+/*
+** json_array_length(JSON)
+** json_array_length(JSON, PATH)
+**
+** Return the number of elements in the top-level JSON array.
+** Return 0 if the input is not a well-formed JSON array.
+*/
+static void jsonArrayLengthFunc(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  JsonParse *p;          /* The parse */
+  sqlite3_int64 n = 0;
+  u32 i;
+  JsonNode *pNode;
+
+  p = jsonParseCached(ctx, argv);
+  if( p==0 ) return;
+  assert( p->nNode );
+ if( argc==2 ){
+ const char *zPath = (const char*)sqlite3_value_text(argv[1]);
+ pNode = jsonLookup(p, zPath, 0, ctx);
+ }else{
+ pNode = p->aNode;
+ }
+ if( pNode==0 ){
+ return;
+ }
+ if( pNode->eType==JSON_ARRAY ){
+ assert( (pNode->jnFlags & JNODE_APPEND)==0 );
+ for(i=1; i<=pNode->n; n++){
+ i += jsonNodeSize(&pNode[i]);
+ }
+ }
+ sqlite3_result_int64(ctx, n);
+}
+
+/*
+** json_extract(JSON, PATH, ...)
+**
+** Return the element described by PATH. Return NULL if there is no
+** PATH element. If there are multiple PATHs, then return a JSON array
+** with the result from each path. Throw an error if the JSON or any PATH
+** is malformed.
+*/
+static void jsonExtractFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse *p; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ JsonString jx;
+ int i;
+
+ if( argc<2 ) return;
+ p = jsonParseCached(ctx, argv);
+ if( p==0 ) return;
+ jsonInit(&jx, ctx);
+ jsonAppendChar(&jx, '[');
+  for(i=1; i<argc; i++){
+    zPath = (const char*)sqlite3_value_text(argv[i]);
+    pNode = jsonLookup(p, zPath, 0, ctx);
+    if( p->nErr ) break;
+ if( argc>2 ){
+ jsonAppendSeparator(&jx);
+ if( pNode ){
+ jsonRenderNode(pNode, &jx, 0);
+ }else{
+ jsonAppendRaw(&jx, "null", 4);
+ }
+ }else if( pNode ){
+ jsonReturn(pNode, ctx, 0);
+ }
+ }
+ if( argc>2 && i==argc ){
+ jsonAppendChar(&jx, ']');
+ jsonResult(&jx);
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+ }
+ jsonReset(&jx);
+}
+
+/* This is the RFC 7396 MergePatch algorithm.
+*/
+static JsonNode *jsonMergePatch(
+ JsonParse *pParse, /* The JSON parser that contains the TARGET */
+ u32 iTarget, /* Node of the TARGET in pParse */
+ JsonNode *pPatch /* The PATCH */
+){
+ u32 i, j;
+ u32 iRoot;
+ JsonNode *pTarget;
+ if( pPatch->eType!=JSON_OBJECT ){
+ return pPatch;
+ }
+  assert( iTarget>=0 && iTarget<pParse->nNode );
+ pTarget = &pParse->aNode[iTarget];
+ assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
+ if( pTarget->eType!=JSON_OBJECT ){
+ jsonRemoveAllNulls(pPatch);
+ return pPatch;
+ }
+ iRoot = iTarget;
+  for(i=1; i<pPatch->n; i += jsonNodeSize(&pPatch[i+1])+1){
+ u32 nKey;
+ const char *zKey;
+ assert( pPatch[i].eType==JSON_STRING );
+ assert( pPatch[i].jnFlags & JNODE_LABEL );
+ nKey = pPatch[i].n;
+ zKey = pPatch[i].u.zJContent;
+ assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
+    for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){
+ assert( pTarget[j].eType==JSON_STRING );
+ assert( pTarget[j].jnFlags & JNODE_LABEL );
+ assert( (pPatch[i].jnFlags & JNODE_RAW)==0 );
+ if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){
+ if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break;
+ if( pPatch[i+1].eType==JSON_NULL ){
+ pTarget[j+1].jnFlags |= JNODE_REMOVE;
+ }else{
+ JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]);
+ if( pNew==0 ) return 0;
+ pTarget = &pParse->aNode[iTarget];
+ if( pNew!=&pTarget[j+1] ){
+ pTarget[j+1].u.pPatch = pNew;
+ pTarget[j+1].jnFlags |= JNODE_PATCH;
+ }
+ }
+ break;
+ }
+ }
+ if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){
+ int iStart, iPatch;
+ iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0);
+ jsonParseAddNode(pParse, JSON_STRING, nKey, zKey);
+ iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0);
+ if( pParse->oom ) return 0;
+ jsonRemoveAllNulls(pPatch);
+ pTarget = &pParse->aNode[iTarget];
+ pParse->aNode[iRoot].jnFlags |= JNODE_APPEND;
+ pParse->aNode[iRoot].u.iAppend = iStart - iRoot;
+ iRoot = iStart;
+ pParse->aNode[iPatch].jnFlags |= JNODE_PATCH;
+ pParse->aNode[iPatch].u.pPatch = &pPatch[i+1];
+ }
+ }
+ return pTarget;
+}
+
+/*
+** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON
+** object that is the result of running the RFC 7396 MergePatch() algorithm
+** on the two arguments.
+*/
+static void jsonPatchFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The JSON that is being patched */
+ JsonParse y; /* The patch */
+ JsonNode *pResult; /* The result of the merge */
+
+ UNUSED_PARAM(argc);
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){
+ jsonParseReset(&x);
+ return;
+ }
+ pResult = jsonMergePatch(&x, 0, y.aNode);
+ assert( pResult!=0 || x.oom );
+ if( pResult ){
+ jsonReturnJson(pResult, ctx, 0);
+ }else{
+ sqlite3_result_error_nomem(ctx);
+ }
+ jsonParseReset(&x);
+ jsonParseReset(&y);
+}
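+
+/* The classic RFC 7396 example, as computed by jsonMergePatch() via
+** json_patch():
+**
+**        json_patch('{"a":"b","c":{"d":"e","f":"g"}}',
+**                   '{"a":"z","c":{"f":null}}')
+**          ->  '{"a":"z","c":{"d":"e"}}'
+**
+** A null in the patch deletes the corresponding target member, and any
+** non-object patch value replaces the target value wholesale.
+*/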
+
+
+/*
+** Implementation of the json_object(NAME,VALUE,...) function. Return a JSON
+** object that contains all name/value given in arguments. Or if any name
+** is not a string or if any value is a BLOB, throw an error.
+*/
+static void jsonObjectFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ int i;
+ JsonString jx;
+ const char *z;
+ u32 n;
+
+ if( argc&1 ){
+ sqlite3_result_error(ctx, "json_object() requires an even number "
+ "of arguments", -1);
+ return;
+ }
+ jsonInit(&jx, ctx);
+ jsonAppendChar(&jx, '{');
+  for(i=0; i<argc; i+=2){
+    if( sqlite3_value_type(argv[i])!=SQLITE_TEXT ){
+      sqlite3_result_error(ctx, "json_object() labels must be TEXT", -1);
+      jsonReset(&jx);
+      return;
+    }
+    jsonAppendSeparator(&jx);
+    z = (const char*)sqlite3_value_text(argv[i]);
+    n = (u32)sqlite3_value_bytes(argv[i]);
+    jsonAppendString(&jx, z, n);
+    jsonAppendChar(&jx, ':');
+    jsonAppendValue(&jx, argv[i+1]);
+  }
+  jsonAppendChar(&jx, '}');
+  jsonResult(&jx);
+  sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+
+/*
+** json_remove(JSON, PATH, ...)
+**
+** Remove the named elements from JSON and return the result.  malformed
+** JSON or PATH arguments result in an error.
+*/
+static void jsonRemoveFunc(
+  sqlite3_context *ctx,
+  int argc,
+  sqlite3_value **argv
+){
+  JsonParse x;          /* The parse */
+  JsonNode *pNode;
+  const char *zPath;
+  u32 i;
+
+  if( argc<1 ) return;
+  if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+  assert( x.nNode );
+  for(i=1; i<(u32)argc; i++){
+    zPath = (const char*)sqlite3_value_text(argv[i]);
+    if( zPath==0 ) goto remove_done;
+    pNode = jsonLookup(&x, zPath, 0, ctx);
+    if( x.nErr ) goto remove_done;
+    if( pNode ) pNode->jnFlags |= JNODE_REMOVE;
+ }
+ if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){
+ jsonReturnJson(x.aNode, ctx, 0);
+ }
+remove_done:
+ jsonParseReset(&x);
+}
+
+/*
+** json_replace(JSON, PATH, VALUE, ...)
+**
+** Replace the value at PATH with VALUE. If PATH does not already exist,
+** this routine is a no-op. If JSON or PATH is malformed, throw an error.
+*/
+static void jsonReplaceFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ u32 i;
+
+ if( argc<1 ) return;
+ if( (argc&1)==0 ) {
+ jsonWrongNumArgs(ctx, "replace");
+ return;
+ }
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ for(i=1; i<(u32)argc; i+=2){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ pNode = jsonLookup(&x, zPath, 0, ctx);
+ if( x.nErr ) goto replace_err;
+ if( pNode ){
+ pNode->jnFlags |= (u8)JNODE_REPLACE;
+ pNode->u.iReplace = i + 1;
+ }
+ }
+ if( x.aNode[0].jnFlags & JNODE_REPLACE ){
+ sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
+ }else{
+ jsonReturnJson(x.aNode, ctx, argv);
+ }
+replace_err:
+ jsonParseReset(&x);
+}
+
+/*
+** json_set(JSON, PATH, VALUE, ...)
+**
+** Set the value at PATH to VALUE. Create the PATH if it does not already
+** exist. Overwrite existing values that do exist.
+** If JSON or PATH is malformed, throw an error.
+**
+** json_insert(JSON, PATH, VALUE, ...)
+**
+** Create PATH and initialize it to VALUE. If PATH already exists, this
+** routine is a no-op. If JSON or PATH is malformed, throw an error.
+*/
+static void jsonSetFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ JsonNode *pNode;
+ const char *zPath;
+ u32 i;
+ int bApnd;
+ int bIsSet = *(int*)sqlite3_user_data(ctx);
+
+ if( argc<1 ) return;
+ if( (argc&1)==0 ) {
+ jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert");
+ return;
+ }
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ for(i=1; i<(u32)argc; i+=2){
+ zPath = (const char*)sqlite3_value_text(argv[i]);
+ bApnd = 0;
+ pNode = jsonLookup(&x, zPath, &bApnd, ctx);
+ if( x.oom ){
+ sqlite3_result_error_nomem(ctx);
+ goto jsonSetDone;
+ }else if( x.nErr ){
+ goto jsonSetDone;
+ }else if( pNode && (bApnd || bIsSet) ){
+ pNode->jnFlags |= (u8)JNODE_REPLACE;
+ pNode->u.iReplace = i + 1;
+ }
+ }
+ if( x.aNode[0].jnFlags & JNODE_REPLACE ){
+ sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]);
+ }else{
+ jsonReturnJson(x.aNode, ctx, argv);
+ }
+jsonSetDone:
+ jsonParseReset(&x);
+}
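+
+/* How the three update functions differ on the same input:
+**
+**        json_insert('{"a":1}', '$.a', 99)             ->  '{"a":1}'
+**        json_insert('{"a":1}', '$.b', 99)             ->  '{"a":1,"b":99}'
+**        json_replace('{"a":1}', '$.a', 99, '$.b', 2)  ->  '{"a":99}'
+**        json_set('{"a":1}', '$.a', 99, '$.b', 2)      ->  '{"a":99,"b":2}'
+**
+** json_insert() only creates paths that are missing, json_replace() only
+** overwrites paths that exist, and json_set() does both.
+*/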
+
+/*
+** json_type(JSON)
+** json_type(JSON, PATH)
+**
+** Return the top-level "type" of a JSON string. Throw an error if
+** either the JSON or PATH inputs are not well-formed.
+*/
+static void jsonTypeFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ const char *zPath;
+ JsonNode *pNode;
+
+ if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return;
+ assert( x.nNode );
+ if( argc==2 ){
+ zPath = (const char*)sqlite3_value_text(argv[1]);
+ pNode = jsonLookup(&x, zPath, 0, ctx);
+ }else{
+ pNode = x.aNode;
+ }
+ if( pNode ){
+ sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC);
+ }
+ jsonParseReset(&x);
+}
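+
+/* Sample json_type() results:
+**
+**        json_type('{"a":[2,3.5,true,false,null,"x"]}')           -> 'object'
+**        json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a')     -> 'array'
+**        json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[0]')  -> 'integer'
+**        json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[1]')  -> 'real'
+**        json_type('{"a":[2,3.5,true,false,null,"x"]}','$.a[5]')  -> 'text'
+**
+** A well-formed path that matches nothing yields NULL rather than an error.
+*/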
+
+/*
+** json_valid(JSON)
+**
+** Return 1 if JSON is a well-formed JSON string according to RFC-7159.
+** Return 0 otherwise.
+*/
+static void jsonValidFunc(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonParse x; /* The parse */
+ int rc = 0;
+
+ UNUSED_PARAM(argc);
+ if( jsonParse(&x, 0, (const char*)sqlite3_value_text(argv[0]))==0 ){
+ rc = 1;
+ }
+ jsonParseReset(&x);
+ sqlite3_result_int(ctx, rc);
+}
+
+
+/****************************************************************************
+** Aggregate SQL function implementations
+****************************************************************************/
+/*
+** json_group_array(VALUE)
+**
+** Return a JSON array composed of all values in the aggregate.
+*/
+static void jsonArrayStep(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString *pStr;
+ UNUSED_PARAM(argc);
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
+ if( pStr ){
+ if( pStr->zBuf==0 ){
+ jsonInit(pStr, ctx);
+ jsonAppendChar(pStr, '[');
+ }else{
+ jsonAppendChar(pStr, ',');
+ pStr->pCtx = ctx;
+ }
+ jsonAppendValue(pStr, argv[0]);
+ }
+}
+static void jsonArrayFinal(sqlite3_context *ctx){
+ JsonString *pStr;
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
+ if( pStr ){
+ pStr->pCtx = ctx;
+ jsonAppendChar(pStr, ']');
+ if( pStr->bErr ){
+ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
+ assert( pStr->bStatic );
+ }else{
+ sqlite3_result_text(ctx, pStr->zBuf, pStr->nUsed,
+ pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic = 1;
+ }
+ }else{
+ sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC);
+ }
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
+
+/*
+** json_group_obj(NAME,VALUE)
+**
+** Return a JSON object composed of all names and values in the aggregate.
+*/
+static void jsonObjectStep(
+ sqlite3_context *ctx,
+ int argc,
+ sqlite3_value **argv
+){
+ JsonString *pStr;
+ const char *z;
+ u32 n;
+ UNUSED_PARAM(argc);
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr));
+ if( pStr ){
+ if( pStr->zBuf==0 ){
+ jsonInit(pStr, ctx);
+ jsonAppendChar(pStr, '{');
+ }else{
+ jsonAppendChar(pStr, ',');
+ pStr->pCtx = ctx;
+ }
+ z = (const char*)sqlite3_value_text(argv[0]);
+ n = (u32)sqlite3_value_bytes(argv[0]);
+ jsonAppendString(pStr, z, n);
+ jsonAppendChar(pStr, ':');
+ jsonAppendValue(pStr, argv[1]);
+ }
+}
+static void jsonObjectFinal(sqlite3_context *ctx){
+ JsonString *pStr;
+ pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0);
+ if( pStr ){
+ jsonAppendChar(pStr, '}');
+ if( pStr->bErr ){
+ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
+ assert( pStr->bStatic );
+ }else{
+ sqlite3_result_text(ctx, pStr->zBuf, pStr->nUsed,
+ pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free);
+ pStr->bStatic = 1;
+ }
+ }else{
+ sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC);
+ }
+ sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+}
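+
+/* Typical use of the two aggregates over a hypothetical table t1(id,name):
+**
+**        SELECT json_group_array(name)      FROM t1;   -> '["x","y",...]'
+**        SELECT json_group_object(id,name)  FROM t1;   -> '{"1":"x",...}'
+**
+** With no input rows the finalizers above return '[]' and '{}' respectively.
+*/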
+
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/****************************************************************************
+** The json_each virtual table
+****************************************************************************/
+typedef struct JsonEachCursor JsonEachCursor;
+struct JsonEachCursor {
+ sqlite3_vtab_cursor base; /* Base class - must be first */
+ u32 iRowid; /* The rowid */
+ u32 iBegin; /* The first node of the scan */
+ u32 i; /* Index in sParse.aNode[] of current row */
+ u32 iEnd; /* EOF when i equals or exceeds this value */
+ u8 eType; /* Type of top-level element */
+ u8 bRecursive; /* True for json_tree(). False for json_each() */
+ char *zJson; /* Input JSON */
+ char *zRoot; /* Path by which to filter zJson */
+ JsonParse sParse; /* Parse of the input JSON */
+};
+
+/* Constructor for the json_each virtual table */
+static int jsonEachConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ sqlite3_vtab *pNew;
+ int rc;
+
+/* Column numbers */
+#define JEACH_KEY 0
+#define JEACH_VALUE 1
+#define JEACH_TYPE 2
+#define JEACH_ATOM 3
+#define JEACH_ID 4
+#define JEACH_PARENT 5
+#define JEACH_FULLKEY 6
+#define JEACH_PATH 7
+#define JEACH_JSON 8
+#define JEACH_ROOT 9
+
+ UNUSED_PARAM(pzErr);
+ UNUSED_PARAM(argv);
+ UNUSED_PARAM(argc);
+ UNUSED_PARAM(pAux);
+ rc = sqlite3_declare_vtab(db,
+ "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path,"
+ "json HIDDEN,root HIDDEN)");
+ if( rc==SQLITE_OK ){
+ pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) );
+ if( pNew==0 ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(*pNew));
+ }
+ return rc;
+}
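+
+/* With the schema declared above, a query such as
+**
+**        SELECT key, value, type, fullkey
+**          FROM json_each('{"a":1,"b":[2,3]}');
+**
+** returns one row per top-level member:
+**
+**        a | 1     | integer | $.a
+**        b | [2,3] | array   | $.b
+**
+** json_tree() walks the same structure recursively, so it additionally
+** emits a row for the root object and one for each array element.
+*/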
+
+/* destructor for json_each virtual table */
+static int jsonEachDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/* constructor for a JsonEachCursor object for json_each(). */
+static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ JsonEachCursor *pCur;
+
+ UNUSED_PARAM(p);
+ pCur = sqlite3_malloc( sizeof(*pCur) );
+ if( pCur==0 ) return SQLITE_NOMEM;
+ memset(pCur, 0, sizeof(*pCur));
+ *ppCursor = &pCur->base;
+ return SQLITE_OK;
+}
+
+/* constructor for a JsonEachCursor object for json_tree(). */
+static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
+ int rc = jsonEachOpenEach(p, ppCursor);
+ if( rc==SQLITE_OK ){
+ JsonEachCursor *pCur = (JsonEachCursor*)*ppCursor;
+ pCur->bRecursive = 1;
+ }
+ return rc;
+}
+
+/* Reset a JsonEachCursor back to its original state. Free any memory
+** held. */
+static void jsonEachCursorReset(JsonEachCursor *p){
+ sqlite3_free(p->zJson);
+ sqlite3_free(p->zRoot);
+ jsonParseReset(&p->sParse);
+ p->iRowid = 0;
+ p->i = 0;
+ p->iEnd = 0;
+ p->eType = 0;
+ p->zJson = 0;
+ p->zRoot = 0;
+}
+
+/* Destructor for a jsonEachCursor object */
+static int jsonEachClose(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ jsonEachCursorReset(p);
+ sqlite3_free(cur);
+ return SQLITE_OK;
+}
+
+/* Return TRUE if the jsonEachCursor object has been advanced off the end
+** of the JSON object */
+static int jsonEachEof(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ return p->i >= p->iEnd;
+}
+
+/* Advance the cursor to the next element for json_tree() */
+static int jsonEachNext(sqlite3_vtab_cursor *cur){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ if( p->bRecursive ){
+ if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
+ p->i++;
+ p->iRowid++;
+    if( p->i<p->iEnd ){
+ u32 iUp = p->sParse.aUp[p->i];
+ JsonNode *pUp = &p->sParse.aNode[iUp];
+ p->eType = pUp->eType;
+ if( pUp->eType==JSON_ARRAY ){
+ if( iUp==p->i-1 ){
+ pUp->u.iKey = 0;
+ }else{
+ pUp->u.iKey++;
+ }
+ }
+ }
+ }else{
+ switch( p->eType ){
+ case JSON_ARRAY: {
+ p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
+ p->iRowid++;
+ break;
+ }
+ case JSON_OBJECT: {
+ p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
+ p->iRowid++;
+ break;
+ }
+ default: {
+ p->i = p->iEnd;
+ break;
+ }
+ }
+ }
+ return SQLITE_OK;
+}
+
+/* Append the name of the path for element i to pStr
+*/
+static void jsonEachComputePath(
+ JsonEachCursor *p, /* The cursor */
+ JsonString *pStr, /* Write the path here */
+ u32 i /* Path to this element */
+){
+ JsonNode *pNode, *pUp;
+ u32 iUp;
+ if( i==0 ){
+ jsonAppendChar(pStr, '$');
+ return;
+ }
+ iUp = p->sParse.aUp[i];
+ jsonEachComputePath(p, pStr, iUp);
+ pNode = &p->sParse.aNode[i];
+ pUp = &p->sParse.aNode[iUp];
+ if( pUp->eType==JSON_ARRAY ){
+ jsonPrintf(30, pStr, "[%d]", pUp->u.iKey);
+ }else{
+ assert( pUp->eType==JSON_OBJECT );
+ if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--;
+ assert( pNode->eType==JSON_STRING );
+ assert( pNode->jnFlags & JNODE_LABEL );
+ jsonPrintf(pNode->n+1, pStr, ".%.*s", pNode->n-2, pNode->u.zJContent+1);
+ }
+}
+
+/* Return the value of a column */
+static int jsonEachColumn(
+ sqlite3_vtab_cursor *cur, /* The cursor */
+ sqlite3_context *ctx, /* First argument to sqlite3_result_...() */
+ int i /* Which column to return */
+){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ JsonNode *pThis = &p->sParse.aNode[p->i];
+ switch( i ){
+ case JEACH_KEY: {
+ if( p->i==0 ) break;
+ if( p->eType==JSON_OBJECT ){
+ jsonReturn(pThis, ctx, 0);
+ }else if( p->eType==JSON_ARRAY ){
+ u32 iKey;
+ if( p->bRecursive ){
+ if( p->iRowid==0 ) break;
+ iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+ }else{
+ iKey = p->iRowid;
+ }
+ sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+ }
+ break;
+ }
+ case JEACH_VALUE: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ jsonReturn(pThis, ctx, 0);
+ break;
+ }
+ case JEACH_TYPE: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+ break;
+ }
+ case JEACH_ATOM: {
+ if( pThis->jnFlags & JNODE_LABEL ) pThis++;
+ if( pThis->eType>=JSON_ARRAY ) break;
+ jsonReturn(pThis, ctx, 0);
+ break;
+ }
+ case JEACH_ID: {
+ sqlite3_result_int64(ctx,
+ (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+ break;
+ }
+ case JEACH_PARENT: {
+ if( p->i>p->iBegin && p->bRecursive ){
+ sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+ }
+ break;
+ }
+ case JEACH_FULLKEY: {
+ JsonString x;
+ jsonInit(&x, ctx);
+ if( p->bRecursive ){
+ jsonEachComputePath(p, &x, p->i);
+ }else{
+ if( p->zRoot ){
+ jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
+ }else{
+ jsonAppendChar(&x, '$');
+ }
+ if( p->eType==JSON_ARRAY ){
+ jsonPrintf(30, &x, "[%d]", p->iRowid);
+ }else if( p->eType==JSON_OBJECT ){
+ jsonPrintf(pThis->n, &x, ".%.*s", pThis->n-2, pThis->u.zJContent+1);
+ }
+ }
+ jsonResult(&x);
+ break;
+ }
+ case JEACH_PATH: {
+ if( p->bRecursive ){
+ JsonString x;
+ jsonInit(&x, ctx);
+ jsonEachComputePath(p, &x, p->sParse.aUp[p->i]);
+ jsonResult(&x);
+ break;
+ }
+ /* For json_each() path and root are the same so fall through
+ ** into the root case */
+ }
+ default: {
+ const char *zRoot = p->zRoot;
+ if( zRoot==0 ) zRoot = "$";
+ sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
+ break;
+ }
+ case JEACH_JSON: {
+ assert( i==JEACH_JSON );
+ sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC);
+ break;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/* Return the current rowid value */
+static int jsonEachRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ *pRowid = p->iRowid;
+ return SQLITE_OK;
+}
+
+/* The query strategy is to look for an equality constraint on the json
+** column. Without such a constraint, the table cannot operate. idxNum is
+** 1 if the constraint is found, 3 if the constraint and zRoot are found,
+** and 0 otherwise.
+*/
+static int jsonEachBestIndex(
+ sqlite3_vtab *tab,
+ sqlite3_index_info *pIdxInfo
+){
+ int i;
+ int jsonIdx = -1;
+ int rootIdx = -1;
+ const struct sqlite3_index_constraint *pConstraint;
+
+ UNUSED_PARAM(tab);
+ pConstraint = pIdxInfo->aConstraint;
+  for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
+ if( pConstraint->usable==0 ) continue;
+ if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+ switch( pConstraint->iColumn ){
+ case JEACH_JSON: jsonIdx = i; break;
+ case JEACH_ROOT: rootIdx = i; break;
+ default: /* no-op */ break;
+ }
+ }
+ if( jsonIdx<0 ){
+ pIdxInfo->idxNum = 0;
+ pIdxInfo->estimatedCost = 1e99;
+ }else{
+ pIdxInfo->estimatedCost = 1.0;
+ pIdxInfo->aConstraintUsage[jsonIdx].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[jsonIdx].omit = 1;
+ if( rootIdx<0 ){
+ pIdxInfo->idxNum = 1;
+ }else{
+ pIdxInfo->aConstraintUsage[rootIdx].argvIndex = 2;
+ pIdxInfo->aConstraintUsage[rootIdx].omit = 1;
+ pIdxInfo->idxNum = 3;
+ }
+ }
+ return SQLITE_OK;
+}
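+
+/* In SQL terms: idxNum==1 corresponds to a call like json_each(j),
+** idxNum==3 to json_each(j,'$.a') where the second argument becomes the
+** root path, and idxNum==0 means no usable json constraint, which is priced
+** at 1e99 so the planner avoids it and xFilter returns an empty scan.
+*/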
+
+/* Start a search on a new JSON string */
+static int jsonEachFilter(
+ sqlite3_vtab_cursor *cur,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ JsonEachCursor *p = (JsonEachCursor*)cur;
+ const char *z;
+ const char *zRoot = 0;
+ sqlite3_int64 n;
+
+ UNUSED_PARAM(idxStr);
+ UNUSED_PARAM(argc);
+ jsonEachCursorReset(p);
+ if( idxNum==0 ) return SQLITE_OK;
+ z = (const char*)sqlite3_value_text(argv[0]);
+ if( z==0 ) return SQLITE_OK;
+ n = sqlite3_value_bytes(argv[0]);
+ p->zJson = sqlite3_malloc64( n+1 );
+ if( p->zJson==0 ) return SQLITE_NOMEM;
+ memcpy(p->zJson, z, (size_t)n+1);
+ if( jsonParse(&p->sParse, 0, p->zJson) ){
+ int rc = SQLITE_NOMEM;
+ if( p->sParse.oom==0 ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON");
+ if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR;
+ }
+ jsonEachCursorReset(p);
+ return rc;
+ }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){
+ jsonEachCursorReset(p);
+ return SQLITE_NOMEM;
+ }else{
+ JsonNode *pNode = 0;
+ if( idxNum==3 ){
+ const char *zErr = 0;
+ zRoot = (const char*)sqlite3_value_text(argv[1]);
+ if( zRoot==0 ) return SQLITE_OK;
+ n = sqlite3_value_bytes(argv[1]);
+ p->zRoot = sqlite3_malloc64( n+1 );
+ if( p->zRoot==0 ) return SQLITE_NOMEM;
+ memcpy(p->zRoot, zRoot, (size_t)n+1);
+ if( zRoot[0]!='$' ){
+ zErr = zRoot;
+ }else{
+ pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr);
+ }
+ if( zErr ){
+ sqlite3_free(cur->pVtab->zErrMsg);
+ cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr);
+ jsonEachCursorReset(p);
+ return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM;
+ }else if( pNode==0 ){
+ return SQLITE_OK;
+ }
+ }else{
+ pNode = p->sParse.aNode;
+ }
+ p->iBegin = p->i = (int)(pNode - p->sParse.aNode);
+ p->eType = pNode->eType;
+ if( p->eType>=JSON_ARRAY ){
+ pNode->u.iKey = 0;
+ p->iEnd = p->i + pNode->n + 1;
+ if( p->bRecursive ){
+ p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType;
+ if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){
+ p->i--;
+ }
+ }else{
+ p->i++;
+ }
+ }else{
+ p->iEnd = p->i+1;
+ }
+ }
+ return SQLITE_OK;
+}
+
+/* The methods of the json_each virtual table */
+static sqlite3_module jsonEachModule = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ jsonEachConnect, /* xConnect */
+ jsonEachBestIndex, /* xBestIndex */
+ jsonEachDisconnect, /* xDisconnect */
+ 0, /* xDestroy */
+ jsonEachOpenEach, /* xOpen - open a cursor */
+ jsonEachClose, /* xClose - close a cursor */
+ jsonEachFilter, /* xFilter - configure scan constraints */
+ jsonEachNext, /* xNext - advance a cursor */
+ jsonEachEof, /* xEof - check for end of scan */
+ jsonEachColumn, /* xColumn - read data */
+ jsonEachRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0 /* xRollbackTo */
+};
+
+/* The methods of the json_tree virtual table. */
+static sqlite3_module jsonTreeModule = {
+ 0, /* iVersion */
+ 0, /* xCreate */
+ jsonEachConnect, /* xConnect */
+ jsonEachBestIndex, /* xBestIndex */
+ jsonEachDisconnect, /* xDisconnect */
+ 0, /* xDestroy */
+ jsonEachOpenTree, /* xOpen - open a cursor */
+ jsonEachClose, /* xClose - close a cursor */
+ jsonEachFilter, /* xFilter - configure scan constraints */
+ jsonEachNext, /* xNext - advance a cursor */
+ jsonEachEof, /* xEof - check for end of scan */
+ jsonEachColumn, /* xColumn - read data */
+ jsonEachRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0 /* xRollbackTo */
+};
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/****************************************************************************
+** The following routines are the only publically visible identifiers in this
+** file. Call the following routines in order to register the various SQL
+** functions and the virtual table implemented by this file.
+****************************************************************************/
+
+SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){
+ int rc = SQLITE_OK;
+ unsigned int i;
+ static const struct {
+ const char *zName;
+ int nArg;
+ int flag;
+ void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
+ } aFunc[] = {
+ { "json", 1, 0, jsonRemoveFunc },
+ { "json_array", -1, 0, jsonArrayFunc },
+ { "json_array_length", 1, 0, jsonArrayLengthFunc },
+ { "json_array_length", 2, 0, jsonArrayLengthFunc },
+ { "json_extract", -1, 0, jsonExtractFunc },
+ { "json_insert", -1, 0, jsonSetFunc },
+ { "json_object", -1, 0, jsonObjectFunc },
+ { "json_patch", 2, 0, jsonPatchFunc },
+ { "json_quote", 1, 0, jsonQuoteFunc },
+ { "json_remove", -1, 0, jsonRemoveFunc },
+ { "json_replace", -1, 0, jsonReplaceFunc },
+ { "json_set", -1, 1, jsonSetFunc },
+ { "json_type", 1, 0, jsonTypeFunc },
+ { "json_type", 2, 0, jsonTypeFunc },
+ { "json_valid", 1, 0, jsonValidFunc },
+
+#if SQLITE_DEBUG
+ /* DEBUG and TESTING functions */
+ { "json_parse", 1, 0, jsonParseFunc },
+ { "json_test1", 1, 0, jsonTest1Func },
+#endif
+ };
+ static const struct {
+ const char *zName;
+ int nArg;
+ void (*xStep)(sqlite3_context*,int,sqlite3_value**);
+ void (*xFinal)(sqlite3_context*);
+ } aAgg[] = {
+ { "json_group_array", 1, jsonArrayStep, jsonArrayFinal },
+ { "json_group_object", 2, jsonObjectStep, jsonObjectFinal },
+ };
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ static const struct {
+ const char *zName;
+ sqlite3_module *pModule;
+ } aMod[] = {
+ { "json_each", &jsonEachModule },
+ { "json_tree", &jsonTreeModule },
+ };
+#endif
+ for(i=0; i
**
@@ -197471,7 +190650,7 @@ struct Fts5ExtensionApi {
** extra data to the FTS index or require FTS5 to query for multiple terms,
** so it is efficient in terms of disk space and query speed. However, it
** does not support prefix queries very well. If, as suggested above, the
-** token "first" is substituted for "1st" by the tokenizer, then the query:
+** token "first" is subsituted for "1st" by the tokenizer, then the query:
**
**
** ... MATCH '1s*'
@@ -198363,12 +191542,9 @@ static int sqlite3Fts5VocabInit(Fts5Global*, sqlite3*);
/**************************************************************************
** Interface to automatically generated code in fts5_unicode2.c.
*/
+static int sqlite3Fts5UnicodeIsalnum(int c);
static int sqlite3Fts5UnicodeIsdiacritic(int c);
static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic);
-
-static int sqlite3Fts5UnicodeCatParse(const char*, u8*);
-static int sqlite3Fts5UnicodeCategory(int iCode);
-static void sqlite3Fts5UnicodeAscii(u8*, u8*);
/*
** End of interface to code in fts5_unicode2.c.
**************************************************************************/
@@ -198544,7 +191720,6 @@ typedef union {
#define fts5YY_MIN_REDUCE 83
#define fts5YY_MAX_REDUCE 110
/************* End control #defines *******************************************/
-#define fts5YY_NLOOKAHEAD ((int)(sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0])))
/* Define the fts5yytestcase() macro to be a no-op if is not already defined
** otherwise.
@@ -199104,11 +192279,11 @@ static fts5YYACTIONTYPE fts5yy_find_shift_action(
do{
i = fts5yy_shift_ofst[stateno];
assert( i>=0 );
- /* assert( i+fts5YYNFTS5TOKEN<=(int)fts5YY_NLOOKAHEAD ); */
+ assert( i+fts5YYNFTS5TOKEN<=(int)sizeof(fts5yy_lookahead)/sizeof(fts5yy_lookahead[0]) );
assert( iLookAhead!=fts5YYNOCODE );
assert( iLookAhead < fts5YYNFTS5TOKEN );
i += iLookAhead;
- if( i>=fts5YY_NLOOKAHEAD || fts5yy_lookahead[i]!=iLookAhead ){
+ if( fts5yy_lookahead[i]!=iLookAhead ){
#ifdef fts5YYFALLBACK
fts5YYCODETYPE iFallback; /* Fallback token */
if( iLookAhead=fts5YY_ACTTAB_COUNT
j0
){
#ifndef NDEBUG
@@ -199159,7 +192333,7 @@ static fts5YYACTIONTYPE fts5yy_find_shift_action(
** Find the appropriate action for a parser given the non-terminal
** look-ahead token iLookAhead.
*/
-static fts5YYACTIONTYPE fts5yy_find_reduce_action(
+static int fts5yy_find_reduce_action(
fts5YYACTIONTYPE stateno, /* Current state number */
fts5YYCODETYPE iLookAhead /* The look-ahead token */
){
@@ -199327,7 +192501,7 @@ static fts5YYACTIONTYPE fts5yy_reduce(
sqlite3Fts5ParserCTX_PDECL /* %extra_context */
){
int fts5yygoto; /* The next state */
- fts5YYACTIONTYPE fts5yyact; /* The next action */
+ int fts5yyact; /* The next action */
fts5yyStackEntry *fts5yymsp; /* The top of the parser's stack */
int fts5yysize; /* Amount to pop the stack */
sqlite3Fts5ParserARG_FETCH
@@ -199683,12 +192857,12 @@ static void sqlite3Fts5Parser(
do{
assert( fts5yyact==fts5yypParser->fts5yytos->stateno );
- fts5yyact = fts5yy_find_shift_action((fts5YYCODETYPE)fts5yymajor,fts5yyact);
+ fts5yyact = fts5yy_find_shift_action(fts5yymajor,fts5yyact);
if( fts5yyact >= fts5YY_MIN_REDUCE ){
fts5yyact = fts5yy_reduce(fts5yypParser,fts5yyact-fts5YY_MIN_REDUCE,fts5yymajor,
fts5yyminor sqlite3Fts5ParserCTX_PARAM);
}else if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){
- fts5yy_shift(fts5yypParser,fts5yyact,(fts5YYCODETYPE)fts5yymajor,fts5yyminor);
+ fts5yy_shift(fts5yypParser,fts5yyact,fts5yymajor,fts5yyminor);
#ifndef fts5YYNOERRORRECOVERY
fts5yypParser->fts5yyerrcnt--;
#endif
@@ -199816,21 +192990,6 @@ static void sqlite3Fts5Parser(
return;
}
-/*
-** Return the fallback token corresponding to canonical token iToken, or
-** 0 if iToken has no fallback.
-*/
-static int sqlite3Fts5ParserFallback(int iToken){
-#ifdef fts5YYFALLBACK
- if( iToken<(int)(sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0])) ){
- return fts5yyFallback[iToken];
- }
-#else
- (void)iToken;
-#endif
- return 0;
-}
-
/*
** 2014 May 31
**
@@ -201941,7 +195100,6 @@ static void sqlite3Fts5Parser(void*, int, Fts5Token, Fts5Parse*);
/* #include <stdio.h> */
static void sqlite3Fts5ParserTrace(FILE*, char*);
#endif
-static int sqlite3Fts5ParserFallback(int);
struct Fts5Expr {
@@ -204446,19 +197604,14 @@ static void fts5ExprIsAlnum(
sqlite3_value **apVal /* Function arguments */
){
int iCode;
- u8 aArr[32];
if( nArg!=1 ){
sqlite3_result_error(pCtx,
"wrong number of arguments to function fts5_isalnum", -1
);
return;
}
- memset(aArr, 0, sizeof(aArr));
- sqlite3Fts5UnicodeCatParse("L*", aArr);
- sqlite3Fts5UnicodeCatParse("N*", aArr);
- sqlite3Fts5UnicodeCatParse("Co", aArr);
iCode = sqlite3_value_int(apVal[0]);
- sqlite3_result_int(pCtx, aArr[sqlite3Fts5UnicodeCategory(iCode)]);
+ sqlite3_result_int(pCtx, sqlite3Fts5UnicodeIsalnum(iCode));
}
static void fts5ExprFold(
@@ -204502,12 +197655,10 @@ static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){
rc = sqlite3_create_function(db, p->z, -1, SQLITE_UTF8, pCtx, p->x, 0, 0);
}
- /* Avoid warnings indicating that sqlite3Fts5ParserTrace() and
- ** sqlite3Fts5ParserFallback() are unused */
+ /* Avoid a warning indicating that sqlite3Fts5ParserTrace() is unused */
#ifndef NDEBUG
(void)sqlite3Fts5ParserTrace;
#endif
- (void)sqlite3Fts5ParserFallback;
return rc;
}
@@ -210555,10 +203706,7 @@ static int sqlite3Fts5IndexCharlenToBytelen(
  for(i=0; i<nChar; i++){
    if( n>=nByte ) return 0;      /* Input contains fewer than nChar chars */
if( (unsigned char)p[n++]>=0xc0 ){
- while( (p[n] & 0xc0)==0x80 ){
- n++;
- if( n>=nByte ) break;
- }
+ while( (p[n] & 0xc0)==0x80 ) n++;
}
}
return n;
@@ -212083,7 +205231,7 @@ static void fts5CheckTransactionState(Fts5Table *p, int op, int iSavepoint){
case FTS5_SAVEPOINT:
assert( p->ts.eState==1 );
assert( iSavepoint>=0 );
- assert( iSavepoint>=p->ts.iSavepoint );
+ assert( iSavepoint>p->ts.iSavepoint );
p->ts.iSavepoint = iSavepoint;
break;
@@ -213008,13 +206156,6 @@ static int fts5FilterMethod(
assert( nVal==0 && pMatch==0 && bOrderByRank==0 && bDesc==0 );
assert( pCsr->iLastRowid==LARGEST_INT64 );
assert( pCsr->iFirstRowid==SMALLEST_INT64 );
- if( pTab->pSortCsr->bDesc ){
- pCsr->iLastRowid = pTab->pSortCsr->iFirstRowid;
- pCsr->iFirstRowid = pTab->pSortCsr->iLastRowid;
- }else{
- pCsr->iLastRowid = pTab->pSortCsr->iLastRowid;
- pCsr->iFirstRowid = pTab->pSortCsr->iFirstRowid;
- }
pCsr->ePlan = FTS5_PLAN_SOURCE;
pCsr->pExpr = pTab->pSortCsr->pExpr;
rc = fts5CursorFirst(pTab, pCsr, bDesc);
@@ -214445,7 +207586,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2018-06-04 19:24:41 c7ee0833225bfd8c5ec2f9bf62b97c4e04d03bd9566366d5221ac8fb199a87ca", -1, SQLITE_TRANSIENT);
}
static int fts5Init(sqlite3 *db){
@@ -215933,8 +209074,6 @@ struct Unicode61Tokenizer {
int bRemoveDiacritic; /* True if remove_diacritics=1 is set */
int nException;
int *aiException;
-
- unsigned char aCategory[32]; /* True for token char categories */
};
static int fts5UnicodeAddExceptions(
@@ -215959,7 +209098,7 @@ static int fts5UnicodeAddExceptions(
if( iCode<128 ){
p->aTokenChar[iCode] = (unsigned char)bTokenChars;
}else{
- bToken = p->aCategory[sqlite3Fts5UnicodeCategory(iCode)];
+ bToken = sqlite3Fts5UnicodeIsalnum(iCode);
assert( (bToken==0 || bToken==1) );
assert( (bTokenChars==0 || bTokenChars==1) );
if( bToken!=bTokenChars && sqlite3Fts5UnicodeIsdiacritic(iCode)==0 ){
@@ -216020,21 +209159,6 @@ static void fts5UnicodeDelete(Fts5Tokenizer *pTok){
return;
}
-static int unicodeSetCategories(Unicode61Tokenizer *p, const char *zCat){
- const char *z = zCat;
-
- while( *z ){
- while( *z==' ' || *z=='\t' ) z++;
- if( *z && sqlite3Fts5UnicodeCatParse(z, p->aCategory) ){
- return SQLITE_ERROR;
- }
- while( *z!=' ' && *z!='\t' && *z!='\0' ) z++;
- }
-
- sqlite3Fts5UnicodeAscii(p->aCategory, p->aTokenChar);
- return SQLITE_OK;
-}
-
/*
** Create a "unicode61" tokenizer.
*/
@@ -216053,28 +209177,15 @@ static int fts5UnicodeCreate(
}else{
p = (Unicode61Tokenizer*)sqlite3_malloc(sizeof(Unicode61Tokenizer));
if( p ){
- const char *zCat = "L* N* Co";
int i;
memset(p, 0, sizeof(Unicode61Tokenizer));
-
+ memcpy(p->aTokenChar, aAsciiTokenChar, sizeof(aAsciiTokenChar));
p->bRemoveDiacritic = 1;
p->nFold = 64;
p->aFold = sqlite3_malloc(p->nFold * sizeof(char));
if( p->aFold==0 ){
rc = SQLITE_NOMEM;
}
-
- /* Search for a "categories" argument */
- for(i=0; rc==SQLITE_OK && iaCategory[sqlite3Fts5UnicodeCategory(iCode)]
- ^ fts5UnicodeIsException(p, iCode)
- );
+ assert( (sqlite3Fts5UnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 );
+ return sqlite3Fts5UnicodeIsalnum(iCode) ^ fts5UnicodeIsException(p, iCode);
}
static int fts5UnicodeTokenize(
@@ -216993,6 +210098,135 @@ static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
/* #include <assert.h> */
+/*
+** Return true if the argument corresponds to a unicode codepoint
+** classified as either a letter or a number. Otherwise false.
+**
+** The results are undefined if the value passed to this function
+** is less than zero.
+*/
+static int sqlite3Fts5UnicodeIsalnum(int c){
+ /* Each unsigned integer in the following array corresponds to a contiguous
+ ** range of unicode codepoints that are not either letters or numbers (i.e.
+ ** codepoints for which this function should return 0).
+ **
+ ** The most significant 22 bits in each 32-bit value contain the first
+ ** codepoint in the range. The least significant 10 bits are used to store
+ ** the size of the range (always at least 1). In other words, the value
+ ** ((C<<22) + N) represents a range of N codepoints starting with codepoint
+ ** C. It is not possible to represent a range larger than 1023 codepoints
+ ** using this format.
+ */
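+  /* For example, the first entry below, 0x00000030, has (aEntry[i]>>10)==0
+  ** and (aEntry[i]&0x3FF)==48, i.e. codepoints 0..47 are excluded and
+  ** codepoint 48 ('0') is the first letter-or-number.  The search key
+  ** (c<<10)|0x3FF built below compares greater than or equal to every entry
+  ** whose range starts at or before c, so the binary search lands on the
+  ** last such range and c is alphanumeric iff it lies past that range's end.
+  */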
+ static const unsigned int aEntry[] = {
+ 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07,
+ 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01,
+ 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401,
+ 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01,
+ 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01,
+ 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802,
+ 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F,
+ 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401,
+ 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804,
+ 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403,
+ 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812,
+ 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001,
+ 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802,
+ 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805,
+ 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401,
+ 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03,
+ 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807,
+ 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001,
+ 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01,
+ 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804,
+ 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001,
+ 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802,
+ 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01,
+ 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06,
+ 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007,
+ 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006,
+ 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417,
+ 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14,
+ 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07,
+ 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01,
+ 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001,
+ 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802,
+ 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F,
+ 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002,
+ 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802,
+ 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006,
+ 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D,
+ 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802,
+ 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027,
+ 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403,
+ 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805,
+ 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04,
+ 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401,
+ 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005,
+ 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B,
+ 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A,
+ 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001,
+ 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 0x00BA001A, 0x00BA6C59,
+ 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807,
+ 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01,
+ 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E,
+ 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100,
+ 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10,
+ 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402,
+ 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804,
+ 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012,
+ 0x02A3E003, 0x02A4980A, 0x02A51C0D, 0x02A57C01, 0x02A60004,
+ 0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002,
+ 0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803,
+ 0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07,
+ 0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02,
+ 0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802,
+ 0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013,
+ 0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06,
+ 0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003,
+ 0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01,
+ 0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403,
+ 0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009,
+ 0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003,
+ 0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003,
+ 0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E,
+ 0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046,
+ 0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401,
+ 0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401,
+ 0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F,
+ 0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C,
+ 0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002,
+ 0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025,
+ 0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6,
+ 0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46,
+ 0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060,
+ 0x380400F0,
+ };
+ static const unsigned int aAscii[4] = {
+ 0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001,
+ };
+
+ if( (unsigned int)c<128 ){
+ return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 );
+ }else if( (unsigned int)c<(1<<22) ){
+ unsigned int key = (((unsigned int)c)<<10) | 0x000003FF;
+ int iRes = 0;
+ int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
+ int iLo = 0;
+ while( iHi>=iLo ){
+ int iTest = (iHi + iLo) / 2;
+ if( key >= aEntry[iTest] ){
+ iRes = iTest;
+ iLo = iTest+1;
+ }else{
+ iHi = iTest-1;
+ }
+ }
+    assert( aEntry[0]<key );
+    assert( key>=aEntry[iRes] );
+ return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF)));
+ }
+ return 1;
+}
/*
@@ -217205,539 +210439,6 @@ static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic){
return ret;
}
-
-#if 0
-static int sqlite3Fts5UnicodeNCat(void) {
- return 32;
-}
-#endif
-
-static int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){
- aArray[0] = 1;
- switch( zCat[0] ){
- case 'C':
- switch( zCat[1] ){
- case 'c': aArray[1] = 1; break;
- case 'f': aArray[2] = 1; break;
- case 'n': aArray[3] = 1; break;
- case 's': aArray[4] = 1; break;
- case 'o': aArray[31] = 1; break;
- case '*':
- aArray[1] = 1;
- aArray[2] = 1;
- aArray[3] = 1;
- aArray[4] = 1;
- aArray[31] = 1;
- break;
- default: return 1; }
- break;
-
- case 'L':
- switch( zCat[1] ){
- case 'l': aArray[5] = 1; break;
- case 'm': aArray[6] = 1; break;
- case 'o': aArray[7] = 1; break;
- case 't': aArray[8] = 1; break;
- case 'u': aArray[9] = 1; break;
- case 'C': aArray[30] = 1; break;
- case '*':
- aArray[5] = 1;
- aArray[6] = 1;
- aArray[7] = 1;
- aArray[8] = 1;
- aArray[9] = 1;
- aArray[30] = 1;
- break;
- default: return 1; }
- break;
-
- case 'M':
- switch( zCat[1] ){
- case 'c': aArray[10] = 1; break;
- case 'e': aArray[11] = 1; break;
- case 'n': aArray[12] = 1; break;
- case '*':
- aArray[10] = 1;
- aArray[11] = 1;
- aArray[12] = 1;
- break;
- default: return 1; }
- break;
-
- case 'N':
- switch( zCat[1] ){
- case 'd': aArray[13] = 1; break;
- case 'l': aArray[14] = 1; break;
- case 'o': aArray[15] = 1; break;
- case '*':
- aArray[13] = 1;
- aArray[14] = 1;
- aArray[15] = 1;
- break;
- default: return 1; }
- break;
-
- case 'P':
- switch( zCat[1] ){
- case 'c': aArray[16] = 1; break;
- case 'd': aArray[17] = 1; break;
- case 'e': aArray[18] = 1; break;
- case 'f': aArray[19] = 1; break;
- case 'i': aArray[20] = 1; break;
- case 'o': aArray[21] = 1; break;
- case 's': aArray[22] = 1; break;
- case '*':
- aArray[16] = 1;
- aArray[17] = 1;
- aArray[18] = 1;
- aArray[19] = 1;
- aArray[20] = 1;
- aArray[21] = 1;
- aArray[22] = 1;
- break;
- default: return 1; }
- break;
-
- case 'S':
- switch( zCat[1] ){
- case 'c': aArray[23] = 1; break;
- case 'k': aArray[24] = 1; break;
- case 'm': aArray[25] = 1; break;
- case 'o': aArray[26] = 1; break;
- case '*':
- aArray[23] = 1;
- aArray[24] = 1;
- aArray[25] = 1;
- aArray[26] = 1;
- break;
- default: return 1; }
- break;
-
- case 'Z':
- switch( zCat[1] ){
- case 'l': aArray[27] = 1; break;
- case 'p': aArray[28] = 1; break;
- case 's': aArray[29] = 1; break;
- case '*':
- aArray[27] = 1;
- aArray[28] = 1;
- aArray[29] = 1;
- break;
- default: return 1; }
- break;
-
- }
- return 0;
-}
-
-static u16 aFts5UnicodeBlock[] = {
- 0, 1471, 1753, 1760, 1760, 1760, 1760, 1760, 1760, 1760,
- 1760, 1760, 1760, 1760, 1760, 1763, 1765,
- };
-static u16 aFts5UnicodeMap[] = {
- 0, 32, 33, 36, 37, 40, 41, 42, 43, 44,
- 45, 46, 48, 58, 60, 63, 65, 91, 92, 93,
- 94, 95, 96, 97, 123, 124, 125, 126, 127, 160,
- 161, 162, 166, 167, 168, 169, 170, 171, 172, 173,
- 174, 175, 176, 177, 178, 180, 181, 182, 184, 185,
- 186, 187, 188, 191, 192, 215, 216, 223, 247, 248,
- 256, 312, 313, 329, 330, 377, 383, 385, 387, 388,
- 391, 394, 396, 398, 402, 403, 405, 406, 409, 412,
- 414, 415, 417, 418, 423, 427, 428, 431, 434, 436,
- 437, 440, 442, 443, 444, 446, 448, 452, 453, 454,
- 455, 456, 457, 458, 459, 460, 461, 477, 478, 496,
- 497, 498, 499, 500, 503, 505, 506, 564, 570, 572,
- 573, 575, 577, 580, 583, 584, 592, 660, 661, 688,
- 706, 710, 722, 736, 741, 748, 749, 750, 751, 768,
- 880, 884, 885, 886, 890, 891, 894, 900, 902, 903,
- 904, 908, 910, 912, 913, 931, 940, 975, 977, 978,
- 981, 984, 1008, 1012, 1014, 1015, 1018, 1020, 1021, 1072,
- 1120, 1154, 1155, 1160, 1162, 1217, 1231, 1232, 1329, 1369,
- 1370, 1377, 1417, 1418, 1423, 1425, 1470, 1471, 1472, 1473,
- 1475, 1476, 1478, 1479, 1488, 1520, 1523, 1536, 1542, 1545,
- 1547, 1548, 1550, 1552, 1563, 1566, 1568, 1600, 1601, 1611,
- 1632, 1642, 1646, 1648, 1649, 1748, 1749, 1750, 1757, 1758,
- 1759, 1765, 1767, 1769, 1770, 1774, 1776, 1786, 1789, 1791,
- 1792, 1807, 1808, 1809, 1810, 1840, 1869, 1958, 1969, 1984,
- 1994, 2027, 2036, 2038, 2039, 2042, 2048, 2070, 2074, 2075,
- 2084, 2085, 2088, 2089, 2096, 2112, 2137, 2142, 2208, 2210,
- 2276, 2304, 2307, 2308, 2362, 2363, 2364, 2365, 2366, 2369,
- 2377, 2381, 2382, 2384, 2385, 2392, 2402, 2404, 2406, 2416,
- 2417, 2418, 2425, 2433, 2434, 2437, 2447, 2451, 2474, 2482,
- 2486, 2492, 2493, 2494, 2497, 2503, 2507, 2509, 2510, 2519,
- 2524, 2527, 2530, 2534, 2544, 2546, 2548, 2554, 2555, 2561,
- 2563, 2565, 2575, 2579, 2602, 2610, 2613, 2616, 2620, 2622,
- 2625, 2631, 2635, 2641, 2649, 2654, 2662, 2672, 2674, 2677,
- 2689, 2691, 2693, 2703, 2707, 2730, 2738, 2741, 2748, 2749,
- 2750, 2753, 2759, 2761, 2763, 2765, 2768, 2784, 2786, 2790,
- 2800, 2801, 2817, 2818, 2821, 2831, 2835, 2858, 2866, 2869,
- 2876, 2877, 2878, 2879, 2880, 2881, 2887, 2891, 2893, 2902,
- 2903, 2908, 2911, 2914, 2918, 2928, 2929, 2930, 2946, 2947,
- 2949, 2958, 2962, 2969, 2972, 2974, 2979, 2984, 2990, 3006,
- 3008, 3009, 3014, 3018, 3021, 3024, 3031, 3046, 3056, 3059,
- 3065, 3066, 3073, 3077, 3086, 3090, 3114, 3125, 3133, 3134,
- 3137, 3142, 3146, 3157, 3160, 3168, 3170, 3174, 3192, 3199,
- 3202, 3205, 3214, 3218, 3242, 3253, 3260, 3261, 3262, 3263,
- 3264, 3270, 3271, 3274, 3276, 3285, 3294, 3296, 3298, 3302,
- 3313, 3330, 3333, 3342, 3346, 3389, 3390, 3393, 3398, 3402,
- 3405, 3406, 3415, 3424, 3426, 3430, 3440, 3449, 3450, 3458,
- 3461, 3482, 3507, 3517, 3520, 3530, 3535, 3538, 3542, 3544,
- 3570, 3572, 3585, 3633, 3634, 3636, 3647, 3648, 3654, 3655,
- 3663, 3664, 3674, 3713, 3716, 3719, 3722, 3725, 3732, 3737,
- 3745, 3749, 3751, 3754, 3757, 3761, 3762, 3764, 3771, 3773,
- 3776, 3782, 3784, 3792, 3804, 3840, 3841, 3844, 3859, 3860,
- 3861, 3864, 3866, 3872, 3882, 3892, 3893, 3894, 3895, 3896,
- 3897, 3898, 3899, 3900, 3901, 3902, 3904, 3913, 3953, 3967,
- 3968, 3973, 3974, 3976, 3981, 3993, 4030, 4038, 4039, 4046,
- 4048, 4053, 4057, 4096, 4139, 4141, 4145, 4146, 4152, 4153,
- 4155, 4157, 4159, 4160, 4170, 4176, 4182, 4184, 4186, 4190,
- 4193, 4194, 4197, 4199, 4206, 4209, 4213, 4226, 4227, 4229,
- 4231, 4237, 4238, 4239, 4240, 4250, 4253, 4254, 4256, 4295,
- 4301, 4304, 4347, 4348, 4349, 4682, 4688, 4696, 4698, 4704,
- 4746, 4752, 4786, 4792, 4800, 4802, 4808, 4824, 4882, 4888,
- 4957, 4960, 4969, 4992, 5008, 5024, 5120, 5121, 5741, 5743,
- 5760, 5761, 5787, 5788, 5792, 5867, 5870, 5888, 5902, 5906,
- 5920, 5938, 5941, 5952, 5970, 5984, 5998, 6002, 6016, 6068,
- 6070, 6071, 6078, 6086, 6087, 6089, 6100, 6103, 6104, 6107,
- 6108, 6109, 6112, 6128, 6144, 6150, 6151, 6155, 6158, 6160,
- 6176, 6211, 6212, 6272, 6313, 6314, 6320, 6400, 6432, 6435,
- 6439, 6441, 6448, 6450, 6451, 6457, 6464, 6468, 6470, 6480,
- 6512, 6528, 6576, 6593, 6600, 6608, 6618, 6622, 6656, 6679,
- 6681, 6686, 6688, 6741, 6742, 6743, 6744, 6752, 6753, 6754,
- 6755, 6757, 6765, 6771, 6783, 6784, 6800, 6816, 6823, 6824,
- 6912, 6916, 6917, 6964, 6965, 6966, 6971, 6972, 6973, 6978,
- 6979, 6981, 6992, 7002, 7009, 7019, 7028, 7040, 7042, 7043,
- 7073, 7074, 7078, 7080, 7082, 7083, 7084, 7086, 7088, 7098,
- 7142, 7143, 7144, 7146, 7149, 7150, 7151, 7154, 7164, 7168,
- 7204, 7212, 7220, 7222, 7227, 7232, 7245, 7248, 7258, 7288,
- 7294, 7360, 7376, 7379, 7380, 7393, 7394, 7401, 7405, 7406,
- 7410, 7412, 7413, 7424, 7468, 7531, 7544, 7545, 7579, 7616,
- 7676, 7680, 7830, 7838, 7936, 7944, 7952, 7960, 7968, 7976,
- 7984, 7992, 8000, 8008, 8016, 8025, 8027, 8029, 8031, 8033,
- 8040, 8048, 8064, 8072, 8080, 8088, 8096, 8104, 8112, 8118,
- 8120, 8124, 8125, 8126, 8127, 8130, 8134, 8136, 8140, 8141,
- 8144, 8150, 8152, 8157, 8160, 8168, 8173, 8178, 8182, 8184,
- 8188, 8189, 8192, 8203, 8208, 8214, 8216, 8217, 8218, 8219,
- 8221, 8222, 8223, 8224, 8232, 8233, 8234, 8239, 8240, 8249,
- 8250, 8251, 8255, 8257, 8260, 8261, 8262, 8263, 8274, 8275,
- 8276, 8277, 8287, 8288, 8298, 8304, 8305, 8308, 8314, 8317,
- 8318, 8319, 8320, 8330, 8333, 8334, 8336, 8352, 8400, 8413,
- 8417, 8418, 8421, 8448, 8450, 8451, 8455, 8456, 8458, 8459,
- 8462, 8464, 8467, 8468, 8469, 8470, 8472, 8473, 8478, 8484,
- 8485, 8486, 8487, 8488, 8489, 8490, 8494, 8495, 8496, 8500,
- 8501, 8505, 8506, 8508, 8510, 8512, 8517, 8519, 8522, 8523,
- 8524, 8526, 8527, 8528, 8544, 8579, 8581, 8585, 8592, 8597,
- 8602, 8604, 8608, 8609, 8611, 8612, 8614, 8615, 8622, 8623,
- 8654, 8656, 8658, 8659, 8660, 8661, 8692, 8960, 8968, 8972,
- 8992, 8994, 9001, 9002, 9003, 9084, 9085, 9115, 9140, 9180,
- 9186, 9216, 9280, 9312, 9372, 9450, 9472, 9655, 9656, 9665,
- 9666, 9720, 9728, 9839, 9840, 9985, 10088, 10089, 10090, 10091,
- 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100, 10101,
- 10102, 10132, 10176, 10181, 10182, 10183, 10214, 10215, 10216, 10217,
- 10218, 10219, 10220, 10221, 10222, 10223, 10224, 10240, 10496, 10627,
- 10628, 10629, 10630, 10631, 10632, 10633, 10634, 10635, 10636, 10637,
- 10638, 10639, 10640, 10641, 10642, 10643, 10644, 10645, 10646, 10647,
- 10648, 10649, 10712, 10713, 10714, 10715, 10716, 10748, 10749, 10750,
- 11008, 11056, 11077, 11079, 11088, 11264, 11312, 11360, 11363, 11365,
- 11367, 11374, 11377, 11378, 11380, 11381, 11383, 11388, 11390, 11393,
- 11394, 11492, 11493, 11499, 11503, 11506, 11513, 11517, 11518, 11520,
- 11559, 11565, 11568, 11631, 11632, 11647, 11648, 11680, 11688, 11696,
- 11704, 11712, 11720, 11728, 11736, 11744, 11776, 11778, 11779, 11780,
- 11781, 11782, 11785, 11786, 11787, 11788, 11789, 11790, 11799, 11800,
- 11802, 11803, 11804, 11805, 11806, 11808, 11809, 11810, 11811, 11812,
- 11813, 11814, 11815, 11816, 11817, 11818, 11823, 11824, 11834, 11904,
- 11931, 12032, 12272, 12288, 12289, 12292, 12293, 12294, 12295, 12296,
- 12297, 12298, 12299, 12300, 12301, 12302, 12303, 12304, 12305, 12306,
- 12308, 12309, 12310, 12311, 12312, 12313, 12314, 12315, 12316, 12317,
- 12318, 12320, 12321, 12330, 12334, 12336, 12337, 12342, 12344, 12347,
- 12348, 12349, 12350, 12353, 12441, 12443, 12445, 12447, 12448, 12449,
- 12539, 12540, 12543, 12549, 12593, 12688, 12690, 12694, 12704, 12736,
- 12784, 12800, 12832, 12842, 12872, 12880, 12881, 12896, 12928, 12938,
- 12977, 12992, 13056, 13312, 19893, 19904, 19968, 40908, 40960, 40981,
- 40982, 42128, 42192, 42232, 42238, 42240, 42508, 42509, 42512, 42528,
- 42538, 42560, 42606, 42607, 42608, 42611, 42612, 42622, 42623, 42624,
- 42655, 42656, 42726, 42736, 42738, 42752, 42775, 42784, 42786, 42800,
- 42802, 42864, 42865, 42873, 42878, 42888, 42889, 42891, 42896, 42912,
- 43000, 43002, 43003, 43010, 43011, 43014, 43015, 43019, 43020, 43043,
- 43045, 43047, 43048, 43056, 43062, 43064, 43065, 43072, 43124, 43136,
- 43138, 43188, 43204, 43214, 43216, 43232, 43250, 43256, 43259, 43264,
- 43274, 43302, 43310, 43312, 43335, 43346, 43359, 43360, 43392, 43395,
- 43396, 43443, 43444, 43446, 43450, 43452, 43453, 43457, 43471, 43472,
- 43486, 43520, 43561, 43567, 43569, 43571, 43573, 43584, 43587, 43588,
- 43596, 43597, 43600, 43612, 43616, 43632, 43633, 43639, 43642, 43643,
- 43648, 43696, 43697, 43698, 43701, 43703, 43705, 43710, 43712, 43713,
- 43714, 43739, 43741, 43742, 43744, 43755, 43756, 43758, 43760, 43762,
- 43763, 43765, 43766, 43777, 43785, 43793, 43808, 43816, 43968, 44003,
- 44005, 44006, 44008, 44009, 44011, 44012, 44013, 44016, 44032, 55203,
- 55216, 55243, 55296, 56191, 56319, 57343, 57344, 63743, 63744, 64112,
- 64256, 64275, 64285, 64286, 64287, 64297, 64298, 64312, 64318, 64320,
- 64323, 64326, 64434, 64467, 64830, 64831, 64848, 64914, 65008, 65020,
- 65021, 65024, 65040, 65047, 65048, 65049, 65056, 65072, 65073, 65075,
- 65077, 65078, 65079, 65080, 65081, 65082, 65083, 65084, 65085, 65086,
- 65087, 65088, 65089, 65090, 65091, 65092, 65093, 65095, 65096, 65097,
- 65101, 65104, 65108, 65112, 65113, 65114, 65115, 65116, 65117, 65118,
- 65119, 65122, 65123, 65124, 65128, 65129, 65130, 65136, 65142, 65279,
- 65281, 65284, 65285, 65288, 65289, 65290, 65291, 65292, 65293, 65294,
- 65296, 65306, 65308, 65311, 65313, 65339, 65340, 65341, 65342, 65343,
- 65344, 65345, 65371, 65372, 65373, 65374, 65375, 65376, 65377, 65378,
- 65379, 65380, 65382, 65392, 65393, 65438, 65440, 65474, 65482, 65490,
- 65498, 65504, 65506, 65507, 65508, 65509, 65512, 65513, 65517, 65529,
- 65532, 0, 13, 40, 60, 63, 80, 128, 256, 263,
- 311, 320, 373, 377, 394, 400, 464, 509, 640, 672,
- 768, 800, 816, 833, 834, 842, 896, 927, 928, 968,
- 976, 977, 1024, 1064, 1104, 1184, 2048, 2056, 2058, 2103,
- 2108, 2111, 2135, 2136, 2304, 2326, 2335, 2336, 2367, 2432,
- 2494, 2560, 2561, 2565, 2572, 2576, 2581, 2585, 2616, 2623,
- 2624, 2640, 2656, 2685, 2687, 2816, 2873, 2880, 2904, 2912,
- 2936, 3072, 3680, 4096, 4097, 4098, 4099, 4152, 4167, 4178,
- 4198, 4224, 4226, 4227, 4272, 4275, 4279, 4281, 4283, 4285,
- 4286, 4304, 4336, 4352, 4355, 4391, 4396, 4397, 4406, 4416,
- 4480, 4482, 4483, 4531, 4534, 4543, 4545, 4549, 4560, 5760,
- 5803, 5804, 5805, 5806, 5808, 5814, 5815, 5824, 8192, 9216,
- 9328, 12288, 26624, 28416, 28496, 28497, 28559, 28563, 45056, 53248,
- 53504, 53545, 53605, 53607, 53610, 53613, 53619, 53627, 53635, 53637,
- 53644, 53674, 53678, 53760, 53826, 53829, 54016, 54112, 54272, 54298,
- 54324, 54350, 54358, 54376, 54402, 54428, 54430, 54434, 54437, 54441,
- 54446, 54454, 54459, 54461, 54469, 54480, 54506, 54532, 54535, 54541,
- 54550, 54558, 54584, 54587, 54592, 54598, 54602, 54610, 54636, 54662,
- 54688, 54714, 54740, 54766, 54792, 54818, 54844, 54870, 54896, 54922,
- 54952, 54977, 54978, 55003, 55004, 55010, 55035, 55036, 55061, 55062,
- 55068, 55093, 55094, 55119, 55120, 55126, 55151, 55152, 55177, 55178,
- 55184, 55209, 55210, 55235, 55236, 55242, 55246, 60928, 60933, 60961,
- 60964, 60967, 60969, 60980, 60985, 60987, 60994, 60999, 61001, 61003,
- 61005, 61009, 61012, 61015, 61017, 61019, 61021, 61023, 61025, 61028,
- 61031, 61036, 61044, 61049, 61054, 61056, 61067, 61089, 61093, 61099,
- 61168, 61440, 61488, 61600, 61617, 61633, 61649, 61696, 61712, 61744,
- 61808, 61926, 61968, 62016, 62032, 62208, 62256, 62263, 62336, 62368,
- 62406, 62432, 62464, 62528, 62530, 62713, 62720, 62784, 62800, 62971,
- 63045, 63104, 63232, 0, 42710, 42752, 46900, 46912, 47133, 63488,
- 1, 32, 256, 0, 65533,
- };
-static u16 aFts5UnicodeData[] = {
- 1025, 61, 117, 55, 117, 54, 50, 53, 57, 53,
- 49, 85, 333, 85, 121, 85, 841, 54, 53, 50,
- 56, 48, 56, 837, 54, 57, 50, 57, 1057, 61,
- 53, 151, 58, 53, 56, 58, 39, 52, 57, 34,
- 58, 56, 58, 57, 79, 56, 37, 85, 56, 47,
- 39, 51, 111, 53, 745, 57, 233, 773, 57, 261,
- 1822, 37, 542, 37, 1534, 222, 69, 73, 37, 126,
- 126, 73, 69, 137, 37, 73, 37, 105, 101, 73,
- 37, 73, 37, 190, 158, 37, 126, 126, 73, 37,
- 126, 94, 37, 39, 94, 69, 135, 41, 40, 37,
- 41, 40, 37, 41, 40, 37, 542, 37, 606, 37,
- 41, 40, 37, 126, 73, 37, 1886, 197, 73, 37,
- 73, 69, 126, 105, 37, 286, 2181, 39, 869, 582,
- 152, 390, 472, 166, 248, 38, 56, 38, 568, 3596,
- 158, 38, 56, 94, 38, 101, 53, 88, 41, 53,
- 105, 41, 73, 37, 553, 297, 1125, 94, 37, 105,
- 101, 798, 133, 94, 57, 126, 94, 37, 1641, 1541,
- 1118, 58, 172, 75, 1790, 478, 37, 2846, 1225, 38,
- 213, 1253, 53, 49, 55, 1452, 49, 44, 53, 76,
- 53, 76, 53, 44, 871, 103, 85, 162, 121, 85,
- 55, 85, 90, 364, 53, 85, 1031, 38, 327, 684,
- 333, 149, 71, 44, 3175, 53, 39, 236, 34, 58,
- 204, 70, 76, 58, 140, 71, 333, 103, 90, 39,
- 469, 34, 39, 44, 967, 876, 2855, 364, 39, 333,
- 1063, 300, 70, 58, 117, 38, 711, 140, 38, 300,
- 38, 108, 38, 172, 501, 807, 108, 53, 39, 359,
- 876, 108, 42, 1735, 44, 42, 44, 39, 106, 268,
- 138, 44, 74, 39, 236, 327, 76, 85, 333, 53,
- 38, 199, 231, 44, 74, 263, 71, 711, 231, 39,
- 135, 44, 39, 106, 140, 74, 74, 44, 39, 42,
- 71, 103, 76, 333, 71, 87, 207, 58, 55, 76,
- 42, 199, 71, 711, 231, 71, 71, 71, 44, 106,
- 76, 76, 108, 44, 135, 39, 333, 76, 103, 44,
- 76, 42, 295, 103, 711, 231, 71, 167, 44, 39,
- 106, 172, 76, 42, 74, 44, 39, 71, 76, 333,
- 53, 55, 44, 74, 263, 71, 711, 231, 71, 167,
- 44, 39, 42, 44, 42, 140, 74, 74, 44, 44,
- 42, 71, 103, 76, 333, 58, 39, 207, 44, 39,
- 199, 103, 135, 71, 39, 71, 71, 103, 391, 74,
- 44, 74, 106, 106, 44, 39, 42, 333, 111, 218,
- 55, 58, 106, 263, 103, 743, 327, 167, 39, 108,
- 138, 108, 140, 76, 71, 71, 76, 333, 239, 58,
- 74, 263, 103, 743, 327, 167, 44, 39, 42, 44,
- 170, 44, 74, 74, 76, 74, 39, 71, 76, 333,
- 71, 74, 263, 103, 1319, 39, 106, 140, 106, 106,
- 44, 39, 42, 71, 76, 333, 207, 58, 199, 74,
- 583, 775, 295, 39, 231, 44, 106, 108, 44, 266,
- 74, 53, 1543, 44, 71, 236, 55, 199, 38, 268,
- 53, 333, 85, 71, 39, 71, 39, 39, 135, 231,
- 103, 39, 39, 71, 135, 44, 71, 204, 76, 39,
- 167, 38, 204, 333, 135, 39, 122, 501, 58, 53,
- 122, 76, 218, 333, 335, 58, 44, 58, 44, 58,
- 44, 54, 50, 54, 50, 74, 263, 1159, 460, 42,
- 172, 53, 76, 167, 364, 1164, 282, 44, 218, 90,
- 181, 154, 85, 1383, 74, 140, 42, 204, 42, 76,
- 74, 76, 39, 333, 213, 199, 74, 76, 135, 108,
- 39, 106, 71, 234, 103, 140, 423, 44, 74, 76,
- 202, 44, 39, 42, 333, 106, 44, 90, 1225, 41,
- 41, 1383, 53, 38, 10631, 135, 231, 39, 135, 1319,
- 135, 1063, 135, 231, 39, 135, 487, 1831, 135, 2151,
- 108, 309, 655, 519, 346, 2727, 49, 19847, 85, 551,
- 61, 839, 54, 50, 2407, 117, 110, 423, 135, 108,
- 583, 108, 85, 583, 76, 423, 103, 76, 1671, 76,
- 42, 236, 266, 44, 74, 364, 117, 38, 117, 55,
- 39, 44, 333, 335, 213, 49, 149, 108, 61, 333,
- 1127, 38, 1671, 1319, 44, 39, 2247, 935, 108, 138,
- 76, 106, 74, 44, 202, 108, 58, 85, 333, 967,
- 167, 1415, 554, 231, 74, 333, 47, 1114, 743, 76,
- 106, 85, 1703, 42, 44, 42, 236, 44, 42, 44,
- 74, 268, 202, 332, 44, 333, 333, 245, 38, 213,
- 140, 42, 1511, 44, 42, 172, 42, 44, 170, 44,
- 74, 231, 333, 245, 346, 300, 314, 76, 42, 967,
- 42, 140, 74, 76, 42, 44, 74, 71, 333, 1415,
- 44, 42, 76, 106, 44, 42, 108, 74, 149, 1159,
- 266, 268, 74, 76, 181, 333, 103, 333, 967, 198,
- 85, 277, 108, 53, 428, 42, 236, 135, 44, 135,
- 74, 44, 71, 1413, 2022, 421, 38, 1093, 1190, 1260,
- 140, 4830, 261, 3166, 261, 265, 197, 201, 261, 265,
- 261, 265, 197, 201, 261, 41, 41, 41, 94, 229,
- 265, 453, 261, 264, 261, 264, 261, 264, 165, 69,
- 137, 40, 56, 37, 120, 101, 69, 137, 40, 120,
- 133, 69, 137, 120, 261, 169, 120, 101, 69, 137,
- 40, 88, 381, 162, 209, 85, 52, 51, 54, 84,
- 51, 54, 52, 277, 59, 60, 162, 61, 309, 52,
- 51, 149, 80, 117, 57, 54, 50, 373, 57, 53,
- 48, 341, 61, 162, 194, 47, 38, 207, 121, 54,
- 50, 38, 335, 121, 54, 50, 422, 855, 428, 139,
- 44, 107, 396, 90, 41, 154, 41, 90, 37, 105,
- 69, 105, 37, 58, 41, 90, 57, 169, 218, 41,
- 58, 41, 58, 41, 58, 137, 58, 37, 137, 37,
- 135, 37, 90, 69, 73, 185, 94, 101, 58, 57,
- 90, 37, 58, 527, 1134, 94, 142, 47, 185, 186,
- 89, 154, 57, 90, 57, 90, 57, 250, 57, 1018,
- 89, 90, 57, 58, 57, 1018, 8601, 282, 153, 666,
- 89, 250, 54, 50, 2618, 57, 986, 825, 1306, 217,
- 602, 1274, 378, 1935, 2522, 719, 5882, 57, 314, 57,
- 1754, 281, 3578, 57, 4634, 3322, 54, 50, 54, 50,
- 54, 50, 54, 50, 54, 50, 54, 50, 54, 50,
- 975, 1434, 185, 54, 50, 1017, 54, 50, 54, 50,
- 54, 50, 54, 50, 54, 50, 537, 8218, 4217, 54,
- 50, 54, 50, 54, 50, 54, 50, 54, 50, 54,
- 50, 54, 50, 54, 50, 54, 50, 54, 50, 54,
- 50, 2041, 54, 50, 54, 50, 1049, 54, 50, 8281,
- 1562, 697, 90, 217, 346, 1513, 1509, 126, 73, 69,
- 254, 105, 37, 94, 37, 94, 165, 70, 105, 37,
- 3166, 37, 218, 158, 108, 94, 149, 47, 85, 1221,
- 37, 37, 1799, 38, 53, 44, 743, 231, 231, 231,
- 231, 231, 231, 231, 231, 1036, 85, 52, 51, 52,
- 51, 117, 52, 51, 53, 52, 51, 309, 49, 85,
- 49, 53, 52, 51, 85, 52, 51, 54, 50, 54,
- 50, 54, 50, 54, 50, 181, 38, 341, 81, 858,
- 2874, 6874, 410, 61, 117, 58, 38, 39, 46, 54,
- 50, 54, 50, 54, 50, 54, 50, 54, 50, 90,
- 54, 50, 54, 50, 54, 50, 54, 50, 49, 54,
- 82, 58, 302, 140, 74, 49, 166, 90, 110, 38,
- 39, 53, 90, 2759, 76, 88, 70, 39, 49, 2887,
- 53, 102, 39, 1319, 3015, 90, 143, 346, 871, 1178,
- 519, 1018, 335, 986, 271, 58, 495, 1050, 335, 1274,
- 495, 2042, 8218, 39, 39, 2074, 39, 39, 679, 38,
- 36583, 1786, 1287, 198, 85, 8583, 38, 117, 519, 333,
- 71, 1502, 39, 44, 107, 53, 332, 53, 38, 798,
- 44, 2247, 334, 76, 213, 760, 294, 88, 478, 69,
- 2014, 38, 261, 190, 350, 38, 88, 158, 158, 382,
- 70, 37, 231, 44, 103, 44, 135, 44, 743, 74,
- 76, 42, 154, 207, 90, 55, 58, 1671, 149, 74,
- 1607, 522, 44, 85, 333, 588, 199, 117, 39, 333,
- 903, 268, 85, 743, 364, 74, 53, 935, 108, 42,
- 1511, 44, 74, 140, 74, 44, 138, 437, 38, 333,
- 85, 1319, 204, 74, 76, 74, 76, 103, 44, 263,
- 44, 42, 333, 149, 519, 38, 199, 122, 39, 42,
- 1543, 44, 39, 108, 71, 76, 167, 76, 39, 44,
- 39, 71, 38, 85, 359, 42, 76, 74, 85, 39,
- 70, 42, 44, 199, 199, 199, 231, 231, 1127, 74,
- 44, 74, 44, 74, 53, 42, 44, 333, 39, 39,
- 743, 1575, 36, 68, 68, 36, 63, 63, 11719, 3399,
- 229, 165, 39, 44, 327, 57, 423, 167, 39, 71,
- 71, 3463, 536, 11623, 54, 50, 2055, 1735, 391, 55,
- 58, 524, 245, 54, 50, 53, 236, 53, 81, 80,
- 54, 50, 54, 50, 54, 50, 54, 50, 54, 50,
- 54, 50, 54, 50, 54, 50, 85, 54, 50, 149,
- 112, 117, 149, 49, 54, 50, 54, 50, 54, 50,
- 117, 57, 49, 121, 53, 55, 85, 167, 4327, 34,
- 117, 55, 117, 54, 50, 53, 57, 53, 49, 85,
- 333, 85, 121, 85, 841, 54, 53, 50, 56, 48,
- 56, 837, 54, 57, 50, 57, 54, 50, 53, 54,
- 50, 85, 327, 38, 1447, 70, 999, 199, 199, 199,
- 103, 87, 57, 56, 58, 87, 58, 153, 90, 98,
- 90, 391, 839, 615, 71, 487, 455, 3943, 117, 1455,
- 314, 1710, 143, 570, 47, 410, 1466, 44, 935, 1575,
- 999, 143, 551, 46, 263, 46, 967, 53, 1159, 263,
- 53, 174, 1289, 1285, 2503, 333, 199, 39, 1415, 71,
- 39, 743, 53, 271, 711, 207, 53, 839, 53, 1799,
- 71, 39, 108, 76, 140, 135, 103, 871, 108, 44,
- 271, 309, 935, 79, 53, 1735, 245, 711, 271, 615,
- 271, 2343, 1007, 42, 44, 42, 1703, 492, 245, 655,
- 333, 76, 42, 1447, 106, 140, 74, 76, 85, 34,
- 149, 807, 333, 108, 1159, 172, 42, 268, 333, 149,
- 76, 42, 1543, 106, 300, 74, 135, 149, 333, 1383,
- 44, 42, 44, 74, 204, 42, 44, 333, 28135, 3182,
- 149, 34279, 18215, 2215, 39, 1482, 140, 422, 71, 7898,
- 1274, 1946, 74, 108, 122, 202, 258, 268, 90, 236,
- 986, 140, 1562, 2138, 108, 58, 2810, 591, 841, 837,
- 841, 229, 581, 841, 837, 41, 73, 41, 73, 137,
- 265, 133, 37, 229, 357, 841, 837, 73, 137, 265,
- 233, 837, 73, 137, 169, 41, 233, 837, 841, 837,
- 841, 837, 841, 837, 841, 837, 841, 837, 841, 901,
- 809, 57, 805, 57, 197, 809, 57, 805, 57, 197,
- 809, 57, 805, 57, 197, 809, 57, 805, 57, 197,
- 809, 57, 805, 57, 197, 94, 1613, 135, 871, 71,
- 39, 39, 327, 135, 39, 39, 39, 39, 39, 39,
- 103, 71, 39, 39, 39, 39, 39, 39, 71, 39,
- 135, 231, 135, 135, 39, 327, 551, 103, 167, 551,
- 89, 1434, 3226, 506, 474, 506, 506, 367, 1018, 1946,
- 1402, 954, 1402, 314, 90, 1082, 218, 2266, 666, 1210,
- 186, 570, 2042, 58, 5850, 154, 2010, 154, 794, 2266,
- 378, 2266, 3738, 39, 39, 39, 39, 39, 39, 17351,
- 34, 3074, 7692, 63, 63,
- };
-
-static int sqlite3Fts5UnicodeCategory(int iCode) {
- int iRes = -1;
- int iHi;
- int iLo;
- int ret;
- u16 iKey;
-
- if( iCode>=(1<<20) ){
- return 0;
- }
- iLo = aFts5UnicodeBlock[(iCode>>16)];
- iHi = aFts5UnicodeBlock[1+(iCode>>16)];
- iKey = (iCode & 0xFFFF);
- while( iHi>iLo ){
- int iTest = (iHi + iLo) / 2;
- assert( iTest>=iLo && iTest<iHi );
- if( iKey>=aFts5UnicodeMap[iTest] ){
- iRes = iTest;
- iLo = iTest+1;
- }else{
- iHi = iTest;
- }
- }
-
- if( iRes<0 ) return 0;
- if( iKey>=(aFts5UnicodeMap[iRes]+(aFts5UnicodeData[iRes]>>5)) ) return 0;
- ret = aFts5UnicodeData[iRes] & 0x1F;
- if( ret!=30 ) return ret;
- return ((iKey - aFts5UnicodeMap[iRes]) & 0x01) ? 5 : 9;
-}
-
-static void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){
- int i = 0;
- int iTbl = 0;
- while( i<128 ){
- int bToken = aArray[ aFts5UnicodeData[iTbl] & 0x1F ];
- int n = (aFts5UnicodeData[iTbl] >> 5) + i;
- for(; i<128 && i<n; i++){
- aAscii[i] = (u8)bToken;
- }
- iTbl++;
- }
- }
 db->auth.nAuthPW = nPW;
rc = sqlite3UserAuthCheckLogin(db, "main", &authLevel);
db->auth.authLevel = authLevel;
- sqlite3ExpirePreparedStatements(db, 0);
+ sqlite3ExpirePreparedStatements(db);
if( rc ){
return rc; /* OOM error, I/O error, etc. */
}
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
index 05d11a8a6..4bf489af1 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
@@ -124,9 +124,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.25.2"
-#define SQLITE_VERSION_NUMBER 3025002
-#define SQLITE_SOURCE_ID "2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7"
+#define SQLITE_VERSION "3.24.0"
+#define SQLITE_VERSION_NUMBER 3024000
+#define SQLITE_SOURCE_ID "2018-06-04 19:24:41 c7ee0833225bfd8c5ec2f9bf62b97c4e04d03bd9566366d5221ac8fb199a87ca"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -473,7 +473,6 @@ SQLITE_API int sqlite3_exec(
*/
#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8))
#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8))
-#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8))
#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
@@ -513,7 +512,6 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
-#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */
#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8))
#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
@@ -889,8 +887,7 @@ struct sqlite3_io_methods {
**