Replace packr with go embed (#1751)

* Embed performer images
* Embed schema migrations
* Update dependencies
* Embed UI
* Remove remaining packr references
WithoutPants
2021-09-22 13:08:34 +10:00
committed by GitHub
parent f292238e7f
commit 56111433a1
429 changed files with 39923 additions and 23061 deletions
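For context on the change itself: replacing a packr box with Go's built-in `//go:embed` directive generally looks like the sketch below. The package name, directory layout, and variable names are illustrative assumptions, not code taken from this commit.

```go
package ui

import (
	"embed"
	"io/fs"
	"net/http"
)

// Before: the UI was bundled with a packr box, e.g.
//   box := packr.New("UI", "./v2.5/build")
//   http.Handle("/", http.FileServer(box))
//
// After: the Go toolchain embeds the files directly at build time.

//go:embed v2.5/build
var uiFS embed.FS

// Handler serves the embedded UI; the assumed "v2.5/build" prefix is
// stripped so request paths match what the packr box used to serve.
func Handler() http.Handler {
	sub, err := fs.Sub(uiFS, "v2.5/build")
	if err != nil {
		panic(err) // only happens if the embedded path is wrong
	}
	return http.FileServer(http.FS(sub))
}
```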


@@ -1,29 +0,0 @@
*.log
.DS_Store
doc
tmp
pkg
*.gem
*.pid
coverage
coverage.data
build/*
*.pbxuser
*.mode1v3
.svn
profile
.console_history
.sass-cache/*
.rake_tasks~
*.log.lck
solr/
.jhw-cache/
jhw.*
*.sublime*
node_modules/
dist/
generated/
.vendor/
bin/*
gin-bin
.idea/


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,61 +0,0 @@
TAGS ?= ""
GO_BIN ?= "go"
install:
$(GO_BIN) install -tags ${TAGS} -v .
make tidy
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
make tidy
build:
$(GO_BIN) build -v .
make tidy
test:
$(GO_BIN) test -cover -tags ${TAGS} ./...
make tidy
ci-deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
golangci-lint run --enable-all
make tidy
update:
ifeq ($(GO111MODULE),on)
rm go.*
$(GO_BIN) mod init
$(GO_BIN) mod tidy
else
$(GO_BIN) get -u -tags ${TAGS}
endif
make test
make install
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
make tidy
release:
$(GO_BIN) get github.com/gobuffalo/release
make tidy
release -y -f version.go --skip-packr
make tidy


@@ -1,22 +0,0 @@
<p align="center"><img src="https://github.com/gobuffalo/buffalo/blob/master/logo.svg" width="360"></p>
<p align="center">
<a href="https://godoc.org/github.com/gobuffalo/logger"><img src="https://godoc.org/github.com/gobuffalo/logger?status.svg" alt="GoDoc" /></a>
<a href="https://travis-ci.org/gobuffalo/logger"><img src="https://travis-ci.org/gobuffalo/logger.svg?branch=master" alt="Build Status" /></a>
<a href="https://goreportcard.com/report/github.com/gobuffalo/logger"><img src="https://goreportcard.com/badge/github.com/gobuffalo/logger" alt="Go Report Card" /></a>
</p>
# Logger
The [`logger.Logger`](https://godoc.org/github.com/gobuffalo/logger#Logger) interface is used throughout Buffalo apps, and other systems, to log all manner of things.
## Installation
```bash
$ go get -u github.com/gobuffalo/logger
```
## Documentation
* [GoDoc](https://godoc.org/github.com/gobuffalo/logger)
* [General Buffalo Documentation](https://gobuffalo.io)
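As a quick illustration of the interface described above (a sketch; the level string and field values are arbitrary):

```go
package main

import "github.com/gobuffalo/logger"

func main() {
	// NewLogger falls back to the debug level if the string can't be parsed.
	l := logger.NewLogger("info")
	l.Infof("listening on %s", ":3000")

	// FieldLogger carries structured key/value context between calls.
	l.WithField("request_id", "abc123").Warn("slow response")
}
```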


@@ -1,18 +0,0 @@
# github.com/gobuffalo/logger Stands on the Shoulders of Giants
github.com/gobuffalo/logger does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy)
* [github.com/konsorten/go-windows-terminal-sequences](https://godoc.org/github.com/konsorten/go-windows-terminal-sequences)
* [github.com/rogpeppe/go-internal](https://godoc.org/github.com/rogpeppe/go-internal)
* [github.com/sirupsen/logrus](https://godoc.org/github.com/sirupsen/logrus)
* [golang.org/x/term](https://godoc.org/golang.org/x/term)
* [golang.org/x/sys](https://godoc.org/golang.org/x/sys)


@@ -1,154 +0,0 @@
package logger
// I really don't want to have this, but until (if) https://github.com/sirupsen/logrus/pull/606 is merged we're stuck with all this code. And yes, this is ALL needed just to remove some blank space in the logs
import (
"bytes"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
)
const (
red = 31
yellow = 33
blue = 36
gray = 37
)
// textFormatter formats logs into text
type textFormatter struct {
ForceColors bool
isTerminal bool
sync.Once
}
func (f *textFormatter) init(entry *logrus.Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
}
}
const defaultTimestampFormat = time.RFC3339
// Format renders a single log entry
func (f *textFormatter) Format(entry *logrus.Entry) ([]byte, error) {
prefixFieldClashes(entry.Data)
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
sort.Strings(keys)
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal)
if isColored {
f.printColored(b, entry, keys)
} else {
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, "time", entry.Time.Format(defaultTimestampFormat))
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *textFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string) {
var levelColor int
switch entry.Level {
case logrus.DebugLevel:
levelColor = gray
case logrus.WarnLevel:
levelColor = yellow
case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]", levelColor, levelText, entry.Time.Format(defaultTimestampFormat))
if entry.Message != "" {
fmt.Fprintf(b, " %s", entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func (f *textFormatter) needsQuoting(text string) bool {
if len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
return false
}
func (f *textFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}
func (f *textFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}
if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}
func prefixFieldClashes(data logrus.Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}


@@ -1,25 +0,0 @@
package logger
import "github.com/sirupsen/logrus"
// Level of the logger
type Level = logrus.Level
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel = logrus.PanicLevel
// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel = logrus.FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel = logrus.ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel = logrus.WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel = logrus.InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel = logrus.DebugLevel
)


@@ -1,67 +0,0 @@
package logger
import (
"os"
"github.com/sirupsen/logrus"
)
// FieldLogger interface
type FieldLogger interface {
Logger
WithField(string, interface{}) FieldLogger
WithFields(map[string]interface{}) FieldLogger
}
// Logger interface is used throughout Buffalo
// apps to log a whole manner of things.
type Logger interface {
Debugf(string, ...interface{})
Infof(string, ...interface{})
Printf(string, ...interface{})
Warnf(string, ...interface{})
Errorf(string, ...interface{})
Fatalf(string, ...interface{})
Debug(...interface{})
Info(...interface{})
Warn(...interface{})
Error(...interface{})
Fatal(...interface{})
Panic(...interface{})
}
func ParseLevel(level string) (Level, error) {
l, err := logrus.ParseLevel(level)
return Level(l), err
}
// NewLogger based on the specified log level, defaults to "debug".
// See `New` for more details.
func NewLogger(level string) FieldLogger {
lvl, err := logrus.ParseLevel(level)
if err != nil {
lvl = logrus.DebugLevel
}
return New(lvl)
}
// New based on the specified log level, defaults to "debug".
// This logger will log to the STDOUT in a human readable,
// but parseable form.
/*
Example: time="2016-12-01T21:02:07-05:00" level=info duration=225.283µs human_size="106 B" method=GET path="/" render=199.79µs request_id=2265736089 size=106 status=200
*/
func New(lvl Level) FieldLogger {
e := os.Getenv("GO_ENV")
if len(e) == 0 {
e = "development"
}
dev := e == "development"
l := logrus.New()
l.SetOutput(os.Stdout)
l.Level = lvl
l.Formatter = &textFormatter{
ForceColors: dev,
}
return Logrus{l}
}


@@ -1,34 +0,0 @@
package logger
import (
"io"
"github.com/sirupsen/logrus"
)
var _ Logger = Logrus{}
var _ FieldLogger = Logrus{}
var _ Outable = Logrus{}
// Logrus is a Logger implementation backed by sirupsen/logrus
type Logrus struct {
logrus.FieldLogger
}
// SetOutput will try and set the output of the underlying
// logrus.FieldLogger if it can
func (l Logrus) SetOutput(w io.Writer) {
if lg, ok := l.FieldLogger.(Outable); ok {
lg.SetOutput(w)
}
}
// WithField returns a new Logger with the field added
func (l Logrus) WithField(s string, i interface{}) FieldLogger {
return Logrus{l.FieldLogger.WithField(s, i)}
}
// WithFields returns a new Logger with the fields added
func (l Logrus) WithFields(m map[string]interface{}) FieldLogger {
return Logrus{l.FieldLogger.WithFields(m)}
}


@@ -1,8 +0,0 @@
package logger
import "io"
// Outable interface for loggers that allow setting the output writer
type Outable interface {
SetOutput(out io.Writer)
}


@@ -1,19 +0,0 @@
// +build !appengine
package logger
import (
"io"
"os"
"golang.org/x/term"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return term.IsTerminal(int(v.Fd()))
default:
return false
}
}


@@ -1,11 +0,0 @@
// +build appengine
package logger
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return true
}


@@ -1,4 +0,0 @@
package logger
// Version of the logger
const Version = "v1.0.1"


@@ -1,29 +0,0 @@
*.log
.DS_Store
doc
tmp
pkg
*.gem
*.pid
coverage
coverage.data
build/*
*.pbxuser
*.mode1v3
.svn
profile
.console_history
.sass-cache/*
.rake_tasks~
*.log.lck
solr/
.jhw-cache/
jhw.*
*.sublime*
node_modules/
dist/
generated/
.vendor/
bin/*
gin-bin
.idea/


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,61 +0,0 @@
TAGS ?= ""
GO_BIN ?= "go"
install:
$(GO_BIN) install -tags ${TAGS} -v .
make tidy
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
make tidy
build:
$(GO_BIN) build -v .
make tidy
test:
$(GO_BIN) test -cover -tags ${TAGS} ./...
make tidy
ci-deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
golangci-lint run --enable-all
make tidy
update:
ifeq ($(GO111MODULE),on)
rm go.*
$(GO_BIN) mod init
$(GO_BIN) mod tidy
else
$(GO_BIN) get -u -tags ${TAGS}
endif
make test
make install
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
make tidy
release:
$(GO_BIN) get github.com/gobuffalo/release
make tidy
release -y -f version.go --skip-packr
make tidy


@@ -1,24 +0,0 @@
<p align="center"><img src="https://github.com/gobuffalo/buffalo/blob/master/logo.svg" width="360"></p>
<p align="center">
<a href="https://godoc.org/github.com/gobuffalo/packd"><img src="https://godoc.org/github.com/gobuffalo/packd?status.svg" alt="GoDoc" /></a>
<a href="https://travis-ci.org/gobuffalo/packd"><img src="https://travis-ci.org/gobuffalo/packd.svg?branch=master" alt="Build Status" /></a>
<a href="https://goreportcard.com/report/github.com/gobuffalo/packd"><img src="https://goreportcard.com/badge/github.com/gobuffalo/packd" alt="Go Report Card" /></a>
</p>
# github.com/gobuffalo/packd
This is a collection of interfaces designed to make using [github.com/gobuffalo/packr](https://github.com/gobuffalo/packr) easier, and to make the transition between v1 and v2 as seamless as possible.
They can, and should, be used for testing, alternate Box implementations, etc...
## Installation
```bash
$ go get -u -v github.com/gobuffalo/packd
```
## Memory Box
The [`packd#MemoryBox`](https://godoc.org/github.com/gobuffalo/packd#MemoryBox) is a complete, thread-safe implementation of [`packd#Box`](https://godoc.org/github.com/gobuffalo/packd#Box).
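A minimal sketch of using the `MemoryBox` through the interfaces above (the paths and contents are made up for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/gobuffalo/packd"
)

func main() {
	box := packd.NewMemoryBox()

	// Addable: store content under a path.
	if err := box.AddString("templates/index.html", "<h1>hi</h1>"); err != nil {
		log.Fatal(err)
	}

	// Finder: read it back.
	s, err := box.FindString("templates/index.html")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s)

	// Haser / Lister: membership checks and listing.
	fmt.Println(box.Has("templates/index.html"), box.List())
}
```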


@@ -1,10 +0,0 @@
# github.com/gobuffalo/packd Stands on the Shoulders of Giants
github.com/gobuffalo/packd does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/davecgh/go-spew](https://godoc.org/github.com/davecgh/go-spew)
* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify)


@@ -1,126 +0,0 @@
package packd
import (
"bytes"
"fmt"
"io"
"os"
"time"
)
var _ File = &virtualFile{}
var _ io.Reader = &virtualFile{}
var _ io.Writer = &virtualFile{}
var _ fmt.Stringer = &virtualFile{}
type virtualFile struct {
io.Reader
name string
info fileInfo
original []byte
}
func (f virtualFile) Name() string {
return f.name
}
func (f *virtualFile) Seek(offset int64, whence int) (int64, error) {
return f.Reader.(*bytes.Reader).Seek(offset, whence)
}
func (f virtualFile) FileInfo() (os.FileInfo, error) {
return f.info, nil
}
func (f *virtualFile) Close() error {
return nil
}
func (f virtualFile) Readdir(count int) ([]os.FileInfo, error) {
return []os.FileInfo{f.info}, nil
}
func (f virtualFile) Stat() (os.FileInfo, error) {
return f.info, nil
}
func (f virtualFile) String() string {
return string(f.original)
}
// Read reads the next len(p) bytes from the virtualFile and
// rewind read offset to 0 when it met EOF.
func (f *virtualFile) Read(p []byte) (int, error) {
i, err := f.Reader.Read(p)
if i == 0 || err == io.EOF {
f.Seek(0, io.SeekStart)
}
return i, err
}
// Write copies byte slice p to content of virtualFile.
func (f *virtualFile) Write(p []byte) (int, error) {
return f.write(p)
}
// write copies byte slice or data from io.Reader to content of the
// virtualFile and update related information of the virtualFile.
func (f *virtualFile) write(d interface{}) (c int, err error) {
bb := &bytes.Buffer{}
switch d.(type) {
case []byte:
c, err = bb.Write(d.([]byte))
case io.Reader:
if d != nil {
i64, e := io.Copy(bb, d.(io.Reader))
c = int(i64)
err = e
}
default:
err = fmt.Errorf("unknown type of argument")
}
if err != nil {
return c, err
}
f.info.size = int64(c)
f.info.modTime = time.Now()
f.original = bb.Bytes()
f.Reader = bytes.NewReader(f.original)
return c, nil
}
// NewFile returns a new "virtual" file
func NewFile(name string, r io.Reader) (File, error) {
return buildFile(name, r)
}
// NewDir returns a new "virtual" directory
func NewDir(name string) (File, error) {
v, err := buildFile(name, nil)
if err != nil {
return v, err
}
v.info.isDir = true
return v, nil
}
func buildFile(name string, r io.Reader) (*virtualFile, error) {
vf := &virtualFile{
name: name,
info: fileInfo{
Path: name,
modTime: time.Now(),
},
}
var err error
if r != nil {
_, err = vf.write(r)
} else {
_, err = vf.write([]byte{}) // for safety
}
return vf, err
}


@@ -1,39 +0,0 @@
package packd
import (
"os"
"time"
)
var _ os.FileInfo = fileInfo{}
type fileInfo struct {
Path string
size int64
modTime time.Time
isDir bool
}
func (f fileInfo) Name() string {
return f.Path
}
func (f fileInfo) Size() int64 {
return f.size
}
func (f fileInfo) Mode() os.FileMode {
return 0444
}
func (f fileInfo) ModTime() time.Time {
return f.modTime
}
func (f fileInfo) IsDir() bool {
return f.isDir
}
func (f fileInfo) Sys() interface{} {
return nil
}


@@ -1,83 +0,0 @@
package packd
import (
"fmt"
"io"
"net/http"
"os"
)
type WalkFunc func(string, File) error
// Box represents the entirety of the necessary
// interfaces to form a "full" box.
// github.com/gobuffalo/packr#Box is an example of this interface.
type Box interface {
HTTPBox
Lister
Addable
Finder
Walkable
Haser
}
type Haser interface {
Has(string) bool
}
type Walker interface {
Walk(wf WalkFunc) error
}
type Walkable interface {
Walker
WalkPrefix(prefix string, wf WalkFunc) error
}
type Finder interface {
Find(string) ([]byte, error)
FindString(name string) (string, error)
}
type HTTPBox interface {
Open(name string) (http.File, error)
}
type Lister interface {
List() []string
}
type Addable interface {
AddString(path string, t string) error
AddBytes(path string, t []byte) error
}
type SimpleFile interface {
fmt.Stringer
io.Reader
io.Writer
Name() string
}
type HTTPFile interface {
SimpleFile
io.Closer
io.Seeker
Readdir(count int) ([]os.FileInfo, error)
Stat() (os.FileInfo, error)
}
type File interface {
HTTPFile
FileInfo() (os.FileInfo, error)
}
// LegacyBox represents deprecated methods
// that older Box implementations might have had.
// github.com/gobuffalo/packr v1 is an example of a LegacyBox.
type LegacyBox interface {
String(name string) string
MustString(name string) (string, error)
Bytes(name string) []byte
MustBytes(name string) ([]byte, error)
}


@@ -1,29 +0,0 @@
*.log
.DS_Store
doc
tmp
pkg
*.gem
*.pid
coverage
coverage.data
build/*
*.pbxuser
*.mode1v3
.svn
profile
.console_history
.sass-cache/*
.rake_tasks~
*.log.lck
solr/
.jhw-cache/
jhw.*
*.sublime*
node_modules/
dist/
generated/
.vendor/
bin/*
gin-bin
.idea/


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2019 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,61 +0,0 @@
TAGS ?= ""
GO_BIN ?= "go"
install:
$(GO_BIN) install -tags ${TAGS} -v .
make tidy
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
make tidy
build:
$(GO_BIN) build -v .
make tidy
test:
$(GO_BIN) test -cover -tags ${TAGS} ./...
make tidy
ci-deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
golangci-lint run --enable-all
make tidy
update:
ifeq ($(GO111MODULE),on)
rm go.*
$(GO_BIN) mod init
$(GO_BIN) mod tidy
else
$(GO_BIN) get -u -tags ${TAGS}
endif
make test
make install
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
make tidy
release:
$(GO_BIN) get github.com/gobuffalo/release
make tidy
release -y -f version.go --skip-packr
make tidy


@@ -1,6 +0,0 @@
# github.com/markbates/errx Stands on the Shoulders of Giants
github.com/markbates/errx does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:


@@ -1,71 +0,0 @@
variables:
GOBIN: "$(GOPATH)/bin" # Go binaries path
GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path
modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module's code
jobs:
- job: Windows
pool:
vmImage: "vs2017-win2016"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml
- job: macOS
pool:
vmImage: "macOS-10.13"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml
- job: Linux
pool:
vmImage: "ubuntu-16.04"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml


@@ -1,19 +0,0 @@
steps:
- task: GoTool@0
inputs:
version: $(go_version)
- task: Bash@3
inputs:
targetType: inline
script: |
mkdir -p "$(GOBIN)"
mkdir -p "$(GOPATH)/pkg"
mkdir -p "$(modulePath)"
shopt -s extglob
mv !(gopath) "$(modulePath)"
displayName: "Setup Go Workspace"
- script: |
go get -t -v ./...
go test -race ./...
workingDirectory: "$(modulePath)"
displayName: "Tests"


@@ -1,23 +0,0 @@
package errx
// go2 errors
type Wrapper interface {
Unwrap() error
}
// pkg/errors
type Causer interface {
Cause() error
}
func Unwrap(err error) error {
switch e := err.(type) {
case Wrapper:
return e.Unwrap()
case Causer:
return e.Cause()
}
return err
}
var Cause = Unwrap


@@ -1,4 +0,0 @@
package errx
// Version of errx
const Version = "v1.0.0"


@@ -1,70 +0,0 @@
package packd
import (
"sort"
"sync"
)
// ByteMap wraps sync.Map and uses the following types:
// key: string
// value: []byte
type ByteMap struct {
data sync.Map
}
// Delete the key from the map
func (m *ByteMap) Delete(key string) {
m.data.Delete(key)
}
// Load the key from the map.
// Returns []byte or bool.
// A false return indicates either the key was not found
// or the value is not of type []byte
func (m *ByteMap) Load(key string) ([]byte, bool) {
i, ok := m.data.Load(key)
if !ok {
return []byte(``), false
}
s, ok := i.([]byte)
return s, ok
}
// LoadOrStore will return an existing key or
// store the value if not already in the map
func (m *ByteMap) LoadOrStore(key string, value []byte) ([]byte, bool) {
i, _ := m.data.LoadOrStore(key, value)
s, ok := i.([]byte)
return s, ok
}
// Range over the []byte values in the map
func (m *ByteMap) Range(f func(key string, value []byte) bool) {
m.data.Range(func(k, v interface{}) bool {
key, ok := k.(string)
if !ok {
return false
}
value, ok := v.([]byte)
if !ok {
return false
}
return f(key, value)
})
}
// Store a []byte in the map
func (m *ByteMap) Store(key string, value []byte) {
m.data.Store(key, value)
}
// Keys returns a list of keys in the map
func (m *ByteMap) Keys() []string {
var keys []string
m.Range(func(key string, value []byte) bool {
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}


@@ -1,156 +0,0 @@
package packd
import (
"bytes"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"github.com/gobuffalo/packd/internal/takeon/github.com/markbates/errx"
)
var _ Addable = NewMemoryBox()
var _ Finder = NewMemoryBox()
var _ Lister = NewMemoryBox()
var _ HTTPBox = NewMemoryBox()
var _ Haser = NewMemoryBox()
var _ Walkable = NewMemoryBox()
var _ Box = NewMemoryBox()
// MemoryBox is a thread-safe, in-memory, implementation of the Box interface.
type MemoryBox struct {
files *ByteMap
}
func (m *MemoryBox) Has(path string) bool {
_, ok := m.files.Load(path)
return ok
}
func (m *MemoryBox) List() []string {
var names []string
m.files.Range(func(key string, value []byte) bool {
names = append(names, key)
return true
})
sort.Strings(names)
return names
}
func (m *MemoryBox) Open(path string) (http.File, error) {
cpath := strings.TrimPrefix(path, "/")
if filepath.Ext(cpath) == "" {
// it's a directory
return NewDir(path)
}
if len(cpath) == 0 {
cpath = "index.html"
}
b, err := m.Find(cpath)
if err != nil {
return nil, err
}
cpath = filepath.FromSlash(cpath)
f, err := NewFile(cpath, bytes.NewReader(b))
if err != nil {
return nil, err
}
return f, nil
}
func (m *MemoryBox) FindString(path string) (string, error) {
bb, err := m.Find(path)
return string(bb), err
}
func (m *MemoryBox) Find(path string) (ret []byte, e error) {
res, ok := m.files.Load(path)
if !ok {
var b []byte
lpath := strings.ToLower(path)
err := m.Walk(func(p string, file File) error {
lp := strings.ToLower(p)
if lp != lpath {
return nil
}
res := file.String()
b = []byte(res)
return nil
})
if err != nil {
return b, os.ErrNotExist
}
if len(b) == 0 {
return b, os.ErrNotExist
}
return b, nil
}
return res, nil
}
func (m *MemoryBox) AddString(path string, t string) error {
return m.AddBytes(path, []byte(t))
}
func (m *MemoryBox) AddBytes(path string, t []byte) error {
m.files.Store(path, t)
return nil
}
func (m *MemoryBox) Walk(wf WalkFunc) error {
var err error
m.files.Range(func(path string, b []byte) bool {
var f File
f, err = NewFile(path, bytes.NewReader(b))
if err != nil {
return false
}
err = wf(path, f)
if err != nil {
if errx.Unwrap(err) == filepath.SkipDir {
err = nil
return true
}
return false
}
return true
})
if errx.Unwrap(err) == filepath.SkipDir {
return nil
}
return err
}
func (m *MemoryBox) WalkPrefix(pre string, wf WalkFunc) error {
return m.Walk(func(path string, file File) error {
if strings.HasPrefix(path, pre) {
return wf(path, file)
}
return nil
})
}
func (m *MemoryBox) Remove(path string) {
m.files.Delete(path)
m.files.Delete(strings.ToLower(path))
}
// NewMemoryBox returns a configured *MemoryBox
func NewMemoryBox() *MemoryBox {
return &MemoryBox{
files: &ByteMap{},
}
}


@@ -1,43 +0,0 @@
package packd
import (
"path/filepath"
"strings"
)
var CommonSkipPrefixes = []string{".", "_", "node_modules", "vendor"}
// SkipWalker will walk the Walker and call the WalkFunc for files whose directories
// do not match any of the skipPrefixes. If no skipPrefixes are passed, then
// CommonSkipPrefixes is used
func SkipWalker(walker Walker, skipPrefixes []string, wf WalkFunc) error {
if len(skipPrefixes) == 0 {
skipPrefixes = append(skipPrefixes, CommonSkipPrefixes...)
}
return walker.Walk(func(path string, file File) error {
fi, err := file.FileInfo()
if err != nil {
return err
}
path = strings.Replace(path, "\\", "/", -1)
parts := strings.Split(path, "/")
if !fi.IsDir() {
parts = parts[:len(parts)-1]
}
for _, base := range parts {
if base != "." {
for _, skip := range skipPrefixes {
skip = strings.ToLower(skip)
lbase := strings.ToLower(base)
if strings.HasPrefix(lbase, skip) {
return filepath.SkipDir
}
}
}
}
return wf(path, file)
})
}


@@ -1,4 +0,0 @@
package packd
// Version of packd
const Version = "v0.3.0"


@@ -1,3 +0,0 @@
{
"Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"]
}


@@ -1,42 +0,0 @@
# Code generated by github.com/gobuffalo/release. DO NOT EDIT.
# Edit .goreleaser.yml.plush instead
builds:
-
goos:
- darwin
- linux
- windows
goarch:
- ppc64le
- 386
- amd64
env:
- CGO_ENABLED=0
ignore:
- goos: darwin
goarch: ppc64le
- goos: windows
goarch: ppc64le
main: ./packr2/main.go
binary: packr2
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
brews:
-
github:
owner: gobuffalo
name: homebrew-tap


@@ -1,39 +0,0 @@
builds:
-
goos:
- darwin
- linux
- windows
goarch:
- ppc64le
- 386
- amd64
env:
- CGO_ENABLED=0
ignore:
- goos: darwin
goarch: ppc64le
- goos: windows
goarch: ppc64le
main: ./packr2/main.go
binary: packr2
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
<%= if (brew) { %>
brews:
-
github:
owner: gobuffalo
name: homebrew-tap
<% } %>


@@ -1,8 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,46 +0,0 @@
TAGS ?= "sqlite"
GO_BIN ?= go
install: deps
echo "installing packr v2"
packr2
$(GO_BIN) install -v ./packr2
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
$(GO_BIN) install -v ./packr2
make tidy
build: deps
packr2
$(GO_BIN) build -v ./packr2
make tidy
test:
packr2
$(GO_BIN) test -tags ${TAGS} ./...
make tidy
lint:
gometalinter --vendor ./... --deadline=1m --skip=internal
update:
$(GO_BIN) get -u -tags ${TAGS} ./...
make tidy
make install
make test
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
release:
release -y -f version.go
make tidy


@@ -1,239 +0,0 @@
**NOTICE: Please consider migrating your projects to github.com/markbates/pkger. It has an idiomatic API, minimal dependencies, a stronger test suite (tested directly against the std lib counterparts), transparent tooling, and more.**
https://blog.gobuffalo.io/introducing-pkger-static-file-embedding-in-go-1ce76dc79c65
# Packr (v2)
[![GoDoc](https://godoc.org/github.com/gobuffalo/packr/v2?status.svg)](https://godoc.org/github.com/gobuffalo/packr/v2)
Packr is a simple solution for bundling static assets inside of Go binaries. Most importantly it does it in a way that is friendly to developers while they are developing.
## Intro Video
To get an idea of the what and why of Packr, please enjoy this short video: [https://vimeo.com/219863271](https://vimeo.com/219863271).
## Library Installation
```text
$ go get -u github.com/gobuffalo/packr/v2/...
```
## Binary Installation
```text
$ go get -u github.com/gobuffalo/packr/v2/packr2
```
## New File Format FAQs
In version `v2.0.0` the file format changed and is not backward compatible with the `packr-v1.x` library.
#### Can `packr-v1.x` read the new format?
No, it cannot. Because of the way the new file format works, porting it to `packr-v1.x` would be difficult. PRs are welcome though. :)
#### Can `packr-v2.x` read `packr-v1.x` files?
Yes it can, but that ability will eventually be phased out. Because of that we recommend moving to the new format.
#### Can `packr-v2.x` generate `packr-v1.x` files?
Yes it can, but that ability will eventually be phased out. Because of that we recommend moving to the new format.
The `--legacy` flag is available on all commands that generate `-packr.go` files.
```bash
$ packr2 --legacy
```
## Usage
### In Code
The first step in using Packr is to create a new box. A box represents a folder on disk. Once you have a box you can get `string` or `[]byte` representations of the file.
```go
// set up a new box by giving it a name and an optional (relative) path to a folder on disk:
box := packr.New("My Box", "./templates")
// Get the string representation of a file, or an error if it doesn't exist:
html, err := box.FindString("index.html")
// Get the []byte representation of a file, or an error if it doesn't exist:
html, err := box.Find("index.html")
```
### What is a Box?
A box represents a folder, and any sub-folders, on disk that you want to have access to in your binary. When compiling a binary using the `packr2` CLI the contents of the folder will be converted into Go files that can be compiled inside of a "standard" go binary. Inside of the compiled binary the files will be read from memory. When working locally the files will be read directly off of disk. This is a seamless switch that doesn't require any special attention on your part.
#### Example
Assume the following directory structure:
```
├── main.go
└── templates
├── admin
│   └── index.html
└── index.html
```
The following program will read the `./templates/admin/index.html` file and print it out.
```go
package main
import (
"fmt"
"log"
"github.com/gobuffalo/packr/v2"
)
func main() {
box := packr.New("myBox", "./templates")
s, err := box.FindString("admin/index.html")
if err != nil {
log.Fatal(err)
}
fmt.Println(s)
}
```
### Development Made Easy
In order to get static files into a Go binary, those files must first be converted to Go code. To do that, Packr ships with a few tools to help build binaries. See below.
During development, however, it is painful to have to keep running a tool to compile those files.
Packr uses the following resolution rules when looking for a file:
1. Look for the file in-memory (inside a Go binary)
1. Look for the file on disk (during development)
Because Packr knows how to fall through to the file system, developers don't need to worry about constantly compiling their static files into a binary. They can work unimpeded.
Packr takes file resolution a step further. When declaring a new box you use a relative path, such as `./templates`. When Packr receives this call it calculates the absolute path to that directory, which guarantees that Packr can find your files correctly even if you're not running in the directory the box was created in. This helps with testing, where Go changes the `pwd` for each package and makes relative paths difficult to work with; this is not a problem when using Packr.
---
## Usage with HTTP
A box implements the [`http.FileSystem`](https://golang.org/pkg/net/http/#FileSystem) interface, meaning it can be used to serve static files.
```go
package main
import (
"net/http"
"github.com/gobuffalo/packr/v2"
)
func main() {
box := packr.New("someBoxName", "./templates")
http.Handle("/", http.FileServer(box))
http.ListenAndServe(":3000", nil)
}
```
---
## Building a Binary
Before you build your Go binary, run the `packr2` command first. It will look for all the boxes in your code and then generate `.go` files that pack the static files into bytes that can be bundled into the Go binary.
```
$ packr2
```
Then run your `go build command` like normal.
*NOTE*: It is not recommended to check in these generated `-packr.go` files. They can be large and easily become out of date. It is recommended that you always run `packr2 clean` after running the `packr2` tool.
#### Cleaning Up
When you're done it is recommended that you run the `packr2 clean` command. This will remove all of the generated files that Packr created for you.
```
$ packr2 clean
```
Why would you want to do this? Packr first looks to the information stored in these generated files; if the information isn't there, it falls back to disk. This makes it easy to work with in development.
---
## Debugging
The `packr2` command passes all arguments down to the underlying `go` command; this includes the `-v` flag to print out `go build` information. Packr looks for the `-v` flag and will turn on its own verbose logging, which is very useful for understanding what the `packr2` command is doing when it is run.
---
## FAQ
### Compilation Errors with Go Templates
Q: I have a program with Go template files, those files are named `foo.go` and look like the following:
```
// Copyright {{.Year}} {{.Author}}. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package {{.Project}}
```
When I run `packr2` I get errors like:
```
expected 'IDENT', found '{'
```
A: Packr works by searching your `.go` files for [`github.com/gobuffalo/packr/v2#New`](https://godoc.org/github.com/gobuffalo/packr/v2#New) or [`github.com/gobuffalo/packr/v2#NewBox`](https://godoc.org/github.com/gobuffalo/packr/v2#NewBox) calls. Because those files aren't "proper" Go files, Packr can't parse them to find the box declarations. To fix this you need to tell Packr to ignore those files when searching for boxes. A couple solutions to this problem are:
* Name the files something else. The `.tmpl` extension is the idiomatic way of naming these types of files.
* Rename the folder containing these files to start with an `_`, for example `_templates`. Packr, like Go, will ignore folders starting with the `_` character when searching for boxes.
### Dynamic Box Paths
Q: I need to set the path of a box using a variable, but `packr.New("foo", myVar)` doesn't work correctly.
A: Packr attempts to "automagically" set its resolution directory when using [`github.com/gobuffalo/packr/v2#New`](https://godoc.org/github.com/gobuffalo/packr/v2#New); however, for dynamic paths you need to set it manually:
```go
box := packr.New("foo", "|")
box.ResolutionDir = myVar
```
### I don't want to pack files, but still use the Packr interface.
Q: I want to write code that uses the Packr tools, but doesn't actually pack the files into my binary. How can I do that?
A: Using [`github.com/gobuffalo/packr/v2#Folder`](https://godoc.org/github.com/gobuffalo/packr/v2#Folder) gives you back a `*packr.Box` that can be used as normal, but is excluded by the Packr tool when compiling.
### Packr Finds No Boxes
Q: I run `packr2 -v` but it doesn't find my boxes:
```
DEBU[2019-03-18T18:48:52+01:00] *parser.Parser#NewFromRoots found prospects=0
DEBU[2019-03-18T18:48:52+01:00] found 0 boxes
```
A: Packr works by parsing `.go` files to find [`github.com/gobuffalo/packr/v2#Box`](https://godoc.org/github.com/gobuffalo/packr/v2#Box) and [`github.com/gobuffalo/packr/v2#NewBox`](https://godoc.org/github.com/gobuffalo/packr/v2#NewBox) declarations. If there aren't any `.go` files in the folder that `packr2` is run in, it cannot find those declarations. To fix this problem, run the `packr2` command in the directory containing your `.go` files.
### Box Interfaces
Q: I want to be able to easily test my applications by passing in mock boxes. How do I do that?
A: Packr boxes and files conform to the interfaces found at [`github.com/gobuffalo/packd`](https://godoc.org/github.com/gobuffalo/packd). Change your application to use those interfaces instead of the concrete Packr types.
```go
// using concrete type
func myFunc(box *packr.Box) {}
// using interfaces
func myFunc(box packd.Box) {}
```


@@ -1,32 +0,0 @@
# /Users/smichalak/dev/packr/v2 Stands on the Shoulders of Giants
/Users/smichalak/dev/packr/v2 does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/gobuffalo/logger](https://godoc.org/github.com/gobuffalo/logger)
* [github.com/gobuffalo/packd](https://godoc.org/github.com/gobuffalo/packd)
* [github.com/karrick/godirwalk](https://godoc.org/github.com/karrick/godirwalk)
* [github.com/konsorten/go-windows-terminal-sequences](https://godoc.org/github.com/konsorten/go-windows-terminal-sequences)
* [github.com/markbates/errx](https://godoc.org/github.com/markbates/errx)
* [github.com/markbates/oncer](https://godoc.org/github.com/markbates/oncer)
* [github.com/markbates/safe](https://godoc.org/github.com/markbates/safe)
* [github.com/rogpeppe/go-internal](https://godoc.org/github.com/rogpeppe/go-internal)
* [github.com/sirupsen/logrus](https://godoc.org/github.com/sirupsen/logrus)
* [github.com/spf13/cobra](https://godoc.org/github.com/spf13/cobra)
* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify)
* [golang.org/x/sync](https://godoc.org/golang.org/x/sync)
* [golang.org/x/tools](https://godoc.org/golang.org/x/tools)


@@ -1,240 +0,0 @@
package packr
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"sort"
"strings"
"github.com/gobuffalo/packd"
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/gobuffalo/packr/v2/plog"
"github.com/markbates/oncer"
)
var _ packd.Box = &Box{}
var _ packd.HTTPBox = &Box{}
var _ packd.Addable = &Box{}
var _ packd.Walkable = &Box{}
var _ packd.Finder = &Box{}
// Box represent a folder on a disk you want to
// have access to in the built Go binary.
type Box struct {
Path string `json:"path"`
Name string `json:"name"`
ResolutionDir string `json:"resolution_dir"`
DefaultResolver resolver.Resolver `json:"default_resolver"`
resolvers resolversMap
dirs dirsMap
}
// NewBox returns a Box that can be used to
// retrieve files from either disk or the embedded
// binary.
// Deprecated: Use New instead.
func NewBox(path string) *Box {
oncer.Deprecate(0, "packr.NewBox", "Use packr.New instead.")
return New(path, path)
}
// New returns a new Box with the name of the box
// and the path of the box.
func New(name string, path string) *Box {
plog.Debug("packr", "New", "name", name, "path", path)
b, _ := findBox(name)
if b != nil {
return b
}
b = construct(name, path)
plog.Debug(b, "New", "Box", b, "ResolutionDir", b.ResolutionDir)
b, err := placeBox(b)
if err != nil {
panic(err)
}
return b
}
// Folder returns a Box that will NOT be packed.
// This is useful for writing tests or tools that
// need to work with a folder at runtime.
func Folder(path string) *Box {
return New(path, path)
}
// SetResolver allows for the use of a custom resolver for
// the specified file
func (b *Box) SetResolver(file string, res resolver.Resolver) {
d := filepath.Dir(file)
b.dirs.Store(d, true)
plog.Debug(b, "SetResolver", "file", file, "resolver", fmt.Sprintf("%T", res))
b.resolvers.Store(resolver.Key(file), res)
}
// AddString converts t to a byteslice and delegates to AddBytes to add to b.data
func (b *Box) AddString(path string, t string) error {
return b.AddBytes(path, []byte(t))
}
// AddBytes sets t in b.data by the given path
func (b *Box) AddBytes(path string, t []byte) error {
m := map[string]file.File{}
f, err := file.NewFile(path, t)
if err != nil {
return err
}
m[resolver.Key(path)] = f
res := resolver.NewInMemory(m)
b.SetResolver(path, res)
return nil
}
// FindString returns either the string of the requested
// file or an error if it can not be found.
func (b *Box) FindString(name string) (string, error) {
bb, err := b.Find(name)
return string(bb), err
}
// Find returns either the byte slice of the requested
// file or an error if it can not be found.
func (b *Box) Find(name string) ([]byte, error) {
f, err := b.Resolve(name)
if err != nil {
return []byte(""), err
}
bb := &bytes.Buffer{}
io.Copy(bb, f)
return bb.Bytes(), nil
}
// Has returns true if the resource exists in the box
func (b *Box) Has(name string) bool {
_, err := b.Find(name)
return err == nil
}
// HasDir returns true if the directory exists in the box
func (b *Box) HasDir(name string) bool {
oncer.Do("packr2/box/HasDir"+b.Name, func() {
for _, f := range b.List() {
for d := filepath.Dir(f); d != "."; d = filepath.Dir(d) {
b.dirs.Store(d, true)
}
}
})
if name == "/" {
return b.Has("index.html")
}
_, ok := b.dirs.Load(name)
return ok
}
// Open returns a File using the http.File interface
func (b *Box) Open(name string) (http.File, error) {
plog.Debug(b, "Open", "name", name)
f, err := b.Resolve(name)
if err != nil {
if len(filepath.Ext(name)) == 0 {
return b.openWoExt(name)
}
return f, err
}
f, err = file.NewFileR(name, f)
plog.Debug(b, "Open", "name", f.Name(), "file", f.Name())
return f, err
}
func (b *Box) openWoExt(name string) (http.File, error) {
if !b.HasDir(name) {
id := path.Join(name, "index.html")
if b.Has(id) {
return b.Open(id)
}
return nil, os.ErrNotExist
}
d, err := file.NewDir(name)
plog.Debug(b, "Open", "name", name, "dir", d)
return d, err
}
// List shows "What's in the box?"
func (b *Box) List() []string {
var keys []string
b.Walk(func(path string, info File) error {
if info == nil {
return nil
}
finfo, _ := info.FileInfo()
if !finfo.IsDir() {
keys = append(keys, path)
}
return nil
})
sort.Strings(keys)
return keys
}
// Resolve will attempt to find the file in the box,
// returning an error if the file cannot be found.
func (b *Box) Resolve(key string) (file.File, error) {
key = strings.TrimPrefix(key, "/")
var r resolver.Resolver
b.resolvers.Range(func(k string, vr resolver.Resolver) bool {
lk := strings.ToLower(resolver.Key(k))
lkey := strings.ToLower(resolver.Key(key))
if lk == lkey {
r = vr
return false
}
return true
})
if r == nil {
r = b.DefaultResolver
if r == nil {
r = resolver.DefaultResolver
if r == nil {
return nil, fmt.Errorf("resolver.DefaultResolver is nil")
}
}
}
plog.Debug(r, "Resolve", "box", b.Name, "key", key)
f, err := r.Resolve(b.Name, key)
if err != nil {
z, err := resolver.ResolvePathInBase(resolver.OsPath(b.ResolutionDir), filepath.FromSlash(path.Clean("/"+resolver.OsPath(key))))
if err != nil {
plog.Debug(r, "Resolve", "box", b.Name, "key", key, "err", err)
return f, err
}
f, err = r.Resolve(b.Name, z)
if err != nil {
plog.Debug(r, "Resolve", "box", b.Name, "key", z, "err", err)
return f, err
}
b, err := ioutil.ReadAll(f)
if err != nil {
return f, err
}
f, err = file.NewFile(key, b)
if err != nil {
return f, err
}
}
plog.Debug(r, "Resolve", "box", b.Name, "key", key, "file", f.Name())
return f, nil
}


@@ -1,73 +0,0 @@
//go:generate mapgen -name "box" -zero "nil" -go-type "*Box" -pkg "" -a "New(`test-a`, ``)" -b "New(`test-b`, ``)" -c "New(`test-c`, ``)" -bb "New(`test-bb`, ``)" -destination "packr"
// Code generated by github.com/gobuffalo/mapgen. DO NOT EDIT.
package packr
import (
"sort"
"sync"
)
// boxMap wraps sync.Map and uses the following types:
// key: string
// value: *Box
type boxMap struct {
data sync.Map
}
// Delete the key from the map
func (m *boxMap) Delete(key string) {
m.data.Delete(key)
}
// Load the key from the map.
// Returns *Box or bool.
// A false return indicates either the key was not found
// or the value is not of type *Box
func (m *boxMap) Load(key string) (*Box, bool) {
i, ok := m.data.Load(key)
if !ok {
return nil, false
}
s, ok := i.(*Box)
return s, ok
}
// LoadOrStore will return an existing key or
// store the value if not already in the map
func (m *boxMap) LoadOrStore(key string, value *Box) (*Box, bool) {
i, _ := m.data.LoadOrStore(key, value)
s, ok := i.(*Box)
return s, ok
}
// Range over the *Box values in the map
func (m *boxMap) Range(f func(key string, value *Box) bool) {
m.data.Range(func(k, v interface{}) bool {
key, ok := k.(string)
if !ok {
return false
}
value, ok := v.(*Box)
if !ok {
return false
}
return f(key, value)
})
}
// Store a *Box in the map
func (m *boxMap) Store(key string, value *Box) {
m.data.Store(key, value)
}
// Keys returns a list of keys in the map
func (m *boxMap) Keys() []string {
var keys []string
m.Range(func(key string, value *Box) bool {
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}


@@ -1,79 +0,0 @@
package packr
import (
"encoding/json"
"fmt"
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/markbates/oncer"
)
// File has been deprecated and file.File should be used instead
type File = file.File
var (
// ErrResOutsideBox gets returned in case of the requested resources being outside the box
// Deprecated
ErrResOutsideBox = fmt.Errorf("can't find a resource outside the box")
)
// PackBytes packs bytes for a file into a box.
// Deprecated
func PackBytes(box string, name string, bb []byte) {
b := NewBox(box)
d := resolver.NewInMemory(map[string]file.File{})
f, err := file.NewFile(name, bb)
if err != nil {
panic(err)
}
if err := d.Pack(name, f); err != nil {
panic(err)
}
b.SetResolver(name, d)
}
// PackBytesGzip packs the gzip-compressed bytes into a box.
// Deprecated
func PackBytesGzip(box string, name string, bb []byte) error {
// TODO: this function never did what it was supposed to do!
PackBytes(box, name, bb)
return nil
}
// PackJSONBytes packs JSON encoded bytes for a file into a box.
// Deprecated
func PackJSONBytes(box string, name string, jbb string) error {
var bb []byte
err := json.Unmarshal([]byte(jbb), &bb)
if err != nil {
return err
}
PackBytes(box, name, bb)
return nil
}
// Bytes is deprecated. Use Find instead
func (b *Box) Bytes(name string) []byte {
bb, _ := b.Find(name)
oncer.Deprecate(0, "github.com/gobuffalo/packr/v2#Box.Bytes", "Use github.com/gobuffalo/packr/v2#Box.Find instead.")
return bb
}
// MustBytes is deprecated. Use Find instead.
func (b *Box) MustBytes(name string) ([]byte, error) {
oncer.Deprecate(0, "github.com/gobuffalo/packr/v2#Box.MustBytes", "Use github.com/gobuffalo/packr/v2#Box.Find instead.")
return b.Find(name)
}
// String is deprecated. Use FindString instead
func (b *Box) String(name string) string {
oncer.Deprecate(0, "github.com/gobuffalo/packr/v2#Box.String", "Use github.com/gobuffalo/packr/v2#Box.FindString instead.")
return string(b.Bytes(name))
}
// MustString is deprecated. Use FindString instead
func (b *Box) MustString(name string) (string, error) {
oncer.Deprecate(0, "github.com/gobuffalo/packr/v2#Box.MustString", "Use github.com/gobuffalo/packr/v2#Box.FindString instead.")
return b.FindString(name)
}


@@ -1,82 +0,0 @@
//go:generate mapgen -name "dirs" -zero "false" -go-type "bool" -pkg "" -a "nil" -b "nil" -c "nil" -bb "nil" -destination "packr"
// Code generated by github.com/gobuffalo/mapgen. DO NOT EDIT.
package packr
import (
"sort"
"strings"
"sync"
)
// dirsMap wraps sync.Map and uses the following types:
// key: string
// value: bool
type dirsMap struct {
data sync.Map
}
// Delete the key from the map
func (m *dirsMap) Delete(key string) {
m.data.Delete(m.normalizeKey(key))
}
// Load the key from the map.
// Returns bool or bool.
// A false return indicates either the key was not found
// or the value is not of type bool
func (m *dirsMap) Load(key string) (bool, bool) {
i, ok := m.data.Load(m.normalizeKey(key))
if !ok {
return false, false
}
s, ok := i.(bool)
return s, ok
}
// LoadOrStore will return an existing key or
// store the value if not already in the map
func (m *dirsMap) LoadOrStore(key string, value bool) (bool, bool) {
i, _ := m.data.LoadOrStore(m.normalizeKey(key), value)
s, ok := i.(bool)
return s, ok
}
// Range over the bool values in the map
func (m *dirsMap) Range(f func(key string, value bool) bool) {
m.data.Range(func(k, v interface{}) bool {
key, ok := k.(string)
if !ok {
return false
}
value, ok := v.(bool)
if !ok {
return false
}
return f(key, value)
})
}
// Store a bool in the map
func (m *dirsMap) Store(key string, value bool) {
d := m.normalizeKey(key)
m.data.Store(d, value)
m.data.Store(strings.TrimPrefix(d, "/"), value)
}
// Keys returns a list of keys in the map
func (m *dirsMap) Keys() []string {
var keys []string
m.Range(func(key string, value bool) bool {
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}
func (m *dirsMap) normalizeKey(key string) string {
key = strings.Replace(key, "\\", "/", -1)
return key
}


@@ -1,32 +0,0 @@
package file
import (
"bytes"
"io"
"github.com/gobuffalo/packd"
)
// File represents a virtual, or physical, backing of
// a file object in a Box
type File = packd.File
// FileMappable types are capable of returning a map of
// path => File
type FileMappable interface {
FileMap() map[string]File
}
// NewFile returns a virtual File implementation
func NewFile(name string, b []byte) (File, error) {
return packd.NewFile(name, bytes.NewReader(b))
}
// NewDir returns a virtual dir implementation
func NewDir(name string) (File, error) {
return packd.NewDir(name)
}
func NewFileR(name string, r io.Reader) (File, error) {
return packd.NewFile(name, r)
}


@@ -1,38 +0,0 @@
package file
import (
"os"
"time"
)
type info struct {
Path string
Contents []byte
size int64
modTime time.Time
isDir bool
}
func (f info) Name() string {
return f.Path
}
func (f info) Size() int64 {
return f.size
}
func (f info) Mode() os.FileMode {
return 0444
}
func (f info) ModTime() time.Time {
return f.modTime
}
func (f info) IsDir() bool {
return f.isDir
}
func (f info) Sys() interface{} {
return nil
}


@@ -1,111 +0,0 @@
package resolver
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/plog"
"github.com/karrick/godirwalk"
)
var _ Resolver = &Disk{}
type Disk struct {
Root string
}
func (d Disk) String() string {
return String(&d)
}
func (d *Disk) Resolve(box string, name string) (file.File, error) {
var err error
path := OsPath(name)
if !filepath.IsAbs(path) {
path, err = ResolvePathInBase(OsPath(d.Root), path)
if err != nil {
return nil, err
}
}
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, os.ErrNotExist
}
if bb, err := ioutil.ReadFile(path); err == nil {
return file.NewFile(OsPath(name), bb)
}
return nil, os.ErrNotExist
}
// ResolvePathInBase returns a path that is guaranteed to be inside of the base directory or an error
func ResolvePathInBase(base, path string) (string, error) {
// Determine the absolute file path of the base directory
d, err := filepath.Abs(base)
if err != nil {
return "", err
}
// Return the base directory if no file was requested
if path == "/" || path == "\\" {
return d, nil
}
// Resolve the absolute file path after combining the key with base
p, err := filepath.Abs(filepath.Join(d, path))
if err != nil {
return "", err
}
// Verify that the resolved path is inside of the base directory
if !strings.HasPrefix(p, d+string(filepath.Separator)) {
return "", os.ErrNotExist
}
return p, nil
}
var _ file.FileMappable = &Disk{}
func (d *Disk) FileMap() map[string]file.File {
moot := &sync.Mutex{}
m := map[string]file.File{}
root := OsPath(d.Root)
if _, err := os.Stat(root); err != nil {
return m
}
callback := func(path string, de *godirwalk.Dirent) error {
if _, err := os.Stat(root); err != nil {
return nil
}
if !de.IsRegular() {
return nil
}
moot.Lock()
name := strings.TrimPrefix(path, root+string(filepath.Separator))
b, err := ioutil.ReadFile(path)
if err != nil {
return err
}
m[name], err = file.NewFile(name, b)
if err != nil {
return err
}
moot.Unlock()
return nil
}
err := godirwalk.Walk(root, &godirwalk.Options{
FollowSymbolicLinks: true,
Callback: callback,
})
if err != nil {
plog.Logger.Errorf("[%s] error walking %v", root, err)
}
return m
}
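
A minimal, illustrative sketch (not part of the original file) of how the `Disk` resolver above can be used directly; the `./templates` root and `index.html` name are placeholders:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/packr/v2/file/resolver"
)

func main() {
	// Resolve a file relative to an assumed ./templates directory.
	d := &resolver.Disk{Root: "./templates"}
	f, err := d.Resolve("", "index.html") // the box argument is unused by Disk
	if err != nil {
		fmt.Println("not found:", err)
		return
	}
	fmt.Println("resolved:", f.Name())
}
```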

View File

@@ -1,314 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hex implements hexadecimal encoding and decoding.
package hex
import (
"bytes"
"fmt"
"io"
)
const hextable = "0123456789abcdef"
// EncodedLen returns the length of an encoding of n source bytes.
// Specifically, it returns n * 2.
func EncodedLen(n int) int { return n * 2 }
// Encode encodes src into EncodedLen(len(src))
// bytes of dst. As a convenience, it returns the number
// of bytes written to dst, but this value is always EncodedLen(len(src)).
// Encode implements hexadecimal encoding.
func Encode(dst, src []byte) int {
for i, v := range src {
dst[i*2] = hextable[v>>4]
dst[i*2+1] = hextable[v&0x0f]
}
return len(src) * 2
}
// ErrLength reports an attempt to decode an odd-length input
// using Decode or DecodeString.
// The stream-based Decoder returns io.ErrUnexpectedEOF instead of ErrLength.
var ErrLength = fmt.Errorf("encoding/hex: odd length hex string")
// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
type InvalidByteError byte
func (e InvalidByteError) Error() string {
return fmt.Sprintf("encoding/hex: invalid byte: %#U", rune(e))
}
// DecodedLen returns the length of a decoding of x source bytes.
// Specifically, it returns x / 2.
func DecodedLen(x int) int { return x / 2 }
// Decode decodes src into DecodedLen(len(src)) bytes,
// returning the actual number of bytes written to dst.
//
// Decode expects that src contains only hexadecimal
// characters and that src has even length.
// If the input is malformed, Decode returns the number
// of bytes decoded before the error.
func Decode(dst, src []byte) (int, error) {
var i int
for i = 0; i < len(src)/2; i++ {
a, ok := fromHexChar(src[i*2])
if !ok {
return i, InvalidByteError(src[i*2])
}
b, ok := fromHexChar(src[i*2+1])
if !ok {
return i, InvalidByteError(src[i*2+1])
}
dst[i] = (a << 4) | b
}
if len(src)%2 == 1 {
// Check for invalid char before reporting bad length,
// since the invalid char (if present) is an earlier problem.
if _, ok := fromHexChar(src[i*2]); !ok {
return i, InvalidByteError(src[i*2])
}
return i, ErrLength
}
return i, nil
}
// fromHexChar converts a hex character into its value and a success flag.
func fromHexChar(c byte) (byte, bool) {
switch {
case '0' <= c && c <= '9':
return c - '0', true
case 'a' <= c && c <= 'f':
return c - 'a' + 10, true
case 'A' <= c && c <= 'F':
return c - 'A' + 10, true
}
return 0, false
}
// EncodeToString returns the hexadecimal encoding of src.
func EncodeToString(src []byte) string {
dst := make([]byte, EncodedLen(len(src)))
Encode(dst, src)
return string(dst)
}
// DecodeString returns the bytes represented by the hexadecimal string s.
//
// DecodeString expects that src contains only hexadecimal
// characters and that src has even length.
// If the input is malformed, DecodeString returns
// the bytes decoded before the error.
func DecodeString(s string) ([]byte, error) {
src := []byte(s)
// We can use the source slice itself as the destination
// because the decode loop increments by one and then the 'seen' byte is not used anymore.
n, err := Decode(src, src)
return src[:n], err
}
// Dump returns a string that contains a hex dump of the given data. The format
// of the hex dump matches the output of `hexdump -C` on the command line.
func Dump(data []byte) string {
var buf bytes.Buffer
dumper := Dumper(&buf)
dumper.Write(data)
dumper.Close()
return buf.String()
}
// bufferSize is the number of hexadecimal characters to buffer in encoder and decoder.
const bufferSize = 1024
type encoder struct {
w io.Writer
err error
out [bufferSize]byte // output buffer
}
// NewEncoder returns an io.Writer that writes lowercase hexadecimal characters to w.
func NewEncoder(w io.Writer) io.Writer {
return &encoder{w: w}
}
func (e *encoder) Write(p []byte) (n int, err error) {
for len(p) > 0 && e.err == nil {
chunkSize := bufferSize / 2
if len(p) < chunkSize {
chunkSize = len(p)
}
var written int
encoded := Encode(e.out[:], p[:chunkSize])
written, e.err = e.w.Write(e.out[:encoded])
n += written / 2
p = p[chunkSize:]
}
return n, e.err
}
type decoder struct {
r io.Reader
err error
in []byte // input buffer (encoded form)
arr [bufferSize]byte // backing array for in
}
// NewDecoder returns an io.Reader that decodes hexadecimal characters from r.
// NewDecoder expects that r contain only an even number of hexadecimal characters.
func NewDecoder(r io.Reader) io.Reader {
return &decoder{r: r}
}
func (d *decoder) Read(p []byte) (n int, err error) {
// Fill internal buffer with sufficient bytes to decode
if len(d.in) < 2 && d.err == nil {
var numCopy, numRead int
numCopy = copy(d.arr[:], d.in) // Copies either 0 or 1 bytes
numRead, d.err = d.r.Read(d.arr[numCopy:])
d.in = d.arr[:numCopy+numRead]
if d.err == io.EOF && len(d.in)%2 != 0 {
if _, ok := fromHexChar(d.in[len(d.in)-1]); !ok {
d.err = InvalidByteError(d.in[len(d.in)-1])
} else {
d.err = io.ErrUnexpectedEOF
}
}
}
// Decode internal buffer into output buffer
if numAvail := len(d.in) / 2; len(p) > numAvail {
p = p[:numAvail]
}
numDec, err := Decode(p, d.in[:len(p)*2])
d.in = d.in[2*numDec:]
if err != nil {
d.in, d.err = nil, err // Decode error; discard input remainder
}
if len(d.in) < 2 {
return numDec, d.err // Only expose errors when buffer fully consumed
}
return numDec, nil
}
// Dumper returns a WriteCloser that writes a hex dump of all written data to
// w. The format of the dump matches the output of `hexdump -C` on the command
// line.
func Dumper(w io.Writer) io.WriteCloser {
return &dumper{w: w}
}
type dumper struct {
w io.Writer
rightChars [18]byte
buf [14]byte
used int // number of bytes in the current line
n uint // number of bytes, total
closed bool
}
func toChar(b byte) byte {
if b < 32 || b > 126 {
return '.'
}
return b
}
func (h *dumper) Write(data []byte) (n int, err error) {
if h.closed {
return 0, fmt.Errorf("encoding/hex: dumper closed")
}
// Output lines look like:
// 00000010 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d |./0123456789:;<=|
// ^ offset ^ extra space ^ ASCII of line.
for i := range data {
if h.used == 0 {
// At the beginning of a line we print the current
// offset in hex.
h.buf[0] = byte(h.n >> 24)
h.buf[1] = byte(h.n >> 16)
h.buf[2] = byte(h.n >> 8)
h.buf[3] = byte(h.n)
Encode(h.buf[4:], h.buf[:4])
h.buf[12] = ' '
h.buf[13] = ' '
_, err = h.w.Write(h.buf[4:])
if err != nil {
return
}
}
Encode(h.buf[:], data[i:i+1])
h.buf[2] = ' '
l := 3
if h.used == 7 {
// There's an additional space after the 8th byte.
h.buf[3] = ' '
l = 4
} else if h.used == 15 {
// At the end of the line there's an extra space and
// the bar for the right column.
h.buf[3] = ' '
h.buf[4] = '|'
l = 5
}
_, err = h.w.Write(h.buf[:l])
if err != nil {
return
}
n++
h.rightChars[h.used] = toChar(data[i])
h.used++
h.n++
if h.used == 16 {
h.rightChars[16] = '|'
h.rightChars[17] = '\n'
_, err = h.w.Write(h.rightChars[:])
if err != nil {
return
}
h.used = 0
}
}
return
}
func (h *dumper) Close() (err error) {
// See the comments in Write() for the details of this format.
if h.closed {
return
}
h.closed = true
if h.used == 0 {
return
}
h.buf[0] = ' '
h.buf[1] = ' '
h.buf[2] = ' '
h.buf[3] = ' '
h.buf[4] = '|'
nBytes := h.used
for h.used < 16 {
l := 3
if h.used == 7 {
l = 4
} else if h.used == 15 {
l = 5
}
_, err = h.w.Write(h.buf[:l])
if err != nil {
return
}
h.used++
}
h.rightChars[nBytes] = '|'
h.rightChars[nBytes+1] = '\n'
_, err = h.w.Write(h.rightChars[:nBytes+2])
return
}
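
The package above is a vendored copy of the standard library's `encoding/hex`, so it behaves the same way. A short, illustrative sketch (not part of the original tree) using the vendored import path that appears later in this diff:

```go
package main

import (
	"fmt"

	// vendored copy shown above; the standard library's encoding/hex works identically
	"github.com/gobuffalo/packr/v2/file/resolver/encoding/hex"
)

func main() {
	enc := hex.EncodeToString([]byte("packr")) // "7061636b72"
	fmt.Println(enc)

	dec, err := hex.DecodeString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec)) // "packr"

	// Dump produces `hexdump -C` style output.
	fmt.Print(hex.Dump([]byte("packr")))
}
```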

View File

@@ -1,112 +0,0 @@
package resolver
import (
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"os"
"strings"
"sync"
"github.com/gobuffalo/packr/v2/file/resolver/encoding/hex"
"github.com/gobuffalo/packr/v2/plog"
"github.com/gobuffalo/packr/v2/file"
)
var _ Resolver = &HexGzip{}
type HexGzip struct {
packed map[string]string
unpacked map[string]string
moot *sync.RWMutex
}
func (hg HexGzip) String() string {
return String(&hg)
}
var _ file.FileMappable = &HexGzip{}
func (hg *HexGzip) FileMap() map[string]file.File {
hg.moot.RLock()
var names []string
for k := range hg.packed {
names = append(names, k)
}
hg.moot.RUnlock()
m := map[string]file.File{}
for _, n := range names {
if f, err := hg.Resolve("", n); err == nil {
m[n] = f
}
}
return m
}
func (hg *HexGzip) Resolve(box string, name string) (file.File, error) {
plog.Debug(hg, "Resolve", "box", box, "name", name)
hg.moot.Lock()
defer hg.moot.Unlock()
if s, ok := hg.unpacked[name]; ok {
return file.NewFile(name, []byte(s))
}
packed, ok := hg.packed[name]
if !ok {
return nil, os.ErrNotExist
}
unpacked, err := UnHexGzipString(packed)
if err != nil {
return nil, err
}
f, err := file.NewFile(OsPath(name), []byte(unpacked))
if err != nil {
return nil, err
}
hg.unpacked[name] = f.String()
return f, nil
}
func NewHexGzip(files map[string]string) (*HexGzip, error) {
if files == nil {
files = map[string]string{}
}
hg := &HexGzip{
packed: files,
unpacked: map[string]string{},
moot: &sync.RWMutex{},
}
return hg, nil
}
func HexGzipString(s string) (string, error) {
bb := &bytes.Buffer{}
enc := hex.NewEncoder(bb)
zw := gzip.NewWriter(enc)
io.Copy(zw, strings.NewReader(s))
zw.Close()
return bb.String(), nil
}
func UnHexGzipString(packed string) (string, error) {
br := bytes.NewBufferString(packed)
dec := hex.NewDecoder(br)
zr, err := gzip.NewReader(dec)
if err != nil {
return "", err
}
defer zr.Close()
b, err := ioutil.ReadAll(zr)
if err != nil {
return "", err
}
return string(b), nil
}
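
A small sketch (an illustration, not part of the original file) showing how the two exported helpers above round-trip a string; the input text is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/packr/v2/file/resolver"
)

func main() {
	// gzip-compress, then hex-encode
	packed, err := resolver.HexGzipString("hello, packr")
	if err != nil {
		panic(err)
	}

	// hex-decode, then gunzip back to the original string
	unpacked, err := resolver.UnHexGzipString(packed)
	if err != nil {
		panic(err)
	}

	fmt.Println(unpacked == "hello, packr") // true
}
```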

View File

@@ -1,21 +0,0 @@
package resolver
import (
"path/filepath"
"runtime"
"strings"
)
func Key(s string) string {
s = strings.Replace(s, "\\", "/", -1)
return s
}
func OsPath(s string) string {
if runtime.GOOS == "windows" {
s = strings.Replace(s, "/", string(filepath.Separator), -1)
} else {
s = strings.Replace(s, "\\", string(filepath.Separator), -1)
}
return s
}

View File

@@ -1,63 +0,0 @@
package resolver
import (
"io/ioutil"
"github.com/gobuffalo/packd"
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/plog"
)
var _ Resolver = &InMemory{}
type InMemory struct {
*packd.MemoryBox
}
func (d InMemory) String() string {
return String(&d)
}
func (d *InMemory) Resolve(box string, name string) (file.File, error) {
b, err := d.MemoryBox.Find(name)
if err != nil {
return nil, err
}
return file.NewFile(name, b)
}
func (d *InMemory) Pack(name string, f file.File) error {
plog.Debug(d, "Pack", "name", name)
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
d.AddBytes(name, b)
return nil
}
func (d *InMemory) FileMap() map[string]file.File {
m := map[string]file.File{}
d.Walk(func(path string, file file.File) error {
m[path] = file
return nil
})
return m
}
func NewInMemory(files map[string]file.File) *InMemory {
if files == nil {
files = map[string]file.File{}
}
box := packd.NewMemoryBox()
for p, f := range files {
if b, err := ioutil.ReadAll(f); err == nil {
box.AddBytes(p, b)
}
}
return &InMemory{
MemoryBox: box,
}
}

View File

@@ -1,7 +0,0 @@
package resolver
import "github.com/gobuffalo/packr/v2/file"
type Packable interface {
Pack(name string, f file.File) error
}

View File

@@ -1,33 +0,0 @@
package resolver
import (
"encoding/json"
"fmt"
"os"
"github.com/gobuffalo/packr/v2/file"
)
type Resolver interface {
Resolve(string, string) (file.File, error)
}
func defaultResolver() Resolver {
pwd, _ := os.Getwd()
return &Disk{
Root: pwd,
}
}
var DefaultResolver = defaultResolver()
func String(r Resolver) string {
m := map[string]interface{}{
"name": fmt.Sprintf("%T", r),
}
if fm, ok := r.(file.FileMappable); ok {
m["files"] = fm
}
b, _ := json.Marshal(m)
return string(b)
}

View File

@@ -1,72 +0,0 @@
package packr
import (
"os"
"path/filepath"
"runtime"
"strings"
"github.com/gobuffalo/packr/v2/plog"
)
func construct(name string, path string) *Box {
return &Box{
Path: path,
Name: name,
ResolutionDir: resolutionDir(path),
resolvers: resolversMap{},
dirs: dirsMap{},
}
}
func resolutionDirTestFilename(filename, og string) (string, bool) {
ng := filepath.Join(filepath.Dir(filename), og)
// this little hack courtesy of the `-cover` flag!!
cov := filepath.Join("_test", "_obj_test")
ng = strings.Replace(ng, string(filepath.Separator)+cov, "", 1)
if resolutionDirExists(ng, og) {
return ng, true
}
ng = filepath.Join(os.Getenv("GOPATH"), "src", ng)
if resolutionDirExists(ng, og) {
return ng, true
}
return og, false
}
func resolutionDirExists(s, og string) bool {
_, err := os.Stat(s)
if err != nil {
return false
}
plog.Debug("packr", "resolutionDir", "original", og, "resolved", s)
return true
}
func resolutionDir(og string) string {
ng, _ := filepath.Abs(og)
if resolutionDirExists(ng, og) {
return ng
}
// packr.New
_, filename, _, _ := runtime.Caller(3)
ng, ok := resolutionDirTestFilename(filename, og)
if ok {
return ng
}
// packr.NewBox (deprecated)
_, filename, _, _ = runtime.Caller(4)
ng, ok = resolutionDirTestFilename(filename, og)
if ok {
return ng
}
return og
}

View File

@@ -1,52 +0,0 @@
package parser
import (
"encoding/json"
"fmt"
)
// FromArgs is useful when writing packr store-cmd binaries.
/*
package main
import (
"log"
"os"
"github.com/gobuffalo/packr/v2/jam/parser"
"github.com/markbates/s3packr/s3packr"
)
func main() {
err := parser.FromArgs(os.Args[1:], func(boxes parser.Boxes) error {
for _, box := range boxes {
s3 := s3packr.New(box)
if err := s3.Pack(box); err != nil {
return err
}
}
return nil
})
if err != nil {
log.Fatal(err)
}
}
*/
func FromArgs(args []string, fn func(Boxes) error) error {
if len(args) == 0 {
return fmt.Errorf("you must supply a payload")
}
payload := args[0]
if len(payload) == 0 {
return fmt.Errorf("you must supply a payload")
}
var boxes Boxes
err := json.Unmarshal([]byte(payload), &boxes)
if err != nil {
return err
}
return fn(boxes)
}

View File

@@ -1,40 +0,0 @@
package parser
import (
"encoding/json"
"os"
"strings"
)
// Box found while parsing a file
type Box struct {
Name string // name of the box
Path string // relative path of folder NewBox("./templates")
AbsPath string // absolute path of Path
Package string // the package name the box was found in
PWD string // the PWD when the parser was run
PackageDir string // the absolute path of the package where the box was found
}
type Boxes []*Box
// String returns the Box encoded as JSON
func (b Box) String() string {
x, _ := json.Marshal(b)
return string(x)
}
// NewBox returns a Box stub from the name and path provided
func NewBox(name string, path string) *Box {
if len(name) == 0 {
name = path
}
name = strings.Replace(name, "\"", "", -1)
pwd, _ := os.Getwd()
box := &Box{
Name: name,
Path: path,
PWD: pwd,
}
return box
}

View File

@@ -1,54 +0,0 @@
package parser
import (
"bytes"
"io"
"io/ioutil"
"path/filepath"
)
// File that is to be parsed
type File struct {
io.Reader
Path string
AbsPath string
}
// Name of the file, e.g. "app.go"
func (f File) Name() string {
return f.Path
}
// String returns the contents of the reader
func (f *File) String() string {
src, _ := ioutil.ReadAll(f)
f.Reader = bytes.NewReader(src)
return string(src)
}
func (s *File) Write(p []byte) (int, error) {
bb := &bytes.Buffer{}
i, err := bb.Write(p)
s.Reader = bb
return i, err
}
// NewFile takes the name of the file you want to
// write to and a reader to read from
func NewFile(path string, r io.Reader) *File {
if r == nil {
r = &bytes.Buffer{}
}
if seek, ok := r.(io.Seeker); ok {
seek.Seek(0, 0)
}
abs := path
if !filepath.IsAbs(path) {
abs, _ = filepath.Abs(path)
}
return &File{
Reader: r,
Path: path,
AbsPath: abs,
}
}

View File

@@ -1,112 +0,0 @@
package parser
import (
"fmt"
"go/build"
"os"
"path/filepath"
"strings"
"time"
"github.com/gobuffalo/packr/v2/plog"
"github.com/karrick/godirwalk"
"github.com/markbates/errx"
"github.com/markbates/oncer"
)
type finder struct {
id time.Time
}
func (fd *finder) key(m, dir string) string {
return fmt.Sprintf("%s-*parser.finder#%s-%s", fd.id, m, dir)
}
// findAllGoFiles finds all *.go files for a given directory
func (fd *finder) findAllGoFiles(dir string) ([]string, error) {
var err error
var names []string
oncer.Do(fd.key("findAllGoFiles", dir), func() {
plog.Debug(fd, "findAllGoFiles", "dir", dir)
callback := func(path string, do *godirwalk.Dirent) error {
ext := filepath.Ext(path)
if ext != ".go" {
return nil
}
//check if path is a dir
fi, err := os.Stat(path)
if err != nil {
return nil
}
if fi.IsDir() {
return nil
}
names = append(names, path)
return nil
}
err = godirwalk.Walk(dir, &godirwalk.Options{
FollowSymbolicLinks: true,
Callback: callback,
})
})
return names, err
}
func (fd *finder) findAllGoFilesImports(dir string) ([]string, error) {
var err error
var names []string
oncer.Do(fd.key("findAllGoFilesImports", dir), func() {
ctx := build.Default
if len(ctx.SrcDirs()) == 0 {
err = fmt.Errorf("no src directories found")
return
}
pkg, err := ctx.ImportDir(dir, 0)
if strings.HasPrefix(pkg.ImportPath, "github.com/gobuffalo/packr") {
return
}
if err != nil {
if !strings.Contains(err.Error(), "cannot find package") {
if _, ok := errx.Unwrap(err).(*build.NoGoError); !ok {
err = err
return
}
}
}
if pkg.Goroot {
return
}
if len(pkg.GoFiles) <= 0 {
return
}
plog.Debug(fd, "findAllGoFilesImports", "dir", dir)
names, _ = fd.findAllGoFiles(dir)
for _, n := range pkg.GoFiles {
names = append(names, filepath.Join(pkg.Dir, n))
}
for _, imp := range pkg.Imports {
if len(ctx.SrcDirs()) == 0 {
continue
}
d := ctx.SrcDirs()[len(ctx.SrcDirs())-1]
ip := filepath.Join(d, imp)
n, err := fd.findAllGoFilesImports(ip)
if err != nil && len(n) != 0 {
names = n
return
}
names = append(names, n...)
}
})
return names, err
}

View File

@@ -1,43 +0,0 @@
package parser
import (
"go/ast"
"go/parser"
"go/token"
"io"
"strings"
"github.com/gobuffalo/packd"
"github.com/markbates/errx"
)
// ParsedFile is the result of parsing a single Go source file
type ParsedFile struct {
File packd.SimpleFile
FileSet *token.FileSet
Ast *ast.File
Lines []string
}
// ParseFileMode parses the given file using the supplied parser mode
func ParseFileMode(gf packd.SimpleFile, mode parser.Mode) (ParsedFile, error) {
pf := ParsedFile{
FileSet: token.NewFileSet(),
File: gf,
}
src := gf.String()
f, err := parser.ParseFile(pf.FileSet, gf.Name(), src, mode)
if err != nil && errx.Unwrap(err) != io.EOF {
return pf, err
}
pf.Ast = f
pf.Lines = strings.Split(src, "\n")
return pf, nil
}
// ParseFile parses the given file using the default mode
func ParseFile(gf packd.SimpleFile) (ParsedFile, error) {
return ParseFileMode(gf, 0)
}

View File

@@ -1,46 +0,0 @@
package parser
import (
"os"
"sort"
"strings"
"github.com/gobuffalo/packr/v2/plog"
)
// Parser to find boxes
type Parser struct {
Prospects []*File // a list of files to check for boxes
IgnoreImports bool
}
// Run the parser and return any boxes found
func (p *Parser) Run() (Boxes, error) {
var boxes Boxes
for _, pros := range p.Prospects {
plog.Debug(p, "Run", "parsing", pros.Name())
v := NewVisitor(pros)
pbr, err := v.Run()
if err != nil {
return boxes, err
}
for _, b := range pbr {
plog.Debug(p, "Run", "file", pros.Name(), "box", b.Name)
boxes = append(boxes, b)
}
}
pwd, _ := os.Getwd()
sort.Slice(boxes, func(a, b int) bool {
b1 := boxes[a]
return !strings.HasPrefix(b1.AbsPath, pwd)
})
return boxes, nil
}
// New Parser from a list of File
func New(prospects ...*File) *Parser {
return &Parser{
Prospects: prospects,
}
}

View File

@@ -1,77 +0,0 @@
package parser
import (
"os"
"path/filepath"
"strings"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/gobuffalo/packr/v2/plog"
)
var DefaultIgnoredFolders = []string{".", "_", "vendor", "node_modules", "_fixtures", "testdata"}
func IsProspect(path string, ignore ...string) (status bool) {
// plog.Debug("parser", "IsProspect", "path", path, "ignore", ignore)
defer func() {
if status {
plog.Debug("parser", "IsProspect (TRUE)", "path", path, "status", status)
}
}()
if path == "." {
return true
}
ext := filepath.Ext(path)
dir := filepath.Dir(path)
fi, _ := os.Stat(path)
if fi != nil {
if fi.IsDir() {
dir = filepath.Base(path)
} else {
if len(ext) > 0 {
dir = filepath.Base(filepath.Dir(path))
}
}
}
path = strings.ToLower(path)
dir = strings.ToLower(dir)
if strings.HasSuffix(path, "-packr.go") {
return false
}
if strings.HasSuffix(path, "_test.go") {
return false
}
ignore = append(ignore, DefaultIgnoredFolders...)
for i, x := range ignore {
ignore[i] = strings.TrimSpace(strings.ToLower(x))
}
parts := strings.Split(resolver.OsPath(path), string(filepath.Separator))
if len(parts) == 0 {
return false
}
for _, i := range ignore {
for _, p := range parts {
if strings.HasPrefix(p, i) {
return false
}
}
}
un := filepath.Base(path)
if len(ext) != 0 {
un = filepath.Base(filepath.Dir(path))
}
if strings.HasPrefix(un, "_") {
return false
}
return ext == ".go"
}

View File

@@ -1,89 +0,0 @@
package parser
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/gobuffalo/packr/v2/plog"
"github.com/karrick/godirwalk"
)
type RootsOptions struct {
IgnoreImports bool
Ignores []string
}
func (r RootsOptions) String() string {
x, _ := json.Marshal(r)
return string(x)
}
// NewFromRoots scans the file roots provided and returns a
// new Parser containing the prospects
func NewFromRoots(roots []string, opts *RootsOptions) (*Parser, error) {
if opts == nil {
opts = &RootsOptions{}
}
if len(roots) == 0 {
pwd, _ := os.Getwd()
roots = append(roots, pwd)
}
p := New()
plog.Debug(p, "NewFromRoots", "roots", roots, "options", opts)
callback := func(path string, de *godirwalk.Dirent) error {
if IsProspect(path, opts.Ignores...) {
if de.IsDir() {
return nil
}
roots = append(roots, path)
return nil
}
if de.IsDir() {
return filepath.SkipDir
}
return nil
}
wopts := &godirwalk.Options{
FollowSymbolicLinks: true,
Callback: callback,
}
for _, root := range roots {
plog.Debug(p, "NewFromRoots", "walking", root)
err := godirwalk.Walk(root, wopts)
if err != nil {
return p, err
}
}
dd := map[string]string{}
fd := &finder{id: time.Now()}
for _, r := range roots {
var names []string
if opts.IgnoreImports {
names, _ = fd.findAllGoFiles(r)
} else {
names, _ = fd.findAllGoFilesImports(r)
}
for _, n := range names {
if IsProspect(n) {
plog.Debug(p, "NewFromRoots", "mapping", n)
dd[n] = n
}
}
}
for path := range dd {
plog.Debug(p, "NewFromRoots", "reading file", path)
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
p.Prospects = append(p.Prospects, NewFile(path, bytes.NewReader(b)))
}
plog.Debug(p, "NewFromRoots", "found prospects", len(p.Prospects))
return p, nil
}

View File

@@ -1,324 +0,0 @@
package parser
import (
"fmt"
"go/ast"
"os"
"path/filepath"
"sort"
"strings"
"github.com/gobuffalo/packd"
)
type Visitor struct {
File packd.SimpleFile
Package string
boxes map[string]*Box
errors []error
}
func NewVisitor(f *File) *Visitor {
return &Visitor{
File: f,
boxes: map[string]*Box{},
errors: []error{},
}
}
func (v *Visitor) Run() (Boxes, error) {
var boxes Boxes
pf, err := ParseFile(v.File)
if err != nil {
return boxes, err
}
v.Package = pf.Ast.Name.Name
ast.Walk(v, pf.Ast)
for _, vb := range v.boxes {
boxes = append(boxes, vb)
}
sort.Slice(boxes, func(i, j int) bool {
return boxes[i].Name < boxes[j].Name
})
if len(v.errors) > 0 {
s := make([]string, len(v.errors))
for i, e := range v.errors {
s[i] = e.Error()
}
return boxes, fmt.Errorf("%s", strings.Join(s, "\n"))
}
return boxes, nil
}
func (v *Visitor) Visit(node ast.Node) ast.Visitor {
if node == nil {
return v
}
if err := v.eval(node); err != nil {
v.errors = append(v.errors, err)
}
return v
}
func (v *Visitor) eval(node ast.Node) error {
switch t := node.(type) {
case *ast.CallExpr:
return v.evalExpr(t)
case *ast.Ident:
return v.evalIdent(t)
case *ast.GenDecl:
for _, n := range t.Specs {
if err := v.eval(n); err != nil {
return err
}
}
case *ast.FuncDecl:
if t.Body == nil {
return nil
}
for _, b := range t.Body.List {
if err := v.evalStmt(b); err != nil {
return err
}
}
return nil
case *ast.ValueSpec:
for _, e := range t.Values {
if err := v.evalExpr(e); err != nil {
return err
}
}
}
return nil
}
func (v *Visitor) evalStmt(stmt ast.Stmt) error {
switch t := stmt.(type) {
case *ast.ExprStmt:
return v.evalExpr(t.X)
case *ast.AssignStmt:
for _, e := range t.Rhs {
if err := v.evalArgs(e); err != nil {
return err
}
}
}
return nil
}
func (v *Visitor) evalExpr(expr ast.Expr) error {
switch t := expr.(type) {
case *ast.CallExpr:
if t.Fun == nil {
return nil
}
for _, a := range t.Args {
switch at := a.(type) {
case *ast.CallExpr:
if sel, ok := t.Fun.(*ast.SelectorExpr); ok {
return v.evalSelector(at, sel)
}
if err := v.evalArgs(at); err != nil {
return err
}
case *ast.CompositeLit:
for _, e := range at.Elts {
if err := v.evalExpr(e); err != nil {
return err
}
}
}
}
if ft, ok := t.Fun.(*ast.SelectorExpr); ok {
return v.evalSelector(t, ft)
}
case *ast.KeyValueExpr:
return v.evalExpr(t.Value)
}
return nil
}
func (v *Visitor) evalArgs(expr ast.Expr) error {
switch at := expr.(type) {
case *ast.CompositeLit:
for _, e := range at.Elts {
if err := v.evalExpr(e); err != nil {
return err
}
}
case *ast.CallExpr:
if at.Fun == nil {
return nil
}
switch st := at.Fun.(type) {
case *ast.SelectorExpr:
if err := v.evalSelector(at, st); err != nil {
return err
}
case *ast.Ident:
return v.evalIdent(st)
}
for _, a := range at.Args {
if err := v.evalArgs(a); err != nil {
return err
}
}
}
return nil
}
func (v *Visitor) evalSelector(expr *ast.CallExpr, sel *ast.SelectorExpr) error {
x, ok := sel.X.(*ast.Ident)
if !ok {
return nil
}
if x.Name == "packr" {
switch sel.Sel.Name {
case "New":
if len(expr.Args) != 2 {
return fmt.Errorf("`New` requires two arguments")
}
zz := func(e ast.Expr) (string, error) {
switch at := e.(type) {
case *ast.Ident:
switch at.Obj.Kind {
case ast.Var:
if as, ok := at.Obj.Decl.(*ast.AssignStmt); ok {
return v.fromVariable(as)
}
case ast.Con:
if vs, ok := at.Obj.Decl.(*ast.ValueSpec); ok {
return v.fromConstant(vs)
}
}
return "", v.evalIdent(at)
case *ast.BasicLit:
return at.Value, nil
case *ast.CallExpr:
return "", v.evalExpr(at)
}
return "", fmt.Errorf("can't handle %T", e)
}
k1, err := zz(expr.Args[0])
if err != nil {
return err
}
k2, err := zz(expr.Args[1])
if err != nil {
return err
}
v.addBox(k1, k2)
return nil
case "NewBox":
for _, e := range expr.Args {
switch at := e.(type) {
case *ast.Ident:
switch at.Obj.Kind {
case ast.Var:
if as, ok := at.Obj.Decl.(*ast.AssignStmt); ok {
v.addVariable("", as)
}
case ast.Con:
if vs, ok := at.Obj.Decl.(*ast.ValueSpec); ok {
v.addConstant("", vs)
}
}
return v.evalIdent(at)
case *ast.BasicLit:
v.addBox("", at.Value)
case *ast.CallExpr:
return v.evalExpr(at)
}
}
}
}
return nil
}
func (v *Visitor) evalIdent(i *ast.Ident) error {
if i.Obj == nil {
return nil
}
if s, ok := i.Obj.Decl.(*ast.AssignStmt); ok {
return v.evalStmt(s)
}
return nil
}
func (v *Visitor) addBox(name string, path string) {
if len(name) == 0 {
name = path
}
name = strings.Replace(name, "\"", "", -1)
path = strings.Replace(path, "\"", "", -1)
abs := path
if _, ok := v.boxes[name]; !ok {
box := NewBox(name, path)
box.Package = v.Package
pd := filepath.Dir(v.File.Name())
pwd, _ := os.Getwd()
if !filepath.IsAbs(pd) {
pd = filepath.Join(pwd, pd)
}
box.PackageDir = pd
if !filepath.IsAbs(abs) {
abs = filepath.Join(pd, abs)
}
box.AbsPath = abs
v.boxes[name] = box
}
}
func (v *Visitor) fromVariable(as *ast.AssignStmt) (string, error) {
if len(as.Rhs) == 1 {
if bs, ok := as.Rhs[0].(*ast.BasicLit); ok {
return bs.Value, nil
}
}
return "", fmt.Errorf("unable to find value from variable %v", as)
}
func (v *Visitor) addVariable(bn string, as *ast.AssignStmt) error {
bv, err := v.fromVariable(as)
if err != nil {
return nil
}
if len(bn) == 0 {
bn = bv
}
v.addBox(bn, bv)
return nil
}
func (v *Visitor) fromConstant(vs *ast.ValueSpec) (string, error) {
if len(vs.Values) == 1 {
if bs, ok := vs.Values[0].(*ast.BasicLit); ok {
return bs.Value, nil
}
}
return "", fmt.Errorf("unable to find value from constant %v", vs)
}
func (v *Visitor) addConstant(bn string, vs *ast.ValueSpec) error {
if len(vs.Values) == 1 {
if bs, ok := vs.Values[0].(*ast.BasicLit); ok {
bv := bs.Value
if len(bn) == 0 {
bn = bv
}
v.addBox(bn, bv)
}
}
return nil
}

View File

@@ -1,56 +0,0 @@
package packr
import (
"fmt"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/gobuffalo/packr/v2/jam/parser"
"github.com/gobuffalo/packr/v2/plog"
"github.com/markbates/safe"
)
var boxes = &boxMap{}
var _ = safe.Run(func() {
p, err := parser.NewFromRoots([]string{}, nil)
if err != nil {
plog.Logger.Error(err)
return
}
boxes, err := p.Run()
if err != nil {
plog.Logger.Error(err)
return
}
for _, box := range boxes {
b := construct(box.Name, box.AbsPath)
_, err = placeBox(b)
if err != nil {
plog.Logger.Error(err)
return
}
}
})
func findBox(name string) (*Box, error) {
key := resolver.Key(name)
plog.Debug("packr", "findBox", "name", name, "key", key)
b, ok := boxes.Load(key)
if !ok {
plog.Debug("packr", "findBox", "name", name, "key", key, "found", ok)
return nil, fmt.Errorf("could not find box %s", name)
}
plog.Debug(b, "found", "box", b)
return b, nil
}
func placeBox(b *Box) (*Box, error) {
key := resolver.Key(b.Name)
eb, _ := boxes.LoadOrStore(key, b)
plog.Debug("packr", "placeBox", "name", eb.Name, "path", eb.Path, "resolution directory", eb.ResolutionDir)
return eb, nil
}

View File

@@ -1,41 +0,0 @@
package plog
import (
"encoding/json"
"fmt"
"github.com/gobuffalo/logger"
"github.com/sirupsen/logrus"
)
var Logger = logger.New(logger.ErrorLevel)
func Debug(t interface{}, m string, args ...interface{}) {
if len(args)%2 == 1 {
args = append(args, "")
}
f := logrus.Fields{}
for i := 0; i < len(args); i += 2 {
k := fmt.Sprint(args[i])
v := args[i+1]
if s, ok := v.(fmt.Stringer); ok {
f[k] = s.String()
continue
}
if s, ok := v.(string); ok {
f[k] = s
continue
}
if b, err := json.Marshal(v); err == nil {
f[k] = string(b)
continue
}
f[k] = v
}
e := Logger.WithFields(f)
if s, ok := t.(string); ok {
e.Debugf("%s#%s", s, m)
return
}
e.Debugf("%T#%s", t, m)
}

View File

@@ -1,32 +0,0 @@
package packr
import (
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/gobuffalo/packr/v2/plog"
)
// Pointer is a resolver which resolves
// a file from a different box.
type Pointer struct {
ForwardBox string
ForwardPath string
}
var _ resolver.Resolver = Pointer{}
// Resolve attempts to find the file in the specific box
// with the specified key
func (p Pointer) Resolve(box string, path string) (file.File, error) {
plog.Debug(p, "Resolve", "box", box, "path", path, "forward-box", p.ForwardBox, "forward-path", p.ForwardPath)
b, err := findBox(p.ForwardBox)
if err != nil {
return nil, err
}
f, err := b.Resolve(p.ForwardPath)
if err != nil {
return f, err
}
plog.Debug(p, "Resolve", "box", box, "path", path, "file", f)
return file.NewFileR(path, f)
}

View File

@@ -1,75 +0,0 @@
//go:generate mapgen -name "resolvers" -zero "nil" -go-type "resolver.Resolver" -pkg "" -a "nil" -b "nil" -c "nil" -bb "nil" -destination "packr"
// Code generated by github.com/gobuffalo/mapgen. DO NOT EDIT.
package packr
import (
"sort"
"sync"
"github.com/gobuffalo/packr/v2/file/resolver"
)
// resolversMap wraps sync.Map and uses the following types:
// key: string
// value: resolver.Resolver
type resolversMap struct {
data sync.Map
}
// Delete the key from the map
func (m *resolversMap) Delete(key string) {
m.data.Delete(key)
}
// Load the key from the map.
// Returns resolver.Resolver or bool.
// A false return indicates either the key was not found
// or the value is not of type resolver.Resolver
func (m *resolversMap) Load(key string) (resolver.Resolver, bool) {
i, ok := m.data.Load(key)
if !ok {
return nil, false
}
s, ok := i.(resolver.Resolver)
return s, ok
}
// LoadOrStore will return an existing key or
// store the value if not already in the map
func (m *resolversMap) LoadOrStore(key string, value resolver.Resolver) (resolver.Resolver, bool) {
i, _ := m.data.LoadOrStore(key, value)
s, ok := i.(resolver.Resolver)
return s, ok
}
// Range over the resolver.Resolver values in the map
func (m *resolversMap) Range(f func(key string, value resolver.Resolver) bool) {
m.data.Range(func(k, v interface{}) bool {
key, ok := k.(string)
if !ok {
return false
}
value, ok := v.(resolver.Resolver)
if !ok {
return false
}
return f(key, value)
})
}
// Store a resolver.Resolver in the map
func (m *resolversMap) Store(key string, value resolver.Resolver) {
m.data.Store(key, value)
}
// Keys returns a list of keys in the map
func (m *resolversMap) Keys() []string {
var keys []string
m.Range(func(key string, value resolver.Resolver) bool {
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}

View File

@@ -1,4 +0,0 @@
package packr
// Version of Packr
const Version = "v2.8.1"

View File

@@ -1,80 +0,0 @@
package packr
import (
"sort"
"strings"
"github.com/gobuffalo/packd"
"github.com/gobuffalo/packr/v2/file"
"github.com/gobuffalo/packr/v2/file/resolver"
"github.com/gobuffalo/packr/v2/plog"
)
// WalkFunc is used to walk a box
type WalkFunc = packd.WalkFunc
// Walk will traverse the box and call the WalkFunc for each file in the box/folder.
func (b *Box) Walk(wf WalkFunc) error {
m := map[string]file.File{}
dr := b.DefaultResolver
if dr == nil {
cd := resolver.OsPath(b.ResolutionDir)
dr = &resolver.Disk{Root: cd}
}
if fm, ok := dr.(file.FileMappable); ok {
for n, f := range fm.FileMap() {
m[n] = f
}
}
var err error
b.resolvers.Range(func(n string, r resolver.Resolver) bool {
var f file.File
f, err = r.Resolve("", n)
if err != nil {
return false
}
keep := true
for k := range m {
if strings.EqualFold(k, n) {
keep = false
}
}
if keep {
m[n] = f
}
return true
})
if err != nil {
return err
}
var keys = make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
osPath := resolver.OsPath(k)
plog.Debug(b, "Walk", "path", k, "osPath", osPath)
if err := wf(osPath, m[k]); err != nil {
return err
}
}
return nil
}
// WalkPrefix will call box.Walk and call the WalkFunc when it finds paths that have a matching prefix
func (b *Box) WalkPrefix(prefix string, wf WalkFunc) error {
ipref := resolver.OsPath(prefix)
return b.Walk(func(path string, f File) error {
ipath := resolver.OsPath(path)
if strings.HasPrefix(ipath, ipref) {
if err := wf(path, f); err != nil {
return err
}
}
return nil
})
}
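
For reference, an illustrative sketch (not part of the original file) of walking a box with the API above; the box name and folder are placeholders:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/packr/v2"
)

func main() {
	// An assumed box: packr.New associates a name with an on-disk folder.
	box := packr.New("templates", "./templates")

	// Walk visits every file in the box in sorted key order.
	err := box.Walk(func(path string, f packr.File) error {
		fmt.Println("found:", path)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```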

View File

@@ -2,7 +2,6 @@
FAQ.md
README.md
LICENSE
Makefile
.gitignore
.travis.yml
CONTRIBUTING.md

View File

@@ -4,4 +4,7 @@ cli/cli
cli/migrate
.coverage
.godoc.pid
vendor/
vendor/
.vscode/
.idea
dist/

View File

@@ -1,6 +1,6 @@
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 2m
timeout: 5m
linters:
enable:
#- golint

View File

@@ -0,0 +1,98 @@
project_name: migrate
before:
hooks:
- go mod tidy
builds:
- env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
goarch:
- amd64
- arm
- arm64
- 386
goarm:
- 7
main: ./cmd/migrate
ldflags:
- '-w -s -X main.Version={{ .Version }} -extldflags "static"'
flags:
- "-tags={{ .Env.DATABASE }} {{ .Env.SOURCE }}"
- "-trimpath"
nfpms:
- homepage: "https://github.com/golang-migrate/migrate"
maintainer: "dhui@users.noreply.github.com"
license: MIT
description: "Database migrations"
formats:
- deb
file_name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
dockers:
- goos: linux
goarch: amd64
dockerfile: Dockerfile.github-actions
use: buildx
ids:
- migrate
image_templates:
- 'migrate/migrate:{{ .Tag }}-amd64'
build_flag_templates:
- '--label=org.opencontainers.image.created={{ .Date }}'
- '--label=org.opencontainers.image.title={{ .ProjectName }}'
- '--label=org.opencontainers.image.revision={{ .FullCommit }}'
- '--label=org.opencontainers.image.version={{ .Version }}'
- "--label=org.opencontainers.image.source={{ .GitURL }}"
- "--platform=linux/amd64"
- goos: linux
goarch: arm64
dockerfile: Dockerfile.github-actions
use: buildx
ids:
- migrate
image_templates:
- 'migrate/migrate:{{ .Tag }}-arm64'
build_flag_templates:
- '--label=org.opencontainers.image.created={{ .Date }}'
- '--label=org.opencontainers.image.title={{ .ProjectName }}'
- '--label=org.opencontainers.image.revision={{ .FullCommit }}'
- '--label=org.opencontainers.image.version={{ .Version }}'
- "--label=org.opencontainers.image.source={{ .GitURL }}"
- "--platform=linux/arm64"
docker_manifests:
- name_template: 'migrate/migrate:{{ .Tag }}'
image_templates:
- 'migrate/migrate:{{ .Tag }}-amd64'
- 'migrate/migrate:{{ .Tag }}-arm64'
- name_template: 'migrate/migrate:{{ .Major }}'
image_templates:
- 'migrate/migrate:{{ .Tag }}-amd64'
- 'migrate/migrate:{{ .Tag }}-arm64'
- name_template: 'migrate/migrate:latest'
image_templates:
- 'migrate/migrate:{{ .Tag }}-amd64'
- 'migrate/migrate:{{ .Tag }}-arm64'
archives:
- name_template: "{{ .ProjectName }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
format_overrides:
- goos: windows
format: zip
checksum:
name_template: 'sha256sum.txt'
release:
draft: true
changelog:
skip: false
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- Merge pull request
- Merge branch
- go mod tidy
snapshot:
name_template: "{{ .Tag }}-next"

View File

@@ -6,8 +6,8 @@ matrix:
- go: master
include:
# Supported versions of Go: https://golang.org/dl/
- go: "1.11.x"
- go: "1.12.x"
- go: "1.14.x"
- go: "1.15.x"
- go: master
go_import_path: github.com/golang-migrate/migrate
@@ -26,8 +26,15 @@ cache:
directories:
- $GOPATH/pkg
before_install:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.16.0
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- sudo apt-get update
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
# Install golangci-lint
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
install:
@@ -50,13 +57,16 @@ deploy:
secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w=
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
file:
- cli/build/migrate.linux-amd64.tar.gz
- cli/build/migrate.linux-armv7.tar.gz
- cli/build/migrate.linux-arm64.tar.gz
- cli/build/migrate.darwin-amd64.tar.gz
- cli/build/migrate.windows-amd64.exe.tar.gz
- cli/build/migrate.windows-386.exe.tar.gz
- cli/build/sha256sum.txt
- dependency_tree.txt
- provider: packagecloud
@@ -68,7 +78,7 @@ deploy:
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
@@ -80,7 +90,7 @@ deploy:
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
@@ -88,11 +98,11 @@ deploy:
username: golang-migrate
token:
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
dist: ubuntu/cosmic
dist: ubuntu/focal
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
@@ -104,7 +114,7 @@ deploy:
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
- provider: packagecloud
@@ -116,13 +126,13 @@ deploy:
package_glob: '*.deb'
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true
- provider: script
script: ./docker-deploy.sh
skip_cleanup: true
on:
go: "1.12.x"
go: "1.15.x"
repo: golang-migrate/migrate
tags: true

View File

@@ -1,23 +1,26 @@
FROM golang:1.12-alpine3.9 AS downloader
FROM golang:1.16-alpine3.13 AS builder
ARG VERSION
RUN apk add --no-cache git gcc musl-dev
RUN apk add --no-cache git gcc musl-dev make
WORKDIR /go/src/github.com/golang-migrate/migrate
ENV GO111MODULE=on
COPY go.mod go.sum ./
RUN go mod download
COPY . ./
ENV GO111MODULE=on
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb"
ENV SOURCES="file go_bindata github aws_s3 google_cloud_storage godoc_vfs gitlab"
RUN make build-docker
RUN go build -a -o build/migrate.linux-386 -ldflags="-X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
FROM alpine:3.9
FROM alpine:3.13
RUN apk add --no-cache ca-certificates
COPY --from=downloader /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /migrate
COPY --from=builder /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /usr/local/bin/migrate
RUN ln -s /usr/local/bin/migrate /migrate
ENTRYPOINT ["/migrate"]
ENTRYPOINT ["migrate"]
CMD ["--help"]

View File

@@ -0,0 +1,17 @@
ARG DOCKER_IMAGE
FROM $DOCKER_IMAGE
RUN apk add --no-cache git gcc musl-dev make
WORKDIR /go/src/github.com/golang-migrate/migrate
ENV GO111MODULE=on
ENV COVERAGE_DIR=/tmp/coverage
COPY go.mod go.sum ./
RUN go mod download
COPY . ./
CMD ["make", "test"]

View File

@@ -0,0 +1,8 @@
FROM alpine:3.13
RUN apk add --no-cache ca-certificates
ENTRYPOINT ["/usr/bin/migrate"]
CMD ["--help"]
COPY migrate /usr/bin/migrate

View File

@@ -16,7 +16,7 @@
NilMigration defines a migration without a body. NilVersion is defined as const -1.
#### What is the difference between uint(version) and int(targetVersion)?
version refers to an existing migration version coming from a source and therefor can never be negative.
version refers to an existing migration version coming from a source and therefore can never be negative.
targetVersion can either be a version OR represent a NilVersion, which equals -1.
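
To make the distinction concrete, here is a self-contained sketch mirroring the convention described above (illustrative only, not code from this repository):

```go
package main

import "fmt"

// A version read from a source always exists, so it is unsigned; a target
// version may also mean "no version at all", so it must be signed.
const NilVersion int = -1

func describe(targetVersion int) string {
	if targetVersion == NilVersion {
		return "empty database (no version)"
	}
	return fmt.Sprintf("existing version %d", uint(targetVersion))
}

func main() {
	fmt.Println(describe(NilVersion)) // empty database (no version)
	fmt.Println(describe(7))          // existing version 7
}
```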
#### What's the difference between Next/Previous and Up/Down?
@@ -53,7 +53,7 @@
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though.
The driver's functionality is dictated by migrate's interfaces. That means there should really
just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing,
just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the
just implemented a bit differently, co-exist somewhere on GitHub. If users have to do research first to find the
"best" available driver for a database in order to get started, we would have failed as an open source community.
#### Can I mix multiple sources during a batch of migrations?
@@ -68,3 +68,12 @@
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations
against the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
the `pg_advisory_lock` function.
#### Do I need to create a table for tracking migration version used?
No, it is done automatically.
#### Can I use migrate with a non-Go project?
Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
#### I have got an error `Dirty database version 1. Fix and force version`. What should I do?
Keep calm and refer to [the getting started docs](GETTING_STARTED.md#forcing-your-database-version).

View File

@@ -0,0 +1,53 @@
# Getting started
Before you start, you should understand the concept of forward/up and reverse/down database migrations.
Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases)
## Create migrations
Create some migrations using migrate CLI. Here is an example:
```
migrate create -ext sql -dir db/migrations -seq create_users_table
```
Once you create your files, you should fill them.
**IMPORTANT:** In a project developed by more than one person there is a chance of migration inconsistency - e.g. two developers can create conflicting migrations, and the developer who created their migration later gets it merged to the repository first.
Developers and Teams should keep an eye on such cases (especially during code review).
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.
Consider making your migrations idempotent - the same SQL code can be run twice in a row with the same result. This makes migrations more robust. On the other hand, it gives slightly less control over the database schema - e.g. say you forgot to drop the table in the down migration. You run the down migration - the table is still there. When you run the up migration again, `CREATE TABLE` would return an error, helping you find the issue in the down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.
In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
This way, if one of the commands fails, your database will remain unchanged.
## Run migrations
Run your migrations through the CLI or your app and check if they applied the expected changes.
Just to give you an idea:
```
migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up
```
Just add the code to your app and you're ready to go!
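If you use migrate as a library instead, a minimal sketch looks roughly like this (the migrations path and connection string below are placeholders):

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres" // database driver
	_ "github.com/golang-migrate/migrate/v4/source/file"       // source driver
)

func main() {
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://user:password@localhost:5432/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Apply all pending up migrations; ErrNoChange just means we're already up to date.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```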
Before committing your migrations you should run them up, down, and then up again to see if they work properly both ways.
(e.g. if you created a table in a migration but the reverse migration did not delete it, you will encounter an error when running the forward migration again)
It's also worth checking your migrations in a separate, containerized environment. You can find some tools at the end of this document.
**IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.
## Forcing your database version
In case you run a migration that contained an error, migrate will not let you run other migrations on the same database. You will see an error like `Dirty database version 1. Fix and force version`, even after you fix the broken migration. This means your database was marked as 'dirty'.
You need to investigate the migration error - was your migration applied partially, or was it not applied at all? Once you know, you should force your database to a version reflecting its real state. You can do so with the `force` command:
```
migrate -path PATH_TO_YOUR_MIGRATIONS -database YOUR_DATABASE_URL force VERSION
```
Once you force the version and your migration was fixed, your database is 'clean' again and you can proceed with your migrations.
For details and example of usage see [this comment](https://github.com/golang-migrate/migrate/issues/282#issuecomment-530743258).
## Further reading:
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
- [Best practices](MIGRATIONS.md)
- [FAQ](FAQ.md)
- Tools for testing your migrations in a container:
- https://github.com/dhui/dktest
- https://github.com/ory/dockertest

View File

@@ -44,9 +44,14 @@ It is suggested that the version number of corresponding `up` and `down` migrati
files be equivalent for clarity, but they are allowed to differ so long as the
relative ordering of the migrations is preserved.
The migration files are permitted to be empty, so in the event that a migration
is a no-op or is irreversible, it is recommended to still include both migration
files, and either leaving them empty or adding a comment as appropriate.
The migration files are permitted to be "empty", in the event that a migration
is a no-op or is irreversible. It is recommended to still include both migration
files by making the whole migration file consist of a comment.
If your database does not support comments, then deleting the migration file will also work.
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
will attempt to run an empty query. In this case, deleting the migration file will also work.
For the rationale of this behavior see:
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)
## Migration Content Format

View File

@@ -1,15 +1,30 @@
SOURCE ?= file go_bindata github aws_s3 google_cloud_storage godoc_vfs gitlab
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb
SOURCE ?= file go_bindata github github_ee bitbucket aws_s3 google_cloud_storage godoc_vfs gitlab
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver firebird neo4j pgx
DATABASE_TEST ?= $(DATABASE) sqlite sqlite3 sqlcipher
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
TEST_FLAGS ?=
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
COVERAGE_DIR ?= .coverage
echo-source:
@echo "$(SOURCE)"
echo-database:
@echo "$(DATABASE)"
build:
CGO_ENABLED=0 go build -ldflags='-X main.Version=$(VERSION)' -tags '$(DATABASE) $(SOURCE)' ./cmd/migrate
build-docker:
CGO_ENABLED=0 go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$(DATABASE) $(SOURCE)" ./cmd/migrate
build-cli: clean
-mkdir ./cli/build
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -a -o ../../cli/build/migrate.linux-armv7 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -a -o ../../cli/build/migrate.linux-arm64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=386 go build -a -o ../../cli/build/migrate.windows-386.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
cd ./cli/build && shasum -a 256 * > sha256sum.txt
@@ -27,27 +42,14 @@ test-short:
test:
@-rm -r $(COVERAGE_DIR)
@mkdir $(COVERAGE_DIR)
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/_$$(RAND).txt -bench=. -benchmem -timeout 20m'
@echo 'mode: atomic' > $(COVERAGE_DIR)/combined.txt
@cat $(COVERAGE_DIR)/_*.txt | grep -v 'mode: atomic' >> $(COVERAGE_DIR)/combined.txt
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
test-with-flags:
@echo SOURCE: $(SOURCE)
@echo DATABASE: $(DATABASE)
@echo SOURCE: $(SOURCE)
@echo DATABASE_TEST: $(DATABASE_TEST)
@go test $(TEST_FLAGS) .
@go test $(TEST_FLAGS) ./cli/...
@go test $(TEST_FLAGS) ./database
@go test $(TEST_FLAGS) ./testing/...
@echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{}
@go test $(TEST_FLAGS) ./source/testing/...
@go test $(TEST_FLAGS) ./source/stub/...
@echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{}
@go test $(TEST_FLAGS) ./database/testing/...
@go test $(TEST_FLAGS) ./database/stub/...
@go test $(TEST_FLAGS) ./...
kill-orphaned-docker-containers:
@@ -84,7 +86,7 @@ rewrite-import-paths:
docs:
-make kill-docs
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
cat .godoc.pid
cat .godoc.pid
kill-docs:
@@ -109,10 +111,10 @@ define external_deps
endef
.PHONY: build-cli clean test-short test test-with-flags html-coverage \
.PHONY: build build-docker build-cli clean test-short test test-with-flags html-coverage \
restore-import-paths rewrite-import-paths list-external-deps release \
docs kill-docs open-docs kill-orphaned-docker-containers
SHELL = /bin/bash
SHELL = /bin/sh
RAND = $(shell echo $$RANDOM)

View File

@@ -1,21 +1,21 @@
[![Build Status](https://img.shields.io/travis/com/golang-migrate/migrate/master.svg)](https://travis-ci.com/golang-migrate/migrate)
[![GoDoc](https://godoc.org/github.com/golang-migrate/migrate?status.svg)](https://godoc.org/github.com/golang-migrate/migrate)
[![CircleCI - Build Status](https://img.shields.io/circleci/build/github/golang-migrate/migrate/master)](https://circleci.com/gh/golang-migrate/migrate)
[![GoDoc](https://pkg.go.dev/badge/github.com/golang-migrate/migrate)](https://pkg.go.dev/github.com/golang-migrate/migrate/v4)
[![Coverage Status](https://img.shields.io/coveralls/github/golang-migrate/migrate/master.svg)](https://coveralls.io/github/golang-migrate/migrate?branch=master)
[![packagecloud.io](https://img.shields.io/badge/deb-packagecloud.io-844fec.svg)](https://packagecloud.io/golang-migrate/migrate?filter=debs)
[![Docker Pulls](https://img.shields.io/docker/pulls/migrate/migrate.svg)](https://hub.docker.com/r/migrate/migrate/)
![Supported Go Versions](https://img.shields.io/badge/Go-1.11%2C%201.12-lightgrey.svg)
![Supported Go Versions](https://img.shields.io/badge/Go-1.15%2C%201.16-lightgrey.svg)
[![GitHub Release](https://img.shields.io/github/release/golang-migrate/migrate.svg)](https://github.com/golang-migrate/migrate/releases)
[![Go Report Card](https://goreportcard.com/badge/github.com/golang-migrate/migrate)](https://goreportcard.com/report/github.com/golang-migrate/migrate)
# migrate
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
* Migrate reads migrations from [sources](#migration-sources)
* Migrate reads migrations from [sources](#migration-sources)
and applies them in correct order to a [database](#databases).
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
(Keeps the drivers lightweight, too.)
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
Forked from [mattes/migrate](https://github.com/mattes/migrate)
@@ -23,24 +23,28 @@ Forked from [mattes/migrate](https://github.com/mattes/migrate)
Database drivers run migrations. [Add a new database?](database/driver.go)
* [PostgreSQL](database/postgres)
* [Redshift](database/redshift)
* [Ql](database/ql)
* [Cassandra](database/cassandra)
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
* [MySQL/ MariaDB](database/mysql)
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
* [MongoDB](database/mongodb)
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
* [Google Cloud Spanner](database/spanner)
* [CockroachDB](database/cockroachdb)
* [ClickHouse](database/clickhouse)
* [Firebird](database/firebird)
* [PostgreSQL](database/postgres)
* [PGX](database/pgx)
* [Redshift](database/redshift)
* [Ql](database/ql)
* [Cassandra](database/cassandra)
* [SQLite](database/sqlite)
* [SQLite3](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
* [SQLCipher](database/sqlcipher)
* [MySQL/ MariaDB](database/mysql)
* [Neo4j](database/neo4j)
* [MongoDB](database/mongodb)
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
* [Google Cloud Spanner](database/spanner)
* [CockroachDB](database/cockroachdb)
* [ClickHouse](database/clickhouse)
* [Firebird](database/firebird)
* [MS SQL Server](database/sqlserver)
### Database URLs
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?option1=true&option2=false`
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?param1=true&param2=false`
Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note, the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character)
@@ -48,6 +52,7 @@ Explicitly, the following characters need to be escaped:
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
It's easiest to always run the URL parts of your DB connection URL (e.g. username, password, etc.) through a URL encoder. See the example Python snippets below:
```bash
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
@@ -62,44 +67,44 @@ $
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
* [Filesystem](source/file) - read from fileystem
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
* [Github](source/github) - read from remote Github repositories
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
* [Filesystem](source/file) - read from filesystem
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
* [GitHub](source/github) - read from remote GitHub repositories
* [GitHub Enterprise](source/github_ee) - read from remote GitHub Enterprise repositories
* [Bitbucket](source/bitbucket) - read from remote Bitbucket repositories
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
## CLI usage
* Simple wrapper around this library.
* Handles ctrl+c (SIGINT) gracefully.
* No config search paths, no config files, no magic ENV var injections.
* Simple wrapper around this library.
* Handles ctrl+c (SIGINT) gracefully.
* No config search paths, no config files, no magic ENV var injections.
__[CLI Documentation](cli)__
__[CLI Documentation](cmd/migrate)__
### Basic usage:
### Basic usage
```
```bash
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
```
### Docker usage
```
```bash
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate
-path=/migrations/ -database postgres://localhost:5432/database up 2
```
## Use in your Go project
* API is stable and frozen for this release (v3 & v4).
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
* Bring your own logger.
* Uses `io.Reader` streams internally for low memory overhead.
* Thread-safe and no goroutine leaks.
* API is stable and frozen for this release (v3 & v4).
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
* Bring your own logger.
* Uses `io.Reader` streams internally for low memory overhead.
* Thread-safe and no goroutine leaks.
__[Go Documentation](https://godoc.org/github.com/golang-migrate/migrate)__
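The graceful-stop bullet above is driven from the caller's side. A rough, hedged sketch of wiring it to SIGINT, assuming a hypothetical `file://migrations` source and a local Postgres URL (any registered database driver would work the same way):

```go
package main

import (
	"errors"
	"log"
	"os"
	"os/signal"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	m, err := migrate.New(
		"file://migrations",
		"postgres://localhost:5432/database?sslmode=disable",
	)
	if err != nil {
		log.Fatal(err)
	}

	// On Ctrl+C, ask migrate to stop after the current migration
	// instead of leaving the schema in a dirty state.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	go func() {
		<-sig
		m.GracefulStop <- true
	}()

	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		log.Fatal(err)
	}
}
```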
@@ -139,11 +144,22 @@ func main() {
}
```
## Getting started
Go to [getting started](GETTING_STARTED.md)
## Tutorials
* [CockroachDB](database/cockroachdb/TUTORIAL.md)
* [PostgreSQL](database/postgres/TUTORIAL.md)
(more tutorials to come)
## Migration files
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
```
```bash
1481574547_create_users_table.up.sql
1481574547_create_users_table.down.sql
```
@@ -165,8 +181,6 @@ read the [development guide](CONTRIBUTING.md).
Also have a look at the [FAQ](FAQ.md).
---
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).

View File

@@ -7,12 +7,14 @@ package database
import (
"fmt"
"io"
nurl "net/url"
"sync"
iurl "github.com/golang-migrate/migrate/v4/internal/url"
)
var (
ErrLocked = fmt.Errorf("can't acquire lock")
ErrLocked = fmt.Errorf("can't acquire lock")
ErrNotLocked = fmt.Errorf("can't unlock, as not currently locked")
)
const NilVersion int = -1
@@ -32,7 +34,7 @@ var drivers = make(map[string]Driver)
// All other functions are tested by tests in database/testing.
// Saves you some time and makes sure all database drivers behave the same way.
// 5. Call Register in init().
// 6. Create a migrate/cli/build_<driver-name>.go file
// 6. Create a internal/cli/build_<driver-name>.go file
// 7. Add driver name in 'DATABASE' variable in Makefile
//
// Guidelines:
@@ -60,7 +62,7 @@ type Driver interface {
// all migrations have been run.
Unlock() error
// Run applies a migration to the database. migration is garantueed to be not nil.
// Run applies a migration to the database. migration is guaranteed to be not nil.
Run(migration io.Reader) error
// SetVersion saves version and dirty state.
@@ -81,21 +83,16 @@ type Driver interface {
// Open returns a new driver instance.
func Open(url string) (Driver, error) {
u, err := nurl.Parse(url)
scheme, err := iurl.SchemeFromURL(url)
if err != nil {
return nil, fmt.Errorf("Unable to parse URL. Did you escape all reserved URL characters? "+
"See: https://github.com/golang-migrate/migrate#database-urls Error: %v", err)
}
if u.Scheme == "" {
return nil, fmt.Errorf("database driver: invalid URL scheme")
return nil, err
}
driversMu.RLock()
d, ok := drivers[u.Scheme]
d, ok := drivers[scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme)
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", scheme)
}
return d.Open(url)

View File

@@ -0,0 +1,16 @@
# sqlite3
`sqlite3://path/to/database?query`
Unlike other migrate database drivers, the sqlite3 driver will automatically wrap each migration in an implicit transaction by default. Migrations must not contain explicit `BEGIN` or `COMMIT` statements. This behavior may change in a future major release. (See below for a workaround.)
Refer to [upstream documentation](https://github.com/mattn/go-sqlite3/blob/master/README.md#connection-string) for a complete list of query parameters supported by the sqlite3 database driver. The auxiliary query parameters listed below may be supplied to tailor migrate behavior. All auxiliary query parameters are optional.
| URL Query | WithInstance Config | Description |
|------------|---------------------|-------------|
| `x-migrations-table` | `MigrationsTable` | Name of the migrations table. Defaults to `schema_migrations`. |
| `x-no-tx-wrap` | `NoTxWrap` | Disable implicit transactions when `true`. Migrations may, and should, contain explicit `BEGIN` and `COMMIT` statements. |
## Notes
* Uses the `github.com/mattn/go-sqlite3` sqlite db driver (cgo)
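A minimal, hedged sketch of passing these query parameters from application code; the `app.db` file and the `file://migrations` source directory are hypothetical:

```go
package main

import (
	"errors"
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// x-no-tx-wrap=true disables the implicit per-migration transaction,
	// so the migration files themselves may contain BEGIN/COMMIT.
	m, err := migrate.New(
		"file://migrations",
		"sqlite3://app.db?x-migrations-table=schema_migrations&x-no-tx-wrap=true",
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		log.Fatal(err)
	}
}
```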

View File

@@ -3,9 +3,11 @@ package sqlite3
import (
"database/sql"
"fmt"
"go.uber.org/atomic"
"io"
"io/ioutil"
nurl "net/url"
"strconv"
"strings"
"github.com/golang-migrate/migrate/v4"
@@ -28,11 +30,12 @@ var (
type Config struct {
MigrationsTable string
DatabaseName string
NoTxWrap bool
}
type Sqlite struct {
db *sql.DB
isLocked bool
isLocked atomic.Bool
config *Config
}
@@ -100,13 +103,25 @@ func (m *Sqlite) Open(url string) (database.Driver, error) {
return nil, err
}
migrationsTable := purl.Query().Get("x-migrations-table")
qv := purl.Query()
migrationsTable := qv.Get("x-migrations-table")
if len(migrationsTable) == 0 {
migrationsTable = DefaultMigrationsTable
}
noTxWrap := false
if v := qv.Get("x-no-tx-wrap"); v != "" {
noTxWrap, err = strconv.ParseBool(v)
if err != nil {
return nil, fmt.Errorf("x-no-tx-wrap: %s", err)
}
}
mx, err := WithInstance(db, &Config{
DatabaseName: purl.Path,
MigrationsTable: migrationsTable,
NoTxWrap: noTxWrap,
})
if err != nil {
return nil, err
@@ -129,6 +144,7 @@ func (m *Sqlite) Drop() (err error) {
err = multierror.Append(err, errClose)
}
}()
tableNames := make([]string, 0)
for tables.Next() {
var tableName string
@@ -139,6 +155,10 @@ func (m *Sqlite) Drop() (err error) {
tableNames = append(tableNames, tableName)
}
}
if err := tables.Err(); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if len(tableNames) > 0 {
for _, t := range tableNames {
query := "DROP TABLE " + t
@@ -158,18 +178,16 @@ func (m *Sqlite) Drop() (err error) {
}
func (m *Sqlite) Lock() error {
if m.isLocked {
if !m.isLocked.CAS(false, true) {
return database.ErrLocked
}
m.isLocked = true
return nil
}
func (m *Sqlite) Unlock() error {
if !m.isLocked {
return nil
if !m.isLocked.CAS(true, false) {
return database.ErrNotLocked
}
m.isLocked = false
return nil
}
@@ -180,6 +198,9 @@ func (m *Sqlite) Run(migration io.Reader) error {
}
query := string(migr[:])
if m.config.NoTxWrap {
return m.executeQueryNoTx(query)
}
return m.executeQuery(query)
}
@@ -200,6 +221,13 @@ func (m *Sqlite) executeQuery(query string) error {
return nil
}
func (m *Sqlite) executeQueryNoTx(query string) error {
if _, err := m.db.Exec(query); err != nil {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
return nil
}
func (m *Sqlite) SetVersion(version int, dirty bool) error {
tx, err := m.db.Begin()
if err != nil {
@@ -211,9 +239,12 @@ func (m *Sqlite) SetVersion(version int, dirty bool) error {
return &database.Error{OrigErr: err, Query: []byte(query)}
}
if version >= 0 {
query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (%d, '%t')`, m.config.MigrationsTable, version, dirty)
if _, err := tx.Exec(query); err != nil {
// Also re-write the schema version for nil dirty versions to prevent
// empty schema version for failed down migration on the first migration
// See: https://github.com/golang-migrate/migrate/issues/330
if version >= 0 || (version == database.NilVersion && dirty) {
query := fmt.Sprintf(`INSERT INTO %s (version, dirty) VALUES (?, ?)`, m.config.MigrationsTable)
if _, err := tx.Exec(query, version, dirty); err != nil {
if errRollback := tx.Rollback(); errRollback != nil {
err = multierror.Append(err, errRollback)
}

View File

@@ -2,6 +2,7 @@ package database
import (
"fmt"
"go.uber.org/atomic"
"hash/crc32"
"strings"
)
@@ -17,3 +18,16 @@ func GenerateAdvisoryLockId(databaseName string, additionalNames ...string) (str
sum = sum * uint32(advisoryLockIDSalt)
return fmt.Sprint(sum), nil
}
// CasRestoreOnErr is a CAS wrapper that automatically restores the lock state on error
func CasRestoreOnErr(lock *atomic.Bool, o, n bool, casErr error, f func() error) error {
if !lock.CAS(o, n) {
return casErr
}
if err := f(); err != nil {
// Automatically unlock/lock on error
lock.Store(o)
return err
}
return nil
}
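For illustration, a hedged sketch of how a driver that tracks its lock with an `atomic.Bool` might use this helper; the `fakeDriver` type and its `acquire`/`release` methods are hypothetical stand-ins for real locking calls:

```go
package example

import (
	"go.uber.org/atomic"

	"github.com/golang-migrate/migrate/v4/database"
)

type fakeDriver struct {
	isLocked atomic.Bool
}

// acquire and release stand in for real advisory-lock round trips.
func (d *fakeDriver) acquire() error { return nil }
func (d *fakeDriver) release() error { return nil }

func (d *fakeDriver) Lock() error {
	// Flip false -> true; if acquiring the real lock fails, the flag is
	// restored to false and the error is returned.
	return database.CasRestoreOnErr(&d.isLocked, false, true, database.ErrLocked, d.acquire)
}

func (d *fakeDriver) Unlock() error {
	// Flip true -> false; restore true if releasing the real lock fails.
	return database.CasRestoreOnErr(&d.isLocked, true, false, database.ErrNotLocked, d.release)
}
```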

View File

@@ -0,0 +1,25 @@
package url
import (
"errors"
"strings"
)
var errNoScheme = errors.New("no scheme")
var errEmptyURL = errors.New("URL cannot be empty")
// SchemeFromURL returns the scheme from a URL string
func SchemeFromURL(url string) (string, error) {
if url == "" {
return "", errEmptyURL
}
i := strings.Index(url, ":")
// No : or : is the first character.
if i < 1 {
return "", errNoScheme
}
return url[0:i], nil
}
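Since the package is internal it can only be exercised from inside the module; a small hedged test sketch of the expected behaviour, using made-up inputs:

```go
package url

import "testing"

// Spot checks for SchemeFromURL.
func TestSchemeFromURL(t *testing.T) {
	scheme, err := SchemeFromURL("postgres://localhost:5432/database?sslmode=disable")
	if err != nil || scheme != "postgres" {
		t.Fatalf("got %q, %v; want \"postgres\", nil", scheme, err)
	}
	if _, err := SchemeFromURL("no-scheme-here"); err == nil {
		t.Fatal("expected an error for a string without a scheme")
	}
	if _, err := SchemeFromURL(""); err == nil {
		t.Fatal("expected an error for an empty URL")
	}
}
```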

View File

@@ -1,18 +1,20 @@
// Package migrate reads migrations from sources and runs them against databases.
// Sources are defined by the `source.Driver` and databases by the `database.Driver`
// interface. The driver interfaces are kept "dump", all migration logic is kept
// interface. The driver interfaces are kept "dumb", all migration logic is kept
// in this package.
package migrate
import (
"errors"
"fmt"
"github.com/hashicorp/go-multierror"
"os"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"github.com/golang-migrate/migrate/v4/database"
iurl "github.com/golang-migrate/migrate/v4/internal/url"
"github.com/golang-migrate/migrate/v4/source"
)
@@ -85,13 +87,13 @@ type Migrate struct {
func New(sourceURL, databaseURL string) (*Migrate, error) {
m := newCommon()
sourceName, err := sourceSchemeFromURL(sourceURL)
sourceName, err := iurl.SchemeFromURL(sourceURL)
if err != nil {
return nil, err
}
m.sourceName = sourceName
databaseName, err := databaseSchemeFromURL(databaseURL)
databaseName, err := iurl.SchemeFromURL(databaseURL)
if err != nil {
return nil, err
}
@@ -119,7 +121,7 @@ func New(sourceURL, databaseURL string) (*Migrate, error) {
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
m := newCommon()
sourceName, err := schemeFromURL(sourceURL)
sourceName, err := iurl.SchemeFromURL(sourceURL)
if err != nil {
return nil, err
}
@@ -145,7 +147,7 @@ func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInst
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
m := newCommon()
databaseName, err := schemeFromURL(databaseURL)
databaseName, err := iurl.SchemeFromURL(databaseURL)
if err != nil {
return nil, err
}
@@ -485,7 +487,7 @@ func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) && to == -1 {
if errors.Is(err, os.ErrNotExist) && to == -1 {
// apply nil migration
migr, err := m.newMigration(suint(from), -1)
if err != nil {
@@ -578,7 +580,7 @@ func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
// apply next migration
next, err := m.sourceDrv.Next(suint(from))
if os.IsNotExist(err) {
if errors.Is(err, os.ErrNotExist) {
// no limit, but no migrations applied?
if limit == -1 && count == 0 {
ret <- ErrNoChange
@@ -664,7 +666,7 @@ func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
}
prev, err := m.sourceDrv.Prev(suint(from))
if os.IsNotExist(err) {
if errors.Is(err, os.ErrNotExist) {
// no limit or haven't reached limit, apply "first" migration
if limit == -1 || limit-count > 0 {
firstVersion, err := m.sourceDrv.First()
@@ -783,9 +785,9 @@ func (m *Migrate) versionExists(version uint) (result error) {
}
}()
}
if os.IsExist(err) {
if errors.Is(err, os.ErrExist) {
return nil
} else if !os.IsNotExist(err) {
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
@@ -798,13 +800,15 @@ func (m *Migrate) versionExists(version uint) (result error) {
}
}()
}
if os.IsExist(err) {
if errors.Is(err, os.ErrExist) {
return nil
} else if !os.IsNotExist(err) {
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
return os.ErrNotExist
err = fmt.Errorf("no migration found for version %d: %w", version, err)
m.logErr(err)
return err
}
// stop returns true if no more migrations should be run against the database
@@ -832,7 +836,7 @@ func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, err
if targetVersion >= int(version) {
r, identifier, err := m.sourceDrv.ReadUp(version)
if os.IsNotExist(err) {
if errors.Is(err, os.ErrNotExist) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
@@ -852,7 +856,7 @@ func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, err
} else {
r, identifier, err := m.sourceDrv.ReadDown(version)
if os.IsNotExist(err) {
if errors.Is(err, os.ErrNotExist) {
// create "empty" migration
migr, err = NewMigration(nil, "", version, targetVersion)
if err != nil {
@@ -950,7 +954,7 @@ func (m *Migrate) unlock() error {
// if a prevErr is not nil.
func (m *Migrate) unlockErr(prevErr error) error {
if err := m.unlock(); err != nil {
return NewMultiError(prevErr, err)
return multierror.Append(prevErr, err)
}
return prevErr
}

View File

@@ -87,7 +87,7 @@ func Open(url string) (Driver, error) {
d, ok := drivers[u.Scheme]
driversMu.RUnlock()
if !ok {
return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme)
return nil, fmt.Errorf("source driver: unknown driver '%s' (forgotten import?)", u.Scheme)
}
return d.Open(url)

View File

@@ -0,0 +1,15 @@
package source
import "os"
// ErrDuplicateMigration is an error type for reporting duplicate migration
// files.
type ErrDuplicateMigration struct {
Migration
os.FileInfo
}
// Error implements error interface.
func (e ErrDuplicateMigration) Error() string {
return "duplicate migration file: " + e.Name()
}

View File

@@ -1,12 +1,8 @@
package file
import (
"fmt"
"io"
"io/ioutil"
nurl "net/url"
"os"
"path"
"path/filepath"
"github.com/golang-migrate/migrate/v4/source"
@@ -16,18 +12,11 @@ func init() {
source.Register("file", &File{})
}
type File struct {
url string
path string
migrations *source.Migrations
}
func (f *File) Open(url string) (source.Driver, error) {
func parseURL(url string) (string, error) {
u, err := nurl.Parse(url)
if err != nil {
return nil, err
return "", err
}
// concat host and path to restore full path
// host might be `.`
p := u.Opaque
@@ -39,7 +28,7 @@ func (f *File) Open(url string) (source.Driver, error) {
// default to current directory if no path
wd, err := os.Getwd()
if err != nil {
return nil, err
return "", err
}
p = wd
@@ -47,81 +36,9 @@ func (f *File) Open(url string) (source.Driver, error) {
// make path absolute if relative
abs, err := filepath.Abs(p)
if err != nil {
return nil, err
return "", err
}
p = abs
}
// scan directory
files, err := ioutil.ReadDir(p)
if err != nil {
return nil, err
}
nf := &File{
url: url,
path: p,
migrations: source.NewMigrations(),
}
for _, fi := range files {
if !fi.IsDir() {
m, err := source.DefaultParse(fi.Name())
if err != nil {
continue // ignore files that we can't parse
}
if !nf.migrations.Append(m) {
return nil, fmt.Errorf("unable to parse file %v", fi.Name())
}
}
}
return nf, nil
}
func (f *File) Close() error {
// nothing do to here
return nil
}
func (f *File) First() (version uint, err error) {
if v, ok := f.migrations.First(); ok {
return v, nil
}
return 0, &os.PathError{Op: "first", Path: f.path, Err: os.ErrNotExist}
}
func (f *File) Prev(version uint) (prevVersion uint, err error) {
if v, ok := f.migrations.Prev(version); ok {
return v, nil
}
return 0, &os.PathError{Op: fmt.Sprintf("prev for version %v", version), Path: f.path, Err: os.ErrNotExist}
}
func (f *File) Next(version uint) (nextVersion uint, err error) {
if v, ok := f.migrations.Next(version); ok {
return v, nil
}
return 0, &os.PathError{Op: fmt.Sprintf("next for version %v", version), Path: f.path, Err: os.ErrNotExist}
}
func (f *File) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := f.migrations.Up(version); ok {
r, err := os.Open(path.Join(f.path, m.Raw))
if err != nil {
return nil, "", err
}
return r, m.Identifier, nil
}
return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: f.path, Err: os.ErrNotExist}
}
func (f *File) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := f.migrations.Down(version); ok {
r, err := os.Open(path.Join(f.path, m.Raw))
if err != nil {
return nil, "", err
}
return r, m.Identifier, nil
}
return nil, "", &os.PathError{Op: fmt.Sprintf("read version %v", version), Path: f.path, Err: os.ErrNotExist}
return p, nil
}

View File

@@ -0,0 +1,32 @@
// +build !go1.16
package file
import (
"net/http"
"github.com/golang-migrate/migrate/v4/source"
"github.com/golang-migrate/migrate/v4/source/httpfs"
)
type File struct {
httpfs.PartialDriver
url string
path string
}
func (f *File) Open(url string) (source.Driver, error) {
p, err := parseURL(url)
if err != nil {
return nil, err
}
nf := &File{
url: url,
path: p,
}
if err := nf.Init(http.Dir(p), ""); err != nil {
return nil, err
}
return nf, nil
}

View File

@@ -0,0 +1,31 @@
// +build go1.16
package file
import (
"os"
"github.com/golang-migrate/migrate/v4/source"
"github.com/golang-migrate/migrate/v4/source/iofs"
)
type File struct {
iofs.PartialDriver
url string
path string
}
func (f *File) Open(url string) (source.Driver, error) {
p, err := parseURL(url)
if err != nil {
return nil, err
}
nf := &File{
url: url,
path: p,
}
if err := nf.Init(os.DirFS(p), "."); err != nil {
return nil, err
}
return nf, nil
}

View File

@@ -0,0 +1,49 @@
# httpfs
## Usage
This package can be used to create new migration source drivers that use
`http.FileSystem` to read migration files.
Struct `httpfs.PartialDriver` partly implements the `source.Driver` interface. It has all
the methods except for `Open()`. Embedding this struct and adding an `Open()` method
allows users of this package to create new migration sources. Example:
```go
type mydriver struct {
httpfs.PartialDriver
}
func (d *mydriver) Open(url string) (source.Driver, error) {
var fs http.FileSystem
var path string
var ds mydriver
// acquire fs and path from url
// set-up ds if necessary
if err := ds.Init(fs, path); err != nil {
return nil, err
}
return &ds, nil
}
```
This package also provides a simple `source.Driver` implementation that works
with an `http.FileSystem` provided by the user of this package. It is created with
a call to `httpfs.New()`.
Example of using `http.Dir()` to read migrations from the `sql` directory:
```go
src, err := httpfs.New(http.Dir("sql"))
if err != nil {
// do something
}
m, err := migrate.NewWithSourceInstance("httpfs", src, "database://url")
if err != nil {
// do something
}
err = m.Up()
...
```

View File

@@ -0,0 +1,31 @@
package httpfs
import (
"errors"
"net/http"
"github.com/golang-migrate/migrate/v4/source"
)
// driver is a migration source driver for reading migrations from
// http.FileSystem instances. It implements source.Driver interface and can be
// used as a migration source for the main migrate library.
type driver struct {
PartialDriver
}
// New creates a new migrate source driver from a http.FileSystem instance and a
// relative path to migration files within the virtual FS.
func New(fs http.FileSystem, path string) (source.Driver, error) {
var d driver
if err := d.Init(fs, path); err != nil {
return nil, err
}
return &d, nil
}
// Open completes the implementation of the source.Driver interface. Other methods
// are implemented by the embedded PartialDriver struct.
func (d *driver) Open(url string) (source.Driver, error) {
return nil, errors.New("Open() cannot be called on the httpfs passthrough driver")
}

View File

@@ -0,0 +1,156 @@
package httpfs
import (
"errors"
"io"
"net/http"
"os"
"path"
"strconv"
"github.com/golang-migrate/migrate/v4/source"
)
// PartialDriver is a helper service for creating new source drivers working with
// http.FileSystem instances. It implements all source.Driver interface methods
// except for Open(). A new driver can embed this struct and add the missing
// Open() method.
//
// To prepare a PartialDriver for use, call its Init() function.
type PartialDriver struct {
migrations *source.Migrations
fs http.FileSystem
path string
}
// Init prepares a not-yet-initialized PartialDriver instance to read migrations from a
// http.FileSystem instance and a relative path.
func (p *PartialDriver) Init(fs http.FileSystem, path string) error {
root, err := fs.Open(path)
if err != nil {
return err
}
files, err := root.Readdir(0)
if err != nil {
_ = root.Close()
return err
}
if err = root.Close(); err != nil {
return err
}
ms := source.NewMigrations()
for _, file := range files {
if file.IsDir() {
continue
}
m, err := source.DefaultParse(file.Name())
if err != nil {
continue // ignore files that we can't parse
}
if !ms.Append(m) {
return source.ErrDuplicateMigration{
Migration: *m,
FileInfo: file,
}
}
}
p.fs = fs
p.path = path
p.migrations = ms
return nil
}
// Close is part of source.Driver interface implementation. This is a no-op.
func (p *PartialDriver) Close() error {
return nil
}
// First is part of source.Driver interface implementation.
func (p *PartialDriver) First() (version uint, err error) {
if version, ok := p.migrations.First(); ok {
return version, nil
}
return 0, &os.PathError{
Op: "first",
Path: p.path,
Err: os.ErrNotExist,
}
}
// Prev is part of source.Driver interface implementation.
func (p *PartialDriver) Prev(version uint) (prevVersion uint, err error) {
if version, ok := p.migrations.Prev(version); ok {
return version, nil
}
return 0, &os.PathError{
Op: "prev for version " + strconv.FormatUint(uint64(version), 10),
Path: p.path,
Err: os.ErrNotExist,
}
}
// Next is part of source.Driver interface implementation.
func (p *PartialDriver) Next(version uint) (nextVersion uint, err error) {
if version, ok := p.migrations.Next(version); ok {
return version, nil
}
return 0, &os.PathError{
Op: "next for version " + strconv.FormatUint(uint64(version), 10),
Path: p.path,
Err: os.ErrNotExist,
}
}
// ReadUp is part of source.Driver interface implementation.
func (p *PartialDriver) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := p.migrations.Up(version); ok {
body, err := p.open(path.Join(p.path, m.Raw))
if err != nil {
return nil, "", err
}
return body, m.Identifier, nil
}
return nil, "", &os.PathError{
Op: "read up for version " + strconv.FormatUint(uint64(version), 10),
Path: p.path,
Err: os.ErrNotExist,
}
}
// ReadDown is part of source.Driver interface implementation.
func (p *PartialDriver) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := p.migrations.Down(version); ok {
body, err := p.open(path.Join(p.path, m.Raw))
if err != nil {
return nil, "", err
}
return body, m.Identifier, nil
}
return nil, "", &os.PathError{
Op: "read down for version " + strconv.FormatUint(uint64(version), 10),
Path: p.path,
Err: os.ErrNotExist,
}
}
func (p *PartialDriver) open(path string) (http.File, error) {
f, err := p.fs.Open(path)
if err == nil {
return f, nil
}
// Some non-standard file systems may return errors that don't include the path, which
// makes debugging harder.
if !errors.As(err, new(*os.PathError)) {
err = &os.PathError{
Op: "open",
Path: path,
Err: err,
}
}
return nil, err
}

View File

@@ -0,0 +1,3 @@
# iofs
https://pkg.go.dev/github.com/golang-migrate/migrate/v4/source/iofs

View File

@@ -0,0 +1,10 @@
/*
Package iofs provides the Go 1.16+ io/fs#FS driver.
It can accept various file systems (like embed.FS, archive/zip#Reader) implementing io/fs#FS.
This driver cannot be used with Go versions 1.15 and below.
Also, Opening with a URL scheme is not supported.
*/
package iofs
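Since the doc comment above points at embed.FS, a minimal sketch of feeding a `go:embed` file system to this driver; the `migrations` directory and the sqlite3 URL are hypothetical:

```go
package main

import (
	"embed"
	"errors"
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3"
	"github.com/golang-migrate/migrate/v4/source/iofs"
)

//go:embed migrations/*.sql
var migrationsFS embed.FS

func main() {
	// Wrap the embedded files in an iofs source driver.
	src, err := iofs.New(migrationsFS, "migrations")
	if err != nil {
		log.Fatal(err)
	}
	m, err := migrate.NewWithSourceInstance("iofs", src, "sqlite3://app.db")
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		log.Fatal(err)
	}
}
```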

View File

@@ -0,0 +1,175 @@
// +build go1.16
package iofs
import (
"errors"
"fmt"
"io"
"io/fs"
"path"
"strconv"
"github.com/golang-migrate/migrate/v4/source"
)
type driver struct {
PartialDriver
}
// New returns a new Driver from io/fs#FS and a relative path.
func New(fsys fs.FS, path string) (source.Driver, error) {
var i driver
if err := i.Init(fsys, path); err != nil {
return nil, fmt.Errorf("failed to init driver with path %s: %w", path, err)
}
return &i, nil
}
// Open is part of source.Driver interface implementation.
// Open cannot be called on the iofs passthrough driver.
func (d *driver) Open(url string) (source.Driver, error) {
return nil, errors.New("Open() cannot be called on the iofs passthrough driver")
}
// PartialDriver is a helper service for creating new source drivers working with
// io/fs.FS instances. It implements all source.Driver interface methods
// except for Open(). A new driver can embed this struct and add the missing
// Open() method.
//
// To prepare a PartialDriver for use, call its Init() function.
type PartialDriver struct {
migrations *source.Migrations
fsys fs.FS
path string
}
// Init prepares a not-yet-initialized PartialDriver instance to read migrations from an
// io/fs#FS instance and a relative path.
func (d *PartialDriver) Init(fsys fs.FS, path string) error {
entries, err := fs.ReadDir(fsys, path)
if err != nil {
return err
}
ms := source.NewMigrations()
for _, e := range entries {
if e.IsDir() {
continue
}
m, err := source.DefaultParse(e.Name())
if err != nil {
continue
}
file, err := e.Info()
if err != nil {
return err
}
if !ms.Append(m) {
return source.ErrDuplicateMigration{
Migration: *m,
FileInfo: file,
}
}
}
d.fsys = fsys
d.path = path
d.migrations = ms
return nil
}
// Close is part of source.Driver interface implementation.
// Closes the file system if possible.
func (d *PartialDriver) Close() error {
c, ok := d.fsys.(io.Closer)
if !ok {
return nil
}
return c.Close()
}
// First is part of source.Driver interface implementation.
func (d *PartialDriver) First() (version uint, err error) {
if version, ok := d.migrations.First(); ok {
return version, nil
}
return 0, &fs.PathError{
Op: "first",
Path: d.path,
Err: fs.ErrNotExist,
}
}
// Prev is part of source.Driver interface implementation.
func (d *PartialDriver) Prev(version uint) (prevVersion uint, err error) {
if version, ok := d.migrations.Prev(version); ok {
return version, nil
}
return 0, &fs.PathError{
Op: "prev for version " + strconv.FormatUint(uint64(version), 10),
Path: d.path,
Err: fs.ErrNotExist,
}
}
// Next is part of source.Driver interface implementation.
func (d *PartialDriver) Next(version uint) (nextVersion uint, err error) {
if version, ok := d.migrations.Next(version); ok {
return version, nil
}
return 0, &fs.PathError{
Op: "next for version " + strconv.FormatUint(uint64(version), 10),
Path: d.path,
Err: fs.ErrNotExist,
}
}
// ReadUp is part of source.Driver interface implementation.
func (d *PartialDriver) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := d.migrations.Up(version); ok {
body, err := d.open(path.Join(d.path, m.Raw))
if err != nil {
return nil, "", err
}
return body, m.Identifier, nil
}
return nil, "", &fs.PathError{
Op: "read up for version " + strconv.FormatUint(uint64(version), 10),
Path: d.path,
Err: fs.ErrNotExist,
}
}
// ReadDown is part of source.Driver interface implementation.
func (d *PartialDriver) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
if m, ok := d.migrations.Down(version); ok {
body, err := d.open(path.Join(d.path, m.Raw))
if err != nil {
return nil, "", err
}
return body, m.Identifier, nil
}
return nil, "", &fs.PathError{
Op: "read down for version " + strconv.FormatUint(uint64(version), 10),
Path: d.path,
Err: fs.ErrNotExist,
}
}
func (d *PartialDriver) open(path string) (fs.File, error) {
f, err := d.fsys.Open(path)
if err == nil {
return f, nil
}
// Some non-standard file systems may return errors that don't include the path, which
// makes debugging harder.
if !errors.As(err, new(*fs.PathError)) {
err = &fs.PathError{
Op: "open",
Path: path,
Err: err,
}
}
return nil, err
}

View File

@@ -0,0 +1 @@
1 down

Some files were not shown because too many files have changed in this diff.