mirror of
https://github.com/stashapp/stash.git
synced 2025-12-18 04:44:37 +03:00
Selective export (#770)
This commit is contained in:
663
vendor/golang.org/x/image/ccitt/reader.go
generated
vendored
Normal file
663
vendor/golang.org/x/image/ccitt/reader.go
generated
vendored
Normal file
@@ -0,0 +1,663 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run gen.go
|
||||
|
||||
// Package ccitt implements a CCITT (fax) image decoder.
|
||||
package ccitt
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"image"
|
||||
"io"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidBounds = errors.New("ccitt: invalid bounds")
|
||||
errInvalidCode = errors.New("ccitt: invalid code")
|
||||
errInvalidMode = errors.New("ccitt: invalid mode")
|
||||
errInvalidOffset = errors.New("ccitt: invalid offset")
|
||||
errMissingEOL = errors.New("ccitt: missing End-of-Line")
|
||||
errRunLengthOverflowsWidth = errors.New("ccitt: run length overflows width")
|
||||
errRunLengthTooLong = errors.New("ccitt: run length too long")
|
||||
errUnsupportedMode = errors.New("ccitt: unsupported mode")
|
||||
errUnsupportedSubFormat = errors.New("ccitt: unsupported sub-format")
|
||||
errUnsupportedWidth = errors.New("ccitt: unsupported width")
|
||||
)
|
||||
|
||||
// Order specifies the bit ordering in a CCITT data stream.
|
||||
type Order uint32
|
||||
|
||||
const (
|
||||
// LSB means Least Significant Bits first.
|
||||
LSB Order = iota
|
||||
// MSB means Most Significant Bits first.
|
||||
MSB
|
||||
)
|
||||
|
||||
// SubFormat represents that the CCITT format consists of a number of
|
||||
// sub-formats. Decoding or encoding a CCITT data stream requires knowing the
|
||||
// sub-format context. It is not represented in the data stream per se.
|
||||
type SubFormat uint32
|
||||
|
||||
const (
|
||||
Group3 SubFormat = iota
|
||||
Group4
|
||||
)
|
||||
|
||||
// Options are optional parameters.
|
||||
type Options struct {
|
||||
// Align means that some variable-bit-width codes are byte-aligned.
|
||||
Align bool
|
||||
// Invert means that black is the 1 bit or 0xFF byte, and white is 0.
|
||||
Invert bool
|
||||
}
|
||||
|
||||
// maxWidth is the maximum (inclusive) supported width. This is a limitation of
|
||||
// this implementation, to guard against integer overflow, and not anything
|
||||
// inherent to the CCITT format.
|
||||
const maxWidth = 1 << 20
|
||||
|
||||
func invertBytes(b []byte) {
|
||||
for i, c := range b {
|
||||
b[i] = ^c
|
||||
}
|
||||
}
|
||||
|
||||
func reverseBitsWithinBytes(b []byte) {
|
||||
for i, c := range b {
|
||||
b[i] = bits.Reverse8(c)
|
||||
}
|
||||
}
|
||||
|
||||
type bitReader struct {
|
||||
r io.Reader
|
||||
|
||||
// readErr is the error returned from the most recent r.Read call. As the
|
||||
// io.Reader documentation says, when r.Read returns (n, err), "always
|
||||
// process the n > 0 bytes returned before considering the error err".
|
||||
readErr error
|
||||
|
||||
// order is whether to process r's bytes LSB first or MSB first.
|
||||
order Order
|
||||
|
||||
// The low nBits bits of the bits field hold upcoming bits in LSB order.
|
||||
bits uint64
|
||||
nBits uint32
|
||||
|
||||
// bytes[br:bw] holds bytes read from r but not yet loaded into bits.
|
||||
br uint32
|
||||
bw uint32
|
||||
bytes [1024]uint8
|
||||
}
|
||||
|
||||
func (b *bitReader) alignToByteBoundary() {
|
||||
n := b.nBits & 7
|
||||
b.bits >>= n
|
||||
b.nBits -= n
|
||||
}
|
||||
|
||||
// nextBitMaxNBits is the maximum possible value of bitReader.nBits after a
|
||||
// bitReader.nextBit call, provided that bitReader.nBits was not more than this
|
||||
// value before that call.
|
||||
//
|
||||
// Note that the decode function can unread bits, which can temporarily set the
|
||||
// bitReader.nBits value above nextBitMaxNBits.
|
||||
const nextBitMaxNBits = 31
|
||||
|
||||
func (b *bitReader) nextBit() (uint32, error) {
|
||||
for {
|
||||
if b.nBits > 0 {
|
||||
bit := uint32(b.bits) & 1
|
||||
b.bits >>= 1
|
||||
b.nBits--
|
||||
return bit, nil
|
||||
}
|
||||
|
||||
if available := b.bw - b.br; available >= 4 {
|
||||
// Read 32 bits, even though b.bits is a uint64, since the decode
|
||||
// function may need to unread up to maxCodeLength bits, putting
|
||||
// them back in the remaining (64 - 32) bits. TestMaxCodeLength
|
||||
// checks that the generated maxCodeLength constant fits.
|
||||
//
|
||||
// If changing the Uint32 call, also change nextBitMaxNBits.
|
||||
b.bits = uint64(binary.LittleEndian.Uint32(b.bytes[b.br:]))
|
||||
b.br += 4
|
||||
b.nBits = 32
|
||||
continue
|
||||
} else if available > 0 {
|
||||
b.bits = uint64(b.bytes[b.br])
|
||||
b.br++
|
||||
b.nBits = 8
|
||||
continue
|
||||
}
|
||||
|
||||
if b.readErr != nil {
|
||||
return 0, b.readErr
|
||||
}
|
||||
|
||||
n, err := b.r.Read(b.bytes[:])
|
||||
b.br = 0
|
||||
b.bw = uint32(n)
|
||||
b.readErr = err
|
||||
|
||||
if b.order != LSB {
|
||||
reverseBitsWithinBytes(b.bytes[:b.bw])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decode(b *bitReader, decodeTable [][2]int16) (uint32, error) {
|
||||
nBitsRead, bitsRead, state := uint32(0), uint32(0), int32(1)
|
||||
for {
|
||||
bit, err := b.nextBit()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
bitsRead |= bit << nBitsRead
|
||||
nBitsRead++
|
||||
// The "&1" is redundant, but can eliminate a bounds check.
|
||||
state = int32(decodeTable[state][bit&1])
|
||||
if state < 0 {
|
||||
return uint32(^state), nil
|
||||
} else if state == 0 {
|
||||
// Unread the bits we've read, then return errInvalidCode.
|
||||
b.bits = (b.bits << nBitsRead) | uint64(bitsRead)
|
||||
b.nBits += nBitsRead
|
||||
return 0, errInvalidCode
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type reader struct {
|
||||
br bitReader
|
||||
subFormat SubFormat
|
||||
|
||||
// width is the image width in pixels.
|
||||
width int
|
||||
|
||||
// rowsRemaining starts at the image height in pixels, when the reader is
|
||||
// driven through the io.Reader interface, and decrements to zero as rows
|
||||
// are decoded. When driven through DecodeIntoGray, this field is unused.
|
||||
rowsRemaining int
|
||||
|
||||
// curr and prev hold the current and previous rows. Each element is either
|
||||
// 0x00 (black) or 0xFF (white).
|
||||
//
|
||||
// prev may be nil, when processing the first row.
|
||||
curr []byte
|
||||
prev []byte
|
||||
|
||||
// ri is the read index. curr[:ri] are those bytes of curr that have been
|
||||
// passed along via the Read method.
|
||||
//
|
||||
// When the reader is driven through DecodeIntoGray, instead of through the
|
||||
// io.Reader interface, this field is unused.
|
||||
ri int
|
||||
|
||||
// wi is the write index. curr[:wi] are those bytes of curr that have
|
||||
// already been decoded via the decodeRow method.
|
||||
//
|
||||
// What this implementation calls wi is roughly equivalent to what the spec
|
||||
// calls the a0 index.
|
||||
wi int
|
||||
|
||||
// These fields are copied from the *Options (which may be nil).
|
||||
align bool
|
||||
invert bool
|
||||
|
||||
// atStartOfRow is whether we have just started the row. Some parts of the
|
||||
// spec say to treat this situation as if "wi = -1".
|
||||
atStartOfRow bool
|
||||
|
||||
// penColorIsWhite is whether the next run is black or white.
|
||||
penColorIsWhite bool
|
||||
|
||||
// seenStartOfImage is whether we've called the startDecode method.
|
||||
seenStartOfImage bool
|
||||
|
||||
// readErr is a sticky error for the Read method.
|
||||
readErr error
|
||||
}
|
||||
|
||||
func (z *reader) Read(p []byte) (int, error) {
|
||||
if z.readErr != nil {
|
||||
return 0, z.readErr
|
||||
}
|
||||
originalP := p
|
||||
|
||||
for len(p) > 0 {
|
||||
// Allocate buffers (and decode any start-of-image codes), if
|
||||
// processing the first or second row.
|
||||
if z.curr == nil {
|
||||
if !z.seenStartOfImage {
|
||||
if z.readErr = z.startDecode(); z.readErr != nil {
|
||||
break
|
||||
}
|
||||
z.atStartOfRow = true
|
||||
}
|
||||
z.curr = make([]byte, z.width)
|
||||
}
|
||||
|
||||
// Decode the next row, if necessary.
|
||||
if z.atStartOfRow {
|
||||
if z.rowsRemaining <= 0 {
|
||||
if z.readErr = z.finishDecode(); z.readErr != nil {
|
||||
break
|
||||
}
|
||||
z.readErr = io.EOF
|
||||
break
|
||||
}
|
||||
if z.readErr = z.decodeRow(); z.readErr != nil {
|
||||
break
|
||||
}
|
||||
z.rowsRemaining--
|
||||
}
|
||||
|
||||
// Pack from z.curr (1 byte per pixel) to p (1 bit per pixel), up to 8
|
||||
// elements per iteration.
|
||||
i := 0
|
||||
for ; i < len(p); i++ {
|
||||
numToPack := len(z.curr) - z.ri
|
||||
if numToPack <= 0 {
|
||||
break
|
||||
} else if numToPack > 8 {
|
||||
numToPack = 8
|
||||
}
|
||||
|
||||
byteValue := byte(0)
|
||||
for j := 0; j < numToPack; j++ {
|
||||
byteValue |= (z.curr[z.ri] & 0x80) >> uint(j)
|
||||
z.ri++
|
||||
}
|
||||
p[i] = byteValue
|
||||
}
|
||||
p = p[i:]
|
||||
|
||||
// Prepare to decode the next row, if necessary.
|
||||
if z.ri == len(z.curr) {
|
||||
z.ri, z.curr, z.prev = 0, z.prev, z.curr
|
||||
z.atStartOfRow = true
|
||||
}
|
||||
}
|
||||
|
||||
n := len(originalP) - len(p)
|
||||
// TODO: when invert is true, should the end-of-row padding bits be 0 or 1?
|
||||
if z.invert {
|
||||
invertBytes(originalP[:n])
|
||||
}
|
||||
return n, z.readErr
|
||||
}
|
||||
|
||||
func (z *reader) penColor() byte {
|
||||
if z.penColorIsWhite {
|
||||
return 0xFF
|
||||
}
|
||||
return 0x00
|
||||
}
|
||||
|
||||
func (z *reader) startDecode() error {
|
||||
switch z.subFormat {
|
||||
case Group3:
|
||||
if err := z.decodeEOL(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case Group4:
|
||||
// No-op.
|
||||
|
||||
default:
|
||||
return errUnsupportedSubFormat
|
||||
}
|
||||
|
||||
z.seenStartOfImage = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *reader) finishDecode() error {
|
||||
numberOfEOLs := 0
|
||||
switch z.subFormat {
|
||||
case Group3:
|
||||
// The stream ends with a RTC (Return To Control) of 6 consecutive
|
||||
// EOL's, but we should have already just seen an EOL, either in
|
||||
// z.startDecode (for a zero-height image) or in z.decodeRow.
|
||||
numberOfEOLs = 5
|
||||
|
||||
case Group4:
|
||||
// The stream ends with two EOL's, the first of which is possibly
|
||||
// byte-aligned.
|
||||
numberOfEOLs = 2
|
||||
if err := z.decodeEOL(); err == nil {
|
||||
numberOfEOLs--
|
||||
} else if err == errInvalidCode {
|
||||
// Try again, this time starting from a byte boundary.
|
||||
z.br.alignToByteBoundary()
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return errUnsupportedSubFormat
|
||||
}
|
||||
|
||||
for ; numberOfEOLs > 0; numberOfEOLs-- {
|
||||
if err := z.decodeEOL(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *reader) decodeEOL() error {
|
||||
// TODO: EOL doesn't have to be in the modeDecodeTable. It could be in its
|
||||
// own table, or we could just hard-code it, especially if we might need to
|
||||
// cater for optional byte-alignment, or an arbitrary number (potentially
|
||||
// more than 8) of 0-valued padding bits.
|
||||
if mode, err := decode(&z.br, modeDecodeTable[:]); err != nil {
|
||||
return err
|
||||
} else if mode != modeEOL {
|
||||
return errMissingEOL
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *reader) decodeRow() error {
|
||||
z.wi = 0
|
||||
z.atStartOfRow = true
|
||||
z.penColorIsWhite = true
|
||||
|
||||
switch z.subFormat {
|
||||
case Group3:
|
||||
for ; z.wi < len(z.curr); z.atStartOfRow = false {
|
||||
if err := z.decodeRun(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return z.decodeEOL()
|
||||
|
||||
case Group4:
|
||||
if z.align {
|
||||
z.br.alignToByteBoundary()
|
||||
}
|
||||
|
||||
for ; z.wi < len(z.curr); z.atStartOfRow = false {
|
||||
mode, err := decode(&z.br, modeDecodeTable[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rm := readerMode{}
|
||||
if mode < uint32(len(readerModes)) {
|
||||
rm = readerModes[mode]
|
||||
}
|
||||
if rm.function == nil {
|
||||
return errInvalidMode
|
||||
}
|
||||
if err := rm.function(z, rm.arg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return errUnsupportedSubFormat
|
||||
}
|
||||
|
||||
func (z *reader) decodeRun() error {
|
||||
table := blackDecodeTable[:]
|
||||
if z.penColorIsWhite {
|
||||
table = whiteDecodeTable[:]
|
||||
}
|
||||
|
||||
total := 0
|
||||
for {
|
||||
n, err := decode(&z.br, table)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n > maxWidth {
|
||||
panic("unreachable")
|
||||
}
|
||||
total += int(n)
|
||||
if total > maxWidth {
|
||||
return errRunLengthTooLong
|
||||
}
|
||||
// Anything 0x3F or below is a terminal code.
|
||||
if n <= 0x3F {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if total > (len(z.curr) - z.wi) {
|
||||
return errRunLengthOverflowsWidth
|
||||
}
|
||||
dst := z.curr[z.wi : z.wi+total]
|
||||
penColor := z.penColor()
|
||||
for i := range dst {
|
||||
dst[i] = penColor
|
||||
}
|
||||
z.wi += total
|
||||
z.penColorIsWhite = !z.penColorIsWhite
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// The various modes' semantics are based on determining a row of pixels'
|
||||
// "changing elements": those pixels whose color differs from the one on its
|
||||
// immediate left.
|
||||
//
|
||||
// The row above the first row is implicitly all white. Similarly, the column
|
||||
// to the left of the first column is implicitly all white.
|
||||
//
|
||||
// For example, here's Figure 1 in "ITU-T Recommendation T.6", where the
|
||||
// current and previous rows contain black (B) and white (w) pixels. The a?
|
||||
// indexes point into curr, the b? indexes point into prev.
|
||||
//
|
||||
// b1 b2
|
||||
// v v
|
||||
// prev: BBBBBwwwwwBBBwwwww
|
||||
// curr: BBBwwwwwBBBBBBwwww
|
||||
// ^ ^ ^
|
||||
// a0 a1 a2
|
||||
//
|
||||
// a0 is the "reference element" or current decoder position, roughly
|
||||
// equivalent to what this implementation calls reader.wi.
|
||||
//
|
||||
// a1 is the next changing element to the right of a0, on the "coding line"
|
||||
// (the current row).
|
||||
//
|
||||
// a2 is the next changing element to the right of a1, again on curr.
|
||||
//
|
||||
// b1 is the first changing element on the "reference line" (the previous row)
|
||||
// to the right of a0 and of opposite color to a0.
|
||||
//
|
||||
// b2 is the next changing element to the right of b1, again on prev.
|
||||
//
|
||||
// The various modes calculate a1 (and a2, for modeH):
|
||||
// - modePass calculates that a1 is at or to the right of b2.
|
||||
// - modeH calculates a1 and a2 without considering b1 or b2.
|
||||
// - modeV* calculates a1 to be b1 plus an adjustment (between -3 and +3).
|
||||
|
||||
const (
|
||||
findB1 = false
|
||||
findB2 = true
|
||||
)
|
||||
|
||||
// findB finds either the b1 or b2 value.
|
||||
func (z *reader) findB(whichB bool) int {
|
||||
// The initial row is a special case. The previous row is implicitly all
|
||||
// white, so that there are no changing pixel elements. We return b1 or b2
|
||||
// to be at the end of the row.
|
||||
if len(z.prev) != len(z.curr) {
|
||||
return len(z.curr)
|
||||
}
|
||||
|
||||
i := z.wi
|
||||
|
||||
if z.atStartOfRow {
|
||||
// a0 is implicitly at -1, on a white pixel. b1 is the first black
|
||||
// pixel in the previous row. b2 is the first white pixel after that.
|
||||
for ; (i < len(z.prev)) && (z.prev[i] == 0xFF); i++ {
|
||||
}
|
||||
if whichB == findB2 {
|
||||
for ; (i < len(z.prev)) && (z.prev[i] == 0x00); i++ {
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// As per figure 1 above, assume that the current pen color is white.
|
||||
// First, walk past every contiguous black pixel in prev, starting at a0.
|
||||
oppositeColor := ^z.penColor()
|
||||
for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ {
|
||||
}
|
||||
|
||||
// Then walk past every contiguous white pixel.
|
||||
penColor := ^oppositeColor
|
||||
for ; (i < len(z.prev)) && (z.prev[i] == penColor); i++ {
|
||||
}
|
||||
|
||||
// We're now at a black pixel (or at the end of the row). That's b1.
|
||||
if whichB == findB2 {
|
||||
// If we're looking for b2, walk past every contiguous black pixel
|
||||
// again.
|
||||
oppositeColor := ^penColor
|
||||
for ; (i < len(z.prev)) && (z.prev[i] == oppositeColor); i++ {
|
||||
}
|
||||
}
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
type readerMode struct {
|
||||
function func(z *reader, arg int) error
|
||||
arg int
|
||||
}
|
||||
|
||||
var readerModes = [...]readerMode{
|
||||
modePass: {function: readerModePass},
|
||||
modeH: {function: readerModeH},
|
||||
modeV0: {function: readerModeV, arg: +0},
|
||||
modeVR1: {function: readerModeV, arg: +1},
|
||||
modeVR2: {function: readerModeV, arg: +2},
|
||||
modeVR3: {function: readerModeV, arg: +3},
|
||||
modeVL1: {function: readerModeV, arg: -1},
|
||||
modeVL2: {function: readerModeV, arg: -2},
|
||||
modeVL3: {function: readerModeV, arg: -3},
|
||||
modeExt: {function: readerModeExt},
|
||||
}
|
||||
|
||||
func readerModePass(z *reader, arg int) error {
|
||||
b2 := z.findB(findB2)
|
||||
if (b2 < z.wi) || (len(z.curr) < b2) {
|
||||
return errInvalidOffset
|
||||
}
|
||||
dst := z.curr[z.wi:b2]
|
||||
penColor := z.penColor()
|
||||
for i := range dst {
|
||||
dst[i] = penColor
|
||||
}
|
||||
z.wi = b2
|
||||
return nil
|
||||
}
|
||||
|
||||
func readerModeH(z *reader, arg int) error {
|
||||
// The first iteration finds a1. The second finds a2.
|
||||
for i := 0; i < 2; i++ {
|
||||
if err := z.decodeRun(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readerModeV(z *reader, arg int) error {
|
||||
a1 := z.findB(findB1) + arg
|
||||
if (a1 < z.wi) || (len(z.curr) < a1) {
|
||||
return errInvalidOffset
|
||||
}
|
||||
dst := z.curr[z.wi:a1]
|
||||
penColor := z.penColor()
|
||||
for i := range dst {
|
||||
dst[i] = penColor
|
||||
}
|
||||
z.wi = a1
|
||||
z.penColorIsWhite = !z.penColorIsWhite
|
||||
return nil
|
||||
}
|
||||
|
||||
func readerModeExt(z *reader, arg int) error {
|
||||
return errUnsupportedMode
|
||||
}
|
||||
|
||||
// DecodeIntoGray decodes the CCITT-formatted data in r into dst.
|
||||
//
|
||||
// It returns an error if dst's width and height don't match the implied width
|
||||
// and height of CCITT-formatted data.
|
||||
func DecodeIntoGray(dst *image.Gray, r io.Reader, order Order, sf SubFormat, opts *Options) error {
|
||||
bounds := dst.Bounds()
|
||||
if (bounds.Dx() < 0) || (bounds.Dy() < 0) {
|
||||
return errInvalidBounds
|
||||
}
|
||||
if bounds.Dx() > maxWidth {
|
||||
return errUnsupportedWidth
|
||||
}
|
||||
|
||||
z := reader{
|
||||
br: bitReader{r: r, order: order},
|
||||
subFormat: sf,
|
||||
align: (opts != nil) && opts.Align,
|
||||
invert: (opts != nil) && opts.Invert,
|
||||
width: bounds.Dx(),
|
||||
}
|
||||
if err := z.startDecode(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
width := bounds.Dx()
|
||||
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
|
||||
p := (y - bounds.Min.Y) * dst.Stride
|
||||
z.curr = dst.Pix[p : p+width]
|
||||
if err := z.decodeRow(); err != nil {
|
||||
return err
|
||||
}
|
||||
z.curr, z.prev = nil, z.curr
|
||||
}
|
||||
|
||||
if err := z.finishDecode(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if z.invert {
|
||||
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
|
||||
p := (y - bounds.Min.Y) * dst.Stride
|
||||
invertBytes(dst.Pix[p : p+width])
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewReader returns an io.Reader that decodes the CCITT-formatted data in r.
|
||||
// The resultant byte stream is one bit per pixel (MSB first), with 1 meaning
|
||||
// white and 0 meaning black. Each row in the result is byte-aligned.
|
||||
func NewReader(r io.Reader, order Order, sf SubFormat, width int, height int, opts *Options) io.Reader {
|
||||
readErr := error(nil)
|
||||
if (width < 0) || (height < 0) {
|
||||
readErr = errInvalidBounds
|
||||
} else if width > maxWidth {
|
||||
readErr = errUnsupportedWidth
|
||||
}
|
||||
|
||||
return &reader{
|
||||
br: bitReader{r: r, order: order},
|
||||
subFormat: sf,
|
||||
align: (opts != nil) && opts.Align,
|
||||
invert: (opts != nil) && opts.Invert,
|
||||
width: width,
|
||||
rowsRemaining: height,
|
||||
readErr: readErr,
|
||||
}
|
||||
}
|
||||
989
vendor/golang.org/x/image/ccitt/table.go
generated
vendored
Normal file
989
vendor/golang.org/x/image/ccitt/table.go
generated
vendored
Normal file
@@ -0,0 +1,989 @@
|
||||
// generated by "go run gen.go". DO NOT EDIT.
|
||||
|
||||
package ccitt
|
||||
|
||||
// Each decodeTable is represented by an array of [2]int16's: a binary tree.
|
||||
// Each array element (other than element 0, which means invalid) is a branch
|
||||
// node in that tree. The root node is always element 1 (the second element).
|
||||
//
|
||||
// To walk the tree, look at the next bit in the bit stream, using it to select
|
||||
// the first or second element of the [2]int16. If that int16 is 0, we have an
|
||||
// invalid code. If it is positive, go to that branch node. If it is negative,
|
||||
// then we have a leaf node, whose value is the bitwise complement (the ^
|
||||
// operator) of that int16.
|
||||
//
|
||||
// Comments above each decodeTable also show the same structure visually. The
|
||||
// "b123" lines show the 123'rd branch node. The "=XXXXX" lines show an invalid
|
||||
// code. The "=v1234" lines show a leaf node with value 1234. When reading the
|
||||
// bit stream, a 0 or 1 bit means to go up or down, as you move left to right.
|
||||
//
|
||||
// For example, in modeDecodeTable, branch node b005 is three steps up from the
|
||||
// root node, meaning that we have already seen "000". If the next bit is "0"
|
||||
// then we move to branch node b006. Otherwise, the next bit is "1", and we
|
||||
// move to the leaf node v0000 (also known as the modePass constant). Indeed,
|
||||
// the bits that encode modePass are "0001".
|
||||
//
|
||||
// Tables 1, 2 and 3 come from the "ITU-T Recommendation T.6: FACSIMILE CODING
|
||||
// SCHEMES AND CODING CONTROL FUNCTIONS FOR GROUP 4 FACSIMILE APPARATUS"
|
||||
// specification:
|
||||
//
|
||||
// https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-T.6-198811-I!!PDF-E&type=items
|
||||
|
||||
// modeDecodeTable represents Table 1 and the End-of-Line code.
|
||||
//
|
||||
// +=XXXXX
|
||||
// b015 +-+
|
||||
// | +=v0010
|
||||
// b014 +-+
|
||||
// | +=XXXXX
|
||||
// b013 +-+
|
||||
// | +=XXXXX
|
||||
// b012 +-+
|
||||
// | +=XXXXX
|
||||
// b011 +-+
|
||||
// | +=XXXXX
|
||||
// b009 +-+
|
||||
// | +=v0009
|
||||
// b007 +-+
|
||||
// | | +=v0008
|
||||
// b010 | +-+
|
||||
// | +=v0005
|
||||
// b006 +-+
|
||||
// | | +=v0007
|
||||
// b008 | +-+
|
||||
// | +=v0004
|
||||
// b005 +-+
|
||||
// | +=v0000
|
||||
// b003 +-+
|
||||
// | +=v0001
|
||||
// b002 +-+
|
||||
// | | +=v0006
|
||||
// b004 | +-+
|
||||
// | +=v0003
|
||||
// b001 +-+
|
||||
// +=v0002
|
||||
var modeDecodeTable = [...][2]int16{
|
||||
0: {0, 0},
|
||||
1: {2, ^2},
|
||||
2: {3, 4},
|
||||
3: {5, ^1},
|
||||
4: {^6, ^3},
|
||||
5: {6, ^0},
|
||||
6: {7, 8},
|
||||
7: {9, 10},
|
||||
8: {^7, ^4},
|
||||
9: {11, ^9},
|
||||
10: {^8, ^5},
|
||||
11: {12, 0},
|
||||
12: {13, 0},
|
||||
13: {14, 0},
|
||||
14: {15, 0},
|
||||
15: {0, ^10},
|
||||
}
|
||||
|
||||
// whiteDecodeTable represents Tables 2 and 3 for a white run.
|
||||
//
|
||||
// +=XXXXX
|
||||
// b059 +-+
|
||||
// | | +=v1792
|
||||
// b096 | | +-+
|
||||
// | | | | +=v1984
|
||||
// b100 | | | +-+
|
||||
// | | | +=v2048
|
||||
// b094 | | +-+
|
||||
// | | | | +=v2112
|
||||
// b101 | | | | +-+
|
||||
// | | | | | +=v2176
|
||||
// b097 | | | +-+
|
||||
// | | | | +=v2240
|
||||
// b102 | | | +-+
|
||||
// | | | +=v2304
|
||||
// b085 | +-+
|
||||
// | | +=v1856
|
||||
// b098 | | +-+
|
||||
// | | | +=v1920
|
||||
// b095 | +-+
|
||||
// | | +=v2368
|
||||
// b103 | | +-+
|
||||
// | | | +=v2432
|
||||
// b099 | +-+
|
||||
// | | +=v2496
|
||||
// b104 | +-+
|
||||
// | +=v2560
|
||||
// b040 +-+
|
||||
// | | +=v0029
|
||||
// b060 | +-+
|
||||
// | +=v0030
|
||||
// b026 +-+
|
||||
// | | +=v0045
|
||||
// b061 | | +-+
|
||||
// | | | +=v0046
|
||||
// b041 | +-+
|
||||
// | +=v0022
|
||||
// b016 +-+
|
||||
// | | +=v0023
|
||||
// b042 | | +-+
|
||||
// | | | | +=v0047
|
||||
// b062 | | | +-+
|
||||
// | | | +=v0048
|
||||
// b027 | +-+
|
||||
// | +=v0013
|
||||
// b008 +-+
|
||||
// | | +=v0020
|
||||
// b043 | | +-+
|
||||
// | | | | +=v0033
|
||||
// b063 | | | +-+
|
||||
// | | | +=v0034
|
||||
// b028 | | +-+
|
||||
// | | | | +=v0035
|
||||
// b064 | | | | +-+
|
||||
// | | | | | +=v0036
|
||||
// b044 | | | +-+
|
||||
// | | | | +=v0037
|
||||
// b065 | | | +-+
|
||||
// | | | +=v0038
|
||||
// b017 | +-+
|
||||
// | | +=v0019
|
||||
// b045 | | +-+
|
||||
// | | | | +=v0031
|
||||
// b066 | | | +-+
|
||||
// | | | +=v0032
|
||||
// b029 | +-+
|
||||
// | +=v0001
|
||||
// b004 +-+
|
||||
// | | +=v0012
|
||||
// b030 | | +-+
|
||||
// | | | | +=v0053
|
||||
// b067 | | | | +-+
|
||||
// | | | | | +=v0054
|
||||
// b046 | | | +-+
|
||||
// | | | +=v0026
|
||||
// b018 | | +-+
|
||||
// | | | | +=v0039
|
||||
// b068 | | | | +-+
|
||||
// | | | | | +=v0040
|
||||
// b047 | | | | +-+
|
||||
// | | | | | | +=v0041
|
||||
// b069 | | | | | +-+
|
||||
// | | | | | +=v0042
|
||||
// b031 | | | +-+
|
||||
// | | | | +=v0043
|
||||
// b070 | | | | +-+
|
||||
// | | | | | +=v0044
|
||||
// b048 | | | +-+
|
||||
// | | | +=v0021
|
||||
// b009 | +-+
|
||||
// | | +=v0028
|
||||
// b049 | | +-+
|
||||
// | | | | +=v0061
|
||||
// b071 | | | +-+
|
||||
// | | | +=v0062
|
||||
// b032 | | +-+
|
||||
// | | | | +=v0063
|
||||
// b072 | | | | +-+
|
||||
// | | | | | +=v0000
|
||||
// b050 | | | +-+
|
||||
// | | | | +=v0320
|
||||
// b073 | | | +-+
|
||||
// | | | +=v0384
|
||||
// b019 | +-+
|
||||
// | +=v0010
|
||||
// b002 +-+
|
||||
// | | +=v0011
|
||||
// b020 | | +-+
|
||||
// | | | | +=v0027
|
||||
// b051 | | | | +-+
|
||||
// | | | | | | +=v0059
|
||||
// b074 | | | | | +-+
|
||||
// | | | | | +=v0060
|
||||
// b033 | | | +-+
|
||||
// | | | | +=v1472
|
||||
// b086 | | | | +-+
|
||||
// | | | | | +=v1536
|
||||
// b075 | | | | +-+
|
||||
// | | | | | | +=v1600
|
||||
// b087 | | | | | +-+
|
||||
// | | | | | +=v1728
|
||||
// b052 | | | +-+
|
||||
// | | | +=v0018
|
||||
// b010 | | +-+
|
||||
// | | | | +=v0024
|
||||
// b053 | | | | +-+
|
||||
// | | | | | | +=v0049
|
||||
// b076 | | | | | +-+
|
||||
// | | | | | +=v0050
|
||||
// b034 | | | | +-+
|
||||
// | | | | | | +=v0051
|
||||
// b077 | | | | | | +-+
|
||||
// | | | | | | | +=v0052
|
||||
// b054 | | | | | +-+
|
||||
// | | | | | +=v0025
|
||||
// b021 | | | +-+
|
||||
// | | | | +=v0055
|
||||
// b078 | | | | +-+
|
||||
// | | | | | +=v0056
|
||||
// b055 | | | | +-+
|
||||
// | | | | | | +=v0057
|
||||
// b079 | | | | | +-+
|
||||
// | | | | | +=v0058
|
||||
// b035 | | | +-+
|
||||
// | | | +=v0192
|
||||
// b005 | +-+
|
||||
// | | +=v1664
|
||||
// b036 | | +-+
|
||||
// | | | | +=v0448
|
||||
// b080 | | | | +-+
|
||||
// | | | | | +=v0512
|
||||
// b056 | | | +-+
|
||||
// | | | | +=v0704
|
||||
// b088 | | | | +-+
|
||||
// | | | | | +=v0768
|
||||
// b081 | | | +-+
|
||||
// | | | +=v0640
|
||||
// b022 | | +-+
|
||||
// | | | | +=v0576
|
||||
// b082 | | | | +-+
|
||||
// | | | | | | +=v0832
|
||||
// b089 | | | | | +-+
|
||||
// | | | | | +=v0896
|
||||
// b057 | | | | +-+
|
||||
// | | | | | | +=v0960
|
||||
// b090 | | | | | | +-+
|
||||
// | | | | | | | +=v1024
|
||||
// b083 | | | | | +-+
|
||||
// | | | | | | +=v1088
|
||||
// b091 | | | | | +-+
|
||||
// | | | | | +=v1152
|
||||
// b037 | | | +-+
|
||||
// | | | | +=v1216
|
||||
// b092 | | | | +-+
|
||||
// | | | | | +=v1280
|
||||
// b084 | | | | +-+
|
||||
// | | | | | | +=v1344
|
||||
// b093 | | | | | +-+
|
||||
// | | | | | +=v1408
|
||||
// b058 | | | +-+
|
||||
// | | | +=v0256
|
||||
// b011 | +-+
|
||||
// | +=v0002
|
||||
// b001 +-+
|
||||
// | +=v0003
|
||||
// b012 | +-+
|
||||
// | | | +=v0128
|
||||
// b023 | | +-+
|
||||
// | | +=v0008
|
||||
// b006 | +-+
|
||||
// | | | +=v0009
|
||||
// b024 | | | +-+
|
||||
// | | | | | +=v0016
|
||||
// b038 | | | | +-+
|
||||
// | | | | +=v0017
|
||||
// b013 | | +-+
|
||||
// | | +=v0004
|
||||
// b003 +-+
|
||||
// | +=v0005
|
||||
// b014 | +-+
|
||||
// | | | +=v0014
|
||||
// b039 | | | +-+
|
||||
// | | | | +=v0015
|
||||
// b025 | | +-+
|
||||
// | | +=v0064
|
||||
// b007 +-+
|
||||
// | +=v0006
|
||||
// b015 +-+
|
||||
// +=v0007
|
||||
var whiteDecodeTable = [...][2]int16{
|
||||
0: {0, 0},
|
||||
1: {2, 3},
|
||||
2: {4, 5},
|
||||
3: {6, 7},
|
||||
4: {8, 9},
|
||||
5: {10, 11},
|
||||
6: {12, 13},
|
||||
7: {14, 15},
|
||||
8: {16, 17},
|
||||
9: {18, 19},
|
||||
10: {20, 21},
|
||||
11: {22, ^2},
|
||||
12: {^3, 23},
|
||||
13: {24, ^4},
|
||||
14: {^5, 25},
|
||||
15: {^6, ^7},
|
||||
16: {26, 27},
|
||||
17: {28, 29},
|
||||
18: {30, 31},
|
||||
19: {32, ^10},
|
||||
20: {^11, 33},
|
||||
21: {34, 35},
|
||||
22: {36, 37},
|
||||
23: {^128, ^8},
|
||||
24: {^9, 38},
|
||||
25: {39, ^64},
|
||||
26: {40, 41},
|
||||
27: {42, ^13},
|
||||
28: {43, 44},
|
||||
29: {45, ^1},
|
||||
30: {^12, 46},
|
||||
31: {47, 48},
|
||||
32: {49, 50},
|
||||
33: {51, 52},
|
||||
34: {53, 54},
|
||||
35: {55, ^192},
|
||||
36: {^1664, 56},
|
||||
37: {57, 58},
|
||||
38: {^16, ^17},
|
||||
39: {^14, ^15},
|
||||
40: {59, 60},
|
||||
41: {61, ^22},
|
||||
42: {^23, 62},
|
||||
43: {^20, 63},
|
||||
44: {64, 65},
|
||||
45: {^19, 66},
|
||||
46: {67, ^26},
|
||||
47: {68, 69},
|
||||
48: {70, ^21},
|
||||
49: {^28, 71},
|
||||
50: {72, 73},
|
||||
51: {^27, 74},
|
||||
52: {75, ^18},
|
||||
53: {^24, 76},
|
||||
54: {77, ^25},
|
||||
55: {78, 79},
|
||||
56: {80, 81},
|
||||
57: {82, 83},
|
||||
58: {84, ^256},
|
||||
59: {0, 85},
|
||||
60: {^29, ^30},
|
||||
61: {^45, ^46},
|
||||
62: {^47, ^48},
|
||||
63: {^33, ^34},
|
||||
64: {^35, ^36},
|
||||
65: {^37, ^38},
|
||||
66: {^31, ^32},
|
||||
67: {^53, ^54},
|
||||
68: {^39, ^40},
|
||||
69: {^41, ^42},
|
||||
70: {^43, ^44},
|
||||
71: {^61, ^62},
|
||||
72: {^63, ^0},
|
||||
73: {^320, ^384},
|
||||
74: {^59, ^60},
|
||||
75: {86, 87},
|
||||
76: {^49, ^50},
|
||||
77: {^51, ^52},
|
||||
78: {^55, ^56},
|
||||
79: {^57, ^58},
|
||||
80: {^448, ^512},
|
||||
81: {88, ^640},
|
||||
82: {^576, 89},
|
||||
83: {90, 91},
|
||||
84: {92, 93},
|
||||
85: {94, 95},
|
||||
86: {^1472, ^1536},
|
||||
87: {^1600, ^1728},
|
||||
88: {^704, ^768},
|
||||
89: {^832, ^896},
|
||||
90: {^960, ^1024},
|
||||
91: {^1088, ^1152},
|
||||
92: {^1216, ^1280},
|
||||
93: {^1344, ^1408},
|
||||
94: {96, 97},
|
||||
95: {98, 99},
|
||||
96: {^1792, 100},
|
||||
97: {101, 102},
|
||||
98: {^1856, ^1920},
|
||||
99: {103, 104},
|
||||
100: {^1984, ^2048},
|
||||
101: {^2112, ^2176},
|
||||
102: {^2240, ^2304},
|
||||
103: {^2368, ^2432},
|
||||
104: {^2496, ^2560},
|
||||
}
|
||||
|
||||
// blackDecodeTable represents Tables 2 and 3 for a black run.
|
||||
//
|
||||
// +=XXXXX
|
||||
// b017 +-+
|
||||
// | | +=v1792
|
||||
// b042 | | +-+
|
||||
// | | | | +=v1984
|
||||
// b063 | | | +-+
|
||||
// | | | +=v2048
|
||||
// b029 | | +-+
|
||||
// | | | | +=v2112
|
||||
// b064 | | | | +-+
|
||||
// | | | | | +=v2176
|
||||
// b043 | | | +-+
|
||||
// | | | | +=v2240
|
||||
// b065 | | | +-+
|
||||
// | | | +=v2304
|
||||
// b022 | +-+
|
||||
// | | +=v1856
|
||||
// b044 | | +-+
|
||||
// | | | +=v1920
|
||||
// b030 | +-+
|
||||
// | | +=v2368
|
||||
// b066 | | +-+
|
||||
// | | | +=v2432
|
||||
// b045 | +-+
|
||||
// | | +=v2496
|
||||
// b067 | +-+
|
||||
// | +=v2560
|
||||
// b013 +-+
|
||||
// | | +=v0018
|
||||
// b031 | | +-+
|
||||
// | | | | +=v0052
|
||||
// b068 | | | | +-+
|
||||
// | | | | | | +=v0640
|
||||
// b095 | | | | | +-+
|
||||
// | | | | | +=v0704
|
||||
// b046 | | | +-+
|
||||
// | | | | +=v0768
|
||||
// b096 | | | | +-+
|
||||
// | | | | | +=v0832
|
||||
// b069 | | | +-+
|
||||
// | | | +=v0055
|
||||
// b023 | | +-+
|
||||
// | | | | +=v0056
|
||||
// b070 | | | | +-+
|
||||
// | | | | | | +=v1280
|
||||
// b097 | | | | | +-+
|
||||
// | | | | | +=v1344
|
||||
// b047 | | | | +-+
|
||||
// | | | | | | +=v1408
|
||||
// b098 | | | | | | +-+
|
||||
// | | | | | | | +=v1472
|
||||
// b071 | | | | | +-+
|
||||
// | | | | | +=v0059
|
||||
// b032 | | | +-+
|
||||
// | | | | +=v0060
|
||||
// b072 | | | | +-+
|
||||
// | | | | | | +=v1536
|
||||
// b099 | | | | | +-+
|
||||
// | | | | | +=v1600
|
||||
// b048 | | | +-+
|
||||
// | | | +=v0024
|
||||
// b018 | +-+
|
||||
// | | +=v0025
|
||||
// b049 | | +-+
|
||||
// | | | | +=v1664
|
||||
// b100 | | | | +-+
|
||||
// | | | | | +=v1728
|
||||
// b073 | | | +-+
|
||||
// | | | +=v0320
|
||||
// b033 | | +-+
|
||||
// | | | | +=v0384
|
||||
// b074 | | | | +-+
|
||||
// | | | | | +=v0448
|
||||
// b050 | | | +-+
|
||||
// | | | | +=v0512
|
||||
// b101 | | | | +-+
|
||||
// | | | | | +=v0576
|
||||
// b075 | | | +-+
|
||||
// | | | +=v0053
|
||||
// b024 | +-+
|
||||
// | | +=v0054
|
||||
// b076 | | +-+
|
||||
// | | | | +=v0896
|
||||
// b102 | | | +-+
|
||||
// | | | +=v0960
|
||||
// b051 | | +-+
|
||||
// | | | | +=v1024
|
||||
// b103 | | | | +-+
|
||||
// | | | | | +=v1088
|
||||
// b077 | | | +-+
|
||||
// | | | | +=v1152
|
||||
// b104 | | | +-+
|
||||
// | | | +=v1216
|
||||
// b034 | +-+
|
||||
// | +=v0064
|
||||
// b010 +-+
|
||||
// | | +=v0013
|
||||
// b019 | | +-+
|
||||
// | | | | +=v0023
|
||||
// b052 | | | | +-+
|
||||
// | | | | | | +=v0050
|
||||
// b078 | | | | | +-+
|
||||
// | | | | | +=v0051
|
||||
// b035 | | | | +-+
|
||||
// | | | | | | +=v0044
|
||||
// b079 | | | | | | +-+
|
||||
// | | | | | | | +=v0045
|
||||
// b053 | | | | | +-+
|
||||
// | | | | | | +=v0046
|
||||
// b080 | | | | | +-+
|
||||
// | | | | | +=v0047
|
||||
// b025 | | | +-+
|
||||
// | | | | +=v0057
|
||||
// b081 | | | | +-+
|
||||
// | | | | | +=v0058
|
||||
// b054 | | | | +-+
|
||||
// | | | | | | +=v0061
|
||||
// b082 | | | | | +-+
|
||||
// | | | | | +=v0256
|
||||
// b036 | | | +-+
|
||||
// | | | +=v0016
|
||||
// b014 | +-+
|
||||
// | | +=v0017
|
||||
// b037 | | +-+
|
||||
// | | | | +=v0048
|
||||
// b083 | | | | +-+
|
||||
// | | | | | +=v0049
|
||||
// b055 | | | +-+
|
||||
// | | | | +=v0062
|
||||
// b084 | | | +-+
|
||||
// | | | +=v0063
|
||||
// b026 | | +-+
|
||||
// | | | | +=v0030
|
||||
// b085 | | | | +-+
|
||||
// | | | | | +=v0031
|
||||
// b056 | | | | +-+
|
||||
// | | | | | | +=v0032
|
||||
// b086 | | | | | +-+
|
||||
// | | | | | +=v0033
|
||||
// b038 | | | +-+
|
||||
// | | | | +=v0040
|
||||
// b087 | | | | +-+
|
||||
// | | | | | +=v0041
|
||||
// b057 | | | +-+
|
||||
// | | | +=v0022
|
||||
// b020 | +-+
|
||||
// | +=v0014
|
||||
// b008 +-+
|
||||
// | | +=v0010
|
||||
// b015 | | +-+
|
||||
// | | | +=v0011
|
||||
// b011 | +-+
|
||||
// | | +=v0015
|
||||
// b027 | | +-+
|
||||
// | | | | +=v0128
|
||||
// b088 | | | | +-+
|
||||
// | | | | | +=v0192
|
||||
// b058 | | | | +-+
|
||||
// | | | | | | +=v0026
|
||||
// b089 | | | | | +-+
|
||||
// | | | | | +=v0027
|
||||
// b039 | | | +-+
|
||||
// | | | | +=v0028
|
||||
// b090 | | | | +-+
|
||||
// | | | | | +=v0029
|
||||
// b059 | | | +-+
|
||||
// | | | +=v0019
|
||||
// b021 | | +-+
|
||||
// | | | | +=v0020
|
||||
// b060 | | | | +-+
|
||||
// | | | | | | +=v0034
|
||||
// b091 | | | | | +-+
|
||||
// | | | | | +=v0035
|
||||
// b040 | | | | +-+
|
||||
// | | | | | | +=v0036
|
||||
// b092 | | | | | | +-+
|
||||
// | | | | | | | +=v0037
|
||||
// b061 | | | | | +-+
|
||||
// | | | | | | +=v0038
|
||||
// b093 | | | | | +-+
|
||||
// | | | | | +=v0039
|
||||
// b028 | | | +-+
|
||||
// | | | | +=v0021
|
||||
// b062 | | | | +-+
|
||||
// | | | | | | +=v0042
|
||||
// b094 | | | | | +-+
|
||||
// | | | | | +=v0043
|
||||
// b041 | | | +-+
|
||||
// | | | +=v0000
|
||||
// b016 | +-+
|
||||
// | +=v0012
|
||||
// b006 +-+
|
||||
// | | +=v0009
|
||||
// b012 | | +-+
|
||||
// | | | +=v0008
|
||||
// b009 | +-+
|
||||
// | +=v0007
|
||||
// b004 +-+
|
||||
// | | +=v0006
|
||||
// b007 | +-+
|
||||
// | +=v0005
|
||||
// b002 +-+
|
||||
// | | +=v0001
|
||||
// b005 | +-+
|
||||
// | +=v0004
|
||||
// b001 +-+
|
||||
// | +=v0003
|
||||
// b003 +-+
|
||||
// +=v0002
|
||||
var blackDecodeTable = [...][2]int16{
|
||||
0: {0, 0},
|
||||
1: {2, 3},
|
||||
2: {4, 5},
|
||||
3: {^3, ^2},
|
||||
4: {6, 7},
|
||||
5: {^1, ^4},
|
||||
6: {8, 9},
|
||||
7: {^6, ^5},
|
||||
8: {10, 11},
|
||||
9: {12, ^7},
|
||||
10: {13, 14},
|
||||
11: {15, 16},
|
||||
12: {^9, ^8},
|
||||
13: {17, 18},
|
||||
14: {19, 20},
|
||||
15: {^10, ^11},
|
||||
16: {21, ^12},
|
||||
17: {0, 22},
|
||||
18: {23, 24},
|
||||
19: {^13, 25},
|
||||
20: {26, ^14},
|
||||
21: {27, 28},
|
||||
22: {29, 30},
|
||||
23: {31, 32},
|
||||
24: {33, 34},
|
||||
25: {35, 36},
|
||||
26: {37, 38},
|
||||
27: {^15, 39},
|
||||
28: {40, 41},
|
||||
29: {42, 43},
|
||||
30: {44, 45},
|
||||
31: {^18, 46},
|
||||
32: {47, 48},
|
||||
33: {49, 50},
|
||||
34: {51, ^64},
|
||||
35: {52, 53},
|
||||
36: {54, ^16},
|
||||
37: {^17, 55},
|
||||
38: {56, 57},
|
||||
39: {58, 59},
|
||||
40: {60, 61},
|
||||
41: {62, ^0},
|
||||
42: {^1792, 63},
|
||||
43: {64, 65},
|
||||
44: {^1856, ^1920},
|
||||
45: {66, 67},
|
||||
46: {68, 69},
|
||||
47: {70, 71},
|
||||
48: {72, ^24},
|
||||
49: {^25, 73},
|
||||
50: {74, 75},
|
||||
51: {76, 77},
|
||||
52: {^23, 78},
|
||||
53: {79, 80},
|
||||
54: {81, 82},
|
||||
55: {83, 84},
|
||||
56: {85, 86},
|
||||
57: {87, ^22},
|
||||
58: {88, 89},
|
||||
59: {90, ^19},
|
||||
60: {^20, 91},
|
||||
61: {92, 93},
|
||||
62: {^21, 94},
|
||||
63: {^1984, ^2048},
|
||||
64: {^2112, ^2176},
|
||||
65: {^2240, ^2304},
|
||||
66: {^2368, ^2432},
|
||||
67: {^2496, ^2560},
|
||||
68: {^52, 95},
|
||||
69: {96, ^55},
|
||||
70: {^56, 97},
|
||||
71: {98, ^59},
|
||||
72: {^60, 99},
|
||||
73: {100, ^320},
|
||||
74: {^384, ^448},
|
||||
75: {101, ^53},
|
||||
76: {^54, 102},
|
||||
77: {103, 104},
|
||||
78: {^50, ^51},
|
||||
79: {^44, ^45},
|
||||
80: {^46, ^47},
|
||||
81: {^57, ^58},
|
||||
82: {^61, ^256},
|
||||
83: {^48, ^49},
|
||||
84: {^62, ^63},
|
||||
85: {^30, ^31},
|
||||
86: {^32, ^33},
|
||||
87: {^40, ^41},
|
||||
88: {^128, ^192},
|
||||
89: {^26, ^27},
|
||||
90: {^28, ^29},
|
||||
91: {^34, ^35},
|
||||
92: {^36, ^37},
|
||||
93: {^38, ^39},
|
||||
94: {^42, ^43},
|
||||
95: {^640, ^704},
|
||||
96: {^768, ^832},
|
||||
97: {^1280, ^1344},
|
||||
98: {^1408, ^1472},
|
||||
99: {^1536, ^1600},
|
||||
100: {^1664, ^1728},
|
||||
101: {^512, ^576},
|
||||
102: {^896, ^960},
|
||||
103: {^1024, ^1088},
|
||||
104: {^1152, ^1216},
|
||||
}
|
||||
|
||||
const maxCodeLength = 13
|
||||
|
||||
// Each encodeTable is represented by an array of bitStrings.
|
||||
|
||||
// bitString is a pair of uint32 values representing a bit code.
|
||||
// The nBits low bits of bits make up the actual bit code.
|
||||
// Eg. bitString{0x0004, 8} represents the bitcode "00000100".
|
||||
type bitString struct {
|
||||
bits uint32
|
||||
nBits uint32
|
||||
}
|
||||
|
||||
// modeEncodeTable represents Table 1 and the End-of-Line code.
|
||||
var modeEncodeTable = [...]bitString{
|
||||
0: {0x0001, 4}, // "0001"
|
||||
1: {0x0001, 3}, // "001"
|
||||
2: {0x0001, 1}, // "1"
|
||||
3: {0x0003, 3}, // "011"
|
||||
4: {0x0003, 6}, // "000011"
|
||||
5: {0x0003, 7}, // "0000011"
|
||||
6: {0x0002, 3}, // "010"
|
||||
7: {0x0002, 6}, // "000010"
|
||||
8: {0x0002, 7}, // "0000010"
|
||||
9: {0x0001, 7}, // "0000001"
|
||||
10: {0x0001, 12}, // "000000000001"
|
||||
}
|
||||
|
||||
// whiteEncodeTable2 represents Table 2 for a white run.
|
||||
var whiteEncodeTable2 = [...]bitString{
|
||||
0: {0x0035, 8}, // "00110101"
|
||||
1: {0x0007, 6}, // "000111"
|
||||
2: {0x0007, 4}, // "0111"
|
||||
3: {0x0008, 4}, // "1000"
|
||||
4: {0x000b, 4}, // "1011"
|
||||
5: {0x000c, 4}, // "1100"
|
||||
6: {0x000e, 4}, // "1110"
|
||||
7: {0x000f, 4}, // "1111"
|
||||
8: {0x0013, 5}, // "10011"
|
||||
9: {0x0014, 5}, // "10100"
|
||||
10: {0x0007, 5}, // "00111"
|
||||
11: {0x0008, 5}, // "01000"
|
||||
12: {0x0008, 6}, // "001000"
|
||||
13: {0x0003, 6}, // "000011"
|
||||
14: {0x0034, 6}, // "110100"
|
||||
15: {0x0035, 6}, // "110101"
|
||||
16: {0x002a, 6}, // "101010"
|
||||
17: {0x002b, 6}, // "101011"
|
||||
18: {0x0027, 7}, // "0100111"
|
||||
19: {0x000c, 7}, // "0001100"
|
||||
20: {0x0008, 7}, // "0001000"
|
||||
21: {0x0017, 7}, // "0010111"
|
||||
22: {0x0003, 7}, // "0000011"
|
||||
23: {0x0004, 7}, // "0000100"
|
||||
24: {0x0028, 7}, // "0101000"
|
||||
25: {0x002b, 7}, // "0101011"
|
||||
26: {0x0013, 7}, // "0010011"
|
||||
27: {0x0024, 7}, // "0100100"
|
||||
28: {0x0018, 7}, // "0011000"
|
||||
29: {0x0002, 8}, // "00000010"
|
||||
30: {0x0003, 8}, // "00000011"
|
||||
31: {0x001a, 8}, // "00011010"
|
||||
32: {0x001b, 8}, // "00011011"
|
||||
33: {0x0012, 8}, // "00010010"
|
||||
34: {0x0013, 8}, // "00010011"
|
||||
35: {0x0014, 8}, // "00010100"
|
||||
36: {0x0015, 8}, // "00010101"
|
||||
37: {0x0016, 8}, // "00010110"
|
||||
38: {0x0017, 8}, // "00010111"
|
||||
39: {0x0028, 8}, // "00101000"
|
||||
40: {0x0029, 8}, // "00101001"
|
||||
41: {0x002a, 8}, // "00101010"
|
||||
42: {0x002b, 8}, // "00101011"
|
||||
43: {0x002c, 8}, // "00101100"
|
||||
44: {0x002d, 8}, // "00101101"
|
||||
45: {0x0004, 8}, // "00000100"
|
||||
46: {0x0005, 8}, // "00000101"
|
||||
47: {0x000a, 8}, // "00001010"
|
||||
48: {0x000b, 8}, // "00001011"
|
||||
49: {0x0052, 8}, // "01010010"
|
||||
50: {0x0053, 8}, // "01010011"
|
||||
51: {0x0054, 8}, // "01010100"
|
||||
52: {0x0055, 8}, // "01010101"
|
||||
53: {0x0024, 8}, // "00100100"
|
||||
54: {0x0025, 8}, // "00100101"
|
||||
55: {0x0058, 8}, // "01011000"
|
||||
56: {0x0059, 8}, // "01011001"
|
||||
57: {0x005a, 8}, // "01011010"
|
||||
58: {0x005b, 8}, // "01011011"
|
||||
59: {0x004a, 8}, // "01001010"
|
||||
60: {0x004b, 8}, // "01001011"
|
||||
61: {0x0032, 8}, // "00110010"
|
||||
62: {0x0033, 8}, // "00110011"
|
||||
63: {0x0034, 8}, // "00110100"
|
||||
}
|
||||
|
||||
// whiteEncodeTable3 represents Table 3 for a white run.
|
||||
var whiteEncodeTable3 = [...]bitString{
|
||||
0: {0x001b, 5}, // "11011"
|
||||
1: {0x0012, 5}, // "10010"
|
||||
2: {0x0017, 6}, // "010111"
|
||||
3: {0x0037, 7}, // "0110111"
|
||||
4: {0x0036, 8}, // "00110110"
|
||||
5: {0x0037, 8}, // "00110111"
|
||||
6: {0x0064, 8}, // "01100100"
|
||||
7: {0x0065, 8}, // "01100101"
|
||||
8: {0x0068, 8}, // "01101000"
|
||||
9: {0x0067, 8}, // "01100111"
|
||||
10: {0x00cc, 9}, // "011001100"
|
||||
11: {0x00cd, 9}, // "011001101"
|
||||
12: {0x00d2, 9}, // "011010010"
|
||||
13: {0x00d3, 9}, // "011010011"
|
||||
14: {0x00d4, 9}, // "011010100"
|
||||
15: {0x00d5, 9}, // "011010101"
|
||||
16: {0x00d6, 9}, // "011010110"
|
||||
17: {0x00d7, 9}, // "011010111"
|
||||
18: {0x00d8, 9}, // "011011000"
|
||||
19: {0x00d9, 9}, // "011011001"
|
||||
20: {0x00da, 9}, // "011011010"
|
||||
21: {0x00db, 9}, // "011011011"
|
||||
22: {0x0098, 9}, // "010011000"
|
||||
23: {0x0099, 9}, // "010011001"
|
||||
24: {0x009a, 9}, // "010011010"
|
||||
25: {0x0018, 6}, // "011000"
|
||||
26: {0x009b, 9}, // "010011011"
|
||||
27: {0x0008, 11}, // "00000001000"
|
||||
28: {0x000c, 11}, // "00000001100"
|
||||
29: {0x000d, 11}, // "00000001101"
|
||||
30: {0x0012, 12}, // "000000010010"
|
||||
31: {0x0013, 12}, // "000000010011"
|
||||
32: {0x0014, 12}, // "000000010100"
|
||||
33: {0x0015, 12}, // "000000010101"
|
||||
34: {0x0016, 12}, // "000000010110"
|
||||
35: {0x0017, 12}, // "000000010111"
|
||||
36: {0x001c, 12}, // "000000011100"
|
||||
37: {0x001d, 12}, // "000000011101"
|
||||
38: {0x001e, 12}, // "000000011110"
|
||||
39: {0x001f, 12}, // "000000011111"
|
||||
}
|
||||
|
||||
// blackEncodeTable2 represents Table 2 for a black run.
|
||||
var blackEncodeTable2 = [...]bitString{
|
||||
0: {0x0037, 10}, // "0000110111"
|
||||
1: {0x0002, 3}, // "010"
|
||||
2: {0x0003, 2}, // "11"
|
||||
3: {0x0002, 2}, // "10"
|
||||
4: {0x0003, 3}, // "011"
|
||||
5: {0x0003, 4}, // "0011"
|
||||
6: {0x0002, 4}, // "0010"
|
||||
7: {0x0003, 5}, // "00011"
|
||||
8: {0x0005, 6}, // "000101"
|
||||
9: {0x0004, 6}, // "000100"
|
||||
10: {0x0004, 7}, // "0000100"
|
||||
11: {0x0005, 7}, // "0000101"
|
||||
12: {0x0007, 7}, // "0000111"
|
||||
13: {0x0004, 8}, // "00000100"
|
||||
14: {0x0007, 8}, // "00000111"
|
||||
15: {0x0018, 9}, // "000011000"
|
||||
16: {0x0017, 10}, // "0000010111"
|
||||
17: {0x0018, 10}, // "0000011000"
|
||||
18: {0x0008, 10}, // "0000001000"
|
||||
19: {0x0067, 11}, // "00001100111"
|
||||
20: {0x0068, 11}, // "00001101000"
|
||||
21: {0x006c, 11}, // "00001101100"
|
||||
22: {0x0037, 11}, // "00000110111"
|
||||
23: {0x0028, 11}, // "00000101000"
|
||||
24: {0x0017, 11}, // "00000010111"
|
||||
25: {0x0018, 11}, // "00000011000"
|
||||
26: {0x00ca, 12}, // "000011001010"
|
||||
27: {0x00cb, 12}, // "000011001011"
|
||||
28: {0x00cc, 12}, // "000011001100"
|
||||
29: {0x00cd, 12}, // "000011001101"
|
||||
30: {0x0068, 12}, // "000001101000"
|
||||
31: {0x0069, 12}, // "000001101001"
|
||||
32: {0x006a, 12}, // "000001101010"
|
||||
33: {0x006b, 12}, // "000001101011"
|
||||
34: {0x00d2, 12}, // "000011010010"
|
||||
35: {0x00d3, 12}, // "000011010011"
|
||||
36: {0x00d4, 12}, // "000011010100"
|
||||
37: {0x00d5, 12}, // "000011010101"
|
||||
38: {0x00d6, 12}, // "000011010110"
|
||||
39: {0x00d7, 12}, // "000011010111"
|
||||
40: {0x006c, 12}, // "000001101100"
|
||||
41: {0x006d, 12}, // "000001101101"
|
||||
42: {0x00da, 12}, // "000011011010"
|
||||
43: {0x00db, 12}, // "000011011011"
|
||||
44: {0x0054, 12}, // "000001010100"
|
||||
45: {0x0055, 12}, // "000001010101"
|
||||
46: {0x0056, 12}, // "000001010110"
|
||||
47: {0x0057, 12}, // "000001010111"
|
||||
48: {0x0064, 12}, // "000001100100"
|
||||
49: {0x0065, 12}, // "000001100101"
|
||||
50: {0x0052, 12}, // "000001010010"
|
||||
51: {0x0053, 12}, // "000001010011"
|
||||
52: {0x0024, 12}, // "000000100100"
|
||||
53: {0x0037, 12}, // "000000110111"
|
||||
54: {0x0038, 12}, // "000000111000"
|
||||
55: {0x0027, 12}, // "000000100111"
|
||||
56: {0x0028, 12}, // "000000101000"
|
||||
57: {0x0058, 12}, // "000001011000"
|
||||
58: {0x0059, 12}, // "000001011001"
|
||||
59: {0x002b, 12}, // "000000101011"
|
||||
60: {0x002c, 12}, // "000000101100"
|
||||
61: {0x005a, 12}, // "000001011010"
|
||||
62: {0x0066, 12}, // "000001100110"
|
||||
63: {0x0067, 12}, // "000001100111"
|
||||
}
|
||||
|
||||
// blackEncodeTable3 represents Table 3 for a black run.
|
||||
var blackEncodeTable3 = [...]bitString{
|
||||
0: {0x000f, 10}, // "0000001111"
|
||||
1: {0x00c8, 12}, // "000011001000"
|
||||
2: {0x00c9, 12}, // "000011001001"
|
||||
3: {0x005b, 12}, // "000001011011"
|
||||
4: {0x0033, 12}, // "000000110011"
|
||||
5: {0x0034, 12}, // "000000110100"
|
||||
6: {0x0035, 12}, // "000000110101"
|
||||
7: {0x006c, 13}, // "0000001101100"
|
||||
8: {0x006d, 13}, // "0000001101101"
|
||||
9: {0x004a, 13}, // "0000001001010"
|
||||
10: {0x004b, 13}, // "0000001001011"
|
||||
11: {0x004c, 13}, // "0000001001100"
|
||||
12: {0x004d, 13}, // "0000001001101"
|
||||
13: {0x0072, 13}, // "0000001110010"
|
||||
14: {0x0073, 13}, // "0000001110011"
|
||||
15: {0x0074, 13}, // "0000001110100"
|
||||
16: {0x0075, 13}, // "0000001110101"
|
||||
17: {0x0076, 13}, // "0000001110110"
|
||||
18: {0x0077, 13}, // "0000001110111"
|
||||
19: {0x0052, 13}, // "0000001010010"
|
||||
20: {0x0053, 13}, // "0000001010011"
|
||||
21: {0x0054, 13}, // "0000001010100"
|
||||
22: {0x0055, 13}, // "0000001010101"
|
||||
23: {0x005a, 13}, // "0000001011010"
|
||||
24: {0x005b, 13}, // "0000001011011"
|
||||
25: {0x0064, 13}, // "0000001100100"
|
||||
26: {0x0065, 13}, // "0000001100101"
|
||||
27: {0x0008, 11}, // "00000001000"
|
||||
28: {0x000c, 11}, // "00000001100"
|
||||
29: {0x000d, 11}, // "00000001101"
|
||||
30: {0x0012, 12}, // "000000010010"
|
||||
31: {0x0013, 12}, // "000000010011"
|
||||
32: {0x0014, 12}, // "000000010100"
|
||||
33: {0x0015, 12}, // "000000010101"
|
||||
34: {0x0016, 12}, // "000000010110"
|
||||
35: {0x0017, 12}, // "000000010111"
|
||||
36: {0x001c, 12}, // "000000011100"
|
||||
37: {0x001d, 12}, // "000000011101"
|
||||
38: {0x001e, 12}, // "000000011110"
|
||||
39: {0x001f, 12}, // "000000011111"
|
||||
}
|
||||
|
||||
// COPY PASTE table.go BEGIN
|
||||
|
||||
const (
|
||||
modePass = iota // Pass
|
||||
modeH // Horizontal
|
||||
modeV0 // Vertical-0
|
||||
modeVR1 // Vertical-Right-1
|
||||
modeVR2 // Vertical-Right-2
|
||||
modeVR3 // Vertical-Right-3
|
||||
modeVL1 // Vertical-Left-1
|
||||
modeVL2 // Vertical-Left-2
|
||||
modeVL3 // Vertical-Left-3
|
||||
modeExt // Extension
|
||||
modeEOL // End-of-Line
|
||||
)
|
||||
|
||||
// COPY PASTE table.go END
|
||||
102
vendor/golang.org/x/image/ccitt/writer.go
generated
vendored
Normal file
102
vendor/golang.org/x/image/ccitt/writer.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ccitt
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
)
|
||||
|
||||
type bitWriter struct {
|
||||
w io.Writer
|
||||
|
||||
// order is whether to process w's bytes LSB first or MSB first.
|
||||
order Order
|
||||
|
||||
// The high nBits bits of the bits field hold encoded bits to be written to w.
|
||||
bits uint64
|
||||
nBits uint32
|
||||
|
||||
// bytes[:bw] holds encoded bytes not yet written to w.
|
||||
// Overflow protection is ensured by using a multiple of 8 as bytes length.
|
||||
bw uint32
|
||||
bytes [1024]uint8
|
||||
}
|
||||
|
||||
// flushBits copies 64 bits from b.bits to b.bytes. If b.bytes is then full, it
|
||||
// is written to b.w.
|
||||
func (b *bitWriter) flushBits() error {
|
||||
binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits)
|
||||
b.bits = 0
|
||||
b.nBits = 0
|
||||
b.bw += 8
|
||||
if b.bw < uint32(len(b.bytes)) {
|
||||
return nil
|
||||
}
|
||||
b.bw = 0
|
||||
if b.order != MSB {
|
||||
reverseBitsWithinBytes(b.bytes[:])
|
||||
}
|
||||
_, err := b.w.Write(b.bytes[:])
|
||||
return err
|
||||
}
|
||||
|
||||
// close finalizes a bitcode stream by writing any
|
||||
// pending bits to bitWriter's underlying io.Writer.
|
||||
func (b *bitWriter) close() error {
|
||||
// Write any encoded bits to bytes.
|
||||
if b.nBits > 0 {
|
||||
binary.BigEndian.PutUint64(b.bytes[b.bw:], b.bits)
|
||||
b.bw += (b.nBits + 7) >> 3
|
||||
}
|
||||
|
||||
if b.order != MSB {
|
||||
reverseBitsWithinBytes(b.bytes[:b.bw])
|
||||
}
|
||||
|
||||
// Write b.bw bytes to b.w.
|
||||
_, err := b.w.Write(b.bytes[:b.bw])
|
||||
return err
|
||||
}
|
||||
|
||||
// alignToByteBoundary rounds b.nBits up to a multiple of 8.
|
||||
// If all 64 bits are used, flush them to bitWriter's bytes.
|
||||
func (b *bitWriter) alignToByteBoundary() error {
|
||||
if b.nBits = (b.nBits + 7) &^ 7; b.nBits == 64 {
|
||||
return b.flushBits()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeCode writes a variable length bitcode to b's underlying io.Writer.
|
||||
func (b *bitWriter) writeCode(bs bitString) error {
|
||||
bits := bs.bits
|
||||
nBits := bs.nBits
|
||||
if 64-b.nBits >= nBits {
|
||||
// b.bits has sufficient room for storing nBits bits.
|
||||
b.bits |= uint64(bits) << (64 - nBits - b.nBits)
|
||||
b.nBits += nBits
|
||||
if b.nBits == 64 {
|
||||
return b.flushBits()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Number of leading bits that fill b.bits.
|
||||
i := 64 - b.nBits
|
||||
|
||||
// Fill b.bits then flush and write remaining bits.
|
||||
b.bits |= uint64(bits) >> (nBits - i)
|
||||
b.nBits = 64
|
||||
|
||||
if err := b.flushBits(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nBits -= i
|
||||
b.bits = uint64(bits) << (64 - nBits)
|
||||
b.nBits = nBits
|
||||
return nil
|
||||
}
|
||||
16
vendor/golang.org/x/image/tiff/consts.go
generated
vendored
16
vendor/golang.org/x/image/tiff/consts.go
generated
vendored
@@ -42,11 +42,16 @@ const (
|
||||
tCompression = 259
|
||||
tPhotometricInterpretation = 262
|
||||
|
||||
tFillOrder = 266
|
||||
|
||||
tStripOffsets = 273
|
||||
tSamplesPerPixel = 277
|
||||
tRowsPerStrip = 278
|
||||
tStripByteCounts = 279
|
||||
|
||||
tT4Options = 292 // CCITT Group 3 options, a set of 32 flag bits.
|
||||
tT6Options = 293 // CCITT Group 4 options, a set of 32 flag bits.
|
||||
|
||||
tTileWidth = 322
|
||||
tTileLength = 323
|
||||
tTileOffsets = 324
|
||||
@@ -112,22 +117,33 @@ const (
|
||||
mRGB
|
||||
mRGBA
|
||||
mNRGBA
|
||||
mCMYK
|
||||
)
|
||||
|
||||
// CompressionType describes the type of compression used in Options.
|
||||
type CompressionType int
|
||||
|
||||
// Constants for supported compression types.
|
||||
const (
|
||||
Uncompressed CompressionType = iota
|
||||
Deflate
|
||||
LZW
|
||||
CCITTGroup3
|
||||
CCITTGroup4
|
||||
)
|
||||
|
||||
// specValue returns the compression type constant from the TIFF spec that
|
||||
// is equivalent to c.
|
||||
func (c CompressionType) specValue() uint32 {
|
||||
switch c {
|
||||
case LZW:
|
||||
return cLZW
|
||||
case Deflate:
|
||||
return cDeflate
|
||||
case CCITTGroup3:
|
||||
return cG3
|
||||
case CCITTGroup4:
|
||||
return cG4
|
||||
}
|
||||
return cNone
|
||||
}
|
||||
|
||||
29
vendor/golang.org/x/image/tiff/fuzz.go
generated
vendored
Normal file
29
vendor/golang.org/x/image/tiff/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
package tiff
|
||||
|
||||
import "bytes"
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
cfg, err := DecodeConfig(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
if cfg.Width*cfg.Height > 1e6 {
|
||||
return 0
|
||||
}
|
||||
img, err := Decode(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
var w bytes.Buffer
|
||||
err = Encode(&w, img, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return 1
|
||||
}
|
||||
26
vendor/golang.org/x/image/tiff/reader.go
generated
vendored
26
vendor/golang.org/x/image/tiff/reader.go
generated
vendored
@@ -17,6 +17,7 @@ import (
|
||||
"io/ioutil"
|
||||
"math"
|
||||
|
||||
"golang.org/x/image/ccitt"
|
||||
"golang.org/x/image/tiff/lzw"
|
||||
)
|
||||
|
||||
@@ -129,7 +130,10 @@ func (d *decoder) parseIFD(p []byte) (int, error) {
|
||||
tTileOffsets,
|
||||
tTileByteCounts,
|
||||
tImageLength,
|
||||
tImageWidth:
|
||||
tImageWidth,
|
||||
tFillOrder,
|
||||
tT4Options,
|
||||
tT6Options:
|
||||
val, err := d.ifdUint(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@@ -441,7 +445,8 @@ func newDecoder(r io.Reader) (*decoder, error) {
|
||||
d.config.Height = int(d.firstVal(tImageLength))
|
||||
|
||||
if _, ok := d.features[tBitsPerSample]; !ok {
|
||||
return nil, FormatError("BitsPerSample tag missing")
|
||||
// Default is 1 per specification.
|
||||
d.features[tBitsPerSample] = []uint{1}
|
||||
}
|
||||
d.bpp = d.firstVal(tBitsPerSample)
|
||||
switch d.bpp {
|
||||
@@ -539,6 +544,13 @@ func DecodeConfig(r io.Reader) (image.Config, error) {
|
||||
return d.config, nil
|
||||
}
|
||||
|
||||
func ccittFillOrder(tiffFillOrder uint) ccitt.Order {
|
||||
if tiffFillOrder == 2 {
|
||||
return ccitt.LSB
|
||||
}
|
||||
return ccitt.MSB
|
||||
}
|
||||
|
||||
// Decode reads a TIFF image from r and returns it as an image.Image.
|
||||
// The type of Image returned depends on the contents of the TIFF.
|
||||
func Decode(r io.Reader) (img image.Image, err error) {
|
||||
@@ -644,6 +656,16 @@ func Decode(r io.Reader) (img image.Image, err error) {
|
||||
d.buf = make([]byte, n)
|
||||
_, err = d.r.ReadAt(d.buf, offset)
|
||||
}
|
||||
case cG3:
|
||||
inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero
|
||||
order := ccittFillOrder(d.firstVal(tFillOrder))
|
||||
r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group3, blkW, blkH, &ccitt.Options{Invert: inv, Align: false})
|
||||
d.buf, err = ioutil.ReadAll(r)
|
||||
case cG4:
|
||||
inv := d.firstVal(tPhotometricInterpretation) == pWhiteIsZero
|
||||
order := ccittFillOrder(d.firstVal(tFillOrder))
|
||||
r := ccitt.NewReader(io.NewSectionReader(d.r, offset, n), order, ccitt.Group4, blkW, blkH, &ccitt.Options{Invert: inv, Align: false})
|
||||
d.buf, err = ioutil.ReadAll(r)
|
||||
case cLZW:
|
||||
r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
|
||||
d.buf, err = ioutil.ReadAll(r)
|
||||
|
||||
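For reference, the new cG3/cG4 cases above hand strip decoding to the ccitt package. Below is a minimal sketch of the same call used on its own, assuming a hypothetical raw Group 4 strip; the names rawStrip, width, and height are placeholders and not part of this change, and Invert would in practice depend on the image's PhotometricInterpretation.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/image/ccitt"
)

// decodeG4Strip expands a CCITT Group 4 encoded strip into 1-bit pixel data.
func decodeG4Strip(rawStrip []byte, width, height int) ([]byte, error) {
	// MSB fill order and Align: false mirror the defaults used by the TIFF
	// decoder above.
	r := ccitt.NewReader(bytes.NewReader(rawStrip), ccitt.MSB, ccitt.Group4,
		width, height, &ccitt.Options{Invert: false, Align: false})
	return ioutil.ReadAll(r)
}

func main() {
	// Placeholder input: a real strip would come from a TIFF file's strip data.
	pixels, err := decodeG4Strip([]byte{ /* CCITT-encoded bytes */ }, 8, 1)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("decoded %d bytes of 1-bit pixel data\n", len(pixels))
}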
27 vendor/golang.org/x/mod/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22 vendor/golang.org/x/mod/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
@@ -2,8 +2,86 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package module defines the module.Version type
|
||||
// along with support code.
|
||||
// Package module defines the module.Version type along with support code.
|
||||
//
|
||||
// The module.Version type is a simple Path, Version pair:
|
||||
//
|
||||
// type Version struct {
|
||||
// Path string
|
||||
// Version string
|
||||
// }
|
||||
//
|
||||
// There are no restrictions imposed directly by use of this structure,
|
||||
// but additional checking functions, most notably Check, verify that
|
||||
// a particular path, version pair is valid.
|
||||
//
|
||||
// Escaped Paths
|
||||
//
|
||||
// Module paths appear as substrings of file system paths
|
||||
// (in the download cache) and of web server URLs in the proxy protocol.
|
||||
// In general we cannot rely on file systems to be case-sensitive,
|
||||
// nor can we rely on web servers, since they read from file systems.
|
||||
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
|
||||
// and rsc.io/quote separate. Windows and macOS don't.
|
||||
// Instead, we must never require two different casings of a file path.
|
||||
// Because we want the download cache to match the proxy protocol,
|
||||
// and because we want the proxy protocol to be possible to serve
|
||||
// from a tree of static files (which might be stored on a case-insensitive
|
||||
// file system), the proxy protocol must never require two different casings
|
||||
// of a URL path either.
|
||||
//
|
||||
// One possibility would be to make the escaped form be the lowercase
|
||||
// hexadecimal encoding of the actual path bytes. This would avoid ever
|
||||
// needing different casings of a file path, but it would be fairly illegible
|
||||
// to most programmers when those paths appeared in the file system
|
||||
// (including in file paths in compiler errors and stack traces)
|
||||
// in web server logs, and so on. Instead, we want a safe escaped form that
|
||||
// leaves most paths unaltered.
|
||||
//
|
||||
// The safe escaped form is to replace every uppercase letter
|
||||
// with an exclamation mark followed by the letter's lowercase equivalent.
|
||||
//
|
||||
// For example,
|
||||
//
|
||||
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
|
||||
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
|
||||
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
|
||||
//
|
||||
// Import paths that avoid upper-case letters are left unchanged.
|
||||
// Note that because import paths are ASCII-only and avoid various
|
||||
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
|
||||
// and avoids the same problematic punctuation.
|
||||
//
|
||||
// Import paths have never allowed exclamation marks, so there is no
|
||||
// need to define how to escape a literal !.
|
||||
//
|
||||
// Unicode Restrictions
|
||||
//
|
||||
// Today, paths are disallowed from using Unicode.
|
||||
//
|
||||
// Although paths are currently disallowed from using Unicode,
|
||||
// we would like at some point to allow Unicode letters as well, to assume that
|
||||
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
|
||||
// the !-for-uppercase convention for escaping them in the file system.
|
||||
// But there are at least two subtle considerations.
|
||||
//
|
||||
// First, note that not all case-fold equivalent distinct runes
|
||||
// form an upper/lower pair.
|
||||
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
|
||||
// are three distinct runes that case-fold to each other.
|
||||
// When we do add Unicode letters, we must not assume that upper/lower
|
||||
// are the only case-equivalent pairs.
|
||||
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
|
||||
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
|
||||
//
|
||||
// Second, it would be nice to allow Unicode marks as well as letters,
|
||||
// but marks include combining marks, and then we must deal not
|
||||
// only with case folding but also normalization: both U+00E9 ('é')
|
||||
// and U+0065 U+0301 ('e' followed by combining acute accent)
|
||||
// look the same on the page and are treated by some file systems
|
||||
// as the same path. If we do allow Unicode marks in paths, there
|
||||
// must be some kind of normalization to allow only one canonical
|
||||
// encoding of any character used in an import path.
|
||||
package module
|
||||
|
||||
// IMPORTANT NOTE
|
||||
@@ -24,22 +102,95 @@ import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/tools/internal/semver"
|
||||
"golang.org/x/mod/semver"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// A Version is defined by a module path and version pair.
|
||||
// A Version (for clients, a module.Version) is defined by a module path and version pair.
|
||||
// These are stored in their plain (unescaped) form.
|
||||
type Version struct {
|
||||
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
|
||||
Path string
|
||||
|
||||
// Version is usually a semantic version in canonical form.
|
||||
// There are two exceptions to this general rule.
|
||||
// There are three exceptions to this general rule.
|
||||
// First, the top-level target of a build has no specific version
|
||||
// and uses Version = "".
|
||||
// Second, during MVS calculations the version "none" is used
|
||||
// to represent the decision to take no version of a given module.
|
||||
// Third, filesystem paths found in "replace" directives are
|
||||
// represented by a path with an empty version.
|
||||
Version string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// String returns a representation of the Version suitable for logging
|
||||
// (Path@Version, or just Path if Version is empty).
|
||||
func (m Version) String() string {
|
||||
if m.Version == "" {
|
||||
return m.Path
|
||||
}
|
||||
return m.Path + "@" + m.Version
|
||||
}
|
||||
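To illustrate the Version/String pairing documented above, here is a small sketch. It assumes the standalone golang.org/x/mod module rather than this vendored copy, and the module paths are made-up examples.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	v := module.Version{Path: "golang.org/x/text", Version: "v0.3.2"}
	fmt.Println(v.String()) // golang.org/x/text@v0.3.2

	// The top-level build target carries no version, so String prints only the path.
	target := module.Version{Path: "example.com/me/app"}
	fmt.Println(target.String()) // example.com/me/app
}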
|
||||
// A ModuleError indicates an error specific to a module.
|
||||
type ModuleError struct {
|
||||
Path string
|
||||
Version string
|
||||
Err error
|
||||
}
|
||||
|
||||
// VersionError returns a ModuleError derived from a Version and error,
|
||||
// or err itself if it is already such an error.
|
||||
func VersionError(v Version, err error) error {
|
||||
var mErr *ModuleError
|
||||
if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
|
||||
return err
|
||||
}
|
||||
return &ModuleError{
|
||||
Path: v.Path,
|
||||
Version: v.Version,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ModuleError) Error() string {
|
||||
if v, ok := e.Err.(*InvalidVersionError); ok {
|
||||
return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
|
||||
}
|
||||
if e.Version != "" {
|
||||
return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
|
||||
}
|
||||
return fmt.Sprintf("module %s: %v", e.Path, e.Err)
|
||||
}
|
||||
|
||||
func (e *ModuleError) Unwrap() error { return e.Err }
|
||||
|
||||
// An InvalidVersionError indicates an error specific to a version, with the
|
||||
// module path unknown or specified externally.
|
||||
//
|
||||
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
|
||||
// must not wrap a ModuleError.
|
||||
type InvalidVersionError struct {
|
||||
Version string
|
||||
Pseudo bool
|
||||
Err error
|
||||
}
|
||||
|
||||
// noun returns either "version" or "pseudo-version", depending on whether
|
||||
// e.Version is a pseudo-version.
|
||||
func (e *InvalidVersionError) noun() string {
|
||||
if e.Pseudo {
|
||||
return "pseudo-version"
|
||||
}
|
||||
return "version"
|
||||
}
|
||||
|
||||
func (e *InvalidVersionError) Error() string {
|
||||
return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
|
||||
}
|
||||
|
||||
func (e *InvalidVersionError) Unwrap() error { return e.Err }
|
||||
|
||||
// Check checks that a given module path, version pair is valid.
|
||||
// In addition to the path being a valid module path
|
||||
// and the version being a valid semantic version,
|
||||
@@ -51,17 +202,14 @@ func Check(path, version string) error {
|
||||
return err
|
||||
}
|
||||
if !semver.IsValid(version) {
|
||||
return fmt.Errorf("malformed semantic version %v", version)
|
||||
return &ModuleError{
|
||||
Path: path,
|
||||
Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
|
||||
}
|
||||
}
|
||||
_, pathMajor, _ := SplitPathVersion(path)
|
||||
if !MatchPathMajor(version, pathMajor) {
|
||||
if pathMajor == "" {
|
||||
pathMajor = "v0 or v1"
|
||||
}
|
||||
if pathMajor[0] == '.' { // .v1
|
||||
pathMajor = pathMajor[1:]
|
||||
}
|
||||
return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
|
||||
if err := CheckPathMajor(version, pathMajor); err != nil {
|
||||
return &ModuleError{Path: path, Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
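The rewritten Check now reports structured errors instead of flat strings. A sketch of how a caller might inspect them with errors.As follows; it uses the importable golang.org/x/mod/module package, and the path and version are arbitrary examples.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// A /v2 module path paired with a v1 version is rejected.
	err := module.Check("example.com/lib/v2", "v1.0.0")
	fmt.Println(err) // example.com/lib/v2@v1.0.0: invalid version: should be v2, not v1

	// The returned error wraps *ModuleError and, inside it, *InvalidVersionError.
	var modErr *module.ModuleError
	if errors.As(err, &modErr) {
		fmt.Println("module:", modErr.Path)
	}
	var verErr *module.InvalidVersionError
	if errors.As(err, &verErr) {
		fmt.Println("bad version:", verErr.Version)
	}
}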
@@ -79,7 +227,7 @@ func firstPathOK(r rune) bool {
|
||||
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
|
||||
// This matches what "go get" has historically recognized in import paths.
|
||||
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
|
||||
// care in the safe encoding (see note below).
|
||||
// care in the safe encoding (see "escaped paths" above).
|
||||
func pathOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
|
||||
@@ -94,7 +242,7 @@ func pathOK(r rune) bool {
|
||||
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
|
||||
// If we expand the set of allowed characters here, we have to
|
||||
// work harder at detecting potential case-folding and normalization collisions.
|
||||
// See note about "safe encoding" below.
|
||||
// See note about "escaped paths" above.
|
||||
func fileNameOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
// Entire set of ASCII punctuation, from which we remove characters:
|
||||
@@ -120,6 +268,17 @@ func fileNameOK(r rune) bool {
|
||||
}
|
||||
|
||||
// CheckPath checks that a module path is valid.
|
||||
// A valid module path is a valid import path, as checked by CheckImportPath,
|
||||
// with two additional constraints.
|
||||
// First, the leading path element (up to the first slash, if any),
|
||||
// by convention a domain name, must contain only lower-case ASCII letters,
|
||||
// ASCII digits, dots (U+002E), and dashes (U+002D);
|
||||
// it must contain at least one dot and cannot start with a dash.
|
||||
// Second, for a final path element of the form /vN, where N looks numeric
|
||||
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
|
||||
// and must not contain any dots. For paths beginning with "gopkg.in/",
|
||||
// this second requirement is replaced by a requirement that the path
|
||||
// follow the gopkg.in server's conventions.
|
||||
func CheckPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed module path %q: %v", path, err)
|
||||
@@ -149,6 +308,20 @@ func CheckPath(path string) error {
|
||||
}
|
||||
|
||||
// CheckImportPath checks that an import path is valid.
|
||||
//
|
||||
// A valid import path consists of one or more valid path elements
|
||||
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
|
||||
//
|
||||
// A valid path element is a non-empty string made up of
|
||||
// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
|
||||
// It must not begin or end with a dot (U+002E), nor contain two dots in a row.
|
||||
//
|
||||
// The element prefix up to the first dot must not be a reserved file name
|
||||
// on Windows, regardless of case (CON, com1, NuL, and so on).
|
||||
//
|
||||
// CheckImportPath may be less restrictive in the future, but see the
|
||||
// top-level package documentation for additional information about
|
||||
// subtleties of Unicode.
|
||||
func CheckImportPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed import path %q: %v", path, err)
|
||||
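A short sketch exercising the two validators documented above; the inputs are arbitrary examples and the package is the importable golang.org/x/mod/module rather than this vendored copy.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Module paths need a dotted, lower-case first element and a well-formed /vN suffix.
	fmt.Println(module.CheckPath("github.com/Azure/azure-sdk-for-go")) // <nil>
	fmt.Println(module.CheckPath("localpkg/util"))                     // error: no dot in the first path element

	// Import paths use the same element syntax but without the module-specific rules.
	fmt.Println(module.CheckImportPath("github.com/user/repo/internal/x")) // <nil>
	fmt.Println(module.CheckImportPath("bad//double/slash"))               // error: double slash
}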
@@ -169,8 +342,8 @@ func checkPath(path string, fileName bool) error {
|
||||
if path == "" {
|
||||
return fmt.Errorf("empty string")
|
||||
}
|
||||
if strings.Contains(path, "..") {
|
||||
return fmt.Errorf("double dot")
|
||||
if path[0] == '-' {
|
||||
return fmt.Errorf("leading dash")
|
||||
}
|
||||
if strings.Contains(path, "//") {
|
||||
return fmt.Errorf("double slash")
|
||||
@@ -226,13 +399,24 @@ func checkElem(elem string, fileName bool) error {
|
||||
}
|
||||
for _, bad := range badWindowsNames {
|
||||
if strings.EqualFold(bad, short) {
|
||||
return fmt.Errorf("disallowed path element %q", elem)
|
||||
return fmt.Errorf("%q disallowed as path element component on Windows", short)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckFilePath checks whether a slash-separated file path is valid.
|
||||
// CheckFilePath checks that a slash-separated file path is valid.
|
||||
// The definition of a valid file path is the same as the definition
|
||||
// of a valid import path except that the set of allowed characters is larger:
|
||||
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
|
||||
// and the ASCII punctuation characters
|
||||
// “!#$%&()+,-.=@[]^_{}~”.
|
||||
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
|
||||
// have special meanings in certain shells or operating systems.)
|
||||
//
|
||||
// CheckFilePath may be less restrictive in the future, but see the
|
||||
// top-level package documentation for additional information about
|
||||
// subtleties of Unicode.
|
||||
func CheckFilePath(path string) error {
|
||||
if err := checkPath(path, true); err != nil {
|
||||
return fmt.Errorf("malformed file path %q: %v", path, err)
|
||||
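CheckFilePath relaxes the character set relative to import paths, as the doc comment above describes. A quick sketch, with made-up file names:

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Spaces and most punctuation are acceptable in file path elements.
	fmt.Println(module.CheckFilePath("testdata/My File (v2).txt")) // <nil>
	// Characters with shell or OS meaning, such as ':', stay disallowed.
	fmt.Println(module.CheckFilePath("testdata/bad:name.txt")) // error
}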
@@ -271,6 +455,9 @@ var badWindowsNames = []string{
|
||||
// and version is either empty or "/vN" for N >= 2.
|
||||
// As a special case, gopkg.in paths are recognized directly;
|
||||
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
|
||||
// SplitPathVersion returns with ok = false when presented with
|
||||
// a path whose last path element does not satisfy the constraints
|
||||
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
|
||||
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
|
||||
if strings.HasPrefix(path, "gopkg.in/") {
|
||||
return splitGopkgIn(path)
|
||||
@@ -319,20 +506,65 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
|
||||
|
||||
// MatchPathMajor reports whether the semantic version v
|
||||
// matches the path major version pathMajor.
|
||||
//
|
||||
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
|
||||
func MatchPathMajor(v, pathMajor string) bool {
|
||||
return CheckPathMajor(v, pathMajor) == nil
|
||||
}
|
||||
|
||||
// CheckPathMajor returns a non-nil error if the semantic version v
|
||||
// does not match the path major version pathMajor.
|
||||
func CheckPathMajor(v, pathMajor string) error {
|
||||
// TODO(jayconrod): return errors or panic for invalid inputs. This function
|
||||
// (and others) was covered by integration tests for cmd/go, and surrounding
|
||||
// code protected against invalid inputs like non-canonical versions.
|
||||
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
|
||||
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
|
||||
}
|
||||
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
|
||||
// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
|
||||
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
m := semver.Major(v)
|
||||
if pathMajor == "" {
|
||||
return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
|
||||
if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
|
||||
return nil
|
||||
}
|
||||
pathMajor = "v0 or v1"
|
||||
} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
|
||||
if m == pathMajor[1:] {
|
||||
return nil
|
||||
}
|
||||
pathMajor = pathMajor[1:]
|
||||
}
|
||||
return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
|
||||
return &InvalidVersionError{
|
||||
Version: v,
|
||||
Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
|
||||
}
|
||||
}
|
||||
|
||||
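MatchPathMajor is now a thin wrapper over CheckPathMajor. A sketch of both, together with SplitPathVersion, follows; the path and versions are arbitrary examples.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	prefix, pathMajor, ok := module.SplitPathVersion("example.com/lib/v2")
	fmt.Println(prefix, pathMajor, ok) // example.com/lib /v2 true

	fmt.Println(module.MatchPathMajor("v2.3.4", pathMajor)) // true

	// CheckPathMajor explains the mismatch instead of just reporting false.
	if err := module.CheckPathMajor("v3.0.0", pathMajor); err != nil {
		fmt.Println(err) // version "v3.0.0" invalid: should be v2, not v3
	}
}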
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
|
||||
// An empty PathMajorPrefix allows either v0 or v1.
|
||||
//
|
||||
// Note that MatchPathMajor may accept some versions that do not actually begin
|
||||
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
|
||||
// pathMajor, even though that pathMajor implies 'v1' tagging.
|
||||
func PathMajorPrefix(pathMajor string) string {
|
||||
if pathMajor == "" {
|
||||
return ""
|
||||
}
|
||||
if pathMajor[0] != '/' && pathMajor[0] != '.' {
|
||||
panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
|
||||
}
|
||||
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
|
||||
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
|
||||
}
|
||||
m := pathMajor[1:]
|
||||
if m != semver.Major(m) {
|
||||
panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// CanonicalVersion returns the canonical form of the version string v.
|
||||
@@ -345,7 +577,10 @@ func CanonicalVersion(v string) string {
|
||||
return cv
|
||||
}
|
||||
|
||||
// Sort sorts the list by Path, breaking ties by comparing Versions.
|
||||
// Sort sorts the list by Path, breaking ties by comparing Version fields.
|
||||
// The Version fields are interpreted as semantic versions (using semver.Compare)
|
||||
// optionally followed by a tie-breaking suffix introduced by a slash character,
|
||||
// like in "v0.0.1/go.mod".
|
||||
func Sort(list []Version) {
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
mi := list[i]
|
||||
@@ -372,93 +607,36 @@ func Sort(list []Version) {
|
||||
})
|
||||
}
|
||||
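A sketch of Sort on a small list; the module paths and versions are made-up examples and ordering is by path, then semantic version.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	list := []module.Version{
		{Path: "example.com/b", Version: "v1.2.0"},
		{Path: "example.com/a", Version: "v1.10.0"},
		{Path: "example.com/a", Version: "v1.2.0"},
	}
	module.Sort(list)
	for _, v := range list {
		fmt.Println(v)
	}
	// example.com/a@v1.2.0
	// example.com/a@v1.10.0   (semantic, not lexical, ordering)
	// example.com/b@v1.2.0
}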
|
||||
// Safe encodings
|
||||
//
|
||||
// Module paths appear as substrings of file system paths
|
||||
// (in the download cache) and of web server URLs in the proxy protocol.
|
||||
// In general we cannot rely on file systems to be case-sensitive,
|
||||
// nor can we rely on web servers, since they read from file systems.
|
||||
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
|
||||
// and rsc.io/quote separate. Windows and macOS don't.
|
||||
// Instead, we must never require two different casings of a file path.
|
||||
// Because we want the download cache to match the proxy protocol,
|
||||
// and because we want the proxy protocol to be possible to serve
|
||||
// from a tree of static files (which might be stored on a case-insensitive
|
||||
// file system), the proxy protocol must never require two different casings
|
||||
// of a URL path either.
|
||||
//
|
||||
// One possibility would be to make the safe encoding be the lowercase
|
||||
// hexadecimal encoding of the actual path bytes. This would avoid ever
|
||||
// needing different casings of a file path, but it would be fairly illegible
|
||||
// to most programmers when those paths appeared in the file system
|
||||
// (including in file paths in compiler errors and stack traces)
|
||||
// in web server logs, and so on. Instead, we want a safe encoding that
|
||||
// leaves most paths unaltered.
|
||||
//
|
||||
// The safe encoding is this:
|
||||
// replace every uppercase letter with an exclamation mark
|
||||
// followed by the letter's lowercase equivalent.
|
||||
//
|
||||
// For example,
|
||||
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
|
||||
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
|
||||
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
|
||||
//
|
||||
// Import paths that avoid upper-case letters are left unchanged.
|
||||
// Note that because import paths are ASCII-only and avoid various
|
||||
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
|
||||
// and avoids the same problematic punctuation.
|
||||
//
|
||||
// Import paths have never allowed exclamation marks, so there is no
|
||||
// need to define how to encode a literal !.
|
||||
//
|
||||
// Although paths are disallowed from using Unicode (see pathOK above),
|
||||
// the eventual plan is to allow Unicode letters as well, to assume that
|
||||
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
|
||||
// the !-for-uppercase convention. Note however that not all runes that
|
||||
// are different but case-fold equivalent are an upper/lower pair.
|
||||
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
|
||||
// are considered to case-fold to each other. When we do add Unicode
|
||||
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
|
||||
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
|
||||
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
|
||||
//
|
||||
// Also, it would be nice to allow Unicode marks as well as letters,
|
||||
// but marks include combining marks, and then we must deal not
|
||||
// only with case folding but also normalization: both U+00E9 ('é')
|
||||
// and U+0065 U+0301 ('e' followed by combining acute accent)
|
||||
// look the same on the page and are treated by some file systems
|
||||
// as the same path. If we do allow Unicode marks in paths, there
|
||||
// must be some kind of normalization to allow only one canonical
|
||||
// encoding of any character used in an import path.
|
||||
|
||||
// EncodePath returns the safe encoding of the given module path.
|
||||
// EscapePath returns the escaped form of the given module path.
|
||||
// It fails if the module path is invalid.
|
||||
func EncodePath(path string) (encoding string, err error) {
|
||||
func EscapePath(path string) (escaped string, err error) {
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return encodeString(path)
|
||||
return escapeString(path)
|
||||
}
|
||||
|
||||
// EncodeVersion returns the safe encoding of the given module version.
|
||||
// EscapeVersion returns the escaped form of the given module version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func EncodeVersion(v string) (encoding string, err error) {
|
||||
func EscapeVersion(v string) (escaped string, err error) {
|
||||
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
return "", &InvalidVersionError{
|
||||
Version: v,
|
||||
Err: fmt.Errorf("disallowed version string"),
|
||||
}
|
||||
}
|
||||
return encodeString(v)
|
||||
return escapeString(v)
|
||||
}
|
||||
|
||||
func encodeString(s string) (encoding string, err error) {
|
||||
func escapeString(s string) (escaped string, err error) {
|
||||
haveUpper := false
|
||||
for _, r := range s {
|
||||
if r == '!' || r >= utf8.RuneSelf {
|
||||
// This should be disallowed by CheckPath, but diagnose anyway.
|
||||
// The correctness of the encoding loop below depends on it.
|
||||
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
|
||||
// The correctness of the escaping loop below depends on it.
|
||||
return "", fmt.Errorf("internal error: inconsistency in EscapePath")
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
haveUpper = true
|
||||
@@ -480,39 +658,39 @@ func encodeString(s string) (encoding string, err error) {
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// DecodePath returns the module path of the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid path.
|
||||
func DecodePath(encoding string) (path string, err error) {
|
||||
path, ok := decodeString(encoding)
|
||||
// UnescapePath returns the module path for the given escaped path.
|
||||
// It fails if the escaped path is invalid or describes an invalid path.
|
||||
func UnescapePath(escaped string) (path string, err error) {
|
||||
path, ok := unescapeString(escaped)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid module path encoding %q", encoding)
|
||||
return "", fmt.Errorf("invalid escaped module path %q", escaped)
|
||||
}
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
|
||||
return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// DecodeVersion returns the version string for the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid version.
|
||||
// UnescapeVersion returns the version string for the given escaped version.
|
||||
// It fails if the escaped form is invalid or describes an invalid version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func DecodeVersion(encoding string) (v string, err error) {
|
||||
v, ok := decodeString(encoding)
|
||||
func UnescapeVersion(escaped string) (v string, err error) {
|
||||
v, ok := unescapeString(escaped)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid version encoding %q", encoding)
|
||||
return "", fmt.Errorf("invalid escaped version %q", escaped)
|
||||
}
|
||||
if err := checkElem(v, true); err != nil {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func decodeString(encoding string) (string, bool) {
|
||||
func unescapeString(escaped string) (string, bool) {
|
||||
var buf []byte
|
||||
|
||||
bang := false
|
||||
for _, r := range encoding {
|
||||
for _, r := range escaped {
|
||||
if r >= utf8.RuneSelf {
|
||||
return "", false
|
||||
}
|
||||
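The Encode*/Decode* helpers above are renamed to Escape*/Unescape*. A round-trip sketch follows; the module path is one of the examples from the package documentation and the version string is arbitrary.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
	if err != nil {
		panic(err)
	}
	fmt.Println(esc) // github.com/!azure/azure-sdk-for-go

	path, err := module.UnescapePath(esc)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // github.com/Azure/azure-sdk-for-go

	// Versions escape the same way; uppercase letters become !-prefixed lowercase.
	v, _ := module.EscapeVersion("v1.2.3-RC1")
	fmt.Println(v) // v1.2.3-!r!c1
}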
@@ -107,7 +107,7 @@ func Build(v string) string {
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing two versions according to
|
||||
// according to semantic version precedence.
|
||||
// semantic version precedence.
|
||||
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
|
||||
//
|
||||
// An invalid semantic version string is considered less than a valid one.
|
||||
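The corrected doc comment describes Compare's semantics. A quick sketch using the importable golang.org/x/mod/semver package, which exposes the same API as the copy patched here; the version strings are arbitrary.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Compare("v1.2.0", "v1.10.0"))              // -1: semantic, not lexical, ordering
	fmt.Println(semver.Compare("v2.0.0", "v2.0.0+incompatible"))  // 0: build metadata is ignored
	fmt.Println(semver.Compare("garbage", "v0.0.1"))              // -1: invalid versions sort first
	fmt.Println(semver.IsValid("1.0.0"))                          // false: the leading "v" is required
}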
@@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) {
|
||||
i := 1
|
||||
start := 1
|
||||
for i < len(v) {
|
||||
if !isIdentChar(v[i]) {
|
||||
if !isIdentChar(v[i]) && v[i] != '.' {
|
||||
return
|
||||
}
|
||||
if v[i] == '.' {
|
||||
5 vendor/golang.org/x/tools/go/ast/astutil/imports.go generated vendored
@@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
|
||||
|
||||
// We deleted an entry but now there may be
|
||||
// a blank line-sized hole where the import was.
|
||||
if line-lastLine > 1 {
|
||||
if line-lastLine > 1 || !gen.Rparen.IsValid() {
|
||||
// There was a blank line immediately preceding the deleted import,
|
||||
// so there's no need to close the hole.
|
||||
// so there's no need to close the hole. The right parenthesis is
|
||||
// invalid after AddImport to an import statement without parenthesis.
|
||||
// Do nothing.
|
||||
} else if line != fset.File(gen.Rparen).LineCount() {
|
||||
// There was no blank line. Close the hole.
|
||||
|
||||
2 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go generated vendored
@@ -100,7 +100,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
|
||||
// Write writes encoded type information for the specified package to out.
|
||||
// The FileSet provides file position information for named objects.
|
||||
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
||||
b, err := gcimporter.BExportData(fset, pkg)
|
||||
b, err := gcimporter.IExportData(fset, pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
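The change above switches gcexportdata.Write from the binary (BExportData) to the indexed (IExportData) format; the public API is unchanged. A hedged sketch of that API, type-checking a tiny inline source file and serializing the resulting package (the package name and source are made-up):

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	const src = `package demo

func Add(a, b int) int { return a + b }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	// Write now emits indexed export data; gcexportdata.Read understands both
	// formats, so callers of either side need no changes.
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
		panic(err)
	}
	fmt.Printf("export data: %d bytes\n", buf.Len())
}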
11 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go generated vendored
@@ -332,7 +332,7 @@ func (p *importer) pos() token.Pos {
|
||||
p.prevFile = file
|
||||
p.prevLine = line
|
||||
|
||||
return p.fake.pos(file, line)
|
||||
return p.fake.pos(file, line, 0)
|
||||
}
|
||||
|
||||
// Synthesize a token.Pos
|
||||
@@ -341,7 +341,9 @@ type fakeFileSet struct {
|
||||
files map[string]*token.File
|
||||
}
|
||||
|
||||
func (s *fakeFileSet) pos(file string, line int) token.Pos {
|
||||
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
|
||||
// TODO(mdempsky): Make use of column.
|
||||
|
||||
// Since we don't know the set of needed file positions, we
|
||||
// reserve maxlines positions per file.
|
||||
const maxlines = 64 * 1024
|
||||
@@ -976,10 +978,11 @@ const (
|
||||
aliasTag
|
||||
)
|
||||
|
||||
var predeclOnce sync.Once
|
||||
var predecl []types.Type // initialized lazily
|
||||
|
||||
func predeclared() []types.Type {
|
||||
if predecl == nil {
|
||||
predeclOnce.Do(func() {
|
||||
// initialize lazily to be sure that all
|
||||
// elements have been initialized before
|
||||
predecl = []types.Type{ // basic types
|
||||
@@ -1026,7 +1029,7 @@ func predeclared() []types.Type {
|
||||
// used internally by gc; never used by this package or in .a files
|
||||
anyType{},
|
||||
}
|
||||
}
|
||||
})
|
||||
return predecl
|
||||
}
|
||||
|
||||
|
||||
8 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go generated vendored
@@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) {
|
||||
|
||||
// PackageId = string_lit .
|
||||
//
|
||||
func (p *parser) parsePackageId() string {
|
||||
func (p *parser) parsePackageID() string {
|
||||
id, err := strconv.Unquote(p.expect(scanner.String))
|
||||
if err != nil {
|
||||
p.error(err)
|
||||
@@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string {
|
||||
//
|
||||
func (p *parser) parseQualifiedName() (id, name string) {
|
||||
p.expect('@')
|
||||
id = p.parsePackageId()
|
||||
id = p.parsePackageID()
|
||||
p.expect('.')
|
||||
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
|
||||
if p.tok == '?' {
|
||||
@@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
|
||||
|
||||
// Complete requires the type's embedded interfaces to be fully defined,
|
||||
// but we do not define any
|
||||
return types.NewInterface(methods, nil).Complete()
|
||||
return newInterface(methods, nil).Complete()
|
||||
}
|
||||
|
||||
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
|
||||
@@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type {
|
||||
func (p *parser) parseImportDecl() {
|
||||
p.expectKeyword("import")
|
||||
name := p.parsePackageName()
|
||||
p.getPkg(p.parsePackageId(), name)
|
||||
p.getPkg(p.parsePackageID(), name)
|
||||
}
|
||||
|
||||
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
|
||||
|
||||
34 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go generated vendored
@@ -6,8 +6,6 @@
|
||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||
// see that file for specification of the format.
|
||||
|
||||
// +build go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
@@ -28,7 +26,10 @@ import (
|
||||
const iexportVersion = 0
|
||||
|
||||
// IExportData returns the binary export data for pkg.
|
||||
//
|
||||
// If no file set is provided, position info will be missing.
|
||||
// The package path of the top-level package will not be recorded,
|
||||
// so that calls to IImportData can override with a provided package path.
|
||||
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
@@ -48,6 +49,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
||||
stringIndex: map[string]uint64{},
|
||||
declIndex: map[types.Object]uint64{},
|
||||
typIndex: map[types.Type]uint64{},
|
||||
localpkg: pkg,
|
||||
}
|
||||
|
||||
for i, pt := range predeclared() {
|
||||
@@ -73,7 +75,7 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
||||
// Append indices to data0 section.
|
||||
dataLen := uint64(p.data0.Len())
|
||||
w := p.newWriter()
|
||||
w.writeIndex(p.declIndex, pkg)
|
||||
w.writeIndex(p.declIndex)
|
||||
w.flush()
|
||||
|
||||
// Assemble header.
|
||||
@@ -95,14 +97,14 @@ func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
|
||||
// we're writing out the main index, which is also read by
|
||||
// non-compiler tools and includes a complete package description
|
||||
// (i.e., name and height).
|
||||
func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
|
||||
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
|
||||
// Build a map from packages to objects from that package.
|
||||
pkgObjs := map[*types.Package][]types.Object{}
|
||||
|
||||
// For the main index, make sure to include every package that
|
||||
// we reference, even if we're not exporting (or reexporting)
|
||||
// any symbols from it.
|
||||
pkgObjs[localpkg] = nil
|
||||
pkgObjs[w.p.localpkg] = nil
|
||||
for pkg := range w.p.allPkgs {
|
||||
pkgObjs[pkg] = nil
|
||||
}
|
||||
@@ -121,12 +123,12 @@ func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types
|
||||
}
|
||||
|
||||
sort.Slice(pkgs, func(i, j int) bool {
|
||||
return pkgs[i].Path() < pkgs[j].Path()
|
||||
return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
|
||||
})
|
||||
|
||||
w.uint64(uint64(len(pkgs)))
|
||||
for _, pkg := range pkgs {
|
||||
w.string(pkg.Path())
|
||||
w.string(w.exportPath(pkg))
|
||||
w.string(pkg.Name())
|
||||
w.uint64(uint64(0)) // package height is not needed for go/types
|
||||
|
||||
@@ -143,6 +145,8 @@ type iexporter struct {
|
||||
fset *token.FileSet
|
||||
out *bytes.Buffer
|
||||
|
||||
localpkg *types.Package
|
||||
|
||||
// allPkgs tracks all packages that have been referenced by
|
||||
// the export data, so we can ensure to include them in the
|
||||
// main index.
|
||||
@@ -195,6 +199,13 @@ type exportWriter struct {
|
||||
prevLine int64
|
||||
}
|
||||
|
||||
func (w *exportWriter) exportPath(pkg *types.Package) string {
|
||||
if pkg == w.p.localpkg {
|
||||
return ""
|
||||
}
|
||||
return pkg.Path()
|
||||
}
|
||||
|
||||
func (p *iexporter) doDecl(obj types.Object) {
|
||||
w := p.newWriter()
|
||||
w.setPkg(obj.Pkg(), false)
|
||||
@@ -267,6 +278,11 @@ func (w *exportWriter) tag(tag byte) {
|
||||
}
|
||||
|
||||
func (w *exportWriter) pos(pos token.Pos) {
|
||||
if w.p.fset == nil {
|
||||
w.int64(0)
|
||||
return
|
||||
}
|
||||
|
||||
p := w.p.fset.Position(pos)
|
||||
file := p.Filename
|
||||
line := int64(p.Line)
|
||||
@@ -299,7 +315,7 @@ func (w *exportWriter) pkg(pkg *types.Package) {
|
||||
// Ensure any referenced packages are declared in the main index.
|
||||
w.p.allPkgs[pkg] = true
|
||||
|
||||
w.string(pkg.Path())
|
||||
w.string(w.exportPath(pkg))
|
||||
}
|
||||
|
||||
func (w *exportWriter) qualifiedIdent(obj types.Object) {
|
||||
@@ -394,7 +410,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
||||
w.pos(f.Pos())
|
||||
w.string(f.Name())
|
||||
w.typ(f.Type(), pkg)
|
||||
w.bool(f.Embedded())
|
||||
w.bool(f.Anonymous())
|
||||
w.string(t.Tag(i)) // note (or tag)
|
||||
}
|
||||
|
||||
|
||||
68 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go generated vendored
@@ -63,8 +63,8 @@ const (
|
||||
// If the export data version is not recognized or the format is otherwise
|
||||
// compromised, an error is returned.
|
||||
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||
const currentVersion = 0
|
||||
version := -1
|
||||
const currentVersion = 1
|
||||
version := int64(-1)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if version > currentVersion {
|
||||
@@ -77,9 +77,9 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
||||
|
||||
r := &intReader{bytes.NewReader(data), path}
|
||||
|
||||
version = int(r.uint64())
|
||||
version = int64(r.uint64())
|
||||
switch version {
|
||||
case currentVersion:
|
||||
case currentVersion, 0:
|
||||
default:
|
||||
errorf("unknown iexport format version %d", version)
|
||||
}
|
||||
@@ -93,7 +93,8 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
||||
r.Seek(sLen+dLen, io.SeekCurrent)
|
||||
|
||||
p := iimporter{
|
||||
ipath: path,
|
||||
ipath: path,
|
||||
version: int(version),
|
||||
|
||||
stringData: stringData,
|
||||
stringCache: make(map[uint64]string),
|
||||
@@ -142,20 +143,18 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
||||
p.pkgIndex[pkg] = nameIndex
|
||||
pkgList[i] = pkg
|
||||
}
|
||||
var localpkg *types.Package
|
||||
for _, pkg := range pkgList {
|
||||
if pkg.Path() == path {
|
||||
localpkg = pkg
|
||||
}
|
||||
if len(pkgList) == 0 {
|
||||
errorf("no packages found for %s", path)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
names := make([]string, 0, len(p.pkgIndex[localpkg]))
|
||||
for name := range p.pkgIndex[localpkg] {
|
||||
p.ipkg = pkgList[0]
|
||||
names := make([]string, 0, len(p.pkgIndex[p.ipkg]))
|
||||
for name := range p.pkgIndex[p.ipkg] {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
p.doDecl(localpkg, name)
|
||||
p.doDecl(p.ipkg, name)
|
||||
}
|
||||
|
||||
for _, typ := range p.interfaceList {
|
||||
@@ -165,17 +164,19 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
||||
// record all referenced packages as imports
|
||||
list := append(([]*types.Package)(nil), pkgList[1:]...)
|
||||
sort.Sort(byPath(list))
|
||||
localpkg.SetImports(list)
|
||||
p.ipkg.SetImports(list)
|
||||
|
||||
// package was imported completely and without errors
|
||||
localpkg.MarkComplete()
|
||||
p.ipkg.MarkComplete()
|
||||
|
||||
consumed, _ := r.Seek(0, io.SeekCurrent)
|
||||
return int(consumed), localpkg, nil
|
||||
return int(consumed), p.ipkg, nil
|
||||
}
|
||||
|
||||
type iimporter struct {
|
||||
ipath string
|
||||
ipath string
|
||||
ipkg *types.Package
|
||||
version int
|
||||
|
||||
stringData []byte
|
||||
stringCache map[uint64]string
|
||||
@@ -226,6 +227,9 @@ func (p *iimporter) pkgAt(off uint64) *types.Package {
|
||||
return pkg
|
||||
}
|
||||
path := p.stringAt(off)
|
||||
if path == p.ipath {
|
||||
return p.ipkg
|
||||
}
|
||||
errorf("missing package %q in %q", path, p.ipath)
|
||||
return nil
|
||||
}
|
||||
@@ -255,6 +259,7 @@ type importReader struct {
|
||||
currPkg *types.Package
|
||||
prevFile string
|
||||
prevLine int64
|
||||
prevColumn int64
|
||||
}
|
||||
|
||||
func (r *importReader) obj(name string) {
|
||||
@@ -448,6 +453,19 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
|
||||
}
|
||||
|
||||
func (r *importReader) pos() token.Pos {
|
||||
if r.p.version >= 1 {
|
||||
r.posv1()
|
||||
} else {
|
||||
r.posv0()
|
||||
}
|
||||
|
||||
if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
|
||||
return token.NoPos
|
||||
}
|
||||
return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
|
||||
}
|
||||
|
||||
func (r *importReader) posv0() {
|
||||
delta := r.int64()
|
||||
if delta != deltaNewFile {
|
||||
r.prevLine += delta
|
||||
@@ -457,12 +475,18 @@ func (r *importReader) pos() token.Pos {
|
||||
r.prevFile = r.string()
|
||||
r.prevLine = l
|
||||
}
|
||||
}
|
||||
|
||||
if r.prevFile == "" && r.prevLine == 0 {
|
||||
return token.NoPos
|
||||
func (r *importReader) posv1() {
|
||||
delta := r.int64()
|
||||
r.prevColumn += delta >> 1
|
||||
if delta&1 != 0 {
|
||||
delta = r.int64()
|
||||
r.prevLine += delta >> 1
|
||||
if delta&1 != 0 {
|
||||
r.prevFile = r.string()
|
||||
}
|
||||
}
|
||||
|
||||
return r.p.fake.pos(r.prevFile, int(r.prevLine))
|
||||
}
|
||||
|
||||
func (r *importReader) typ() types.Type {
|
||||
|
||||
116 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go generated vendored
@@ -11,11 +11,10 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
var debug = false
|
||||
@@ -78,83 +77,42 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp
|
||||
}
|
||||
|
||||
func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
|
||||
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
|
||||
args = append(args, buildFlags...)
|
||||
args = append(args, "--", "unsafe")
|
||||
stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
inv := gocommand.Invocation{
|
||||
Verb: "list",
|
||||
Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"},
|
||||
Env: env,
|
||||
BuildFlags: buildFlags,
|
||||
WorkingDir: dir,
|
||||
}
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
|
||||
stdout, stderr, friendlyErr, rawErr := inv.RunRaw(ctx)
|
||||
var goarch, compiler string
|
||||
if rawErr != nil {
|
||||
if strings.Contains(rawErr.Error(), "cannot find main module") {
|
||||
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
inv := gocommand.Invocation{
|
||||
Verb: "env",
|
||||
Args: []string{"GOARCH"},
|
||||
Env: env,
|
||||
WorkingDir: dir,
|
||||
}
|
||||
envout, enverr := inv.Run(ctx)
|
||||
if enverr != nil {
|
||||
return nil, enverr
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else {
|
||||
return nil, friendlyErr
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
goarch := fields[0]
|
||||
compiler := fields[1]
|
||||
return types.SizesFor(compiler, goarch), nil
|
||||
}
|
||||
|
||||
// InvokeGo returns the stdout of a go command invocation.
|
||||
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
|
||||
if debug {
|
||||
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||
}
|
||||
stdout := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(ctx, "go", args...)
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
cmd.Env = append(append([]string{}, env...), "PWD="+dir)
|
||||
cmd.Dir = dir
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
exitErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
// Catastrophic error:
|
||||
// - executable not found
|
||||
// - context cancellation
|
||||
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
}
|
||||
|
||||
// Export mode entails a build.
|
||||
// If that build fails, errors appear on stderr
|
||||
// (despite the -e flag) and the Export field is blank.
|
||||
// Do not fail in that case.
|
||||
if !usesExportData {
|
||||
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
// As of writing, go list -export prints some non-fatal compilation
|
||||
// errors to stderr, even with -e set. We would prefer that it put
|
||||
// them in the Package.Error JSON (see https://golang.org/issue/26319).
|
||||
// In the meantime, there's nowhere good to put them, but they can
|
||||
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
|
||||
// is set.
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
|
||||
}
|
||||
|
||||
// debugging
|
||||
if false {
|
||||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
func cmdDebugStr(envlist []string, args ...string) string {
|
||||
env := make(map[string]string)
|
||||
for _, kv := range envlist {
|
||||
split := strings.Split(kv, "=")
|
||||
k, v := split[0], split[1]
|
||||
env[k] = v
|
||||
}
|
||||
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
|
||||
}
|
||||
|
||||
3 vendor/golang.org/x/tools/go/packages/doc.go generated vendored
@@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information.
|
||||
See the documentation for type Config for details.
|
||||
|
||||
As noted earlier, the Config.Mode controls the amount of detail
|
||||
reported about the loaded packages, with each mode returning all the data of the
|
||||
previous mode with some extra added. See the documentation for type LoadMode
|
||||
reported about the loaded packages. See the documentation for type LoadMode
|
||||
for details.
|
||||
|
||||
Most tools should pass their command-line arguments (after any flags)
|
||||
|
||||
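For context on the LoadMode bits referenced above, here is a minimal sketch of driving go/packages from a tool; the pattern "./..." is an arbitrary example and the chosen mode bits are only illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Each Need* bit requests a specific slice of data; combining bits
		// replaces the older cumulative LoadFiles/LoadSyntax style of modes.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedTypes,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, len(p.GoFiles), "files")
	}
}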
38 vendor/golang.org/x/tools/go/packages/external.go generated vendored
@@ -12,18 +12,34 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Driver
|
||||
// The Driver Protocol
|
||||
//
|
||||
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
|
||||
// This allows for different build systems to support go/packages by telling go/packages how the
|
||||
// packages' source is organized.
|
||||
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
|
||||
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
|
||||
// documentation in doc.go for the full description of the patterns that need to be supported.
|
||||
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
|
||||
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
|
||||
|
||||
// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
|
||||
type driverRequest struct {
|
||||
Command string `json:"command"`
|
||||
Mode LoadMode `json:"mode"`
|
||||
Env []string `json:"env"`
|
||||
BuildFlags []string `json:"build_flags"`
|
||||
Tests bool `json:"tests"`
|
||||
Overlay map[string][]byte `json:"overlay"`
|
||||
Mode LoadMode `json:"mode"`
|
||||
// Env specifies the environment the underlying build system should be run in.
|
||||
Env []string `json:"env"`
|
||||
// BuildFlags are flags that should be passed to the underlying build system.
|
||||
BuildFlags []string `json:"build_flags"`
|
||||
// Tests specifies whether the patterns should also return test packages.
|
||||
Tests bool `json:"tests"`
|
||||
// Overlay maps file paths (relative to the driver's working directory) to the byte contents
|
||||
// of overlay files.
|
||||
Overlay map[string][]byte `json:"overlay"`
|
||||
}
|
||||
|
||||
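A hedged sketch of what an external driver built against this protocol might look like: it mirrors the (unexported) request and response shapes with local structs, reads the request from standard input, and answers on standard output. The field names follow the JSON tags shown above; the response shape is assumed from driverResponse in packages.go, and a real driver would fill it in from its build system instead of returning an empty stub.

package main

import (
	"encoding/json"
	"os"
)

// Local mirrors of the driver protocol types; go/packages matches them by JSON shape.
type driverRequest struct {
	Mode       int               `json:"mode"`
	Env        []string          `json:"env"`
	BuildFlags []string          `json:"build_flags"`
	Tests      bool              `json:"tests"`
	Overlay    map[string][]byte `json:"overlay"`
}

type driverResponse struct {
	Roots    []string      `json:"Roots,omitempty"`
	Packages []interface{} `json:"Packages"`
}

func main() {
	var req driverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}

	// The patterns to resolve arrive as command-line arguments.
	_ = os.Args[1:]

	// A real driver would query its build system here and populate Roots plus
	// full Package records; this stub reports an empty result.
	resp := driverResponse{Packages: []interface{}{}}
	_ = json.NewEncoder(os.Stdout).Encode(resp)
}

Installed as a binary named gopackagesdriver on PATH, or pointed to by the GOPACKAGESDRIVER environment variable, such a driver is invoked by go/packages in place of the go list implementation.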
// findExternalDriver returns the file path of a tool that supplies
|
||||
@@ -61,15 +77,21 @@ func findExternalDriver(cfg *Config) driver {
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
||||
cmd.Dir = cfg.Dir
|
||||
cmd.Env = cfg.Env
|
||||
cmd.Stdin = bytes.NewReader(req)
|
||||
cmd.Stdout = buf
|
||||
cmd.Stderr = new(bytes.Buffer)
|
||||
cmd.Stderr = stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||
}
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
var response driverResponse
|
||||
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||
return nil, err
|
||||
|
||||
754 vendor/golang.org/x/tools/go/packages/golist.go generated vendored
@@ -6,24 +6,25 @@ package packages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/internal/packagesdriver"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/semver"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
)
|
||||
|
||||
// debug controls verbose logging.
|
||||
@@ -42,16 +43,21 @@ type responseDeduper struct {
|
||||
dr *driverResponse
|
||||
}
|
||||
|
||||
// init fills in r with a driverResponse.
|
||||
func (r *responseDeduper) init(dr *driverResponse) {
|
||||
r.dr = dr
|
||||
r.seenRoots = map[string]bool{}
|
||||
r.seenPackages = map[string]*Package{}
|
||||
func newDeduper() *responseDeduper {
|
||||
return &responseDeduper{
|
||||
dr: &driverResponse{},
|
||||
seenRoots: map[string]bool{},
|
||||
seenPackages: map[string]*Package{},
|
||||
}
|
||||
}
|
||||
|
||||
// addAll fills in r with a driverResponse.
|
||||
func (r *responseDeduper) addAll(dr *driverResponse) {
|
||||
for _, pkg := range dr.Packages {
|
||||
r.seenPackages[pkg.ID] = pkg
|
||||
r.addPackage(pkg)
|
||||
}
|
||||
for _, root := range dr.Roots {
|
||||
r.seenRoots[root] = true
|
||||
r.addRoot(root)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,24 +77,86 @@ func (r *responseDeduper) addRoot(id string) {
|
||||
r.dr.Roots = append(r.dr.Roots, id)
|
||||
}
|
||||
|
||||
type golistState struct {
cfg *Config
ctx context.Context

envOnce sync.Once
goEnvError error
goEnv map[string]string

rootsOnce sync.Once
rootDirsError error
rootDirs map[string]string

// vendorDirs caches the (non)existence of vendor directories.
vendorDirs map[string]bool
}

// getEnv returns Go environment variables. Only specific variables are
// populated -- computing all of them is slow.
func (state *golistState) getEnv() (map[string]string, error) {
state.envOnce.Do(func() {
var b *bytes.Buffer
b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
if state.goEnvError != nil {
return
}

state.goEnv = make(map[string]string)
decoder := json.NewDecoder(b)
if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
return
}
})
return state.goEnv, state.goEnvError
}

// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
func (state *golistState) mustGetEnv() map[string]string {
env, err := state.getEnv()
if err != nil {
panic(fmt.Sprintf("mustGetEnv: %v", err))
}
return env
}

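The getEnv helper above caches a single `go env -json` call and decodes the result into a map. As a standalone illustration of that pattern, here is a minimal sketch that shells out to the go tool directly instead of going through the vendored invokeGo helper:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	// Ask the go command for only the variables we need; -json keeps decoding trivial.
	out, err := exec.Command("go", "env", "-json", "GOMOD", "GOPATH").Output()
	if err != nil {
		panic(err)
	}
	env := make(map[string]string)
	if err := json.Unmarshal(out, &env); err != nil {
		panic(err)
	}
	// env["GOMOD"] is empty outside module mode, which is exactly what the
	// callers of getEnv (resolveImport, determineRootDirs) check for.
	fmt.Println("GOMOD:", env["GOMOD"], "GOPATH:", env["GOPATH"])
}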
// goListDriver uses the go list command to interpret the patterns and produce
|
||||
// the build system package structure.
|
||||
// See driver for more details.
|
||||
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
var sizes types.Sizes
|
||||
// Make sure that any asynchronous go commands are killed when we return.
|
||||
parentCtx := cfg.Context
|
||||
if parentCtx == nil {
|
||||
parentCtx = context.Background()
|
||||
}
|
||||
ctx, cancel := context.WithCancel(parentCtx)
|
||||
defer cancel()
|
||||
|
||||
response := newDeduper()
|
||||
|
||||
// Fill in response.Sizes asynchronously if necessary.
|
||||
var sizeserr error
|
||||
var sizeswg sync.WaitGroup
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
sizeswg.Add(1)
|
||||
go func() {
|
||||
sizes, sizeserr = getSizes(cfg)
|
||||
var sizes types.Sizes
|
||||
sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
|
||||
// types.SizesFor always returns nil or a *types.StdSizes.
|
||||
response.dr.Sizes, _ = sizes.(*types.StdSizes)
|
||||
sizeswg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
state := &golistState{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
vendorDirs: map[string]bool{},
|
||||
}
|
||||
|
||||
// Determine files requested in contains patterns
|
||||
var containFiles []string
|
||||
var packagesNamed []string
|
||||
restPatterns := make([]string, 0, len(patterns))
|
||||
// Extract file= and other [querytype]= patterns. Report an error if querytype
|
||||
// doesn't exist.
|
||||
@@ -104,8 +172,6 @@ extractQueries:
|
||||
containFiles = append(containFiles, value)
|
||||
case "pattern":
|
||||
restPatterns = append(restPatterns, value)
|
||||
case "iamashamedtousethedisabledqueryname":
|
||||
packagesNamed = append(packagesNamed, value)
|
||||
case "": // not a reserved query
|
||||
restPatterns = append(restPatterns, pattern)
|
||||
default:
|
||||
@@ -121,58 +187,52 @@ extractQueries:
|
||||
}
|
||||
}
|
||||
|
||||
response := &responseDeduper{}
|
||||
var err error
|
||||
|
||||
// See if we have any patterns to pass through to go list. Zero initial
|
||||
// patterns also requires a go list call, since it's the equivalent of
|
||||
// ".".
|
||||
if len(restPatterns) > 0 || len(patterns) == 0 {
|
||||
dr, err := golistDriver(cfg, restPatterns...)
|
||||
dr, err := state.createDriverResponse(restPatterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.init(dr)
|
||||
} else {
|
||||
response.init(&driverResponse{})
|
||||
response.addAll(dr)
|
||||
}
|
||||
|
||||
sizeswg.Wait()
|
||||
if sizeserr != nil {
|
||||
return nil, sizeserr
|
||||
}
|
||||
// types.SizesFor always returns nil or a *types.StdSizes
|
||||
response.dr.Sizes, _ = sizes.(*types.StdSizes)
|
||||
|
||||
var containsCandidates []string
|
||||
|
||||
if len(containFiles) != 0 {
|
||||
if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil {
|
||||
if err := state.runContainsQueries(response, containFiles); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(packagesNamed) != 0 {
|
||||
if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
|
||||
modifiedPkgs, needPkgs, err := state.processGolistOverlay(response)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var containsCandidates []string
|
||||
if len(containFiles) > 0 {
|
||||
containsCandidates = append(containsCandidates, modifiedPkgs...)
|
||||
containsCandidates = append(containsCandidates, needPkgs...)
|
||||
}
|
||||
if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil {
|
||||
if err := state.addNeededOverlayPackages(response, needPkgs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Check candidate packages for containFiles.
|
||||
if len(containFiles) > 0 {
|
||||
for _, id := range containsCandidates {
|
||||
pkg := response.seenPackages[id]
|
||||
pkg, ok := response.seenPackages[id]
|
||||
if !ok {
|
||||
response.addPackage(&Package{
|
||||
ID: id,
|
||||
Errors: []Error{
|
||||
{
|
||||
Kind: ListError,
|
||||
Msg: fmt.Sprintf("package %s expected but not seen", id),
|
||||
},
|
||||
},
|
||||
})
|
||||
continue
|
||||
}
|
||||
for _, f := range containFiles {
|
||||
for _, g := range pkg.GoFiles {
|
||||
if sameFile(f, g) {
|
||||
@@ -183,29 +243,32 @@ extractQueries:
|
||||
}
|
||||
}
|
||||
|
||||
sizeswg.Wait()
|
||||
if sizeserr != nil {
|
||||
return nil, sizeserr
|
||||
}
|
||||
return response.dr, nil
|
||||
}
|
||||
|
||||
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
|
||||
func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error {
|
||||
if len(pkgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
dr, err := driver(cfg, pkgs...)
|
||||
dr, err := state.createDriverResponse(pkgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, pkg := range dr.Packages {
|
||||
response.addPackage(pkg)
|
||||
}
|
||||
_, needPkgs, err := processGolistOverlay(cfg, response.dr)
|
||||
_, needPkgs, err := state.processGolistOverlay(response)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
addNeededOverlayPackages(cfg, driver, response, needPkgs)
|
||||
return nil
|
||||
return state.addNeededOverlayPackages(response, needPkgs)
|
||||
}
|
||||
|
||||
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||
func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
|
||||
for _, query := range queries {
|
||||
// TODO(matloob): Do only one query per directory.
|
||||
fdir := filepath.Dir(query)
|
||||
@@ -215,9 +278,18 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
|
||||
}
|
||||
dirResponse, err := driver(cfg, pattern)
|
||||
if err != nil {
|
||||
return err
|
||||
dirResponse, err := state.createDriverResponse(pattern)
|
||||
|
||||
// If there was an error loading the package, or the package is returned
|
||||
// with errors, try to load the file as an ad-hoc package.
|
||||
// Usually the error will appear in a returned package, but may not if we're
|
||||
// in module mode and the ad-hoc is located outside a module.
|
||||
if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
|
||||
len(dirResponse.Packages[0].Errors) == 1 {
|
||||
var queryErr error
|
||||
if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
|
||||
return err // return the original error
|
||||
}
|
||||
}
|
||||
isRoot := make(map[string]bool, len(dirResponse.Roots))
|
||||
for _, root := range dirResponse.Roots {
|
||||
@@ -244,262 +316,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
||||
return nil
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||
|
||||
func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
|
||||
// calling `go env` isn't free; bail out if there's nothing to do.
|
||||
if len(queries) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Determine which directories are relevant to scan.
|
||||
roots, modRoot, err := roots(cfg)
|
||||
// adhocPackage attempts to load or construct an ad-hoc package for a given
|
||||
// query, if the original call to the driver produced inadequate results.
|
||||
func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
|
||||
response, err := state.createDriverResponse(query)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
|
||||
// or the local module, can simply be "go list"ed. Matches from the
|
||||
// module cache need special treatment.
|
||||
var matchesMu sync.Mutex
|
||||
var simpleMatches, modCacheMatches []string
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
// Walk calls this concurrently; protect the result slices.
|
||||
matchesMu.Lock()
|
||||
defer matchesMu.Unlock()
|
||||
|
||||
path := dir
|
||||
if dir != root.Path {
|
||||
path = dir[len(root.Path)+1:]
|
||||
}
|
||||
if pathMatchesQueries(path, queries) {
|
||||
switch root.Type {
|
||||
case gopathwalk.RootModuleCache:
|
||||
modCacheMatches = append(modCacheMatches, path)
|
||||
case gopathwalk.RootCurrentModule:
|
||||
// We'd need to read go.mod to find the full
|
||||
// import path. Relative's easier.
|
||||
rel, err := filepath.Rel(cfg.Dir, dir)
|
||||
if err != nil {
|
||||
// This ought to be impossible, since
|
||||
// we found dir in the current module.
|
||||
panic(err)
|
||||
}
|
||||
simpleMatches = append(simpleMatches, "./"+rel)
|
||||
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
|
||||
simpleMatches = append(simpleMatches, path)
|
||||
}
|
||||
}
|
||||
// If we get nothing back from `go list`,
|
||||
// try to make this file into its own ad-hoc package.
|
||||
// TODO(rstambler): Should this check against the original response?
|
||||
if len(response.Packages) == 0 {
|
||||
response.Packages = append(response.Packages, &Package{
|
||||
ID: "command-line-arguments",
|
||||
PkgPath: query,
|
||||
GoFiles: []string{query},
|
||||
CompiledGoFiles: []string{query},
|
||||
Imports: make(map[string]*Package),
|
||||
})
|
||||
response.Roots = append(response.Roots, "command-line-arguments")
|
||||
}
|
||||
|
||||
startWalk := time.Now()
|
||||
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
|
||||
if debug {
|
||||
log.Printf("%v for walk", time.Since(startWalk))
|
||||
}
|
||||
|
||||
// Weird special case: the top-level package in a module will be in
|
||||
// whatever directory the user checked the repository out into. It's
|
||||
// more reasonable for that to not match the package name. So, if there
|
||||
// are any Go files in the mod root, query it just to be safe.
|
||||
if modRoot != "" {
|
||||
rel, err := filepath.Rel(cfg.Dir, modRoot)
|
||||
if err != nil {
|
||||
panic(err) // See above.
|
||||
}
|
||||
|
||||
files, err := ioutil.ReadDir(modRoot)
|
||||
for _, f := range files {
|
||||
if strings.HasSuffix(f.Name(), ".go") {
|
||||
simpleMatches = append(simpleMatches, rel)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
addResponse := func(r *driverResponse) {
|
||||
for _, pkg := range r.Packages {
|
||||
response.addPackage(pkg)
|
||||
for _, name := range queries {
|
||||
if pkg.Name == name {
|
||||
response.addRoot(pkg.ID)
|
||||
break
|
||||
// Handle special cases.
|
||||
if len(response.Packages) == 1 {
|
||||
// golang/go#33482: If this is a file= query for ad-hoc packages where
|
||||
// the file only exists on an overlay, and exists outside of a module,
|
||||
// add the file to the package and remove the errors.
|
||||
if response.Packages[0].ID == "command-line-arguments" ||
|
||||
filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
|
||||
if len(response.Packages[0].GoFiles) == 0 {
|
||||
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
|
||||
// TODO(matloob): check if the file is outside of a root dir?
|
||||
for path := range state.cfg.Overlay {
|
||||
if path == filename {
|
||||
response.Packages[0].Errors = nil
|
||||
response.Packages[0].GoFiles = []string{path}
|
||||
response.Packages[0].CompiledGoFiles = []string{path}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(simpleMatches) != 0 {
|
||||
resp, err := driver(cfg, simpleMatches...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
addResponse(resp)
|
||||
}
|
||||
|
||||
// Module cache matches are tricky. We want to avoid downloading new
|
||||
// versions of things, so we need to use the ones present in the cache.
|
||||
// go list doesn't accept version specifiers, so we have to write out a
|
||||
// temporary module, and do the list in that module.
|
||||
if len(modCacheMatches) != 0 {
|
||||
// Collect all the matches, deduplicating by major version
|
||||
// and preferring the newest.
|
||||
type modInfo struct {
|
||||
mod string
|
||||
major string
|
||||
}
|
||||
mods := make(map[modInfo]string)
|
||||
var imports []string
|
||||
for _, modPath := range modCacheMatches {
|
||||
matches := modCacheRegexp.FindStringSubmatch(modPath)
|
||||
mod, ver := filepath.ToSlash(matches[1]), matches[2]
|
||||
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
|
||||
|
||||
major := semver.Major(ver)
|
||||
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
|
||||
mods[modInfo{mod, major}] = ver
|
||||
}
|
||||
|
||||
imports = append(imports, importPath)
|
||||
}
|
||||
|
||||
// Build the temporary module.
|
||||
var gomod bytes.Buffer
|
||||
gomod.WriteString("module modquery\nrequire (\n")
|
||||
for mod, version := range mods {
|
||||
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
|
||||
}
|
||||
gomod.WriteString(")\n")
|
||||
|
||||
tmpCfg := *cfg
|
||||
|
||||
// We're only trying to look at stuff in the module cache, so
|
||||
// disable the network. This should speed things up, and has
|
||||
// prevented errors in at least one case, #28518.
|
||||
tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...))
|
||||
|
||||
var err error
|
||||
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpCfg.Dir)
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
|
||||
return fmt.Errorf("writing go.mod for module cache query: %v", err)
|
||||
}
|
||||
|
||||
// Run the query, using the import paths calculated from the matches above.
|
||||
resp, err := driver(&tmpCfg, imports...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying module cache matches: %v", err)
|
||||
}
|
||||
addResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getSizes(cfg *Config) (types.Sizes, error) {
|
||||
return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
|
||||
}
|
||||
|
||||
// roots selects the appropriate paths to walk based on the passed-in configuration,
|
||||
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
|
||||
func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
|
||||
stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
fields := strings.Split(stdout.String(), "\n")
|
||||
if len(fields) != 4 || len(fields[3]) != 0 {
|
||||
return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
|
||||
}
|
||||
goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
|
||||
var modDir string
|
||||
if gomod != "" {
|
||||
modDir = filepath.Dir(gomod)
|
||||
}
|
||||
|
||||
var roots []gopathwalk.Root
|
||||
// Always add GOROOT.
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
|
||||
// If modules are enabled, scan the module dir.
|
||||
if modDir != "" {
|
||||
roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
|
||||
for _, p := range gopath {
|
||||
if modDir != "" {
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
|
||||
}
|
||||
}
|
||||
|
||||
return roots, modDir, nil
|
||||
}
|
||||
|
||||
// These functions were copied from goimports. See further documentation there.
|
||||
|
||||
// pathMatchesQueries is adapted from pkgIsCandidate.
|
||||
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
|
||||
func pathMatchesQueries(path string, queries []string) bool {
|
||||
lastTwo := lastTwoComponents(path)
|
||||
for _, query := range queries {
|
||||
if strings.Contains(lastTwo, query) {
|
||||
return true
|
||||
}
|
||||
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
|
||||
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
|
||||
if strings.Contains(lastTwo, query) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lastTwoComponents returns at most the last two path components
|
||||
// of v, using either / or \ as the path separator.
|
||||
func lastTwoComponents(v string) string {
|
||||
nslash := 0
|
||||
for i := len(v) - 1; i >= 0; i-- {
|
||||
if v[i] == '/' || v[i] == '\\' {
|
||||
nslash++
|
||||
if nslash == 2 {
|
||||
return v[i:]
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func hasHyphenOrUpperASCII(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if b == '-' || ('A' <= b && b <= 'Z') {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
|
||||
buf := make([]byte, 0, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case b == '-':
|
||||
continue
|
||||
case 'A' <= b && b <= 'Z':
|
||||
buf = append(buf, b+('a'-'A'))
|
||||
default:
|
||||
buf = append(buf, b)
|
||||
}
|
||||
}
|
||||
return string(buf)
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// Fields must match go list;
|
||||
@@ -524,6 +381,7 @@ type jsonPackage struct {
|
||||
Imports []string
|
||||
ImportMap map[string]string
|
||||
Deps []string
|
||||
Module *packagesinternal.Module
|
||||
TestGoFiles []string
|
||||
TestImports []string
|
||||
XTestGoFiles []string
|
||||
@@ -544,16 +402,15 @@ func otherFiles(p *jsonPackage) [][]string {
|
||||
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
|
||||
}
|
||||
|
||||
// golistDriver uses the "go list" command to expand the pattern
|
||||
// words and return metadata for the specified packages. dir may be
|
||||
// "" and env may be nil, as per os/exec.Command.
|
||||
func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
// createDriverResponse uses the "go list" command to expand the pattern
|
||||
// words and return a response for the specified packages.
|
||||
func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
|
||||
// go list uses the following identifiers in ImportPath and Imports:
|
||||
//
|
||||
// "p" -- importable package or main (command)
|
||||
// "q.test" -- q's test executable
|
||||
// "q.test" -- q's test executable
|
||||
// "p [q.test]" -- variant of p as built for q's test executable
|
||||
// "q_test [q.test]" -- q's external test package
|
||||
// "q_test [q.test]" -- q's external test package
|
||||
//
|
||||
// The packages p that are built differently for a test q.test
|
||||
// are q itself, plus any helpers used by the external test q_test,
|
||||
@@ -561,11 +418,13 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
|
||||
// Run "go list" for complete
|
||||
// information on the specified packages.
|
||||
buf, err := invokeGo(cfg, golistargs(cfg, words)...)
|
||||
buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seen := make(map[string]*jsonPackage)
|
||||
pkgs := make(map[string]*Package)
|
||||
additionalErrors := make(map[string][]Error)
|
||||
// Decode the JSON and convert it to Package form.
|
||||
var response driverResponse
|
||||
for dec := json.NewDecoder(buf); dec.More(); {
|
||||
@@ -588,12 +447,80 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
return nil, fmt.Errorf("package missing import path: %+v", p)
|
||||
}
|
||||
|
||||
if old, found := seen[p.ImportPath]; found {
|
||||
if !reflect.DeepEqual(p, old) {
|
||||
return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
|
||||
// Work around https://golang.org/issue/33157:
|
||||
// go list -e, when given an absolute path, will find the package contained at
|
||||
// that directory. But when no package exists there, it will return a fake package
|
||||
// with an error and the ImportPath set to the absolute path provided to go list.
|
||||
// Try to convert that absolute path to what its package path would be if it's
|
||||
// contained in a known module or GOPATH entry. This will allow the package to be
|
||||
// properly "reclaimed" when overlays are processed.
|
||||
if filepath.IsAbs(p.ImportPath) && p.Error != nil {
|
||||
pkgPath, ok, err := state.getPkgPath(p.ImportPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// skip the duplicate
|
||||
continue
|
||||
if ok {
|
||||
p.ImportPath = pkgPath
|
||||
}
|
||||
}
|
||||
|
||||
if old, found := seen[p.ImportPath]; found {
|
||||
// If one version of the package has an error, and the other doesn't, assume
|
||||
// that this is a case where go list is reporting a fake dependency variant
|
||||
// of the imported package: When a package tries to invalidly import another
|
||||
// package, go list emits a variant of the imported package (with the same
|
||||
// import path, but with an error on it, and the package will have a
|
||||
// DepError set on it). An example of when this can happen is for imports of
|
||||
// main packages: main packages can not be imported, but they may be
|
||||
// separately matched and listed by another pattern.
|
||||
// See golang.org/issue/36188 for more details.
|
||||
|
||||
// The plan is that eventually, hopefully in Go 1.15, the error will be
|
||||
// reported on the importing package rather than the duplicate "fake"
|
||||
// version of the imported package. Once all supported versions of Go
|
||||
// have the new behavior this logic can be deleted.
|
||||
// TODO(matloob): delete the workaround logic once all supported versions of
|
||||
// Go return the errors on the proper package.
|
||||
|
||||
// There should be exactly one version of a package that doesn't have an
|
||||
// error.
|
||||
if old.Error == nil && p.Error == nil {
|
||||
if !reflect.DeepEqual(p, old) {
|
||||
return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine if this package's error needs to be bubbled up.
|
||||
// This is a hack, and we expect for go list to eventually set the error
|
||||
// on the package.
|
||||
if old.Error != nil {
|
||||
var errkind string
|
||||
if strings.Contains(old.Error.Err, "not an importable package") {
|
||||
errkind = "not an importable package"
|
||||
} else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
|
||||
errkind = "use of internal package not allowed"
|
||||
}
|
||||
if errkind != "" {
|
||||
if len(old.Error.ImportStack) < 2 {
|
||||
return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind)
|
||||
}
|
||||
importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2]
|
||||
additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
|
||||
Pos: old.Error.Pos,
|
||||
Msg: old.Error.Err,
|
||||
Kind: ListError,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure that if there's a version of the package without an error,
|
||||
// that's the one reported to the user.
|
||||
if old.Error == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// This package will replace the old one at the end of the loop.
|
||||
}
|
||||
seen[p.ImportPath] = p
|
||||
|
||||
@@ -603,6 +530,8 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
|
||||
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
|
||||
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
|
||||
forTest: p.ForTest,
|
||||
module: p.Module,
|
||||
}
|
||||
|
||||
// Work around https://golang.org/issue/28749:
|
||||
@@ -679,18 +608,70 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) {
|
||||
}
|
||||
|
||||
if p.Error != nil {
|
||||
msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
|
||||
// Address golang.org/issue/35964 by appending import stack to error message.
|
||||
if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
|
||||
msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
|
||||
}
|
||||
pkg.Errors = append(pkg.Errors, Error{
|
||||
Pos: p.Error.Pos,
|
||||
Msg: p.Error.Err,
|
||||
Pos: p.Error.Pos,
|
||||
Msg: msg,
|
||||
Kind: ListError,
|
||||
})
|
||||
}
|
||||
|
||||
response.Packages = append(response.Packages, pkg)
|
||||
pkgs[pkg.ID] = pkg
|
||||
}
|
||||
|
||||
for id, errs := range additionalErrors {
|
||||
if p, ok := pkgs[id]; ok {
|
||||
p.Errors = append(p.Errors, errs...)
|
||||
}
|
||||
}
|
||||
for _, pkg := range pkgs {
|
||||
response.Packages = append(response.Packages, pkg)
|
||||
}
|
||||
sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
|
||||
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// getPkgPath finds the package path of a directory if it's relative to a root directory.
|
||||
func (state *golistState) getPkgPath(dir string) (string, bool, error) {
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
roots, err := state.determineRootDirs()
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
|
||||
for rdir, rpath := range roots {
|
||||
// Make sure that the directory is in the module,
|
||||
// to avoid creating a path relative to another module.
|
||||
if !strings.HasPrefix(absDir, rdir) {
|
||||
continue
|
||||
}
|
||||
// TODO(matloob): This doesn't properly handle symlinks.
|
||||
r, err := filepath.Rel(rdir, dir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if rpath != "" {
|
||||
// We choose only one root even though the directory can belong to multiple modules
|
||||
// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
|
||||
// file is missing from disk, for instance when gopls calls go/packages in an overlay.
|
||||
// Once the file is saved, gopls, or the next invocation of the tool will get the correct
|
||||
// result straight from golist.
|
||||
// TODO(matloob): Implement module tiebreaking?
|
||||
return path.Join(rpath, filepath.ToSlash(r)), true, nil
|
||||
}
|
||||
return filepath.ToSlash(r), true, nil
|
||||
}
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
// absJoin absolutizes and flattens the lists of files.
|
||||
func absJoin(dir string, fileses ...[]string) (res []string) {
|
||||
for _, files := range fileses {
|
||||
@@ -707,11 +688,11 @@ func absJoin(dir string, fileses ...[]string) (res []string) {
|
||||
func golistargs(cfg *Config, words []string) []string {
|
||||
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
|
||||
fullargs := []string{
|
||||
"list", "-e", "-json",
|
||||
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
|
||||
"-e", "-json",
|
||||
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
|
||||
fmt.Sprintf("-test=%t", cfg.Tests),
|
||||
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
||||
fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
|
||||
fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
|
||||
// go list doesn't let you pass -test and -find together,
|
||||
// probably because you'd just get the TestMain.
|
||||
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
|
||||
@@ -723,27 +704,20 @@ func golistargs(cfg *Config, words []string) []string {
|
||||
}
|
||||
|
||||
// invokeGo returns the stdout of a go command invocation.
|
||||
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
stdout := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(cfg.Context, "go", args...)
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
|
||||
cmd.Dir = cfg.Dir
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
if debug {
|
||||
defer func(start time.Time) {
|
||||
log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
|
||||
}(time.Now())
|
||||
func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
|
||||
cfg := state.cfg
|
||||
|
||||
inv := &gocommand.Invocation{
|
||||
Verb: verb,
|
||||
Args: args,
|
||||
BuildFlags: cfg.BuildFlags,
|
||||
Env: cfg.Env,
|
||||
Logf: cfg.Logf,
|
||||
WorkingDir: cfg.Dir,
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
stdout, stderr, _, err := inv.RunRaw(cfg.Context)
|
||||
if err != nil {
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
|
||||
@@ -753,7 +727,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
if !ok {
|
||||
// Catastrophic error:
|
||||
// - context cancellation
|
||||
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
return nil, fmt.Errorf("couldn't run 'go': %v", err)
|
||||
}
|
||||
|
||||
// Old go version?
|
||||
@@ -761,6 +735,35 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
|
||||
}
|
||||
|
||||
// Related to #24854
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
|
||||
return nil, fmt.Errorf("%s", stderr.String())
|
||||
}
|
||||
|
||||
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
|
||||
// and should be suppressed by go list -e.
|
||||
//
|
||||
// This condition is not perfect yet because the error message can include other error messages than runtime/cgo.
|
||||
isPkgPathRune := func(r rune) bool {
|
||||
// From https://golang.org/ref/spec#Import_declarations:
|
||||
// Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
|
||||
// using only characters belonging to Unicode's L, M, N, P, and S general categories
|
||||
// (the Graphic characters without spaces) and may also exclude the
|
||||
// characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
|
||||
return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
|
||||
!strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
|
||||
}
|
||||
if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
|
||||
msg := stderr.String()[len("# "):]
|
||||
if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
|
||||
return stdout, nil
|
||||
}
|
||||
// Treat pkg-config errors as a special case (golang.org/issue/36770).
|
||||
if strings.HasPrefix(msg, "pkg-config") {
|
||||
return stdout, nil
|
||||
}
|
||||
}
|
||||
|
||||
// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
|
||||
// the error in the Err section of stdout in case -e option is provided.
|
||||
// This fix is provided for backwards compatibility.
|
||||
@@ -770,13 +773,70 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Similar to the previous error, but currently lacks a fix in Go.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
|
||||
output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
|
||||
// If the package doesn't exist, put the absolute path of the directory into the error message,
|
||||
// as Go 1.13 list does.
|
||||
const noSuchDirectory = "no such directory"
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
|
||||
errstr := stderr.String()
|
||||
abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
abspath, strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
|
||||
// Note that the error message we look for in this case is different from the one looked for above.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
|
||||
output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
|
||||
// directory outside any module.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
// TODO(matloob): command-line-arguments isn't correct here.
|
||||
"command-line-arguments", strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Another variation of the previous error
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
// TODO(matloob): command-line-arguments isn't correct here.
|
||||
"command-line-arguments", strings.Trim(stderr.String(), "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
|
||||
// status if there's a dependency on a package that doesn't exist. But it should return
|
||||
// a zero exit status and set an error on that package.
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
|
||||
// Don't clobber stdout if `go list` actually returned something.
|
||||
if len(stdout.String()) > 0 {
|
||||
return stdout, nil
|
||||
}
|
||||
// try to extract package name from string
|
||||
stderrStr := stderr.String()
|
||||
var importPath string
|
||||
colon := strings.Index(stderrStr, ":")
|
||||
if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
|
||||
importPath = stderrStr[len("go build "):colon]
|
||||
}
|
||||
output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
|
||||
importPath, strings.Trim(stderrStr, "\n"))
|
||||
return bytes.NewBufferString(output), nil
|
||||
}
|
||||
|
||||
// Export mode entails a build.
|
||||
// If that build fails, errors appear on stderr
|
||||
// (despite the -e flag) and the Export field is blank.
|
||||
@@ -788,22 +848,6 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
// As of writing, go list -export prints some non-fatal compilation
|
||||
// errors to stderr, even with -e set. We would prefer that it put
|
||||
// them in the Package.Error JSON (see https://golang.org/issue/26319).
|
||||
// In the meantime, there's nowhere good to put them, but they can
|
||||
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
|
||||
// is set.
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
|
||||
}
|
||||
|
||||
// debugging
|
||||
if false {
|
||||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
}
|
||||
|
||||
|
||||
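In the rewritten invokeGo above, command execution moves into the internal golang.org/x/tools/internal/gocommand package: an Invocation whose RunRaw call returns captured stdout and stderr. That package is internal and cannot be imported from outside x/tools, but its effect is the same as the older inline code it replaces — run the go tool with a fixed working directory, a PWD override, and separate output buffers. A rough standalone sketch under those assumptions follows (the runGo name and the "./..." pattern are illustrative only):

package main

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

// runGo mirrors what the removed inline invokeGo did: run `go <args>` in dir,
// capturing stdout and stderr separately so the caller can inspect both.
func runGo(dir string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
	stdout := new(bytes.Buffer)
	stderr := new(bytes.Buffer)
	cmd := exec.Command("go", args...)
	cmd.Dir = dir
	// Setting PWD keeps symlinked working directories intact for the go command.
	cmd.Env = append(os.Environ(), "PWD="+dir)
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	err := cmd.Run()
	return stdout, stderr, err
}

func main() {
	stdout, stderr, err := runGo(".", "list", "-e", "-json", "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, "go list failed:", err, stderr.String())
		return
	}
	fmt.Println(stdout.Len(), "bytes of package metadata")
}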
344
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
@@ -1,9 +1,13 @@
|
||||
package packages
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@@ -12,74 +16,189 @@ import (
|
||||
// files that don't exist on disk to an overlay. The results can be
|
||||
// sometimes incorrect.
|
||||
// TODO(matloob): Handle unsupported cases, including the following:
|
||||
// - test files
|
||||
// - adding test and non-test files to test variants of packages
|
||||
// - determining the correct package to add given a new import path
|
||||
// - creating packages that don't exist
|
||||
func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) {
|
||||
func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
|
||||
havePkgs := make(map[string]string) // importPath -> non-test package ID
|
||||
needPkgsSet := make(map[string]bool)
|
||||
modifiedPkgsSet := make(map[string]bool)
|
||||
|
||||
for _, pkg := range response.Packages {
|
||||
for _, pkg := range response.dr.Packages {
|
||||
// This is an approximation of import path to id. This can be
|
||||
// wrong for tests, vendored packages, and a number of other cases.
|
||||
havePkgs[pkg.PkgPath] = pkg.ID
|
||||
}
|
||||
|
||||
outer:
|
||||
for path, contents := range cfg.Overlay {
|
||||
base := filepath.Base(path)
|
||||
if strings.HasSuffix(path, "_test.go") {
|
||||
// Overlays don't support adding new test files yet.
|
||||
// TODO(matloob): support adding new test files.
|
||||
// If no new imports are added, it is safe to avoid loading any needPkgs.
|
||||
// Otherwise, it's hard to tell which package is actually being loaded
|
||||
// (due to vendoring) and whether any modified package will show up
|
||||
// in the transitive set of dependencies (because new imports are added,
|
||||
// potentially modifying the transitive set of dependencies).
|
||||
var overlayAddsImports bool
|
||||
|
||||
// If both a package and its test package are created by the overlay, we
|
||||
// need the real package first. Process all non-test files before test
|
||||
// files, and make the whole process deterministic while we're at it.
|
||||
var overlayFiles []string
|
||||
for opath := range state.cfg.Overlay {
|
||||
overlayFiles = append(overlayFiles, opath)
|
||||
}
|
||||
sort.Slice(overlayFiles, func(i, j int) bool {
|
||||
iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
|
||||
jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
|
||||
if iTest != jTest {
|
||||
return !iTest // non-tests are before tests.
|
||||
}
|
||||
return overlayFiles[i] < overlayFiles[j]
|
||||
})
|
||||
for _, opath := range overlayFiles {
|
||||
contents := state.cfg.Overlay[opath]
|
||||
base := filepath.Base(opath)
|
||||
dir := filepath.Dir(opath)
|
||||
var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
|
||||
var testVariantOf *Package // if opath is a test file, this is the package it is testing
|
||||
var fileExists bool
|
||||
isTestFile := strings.HasSuffix(opath, "_test.go")
|
||||
pkgName, ok := extractPackageName(opath, contents)
|
||||
if !ok {
|
||||
// Don't bother adding a file that doesn't even have a parsable package statement
|
||||
// to the overlay.
|
||||
continue
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
for _, pkg := range response.Packages {
|
||||
var dirContains, fileExists bool
|
||||
for _, f := range pkg.GoFiles {
|
||||
if sameFile(filepath.Dir(f), dir) {
|
||||
dirContains = true
|
||||
nextPackage:
|
||||
for _, p := range response.dr.Packages {
|
||||
if pkgName != p.Name && p.ID != "command-line-arguments" {
|
||||
continue
|
||||
}
|
||||
for _, f := range p.GoFiles {
|
||||
if !sameFile(filepath.Dir(f), dir) {
|
||||
continue
|
||||
}
|
||||
// Make sure to capture information on the package's test variant, if needed.
|
||||
if isTestFile && !hasTestFiles(p) {
|
||||
// TODO(matloob): Are there packages other than the 'production' variant
|
||||
// of a package that this can match? This shouldn't match the test main package
|
||||
// because the file is generated in another directory.
|
||||
testVariantOf = p
|
||||
continue nextPackage
|
||||
}
|
||||
// We must have already seen the package of which this is a test variant.
|
||||
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
|
||||
if hasTestFiles(p) {
|
||||
testVariantOf = pkg
|
||||
}
|
||||
}
|
||||
pkg = p
|
||||
if filepath.Base(f) == base {
|
||||
fileExists = true
|
||||
}
|
||||
}
|
||||
// The overlay could have included an entirely new package.
|
||||
isNewPackage := extractPackage(pkg, path, contents)
|
||||
if dirContains || isNewPackage {
|
||||
if !fileExists {
|
||||
pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles?
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path)
|
||||
modifiedPkgsSet[pkg.ID] = true
|
||||
}
|
||||
// The overlay could have included an entirely new package.
|
||||
if pkg == nil {
|
||||
// Try to find the module or gopath dir the file is contained in.
|
||||
// Then for modules, add the module opath to the beginning.
|
||||
pkgPath, ok, err := state.getPkgPath(dir)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
isXTest := strings.HasSuffix(pkgName, "_test")
|
||||
if isXTest {
|
||||
pkgPath += "_test"
|
||||
}
|
||||
id := pkgPath
|
||||
if isTestFile && !isXTest {
|
||||
id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
|
||||
}
|
||||
// Try to reclaim a package with the same id if it exists in the response.
|
||||
for _, p := range response.dr.Packages {
|
||||
if reclaimPackage(p, id, opath, contents) {
|
||||
pkg = p
|
||||
break
|
||||
}
|
||||
imports, err := extractImports(path, contents)
|
||||
if err != nil {
|
||||
// Let the parser or type checker report errors later.
|
||||
continue outer
|
||||
}
|
||||
for _, imp := range imports {
|
||||
_, found := pkg.Imports[imp]
|
||||
if !found {
|
||||
needPkgsSet[imp] = true
|
||||
// TODO(matloob): Handle cases when the following block isn't correct.
|
||||
// These include imports of test variants, imports of vendored packages, etc.
|
||||
id, ok := havePkgs[imp]
|
||||
if !ok {
|
||||
id = imp
|
||||
}
|
||||
pkg.Imports[imp] = &Package{ID: id}
|
||||
}
|
||||
// Otherwise, create a new package
|
||||
if pkg == nil {
|
||||
pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)}
|
||||
response.addPackage(pkg)
|
||||
havePkgs[pkg.PkgPath] = id
|
||||
// Add the production package's sources for a test variant.
|
||||
if isTestFile && !isXTest && testVariantOf != nil {
|
||||
pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
|
||||
// Add the package under test and its imports to the test variant.
|
||||
pkg.forTest = testVariantOf.PkgPath
|
||||
for k, v := range testVariantOf.Imports {
|
||||
pkg.Imports[k] = &Package{ID: v.ID}
|
||||
}
|
||||
}
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
if !fileExists {
|
||||
pkg.GoFiles = append(pkg.GoFiles, opath)
|
||||
// TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
|
||||
// if the file will be ignored due to its build tags.
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
|
||||
modifiedPkgsSet[pkg.ID] = true
|
||||
}
|
||||
imports, err := extractImports(opath, contents)
|
||||
if err != nil {
|
||||
// Let the parser or type checker report errors later.
|
||||
continue
|
||||
}
|
||||
for _, imp := range imports {
|
||||
if _, found := pkg.Imports[imp]; found {
|
||||
continue
|
||||
}
|
||||
overlayAddsImports = true
|
||||
id, ok := havePkgs[imp]
|
||||
if !ok {
|
||||
var err error
|
||||
id, err = state.resolveImport(dir, imp)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
pkg.Imports[imp] = &Package{ID: id}
|
||||
// Add dependencies to the non-test variant version of this package as well.
|
||||
if testVariantOf != nil {
|
||||
testVariantOf.Imports[imp] = &Package{ID: id}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
needPkgs = make([]string, 0, len(needPkgsSet))
|
||||
for pkg := range needPkgsSet {
|
||||
needPkgs = append(needPkgs, pkg)
|
||||
// toPkgPath guesses the package path given the id.
|
||||
toPkgPath := func(sourceDir, id string) (string, error) {
|
||||
if i := strings.IndexByte(id, ' '); i >= 0 {
|
||||
return state.resolveImport(sourceDir, id[:i])
|
||||
}
|
||||
return state.resolveImport(sourceDir, id)
|
||||
}
|
||||
|
||||
// Now that new packages have been created, do another pass to determine
|
||||
// the new set of missing packages.
|
||||
for _, pkg := range response.dr.Packages {
|
||||
for _, imp := range pkg.Imports {
|
||||
if len(pkg.GoFiles) == 0 {
|
||||
return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
|
||||
}
|
||||
pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if _, ok := havePkgs[pkgPath]; !ok {
|
||||
needPkgsSet[pkgPath] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if overlayAddsImports {
|
||||
needPkgs = make([]string, 0, len(needPkgsSet))
|
||||
for pkg := range needPkgsSet {
|
||||
needPkgs = append(needPkgs, pkg)
|
||||
}
|
||||
}
|
||||
modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
|
||||
for pkg := range modifiedPkgsSet {
|
||||
@@ -88,6 +207,116 @@ outer:
|
||||
return modifiedPkgs, needPkgs, err
|
||||
}
|
||||
|
||||
// resolveImport finds the ID of a package given its import path.
// In particular, it will find the right vendored copy when in GOPATH mode.
func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
env, err := state.getEnv()
if err != nil {
return "", err
}
if env["GOMOD"] != "" {
return importPath, nil
}

searchDir := sourceDir
for {
vendorDir := filepath.Join(searchDir, "vendor")
exists, ok := state.vendorDirs[vendorDir]
if !ok {
info, err := os.Stat(vendorDir)
exists = err == nil && info.IsDir()
state.vendorDirs[vendorDir] = exists
}

if exists {
vendoredPath := filepath.Join(vendorDir, importPath)
if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
// We should probably check for .go files here, but shame on anyone who fools us.
path, ok, err := state.getPkgPath(vendoredPath)
if err != nil {
return "", err
}
if ok {
return path, nil
}
}
}

// We know we've hit the top of the filesystem when we Dir / and get /,
// or C:\ and get C:\, etc.
next := filepath.Dir(searchDir)
if next == searchDir {
break
}
searchDir = next
}
return importPath, nil
}

func hasTestFiles(p *Package) bool {
|
||||
for _, f := range p.GoFiles {
|
||||
if strings.HasSuffix(f, "_test.go") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// determineRootDirs returns a mapping from absolute directories that could
|
||||
// contain code to their corresponding import path prefixes.
|
||||
func (state *golistState) determineRootDirs() (map[string]string, error) {
|
||||
env, err := state.getEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if env["GOMOD"] != "" {
|
||||
state.rootsOnce.Do(func() {
|
||||
state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
|
||||
})
|
||||
} else {
|
||||
state.rootsOnce.Do(func() {
|
||||
state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
|
||||
})
|
||||
}
|
||||
return state.rootDirs, state.rootDirsError
|
||||
}
|
||||
|
||||
func (state *golistState) determineRootDirsModules() (map[string]string, error) {
|
||||
out, err := state.invokeGo("list", "-m", "-json", "all")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := map[string]string{}
|
||||
type jsonMod struct{ Path, Dir string }
|
||||
for dec := json.NewDecoder(out); dec.More(); {
|
||||
mod := new(jsonMod)
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if mod.Dir != "" && mod.Path != "" {
|
||||
// This is a valid module; add it to the map.
|
||||
absDir, err := filepath.Abs(mod.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[absDir] = mod.Path
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
|
||||
m := map[string]string{}
|
||||
for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[filepath.Join(absDir, "src")] = ""
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func extractImports(filename string, contents []byte) ([]string, error) {
|
||||
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
|
||||
if err != nil {
|
||||
@@ -105,13 +334,16 @@ func extractImports(filename string, contents []byte) ([]string, error) {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// extractPackage attempts to extract a package defined in an overlay.
|
||||
// reclaimPackage attempts to reuse a package that failed to load in an overlay.
|
||||
//
|
||||
// If the package has errors and has no Name, GoFiles, or Imports,
|
||||
// then it's possible that it doesn't yet exist on disk.
|
||||
func extractPackage(pkg *Package, filename string, contents []byte) bool {
|
||||
func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
|
||||
// TODO(rstambler): Check the message of the actual error?
|
||||
// It differs between $GOPATH and module mode.
|
||||
if pkg.ID != id {
|
||||
return false
|
||||
}
|
||||
if len(pkg.Errors) != 1 {
|
||||
return false
|
||||
}
|
||||
@@ -124,15 +356,21 @@ func extractPackage(pkg *Package, filename string, contents []byte) bool {
|
||||
if len(pkg.Imports) > 0 {
|
||||
return false
|
||||
}
|
||||
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
|
||||
if err != nil {
|
||||
pkgName, ok := extractPackageName(filename, contents)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// TODO(rstambler): This doesn't work for main packages.
|
||||
if filepath.Base(pkg.PkgPath) != f.Name.Name {
|
||||
return false
|
||||
}
|
||||
pkg.Name = f.Name.Name
|
||||
pkg.Name = pkgName
|
||||
pkg.Errors = nil
|
||||
return true
|
||||
}
|
||||
|
||||
func extractPackageName(filename string, contents []byte) (string, bool) {
|
||||
// TODO(rstambler): Check the message of the actual error?
|
||||
// It differs between $GOPATH and module mode.
|
||||
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
return f.Name.Name, true
|
||||
}
|
||||
|
||||
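extractPackageName above pulls just the package clause out of an overlay file with parser.PackageClauseOnly. A small self-contained illustration of that call (the file name and source text here are made up for the example):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := []byte("package mypkg\n\nfunc Hello() {}\n")
	// PackageClauseOnly stops parsing after the package statement, which is
	// all that is needed to learn the package name of an overlay file.
	f, err := parser.ParseFile(token.NewFileSet(), "overlay.go", src, parser.PackageClauseOnly)
	if err != nil {
		fmt.Println("no parsable package clause:", err)
		return
	}
	fmt.Println(f.Name.Name) // mypkg
}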
57
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package packages

import (
"fmt"
"strings"
)

var allModes = []LoadMode{
NeedName,
NeedFiles,
NeedCompiledGoFiles,
NeedImports,
NeedDeps,
NeedExportsFile,
NeedTypes,
NeedSyntax,
NeedTypesInfo,
NeedTypesSizes,
}

var modeStrings = []string{
"NeedName",
"NeedFiles",
"NeedCompiledGoFiles",
"NeedImports",
"NeedDeps",
"NeedExportsFile",
"NeedTypes",
"NeedSyntax",
"NeedTypesInfo",
"NeedTypesSizes",
}

func (mod LoadMode) String() string {
m := mod
if m == 0 {
return fmt.Sprintf("LoadMode(0)")
}
var out []string
for i, x := range allModes {
if x > m {
break
}
if (m & x) != 0 {
out = append(out, modeStrings[i])
m = m ^ x
}
}
if m != 0 {
out = append(out, "Unknown")
}
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
}
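The String method in the new loadmode_string.go walks allModes in ascending bit order and prints the names of the bits that are set. A quick usage sketch, assuming the vendored module is importable as golang.org/x/tools/go/packages (as it is in this tree):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Combined modes print their component bit names joined by "|".
	mode := packages.NeedName | packages.NeedFiles
	fmt.Println(mode) // LoadMode(NeedName|NeedFiles)

	// The zero value is special-cased.
	fmt.Println(packages.LoadMode(0)) // LoadMode(0)
}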
268
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
@@ -23,26 +23,22 @@ import (
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/gcexportdata"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
)
|
||||
|
||||
// A LoadMode specifies the amount of detail to return when loading.
|
||||
// Higher-numbered modes cause Load to return more information,
|
||||
// but may be slower. Load may return more information than requested.
|
||||
// A LoadMode controls the amount of detail to return when loading.
|
||||
// The bits below can be combined to specify which fields should be
|
||||
// filled in the result packages.
|
||||
// The zero value is a special case, equivalent to combining
|
||||
// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
|
||||
// ID and Errors (if present) will always be filled.
|
||||
// Load may return more information than requested.
|
||||
type LoadMode int
|
||||
|
||||
// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to
|
||||
// NeedExportFile to make it consistent with the Package field it's adding.
|
||||
|
||||
const (
|
||||
// The following constants are used to specify which fields of the Package
|
||||
// should be filled when loading is done. As a special case to provide
|
||||
// backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles.
|
||||
// For all other LoadModes, the bits below specify which fields will be filled
|
||||
// in the result packages.
|
||||
// WARNING: This part of the go/packages API is EXPERIMENTAL. It might
|
||||
// be changed or removed up until April 15 2019. After that date it will
|
||||
// be frozen.
|
||||
// TODO(matloob): Remove this comment on April 15.
|
||||
|
||||
// ID and Errors (if present) will always be filled.
|
||||
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
|
||||
@@ -56,11 +52,10 @@ const (
|
||||
// "placeholder" Packages with only the ID set.
|
||||
NeedImports
|
||||
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports
|
||||
// is not set NeedDeps has no effect.
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
|
||||
NeedDeps
|
||||
|
||||
// NeedExportsFile adds ExportsFile.
|
||||
// NeedExportsFile adds ExportFile.
|
||||
NeedExportsFile
|
||||
|
||||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
@@ -77,31 +72,25 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
// LoadFiles finds the packages and computes their source file lists.
|
||||
// Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles.
|
||||
// Deprecated: LoadFiles exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
|
||||
|
||||
// LoadImports adds import information for each package
|
||||
// and its dependencies.
|
||||
// Package fields added: Imports.
|
||||
LoadImports = LoadFiles | NeedImports | NeedDeps
|
||||
// Deprecated: LoadImports exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadImports = LoadFiles | NeedImports
|
||||
|
||||
// LoadTypes adds type information for package-level
|
||||
// declarations in the packages matching the patterns.
|
||||
// Package fields added: Types, TypesSizes, Fset, and IllTyped.
|
||||
// This mode uses type information provided by the build system when
|
||||
// possible, and may fill in the ExportFile field.
|
||||
// Deprecated: LoadTypes exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
|
||||
|
||||
// LoadSyntax adds typed syntax trees for the packages matching the patterns.
|
||||
// Package fields added: Syntax, and TypesInfo, for direct pattern matches only.
|
||||
// Deprecated: LoadSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
|
||||
|
||||
// LoadAllSyntax adds typed syntax trees for the packages matching the patterns
|
||||
// and all dependencies.
|
||||
// Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo,
|
||||
// for all packages in the import graph.
|
||||
LoadAllSyntax = LoadSyntax
|
||||
// Deprecated: LoadAllSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
LoadAllSyntax = LoadSyntax | NeedDeps
|
||||
)
|
||||
|
||||
// A Config specifies details about how packages should be loaded.
|
||||
@@ -117,6 +106,12 @@ type Config struct {
|
||||
// If Context is nil, the load cannot be cancelled.
|
||||
Context context.Context
|
||||
|
||||
// Logf is the logger for the config.
|
||||
// If the user provides a logger, debug logging is enabled.
|
||||
// If the GOPACKAGESDEBUG environment variable is set to true,
|
||||
// but the logger is nil, default to log.Printf.
|
||||
Logf func(format string, args ...interface{})
|
||||
|
||||
// Dir is the directory in which to run the build system's query tool
|
||||
// that provides information about the packages.
|
||||
// If Dir is empty, the tool is run in the current directory.
|
||||
@@ -169,7 +164,7 @@ type Config struct {
|
||||
Tests bool
|
||||
|
||||
// Overlay provides a mapping of absolute file paths to file contents.
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// alternative file contents provided by the map.
|
||||
//
|
||||
// Overlays provide incomplete support for when a given file doesn't
|
||||
@@ -275,9 +270,9 @@ type Package struct {
|
||||
Imports map[string]*Package
|
||||
|
||||
// Types provides type information for the package.
|
||||
// Modes LoadTypes and above set this field for packages matching the
|
||||
// patterns; type information for dependencies may be missing or incomplete.
|
||||
// Mode LoadAllSyntax sets this field for all packages, including dependencies.
|
||||
// The NeedTypes LoadMode bit sets this field for packages matching the
|
||||
// patterns; type information for dependencies may be missing or incomplete,
|
||||
// unless NeedDeps and NeedImports are also set.
|
||||
Types *types.Package
|
||||
|
||||
// Fset provides position information for Types, TypesInfo, and Syntax.
|
||||
@@ -290,8 +285,9 @@ type Package struct {
|
||||
|
||||
// Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
|
||||
//
|
||||
// Mode LoadSyntax sets this field for packages matching the patterns.
|
||||
// Mode LoadAllSyntax sets this field for all packages, including dependencies.
|
||||
// The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
|
||||
// If NeedDeps and NeedImports are also set, this field will also be populated
|
||||
// for dependencies.
|
||||
Syntax []*ast.File
|
||||
|
||||
// TypesInfo provides type information about the package's syntax trees.
|
||||
@@ -300,6 +296,21 @@ type Package struct {
|
||||
|
||||
// TypesSizes provides the effective size function for types in TypesInfo.
|
||||
TypesSizes types.Sizes
|
||||
|
||||
// forTest is the package under test, if any.
|
||||
forTest string
|
||||
|
||||
// module is the module information for the package if it exists.
|
||||
module *packagesinternal.Module
|
||||
}
|
||||
|
||||
func init() {
|
||||
packagesinternal.GetForTest = func(p interface{}) string {
|
||||
return p.(*Package).forTest
|
||||
}
|
||||
packagesinternal.GetModule = func(p interface{}) *packagesinternal.Module {
|
||||
return p.(*Package).module
|
||||
}
|
||||
}
|
||||
|
||||
// An Error describes a problem with a package's metadata, syntax, or types.
|
||||
@@ -423,11 +434,13 @@ type loader struct {
|
||||
parseCacheMu sync.Mutex
|
||||
exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
|
||||
|
||||
// TODO(matloob): Add an implied mode here and use that instead of mode.
|
||||
// Implied mode would contain all the fields we need the data for so we can
|
||||
// get the actually requested fields. We'll zero them out before returning
|
||||
// packages to the user. This will make it easier for us to get the conditions
|
||||
// where we need certain modes right.
|
||||
// Config.Mode contains the implied mode (see impliedLoadMode).
|
||||
// Implied mode contains all the fields we need the data for.
|
||||
// In requestedMode there are the actually requested fields.
|
||||
// We'll zero them out before returning packages to the user.
|
||||
// This makes it easier for us to get the conditions where
|
||||
// we need certain modes right.
|
||||
requestedMode LoadMode
|
||||
}
|
||||
|
||||
type parseValue struct {
|
||||
@@ -442,9 +455,20 @@ func newLoader(cfg *Config) *loader {
|
||||
}
|
||||
if cfg != nil {
|
||||
ld.Config = *cfg
|
||||
// If the user has provided a logger, use it.
|
||||
ld.Config.Logf = cfg.Logf
|
||||
}
|
||||
if ld.Config.Logf == nil {
|
||||
// If the GOPACKAGESDEBUG environment variable is set to true,
|
||||
// but the user has not provided a logger, default to log.Printf.
|
||||
if debug {
|
||||
ld.Config.Logf = log.Printf
|
||||
} else {
|
||||
ld.Config.Logf = func(format string, args ...interface{}) {}
|
||||
}
|
||||
}
|
||||
if ld.Config.Mode == 0 {
|
||||
ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility.
|
||||
ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
|
||||
}
|
||||
if ld.Config.Env == nil {
|
||||
ld.Config.Env = os.Environ()
|
||||
@@ -458,7 +482,11 @@ func newLoader(cfg *Config) *loader {
|
||||
}
|
||||
}
|
||||
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
// Save the actually requested fields. We'll zero them out before returning packages to the user.
|
||||
ld.requestedMode = ld.Mode
|
||||
ld.Mode = impliedLoadMode(ld.Mode)
|
||||
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
if ld.Fset == nil {
|
||||
ld.Fset = token.NewFileSet()
|
||||
}
|
||||
@@ -472,6 +500,7 @@ func newLoader(cfg *Config) *loader {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ld
|
||||
}
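Aside (illustrative only, not part of the vendored code): a hedged sketch of configuring the logger and mode that newLoader defaults above; the "fmt" pattern and the printed fields are arbitrary choices for the example.

package main

import (
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// newLoader falls back to log.Printf only when GOPACKAGESDEBUG=true and
		// Logf is nil; setting Logf explicitly enables debug logging regardless.
		Logf: log.Printf,
		// A zero Mode would be treated as NeedName|NeedFiles|NeedCompiledGoFiles.
		Mode: packages.NeedName | packages.NeedFiles,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		log.Printf("%s has %d Go files", p.PkgPath, len(p.GoFiles))
	}
}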
|
||||
|
||||
@@ -490,12 +519,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
if i, found := rootMap[pkg.ID]; found {
|
||||
rootIndex = i
|
||||
}
|
||||
|
||||
// Overlays can invalidate export data.
|
||||
// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
|
||||
// This package needs type information if the caller requested types and the package is
|
||||
// either a root, or it's a non-root and the user requested dependencies ...
|
||||
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
// This package needs source if the call requested source (or types info, which implies source)
|
||||
// and the package is either a root, or it's a non-root and the user requested dependencies ...
|
||||
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
|
||||
// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
|
||||
// typechecking packages from source if they fail to compile.
|
||||
(ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
|
||||
lpkg := &loaderPackage{
|
||||
Package: pkg,
|
||||
needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0,
|
||||
needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 ||
|
||||
len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
|
||||
needtypes: needtypes,
|
||||
needsrc: needsrc,
|
||||
}
|
||||
ld.pkgs[lpkg.ID] = lpkg
|
||||
if rootIndex >= 0 {
|
||||
@@ -540,28 +580,31 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
// If NeedImports isn't set, the imports fields will all be zeroed out.
|
||||
if ld.Mode&NeedImports != 0 {
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
if lpkg.needsrc {
|
||||
srcPkgs = append(srcPkgs, lpkg)
|
||||
@@ -575,7 +618,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
return lpkg.needsrc
|
||||
}
|
||||
|
||||
if ld.Mode&(NeedImports|NeedDeps) == 0 {
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
// We do this to drop the stub import packages that we are not even going to try to resolve.
|
||||
for _, lpkg := range initial {
|
||||
lpkg.Imports = nil
|
||||
@@ -586,7 +629,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
visit(lpkg)
|
||||
}
|
||||
}
|
||||
if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right?
|
||||
if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 {
|
||||
for _, lpkg := range srcPkgs {
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
@@ -596,9 +639,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Load type data if needed, starting at
|
||||
// Load type data and syntax if needed, starting at
|
||||
// the initial packages (roots of the import DAG).
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
var wg sync.WaitGroup
|
||||
for _, lpkg := range initial {
|
||||
wg.Add(1)
|
||||
@@ -611,54 +654,44 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
}
|
||||
|
||||
result := make([]*Package, len(initial))
|
||||
importPlaceholders := make(map[string]*Package)
|
||||
for i, lpkg := range initial {
|
||||
result[i] = lpkg.Package
|
||||
}
|
||||
for i := range ld.pkgs {
|
||||
// Clear all unrequested fields, for extra de-Hyrum-ization.
|
||||
if ld.Mode&NeedName == 0 {
|
||||
if ld.requestedMode&NeedName == 0 {
|
||||
ld.pkgs[i].Name = ""
|
||||
ld.pkgs[i].PkgPath = ""
|
||||
}
|
||||
if ld.Mode&NeedFiles == 0 {
|
||||
if ld.requestedMode&NeedFiles == 0 {
|
||||
ld.pkgs[i].GoFiles = nil
|
||||
ld.pkgs[i].OtherFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedCompiledGoFiles == 0 {
|
||||
if ld.requestedMode&NeedCompiledGoFiles == 0 {
|
||||
ld.pkgs[i].CompiledGoFiles = nil
|
||||
}
|
||||
if ld.Mode&NeedImports == 0 {
|
||||
if ld.requestedMode&NeedImports == 0 {
|
||||
ld.pkgs[i].Imports = nil
|
||||
}
|
||||
if ld.Mode&NeedExportsFile == 0 {
|
||||
if ld.requestedMode&NeedExportsFile == 0 {
|
||||
ld.pkgs[i].ExportFile = ""
|
||||
}
|
||||
if ld.Mode&NeedTypes == 0 {
|
||||
if ld.requestedMode&NeedTypes == 0 {
|
||||
ld.pkgs[i].Types = nil
|
||||
ld.pkgs[i].Fset = nil
|
||||
ld.pkgs[i].IllTyped = false
|
||||
}
|
||||
if ld.Mode&NeedSyntax == 0 {
|
||||
if ld.requestedMode&NeedSyntax == 0 {
|
||||
ld.pkgs[i].Syntax = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesInfo == 0 {
|
||||
if ld.requestedMode&NeedTypesInfo == 0 {
|
||||
ld.pkgs[i].TypesInfo = nil
|
||||
}
|
||||
if ld.Mode&NeedTypesSizes == 0 {
|
||||
if ld.requestedMode&NeedTypesSizes == 0 {
|
||||
ld.pkgs[i].TypesSizes = nil
|
||||
}
|
||||
if ld.Mode&NeedDeps == 0 {
|
||||
for j, pkg := range ld.pkgs[i].Imports {
|
||||
ph, ok := importPlaceholders[pkg.ID]
|
||||
if !ok {
|
||||
ph = &Package{ID: pkg.ID}
|
||||
importPlaceholders[pkg.ID] = ph
|
||||
}
|
||||
ld.pkgs[i].Imports[j] = ph
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -679,7 +712,6 @@ func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
||||
}(imp)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
ld.loadPackage(lpkg)
|
||||
})
|
||||
}
|
||||
@@ -687,7 +719,7 @@ func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
||||
// loadPackage loads the specified package.
|
||||
// It must be called only once per Package,
|
||||
// after immediate dependencies are loaded.
|
||||
// Precondition: ld.Mode >= LoadTypes.
|
||||
// Precondition: ld.Mode & NeedTypes.
|
||||
func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
if lpkg.PkgPath == "unsafe" {
|
||||
// Fill in the blanks to avoid surprises.
|
||||
@@ -711,7 +743,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// which would then require that such created packages be explicitly
|
||||
// inserted back into the Import graph as a final step after export data loading.
|
||||
// The Diamond test exercises this case.
|
||||
if !lpkg.needtypes {
|
||||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
if !lpkg.needsrc {
|
||||
@@ -768,12 +800,23 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
lpkg.Errors = append(lpkg.Errors, errs...)
|
||||
}
|
||||
|
||||
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
|
||||
// The config requested loading sources and types, but sources are missing.
|
||||
// Add an error to the package and fall back to loading from export data.
|
||||
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
|
||||
ld.loadFromExportData(lpkg)
|
||||
return // can't get syntax trees for this package
|
||||
}
|
||||
|
||||
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
|
||||
for _, err := range errs {
|
||||
appendError(err)
|
||||
}
|
||||
|
||||
lpkg.Syntax = files
|
||||
if ld.Config.Mode&NeedTypes == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
@@ -806,7 +849,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
if ipkg.Types != nil && ipkg.Types.Complete() {
|
||||
return ipkg.Types, nil
|
||||
}
|
||||
log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
|
||||
log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
|
||||
panic("unreachable")
|
||||
})
|
||||
|
||||
@@ -817,7 +860,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// Type-check bodies of functions only in non-initial packages.
|
||||
// Example: for import graph A->B->C and initial packages {A,C},
|
||||
// we can ignore function bodies in B.
|
||||
IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial,
|
||||
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
|
||||
|
||||
Error: appendError,
|
||||
Sizes: ld.sizes,
|
||||
@@ -1079,6 +1122,25 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
|
||||
return tpkg, nil
|
||||
}
|
||||
|
||||
func usesExportData(cfg *Config) bool {
|
||||
return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0
|
||||
// impliedLoadMode returns loadMode with its dependencies.
|
||||
func impliedLoadMode(loadMode LoadMode) LoadMode {
|
||||
if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 {
|
||||
// If NeedTypesInfo, go/packages needs to do typechecking itself so it can
|
||||
// associate type info with the AST. To do so, we need the export data
|
||||
// for dependencies, which means we need to ask for the direct dependencies.
|
||||
// NeedImports is used to ask for the direct dependencies.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
|
||||
if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 {
|
||||
// With NeedDeps we need to load at least direct dependencies.
|
||||
// NeedImports is used to ask for the direct dependencies.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
|
||||
return loadMode
|
||||
}
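Aside (not part of the vendored code): impliedLoadMode is unexported, so the sketch below simply mirrors its implication rule with the exported LoadMode constants to show the effect: requesting NeedTypesInfo (or NeedDeps) without NeedImports gets NeedImports added internally.

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	requested := packages.NeedTypesInfo
	implied := requested
	// Mirror of the rule above, for illustration only.
	if implied&packages.NeedTypesInfo != 0 && implied&packages.NeedImports == 0 {
		implied |= packages.NeedImports
	}
	if implied&packages.NeedDeps != 0 && implied&packages.NeedImports == 0 {
		implied |= packages.NeedImports
	}
	fmt.Println(requested, "->", implied)
}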
|
||||
|
||||
func usesExportData(cfg *Config) bool {
|
||||
return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
|
||||
}
|
||||
|
||||
70
vendor/golang.org/x/tools/imports/forward.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports // import "golang.org/x/tools/imports"
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
intimp "golang.org/x/tools/internal/imports"
|
||||
)
|
||||
|
||||
// Options specifies options for processing files.
|
||||
type Options struct {
|
||||
Fragment bool // Accept fragment of a source file (no package statement)
|
||||
AllErrors bool // Report all errors (not just the first 10 on different lines)
|
||||
|
||||
Comments bool // Print comments (true if nil *Options provided)
|
||||
TabIndent bool // Use tabs for indent (true if nil *Options provided)
|
||||
TabWidth int // Tab width (8 if nil *Options provided)
|
||||
|
||||
FormatOnly bool // Disable the insertion and deletion of imports
|
||||
}
|
||||
|
||||
// Debug controls verbose logging.
|
||||
var Debug = false
|
||||
|
||||
// LocalPrefix is a comma-separated string of import path prefixes, which, if
|
||||
// set, instructs Process to sort the import paths with the given prefixes
|
||||
// into another group after 3rd-party packages.
|
||||
var LocalPrefix string
|
||||
|
||||
// Process formats and adjusts imports for the provided file.
|
||||
// If opt is nil the defaults are used.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
|
||||
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||
if opt == nil {
|
||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||
}
|
||||
intopt := &intimp.Options{
|
||||
Env: &intimp.ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
||||
GOPROXY: os.Getenv("GOPROXY"),
|
||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
||||
LocalPrefix: LocalPrefix,
|
||||
},
|
||||
AllErrors: opt.AllErrors,
|
||||
Comments: opt.Comments,
|
||||
FormatOnly: opt.FormatOnly,
|
||||
Fragment: opt.Fragment,
|
||||
TabIndent: opt.TabIndent,
|
||||
TabWidth: opt.TabWidth,
|
||||
}
|
||||
if Debug {
|
||||
intopt.Env.Logf = log.Printf
|
||||
}
|
||||
return intimp.Process(filename, src, intopt)
|
||||
}
|
||||
|
||||
// VendorlessPath returns the devendorized version of the import path ipath.
|
||||
// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b".
|
||||
func VendorlessPath(ipath string) string {
|
||||
return intimp.VendorlessPath(ipath)
|
||||
}
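Aside (not from the diff): a short sketch of the public forwarding API defined in this file. Passing a nil *Options uses the Comments/TabIndent/TabWidth defaults shown above; "demo.go" and the source text are invented for illustration.

package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc Hello() string { return fmt.Sprintf(\"hi\") }\n")
	// Process adds the missing "fmt" import and gofmt-formats the result.
	out, err := imports.Process("demo.go", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	fmt.Println(imports.VendorlessPath("foo/bar/vendor/a/b")) // prints "a/b"
}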
|
||||
355
vendor/golang.org/x/tools/imports/mod.go
generated
vendored
@@ -1,355 +0,0 @@
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/module"
|
||||
)
|
||||
|
||||
// moduleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
type moduleResolver struct {
|
||||
env *fixEnv
|
||||
|
||||
initialized bool
|
||||
main *moduleJSON
|
||||
modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*moduleJSON // ...or Dir.
|
||||
}
|
||||
|
||||
type moduleJSON struct {
|
||||
Path string // module path
|
||||
Version string // module version
|
||||
Versions []string // available module versions (with -versions)
|
||||
Replace *moduleJSON // replaced by this module
|
||||
Time *time.Time // time version was created
|
||||
Update *moduleJSON // available update, if any (with -u)
|
||||
Main bool // is this the main module?
|
||||
Indirect bool // is this module only an indirect dependency of main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file for this module, if any
|
||||
Error *moduleErrorJSON // error loading module
|
||||
}
|
||||
|
||||
type moduleErrorJSON struct {
|
||||
Err string // the error itself
|
||||
}
|
||||
|
||||
func (r *moduleResolver) init() error {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||
mod := &moduleJSON{}
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if Debug {
|
||||
log.Printf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
r.modsByModPath = append(r.modsByModPath, mod)
|
||||
r.modsByDir = append(r.modsByDir, mod)
|
||||
if mod.Main {
|
||||
r.main = mod
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByModPath[x].Path, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByDir[x].Dir, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
r.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
|
||||
for _, m := range r.modsByModPath {
|
||||
if !strings.HasPrefix(importPath, m.Path) {
|
||||
continue
|
||||
}
|
||||
pathInModule := importPath[len(m.Path):]
|
||||
pkgDir := filepath.Join(m.Dir, pathInModule)
|
||||
if dirIsNestedModule(pkgDir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
pkgFiles, err := ioutil.ReadDir(pkgDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// A module only contains a package if it has buildable go
|
||||
// files in that directory. If not, it could be provided by an
|
||||
// outer module. See #29736.
|
||||
for _, fi := range pkgFiles {
|
||||
if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||
// module is in scope.
|
||||
func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
|
||||
// This is quite tricky and may not be correct. dir could be:
|
||||
// - a package in the main module.
|
||||
// - a replace target underneath the main module's directory.
|
||||
// - a nested module in the above.
|
||||
// - a replace target somewhere totally random.
|
||||
// - a nested module in the above.
|
||||
// - in the mod cache.
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
for _, m := range r.modsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
}
|
||||
|
||||
if dirIsNestedModule(dir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||
// mod, not actually in mod.
|
||||
func dirIsNestedModule(dir string, mod *moduleJSON) bool {
|
||||
if !strings.HasPrefix(dir, mod.Dir) {
|
||||
return false
|
||||
}
|
||||
mf := findModFile(dir)
|
||||
if mf == "" {
|
||||
return false
|
||||
}
|
||||
return filepath.Dir(mf) != mod.Dir
|
||||
}
|
||||
|
||||
func findModFile(dir string) string {
|
||||
for {
|
||||
f := filepath.Join(dir, "go.mod")
|
||||
info, err := os.Stat(f)
|
||||
if err == nil && !info.IsDir() {
|
||||
return f
|
||||
}
|
||||
d := filepath.Dir(dir)
|
||||
if len(d) >= len(dir) {
|
||||
return "" // reached top of file system, no go.mod
|
||||
}
|
||||
dir = d
|
||||
}
|
||||
}
|
||||
|
||||
func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
}
|
||||
name, err := packageDirToName(packageDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
names[path] = name
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
|
||||
roots := []gopathwalk.Root{
|
||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||
}
|
||||
if r.main != nil {
|
||||
roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
for _, p := range filepath.SplitList(r.env.GOPATH) {
|
||||
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
// Walk replace targets, just in case they're not in any of the above.
|
||||
for _, mod := range r.modsByModPath {
|
||||
if mod.Replace != nil {
|
||||
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||
}
|
||||
}
|
||||
|
||||
var result []*pkg
|
||||
dupCheck := make(map[string]bool)
|
||||
var mu sync.Mutex
|
||||
|
||||
gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if _, dup := dupCheck[dir]; dup {
|
||||
return
|
||||
}
|
||||
|
||||
dupCheck[dir] = true
|
||||
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
importPath := filepath.ToSlash(subdir)
|
||||
if strings.HasPrefix(importPath, "vendor/") {
|
||||
// Ignore vendor dirs. If -mod=vendor is on, then things
|
||||
// should mostly just work, but when it's not vendor/
|
||||
// is a mess. There's no easy way to tell if it's on.
|
||||
// We can still find things in the mod cache and
|
||||
// map them into /vendor when -mod=vendor is on.
|
||||
return
|
||||
}
|
||||
switch root.Type {
|
||||
case gopathwalk.RootCurrentModule:
|
||||
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if Debug {
|
||||
log.Printf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
case gopathwalk.RootGOROOT:
|
||||
importPath = subdir
|
||||
}
|
||||
|
||||
// Check if the directory is underneath a module that's in scope.
|
||||
if mod := r.findModuleByDir(dir); mod != nil {
|
||||
// It is. If dir is the target of a replace directive,
|
||||
// our guessed import path is wrong. Use the real one.
|
||||
if mod.Dir == dir {
|
||||
importPath = mod.Path
|
||||
} else {
|
||||
dirInMod := dir[len(mod.Dir)+len("/"):]
|
||||
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||
}
|
||||
} else {
|
||||
// The package is in an unknown module. Check that it's
|
||||
// not obviously impossible to import.
|
||||
var modFile string
|
||||
switch root.Type {
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
modFile = filepath.Join(matches[1], "@", matches[2], "go.mod")
|
||||
default:
|
||||
modFile = findModFile(dir)
|
||||
}
|
||||
|
||||
modBytes, err := ioutil.ReadFile(modFile)
|
||||
if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
|
||||
// The module's declared path does not match
|
||||
// its expected path. It probably needs a
|
||||
// replace directive we don't have.
|
||||
return
|
||||
}
|
||||
}
|
||||
// We may have discovered a package that has a different version
|
||||
// in scope already. Canonicalize to that one if possible.
|
||||
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
|
||||
dir = canonicalDir
|
||||
}
|
||||
|
||||
result = append(result, &pkg{
|
||||
importPathShort: VendorlessPath(importPath),
|
||||
dir: dir,
|
||||
})
|
||||
}, gopathwalk.Options{Debug: Debug, ModulesEnabled: true})
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||
|
||||
var (
|
||||
slashSlash = []byte("//")
|
||||
moduleStr = []byte("module")
|
||||
)
|
||||
|
||||
// modulePath returns the module path from the gomod file text.
|
||||
// If it cannot find a module path, it returns an empty string.
|
||||
// It is tolerant of unrelated problems in the go.mod file.
|
||||
//
|
||||
// Copied from cmd/go/internal/modfile.
|
||||
func modulePath(mod []byte) string {
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
return "" // missing module path
|
||||
}
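Aside (not part of the vendored code): modulePath above is unexported, but the same comment-tolerant go.mod scan is exposed publicly as modfile.ModulePath in golang.org/x/mod; the sketch below uses that public function as a stand-in, with an invented go.mod body.

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	gomod := []byte("// example\nmodule github.com/example/demo\n\ngo 1.14\n")
	// modfile.ModulePath performs the same job as the vendored modulePath helper:
	// it returns the declared module path, ignoring comments and unrelated lines.
	fmt.Println(modfile.ModulePath(gomod)) // github.com/example/demo
}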
|
||||
10325
vendor/golang.org/x/tools/imports/zstdlib.go
generated
vendored
File diff suppressed because it is too large
Load Diff
10
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
generated
vendored
@@ -14,14 +14,14 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||
// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||
// symlink named in the call may be traversed.
|
||||
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||
var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||
|
||||
// SkipFiles is a used as a return value from WalkFuncs to indicate that the
|
||||
// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the
|
||||
// callback should not be called for any other files in the current directory.
|
||||
// Child directories will still be traversed.
|
||||
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||
var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
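Aside (illustrative only): fastwalk is an internal package, so it cannot be imported outside x/tools; the sketch below just shows how the renamed sentinels ErrTraverseLink and ErrSkipFiles are used by a walk callback, assuming the Walk signature shown in this file.

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		if typ == os.ModeSymlink {
			// Ask fastwalk to descend into the symlink, assuming it targets a directory.
			return fastwalk.ErrTraverseLink // renamed from TraverseLink in this diff
		}
		if strings.HasSuffix(path, ".go") {
			fmt.Println(path)
			// Skip the remaining files of this directory once a .go file is seen.
			return fastwalk.ErrSkipFiles // renamed from SkipFiles in this diff
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}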
|
||||
|
||||
// Walk is a faster implementation of filepath.Walk.
|
||||
//
|
||||
@@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
|
||||
|
||||
err := w.fn(joined, typ)
|
||||
if typ == os.ModeSymlink {
|
||||
if err == TraverseLink {
|
||||
if err == ErrTraverseLink {
|
||||
// Set callbackDone so we don't call it twice for both the
|
||||
// symlink-as-symlink and the symlink-as-directory later:
|
||||
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
||||
|
||||
2
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
generated
vendored
@@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
|
||||
if err == SkipFiles {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
|
||||
7
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
generated
vendored
@@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, name, typ); err != nil {
|
||||
if err == SkipFiles {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
@@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
}
|
||||
|
||||
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
|
||||
// golang.org/issue/15653
|
||||
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
||||
// golang.org/issue/37269
|
||||
dirent := &syscall.Dirent{}
|
||||
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
|
||||
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
|
||||
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
|
||||
}
|
||||
|
||||
126
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
// Package gocommand is a helper for calling the go command.
|
||||
package gocommand
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An Invocation represents a call to the go command.
|
||||
type Invocation struct {
|
||||
Verb string
|
||||
Args []string
|
||||
BuildFlags []string
|
||||
Env []string
|
||||
WorkingDir string
|
||||
Logf func(format string, args ...interface{})
|
||||
}
|
||||
|
||||
// Run runs the invocation, returning its stdout and an error suitable for
|
||||
// human consumption, including stderr.
|
||||
func (i *Invocation) Run(ctx context.Context) (*bytes.Buffer, error) {
|
||||
stdout, _, friendly, _ := i.RunRaw(ctx)
|
||||
return stdout, friendly
|
||||
}
|
||||
|
||||
// RunRaw is like RunPiped, but also returns the raw stderr and error for callers
|
||||
// that want to do low-level error handling/recovery.
|
||||
func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) {
|
||||
stdout = &bytes.Buffer{}
|
||||
stderr = &bytes.Buffer{}
|
||||
rawError = i.RunPiped(ctx, stdout, stderr)
|
||||
if rawError != nil {
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
friendlyError = ctx.Err()
|
||||
}
|
||||
friendlyError = fmt.Errorf("err: %v: stderr: %s", rawError, stderr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RunPiped is like Run, but relies on the given stdout/stderr
|
||||
func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) error {
|
||||
log := i.Logf
|
||||
if log == nil {
|
||||
log = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
goArgs := []string{i.Verb}
|
||||
switch i.Verb {
|
||||
case "mod":
|
||||
// mod needs the sub-verb before build flags.
|
||||
goArgs = append(goArgs, i.Args[0])
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
goArgs = append(goArgs, i.Args[1:]...)
|
||||
case "env":
|
||||
// env doesn't take build flags.
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
default:
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
}
|
||||
cmd := exec.Command("go", goArgs...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
cmd.Env = append(append([]string{}, i.Env...), "PWD="+i.WorkingDir)
|
||||
cmd.Dir = i.WorkingDir
|
||||
|
||||
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
||||
|
||||
return runCmdContext(ctx, cmd)
|
||||
}
|
||||
|
||||
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
|
||||
// before os.Kill.
|
||||
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
resChan := make(chan error, 1)
|
||||
go func() {
|
||||
resChan <- cmd.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
}
|
||||
// Cancelled. Interrupt and see if it ends voluntarily.
|
||||
cmd.Process.Signal(os.Interrupt)
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
// Didn't shut down in response to interrupt. Kill it hard.
|
||||
cmd.Process.Kill()
|
||||
return <-resChan
|
||||
}
|
||||
|
||||
func cmdDebugStr(cmd *exec.Cmd) string {
|
||||
env := make(map[string]string)
|
||||
for _, kv := range cmd.Env {
|
||||
split := strings.Split(kv, "=")
|
||||
k, v := split[0], split[1]
|
||||
env[k] = v
|
||||
}
|
||||
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
|
||||
}
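Aside (illustrative only, not part of the diff): gocommand is internal to x/tools, so this sketch is just to show the shape of an Invocation as defined in this vendored snapshot. It runs "go env GOMOD"; note that, per the switch in RunPiped, the "env" verb takes no build flags.

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	inv := gocommand.Invocation{
		Verb:       "env",
		Args:       []string{"GOMOD"},
		Env:        os.Environ(),
		WorkingDir: ".",
	}
	// Run wraps RunRaw and returns the human-friendly error, including stderr.
	stdout, err := inv.Run(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Print(stdout.String())
}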
|
||||
79
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
@@ -16,14 +16,17 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/fastwalk"
|
||||
)
|
||||
|
||||
// Options controls the behavior of a Walk call.
|
||||
type Options struct {
|
||||
Debug bool // Enable debug logging
|
||||
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
|
||||
// If Logf is non-nil, debug logging is enabled through this function.
|
||||
Logf func(format string, args ...interface{})
|
||||
// Search module caches. Also disables legacy goimports ignore rules.
|
||||
ModulesEnabled bool
|
||||
}
|
||||
|
||||
// RootType indicates the type of a Root.
|
||||
@@ -59,24 +62,39 @@ func SrcDirsRoots(ctx *build.Context) []Root {
|
||||
// paths of the containing source directory and the package directory.
|
||||
// add will be called concurrently.
|
||||
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
|
||||
}
|
||||
|
||||
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// For each package found, add will be called (concurrently) with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
// For each directory that will be scanned, skip will be called (concurrently)
|
||||
// with the absolute paths of the containing source directory and the directory.
|
||||
// If skip returns false on a directory it will be processed.
|
||||
// add will be called concurrently.
|
||||
// skip will be called concurrently.
|
||||
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
|
||||
for _, root := range roots {
|
||||
walkDir(root, add, opts)
|
||||
walkDir(root, add, skip, opts)
|
||||
}
|
||||
}
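Aside (illustrative only): gopathwalk is internal to x/tools; the sketch below just shows the new WalkSkip signature introduced in this hunk, with a per-directory skip callback that prunes "testdata" directories while add prints every candidate package directory under GOROOT/src.

package main

import (
	"fmt"
	"go/build"
	"path/filepath"
	"strings"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	roots := []gopathwalk.Root{
		{Path: filepath.Join(build.Default.GOROOT, "src"), Type: gopathwalk.RootGOROOT},
	}
	add := func(root gopathwalk.Root, dir string) {
		fmt.Println(dir)
	}
	// skip is consulted (concurrently) for each directory; returning true prunes it.
	skip := func(root gopathwalk.Root, dir string) bool {
		return strings.HasSuffix(dir, string(filepath.Separator)+"testdata")
	}
	gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{ModulesEnabled: false})
}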
|
||||
|
||||
func walkDir(root Root, add func(Root, string), opts Options) {
|
||||
// walkDir creates a walker and starts fastwalk with this walker.
|
||||
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
|
||||
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||
if opts.Debug {
|
||||
log.Printf("skipping nonexistant directory: %v", root.Path)
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("skipping nonexistent directory: %v", root.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
if opts.Debug {
|
||||
log.Printf("scanning %s", root.Path)
|
||||
start := time.Now()
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("gopathwalk: scanning %s", root.Path)
|
||||
}
|
||||
w := &walker{
|
||||
root: root,
|
||||
add: add,
|
||||
skip: skip,
|
||||
opts: opts,
|
||||
}
|
||||
w.init()
|
||||
@@ -84,21 +102,22 @@ func walkDir(root Root, add func(Root, string), opts Options) {
|
||||
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
|
||||
}
|
||||
|
||||
if opts.Debug {
|
||||
log.Printf("scanned %s", root.Path)
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
// walker is the callback for fastwalk.Walk.
|
||||
type walker struct {
|
||||
root Root // The source directory to scan.
|
||||
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
root Root // The source directory to scan.
|
||||
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
|
||||
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
|
||||
}
|
||||
|
||||
// init initializes the walker based on its Options.
|
||||
// init initializes the walker based on its Options
|
||||
func (w *walker) init() {
|
||||
var ignoredPaths []string
|
||||
if w.root.Type == RootModuleCache {
|
||||
@@ -113,11 +132,11 @@ func (w *walker) init() {
|
||||
full := filepath.Join(w.root.Path, p)
|
||||
if fi, err := os.Stat(full); err == nil {
|
||||
w.ignoredDirs = append(w.ignoredDirs, fi)
|
||||
if w.opts.Debug {
|
||||
log.Printf("Directory added to ignore list: %s", full)
|
||||
if w.opts.Logf != nil {
|
||||
w.opts.Logf("Directory added to ignore list: %s", full)
|
||||
}
|
||||
} else if w.opts.Debug {
|
||||
log.Printf("Error statting ignored directory: %v", err)
|
||||
} else if w.opts.Logf != nil {
|
||||
w.opts.Logf("Error statting ignored directory: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -128,11 +147,11 @@ func (w *walker) init() {
|
||||
func (w *walker) getIgnoredDirs(path string) []string {
|
||||
file := filepath.Join(path, ".goimportsignore")
|
||||
slurp, err := ioutil.ReadFile(file)
|
||||
if w.opts.Debug {
|
||||
if w.opts.Logf != nil {
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
w.opts.Logf("%v", err)
|
||||
} else {
|
||||
log.Printf("Read %s", file)
|
||||
w.opts.Logf("Read %s", file)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@@ -151,29 +170,35 @@ func (w *walker) getIgnoredDirs(path string) []string {
|
||||
return ignoredDirs
|
||||
}
|
||||
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
|
||||
// shouldSkipDir reports whether the file should be skipped or not.
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
|
||||
for _, ignoredDir := range w.ignoredDirs {
|
||||
if os.SameFile(fi, ignoredDir) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if w.skip != nil {
|
||||
// Check with the user specified callback.
|
||||
return w.skip(w.root, dir)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// walk walks through the given path.
|
||||
func (w *walker) walk(path string, typ os.FileMode) error {
|
||||
dir := filepath.Dir(path)
|
||||
if typ.IsRegular() {
|
||||
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
|
||||
// Doesn't make sense to have regular files
|
||||
// directly in your $GOPATH/src or $GOROOT/src.
|
||||
return fastwalk.SkipFiles
|
||||
return fastwalk.ErrSkipFiles
|
||||
}
|
||||
if !strings.HasSuffix(path, ".go") {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.add(w.root, dir)
|
||||
return fastwalk.SkipFiles
|
||||
return fastwalk.ErrSkipFiles
|
||||
}
|
||||
if typ == os.ModeDir {
|
||||
base := filepath.Base(path)
|
||||
@@ -184,7 +209,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
fi, err := os.Lstat(path)
|
||||
if err == nil && w.shouldSkipDir(fi) {
|
||||
if err == nil && w.shouldSkipDir(fi, path) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
@@ -201,7 +226,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
if w.shouldTraverse(dir, fi) {
|
||||
return fastwalk.TraverseLink
|
||||
return fastwalk.ErrTraverseLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -224,7 +249,7 @@ func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
|
||||
if !ts.IsDir() {
|
||||
return false
|
||||
}
|
||||
if w.shouldSkipDir(ts) {
|
||||
if w.shouldSkipDir(ts, dir) {
|
||||
return false
|
||||
}
|
||||
// Check for symlink loops by statting each directory component
|
||||
|
||||
947
vendor/golang.org/x/tools/imports/fix.go → vendor/golang.org/x/tools/internal/imports/fix.go
generated
vendored
File diff suppressed because it is too large
Load Diff
@@ -6,11 +6,12 @@
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports // import "golang.org/x/tools/imports"
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
@@ -20,6 +21,7 @@ import (
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -27,8 +29,10 @@ import (
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Options specifies options for processing files.
|
||||
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
|
||||
type Options struct {
|
||||
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
|
||||
|
||||
Fragment bool // Accept fragment of a source file (no package statement)
|
||||
AllErrors bool // Report all errors (not just the first 10 on different lines)
|
||||
|
||||
@@ -39,27 +43,11 @@ type Options struct {
|
||||
FormatOnly bool // Disable the insertion and deletion of imports
|
||||
}
|
||||
|
||||
// Process formats and adjusts imports for the provided file.
|
||||
// If opt is nil the defaults are used.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
|
||||
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||
env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT}
|
||||
return process(filename, src, opt, env)
|
||||
}
|
||||
|
||||
func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) {
|
||||
if opt == nil {
|
||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||
}
|
||||
if src == nil {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
src = b
|
||||
// Process implements golang.org/x/tools/imports.Process with explicit context in env.
|
||||
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
@@ -69,12 +57,117 @@ func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, er
|
||||
}
|
||||
|
||||
if !opt.FormatOnly {
|
||||
if err := fixImports(fileSet, file, filename, env); err != nil {
|
||||
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return formatFile(fileSet, file, src, adjust, opt)
|
||||
}
|
||||
|
||||
sortImports(fileSet, file)
|
||||
// FixImports returns a list of fixes to the imports that, when applied,
|
||||
// will leave the imports in the same state as Process.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, _, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFixes(fileSet, file, filename, opt.Env)
|
||||
}
|
||||
|
||||
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
|
||||
// is added in when parsing the file.
|
||||
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Don't use parse() -- we don't care about fragments or statement lists
|
||||
// here, and we need to work with unparseable files.
|
||||
fileSet := token.NewFileSet()
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
parserMode |= extraMode
|
||||
|
||||
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
|
||||
if file == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply the fixes to the file.
|
||||
apply(fileSet, file, fixes)
|
||||
|
||||
return formatFile(fileSet, file, src, nil, opt)
|
||||
}
|
||||
|
||||
// GetAllCandidates gets all of the packages starting with prefix that can be
|
||||
// imported by filename, sorted by import path.
|
||||
func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
|
||||
_, opt, err := initialize(filename, []byte{}, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env)
|
||||
}
|
||||
|
||||
// GetPackageExports returns all known packages with name pkg and their exports.
|
||||
func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
|
||||
_, opt, err := initialize(filename, []byte{}, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env)
|
||||
}
|
||||
|
||||
// initialize sets the values for opt and src.
|
||||
// If they are provided, they are not changed. Otherwise opt is set to the
|
||||
// default values and src is read from the file system.
|
||||
func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) {
|
||||
// Use defaults if opt is nil.
|
||||
if opt == nil {
|
||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||
}
|
||||
|
||||
// Set the env if the user has not provided it.
|
||||
if opt.Env == nil {
|
||||
opt.Env = &ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
||||
GOPROXY: os.Getenv("GOPROXY"),
|
||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
||||
}
|
||||
}
|
||||
if src == nil {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
src = b
|
||||
}
|
||||
|
||||
return src, opt, nil
|
||||
}
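In other words, passing opt == nil to the exported entry points is equivalent to supplying the defaults sketched below, with Env then filled in from build.Default and the GO* environment variables. This is an illustration only, not part of the vendored file.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/imports"
)

func main() {
	// Equivalent of the nil-Options path in initialize; Env is left nil so it
	// would be populated from build.Default and the GO* environment variables.
	opt := &imports.Options{
		Comments:  true, // keep comments when printing
		TabIndent: true, // indent with tabs
		TabWidth:  8,    // tab width used by the printer
	}
	fmt.Printf("%+v\n", opt)
}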
|
||||
|
||||
func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
|
||||
mergeImports(opt.Env, fileSet, file)
|
||||
sortImports(opt.Env, fileSet, file)
|
||||
imps := astutil.Imports(fileSet, file)
|
||||
var spacesBefore []string // import paths we need spaces before
|
||||
for _, impSection := range imps {
|
||||
@@ -85,7 +178,7 @@ func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, er
|
||||
lastGroup := -1
|
||||
for _, importSpec := range impSection {
|
||||
importPath, _ := strconv.Unquote(importSpec.Path.Value)
|
||||
groupNum := importGroup(importPath)
|
||||
groupNum := importGroup(opt.Env, importPath)
|
||||
if groupNum != lastGroup && lastGroup != -1 {
|
||||
spacesBefore = append(spacesBefore, importPath)
|
||||
}
|
||||
@@ -101,7 +194,7 @@ func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, er
|
||||
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = printConfig.Fprint(&buf, fileSet, file)
|
||||
err := printConfig.Fprint(&buf, fileSet, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
718
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
Normal file
@@ -0,0 +1,718 @@
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/mod/semver"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
roots []gopathwalk.Root
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
|
||||
scannedRoots map[gopathwalk.Root]bool
|
||||
|
||||
initialized bool
|
||||
main *ModuleJSON
|
||||
modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*ModuleJSON // ...or Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
}
|
||||
|
||||
type ModuleJSON struct {
|
||||
Path string // module path
|
||||
Replace *ModuleJSON // replaced by this module
|
||||
Main bool // is this the main module?
|
||||
Indirect bool // is this module only an indirect dependency of main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file for this module, if any
|
||||
GoVersion string // go version used in module
|
||||
}
|
||||
|
||||
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
|
||||
r := &ModuleResolver{
|
||||
env: e,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
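The scanSema channel seeded with one token acts as a one-slot semaphore: receive to acquire, send to release. A standalone sketch of the same idiom, with invented output:

package main

import "fmt"

func main() {
	// One-slot semaphore, seeded with a single token as in newModuleResolver.
	scanSema := make(chan struct{}, 1)
	scanSema <- struct{}{}

	<-scanSema // acquire: only one scan can hold the token at a time
	fmt.Println("scanning with exclusive access to scannedRoots")
	scanSema <- struct{}{} // release: let the next scan proceed
}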
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
mainMod, vendorEnabled, err := vendorEnabled(r.env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mainMod != nil && vendorEnabled {
|
||||
// Vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.main = mainMod
|
||||
r.dummyVendorMod = &ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainMod.Dir, "vendor"),
|
||||
}
|
||||
r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
} else {
|
||||
// Vendor mode is off, so run go list -m ... to find everything.
|
||||
r.initAllMods()
|
||||
}
|
||||
|
||||
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
|
||||
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByModPath[x].Path, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByDir[x].Dir, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
r.roots = []gopathwalk.Root{
|
||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||
}
|
||||
if r.main != nil {
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
if vendorEnabled {
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
|
||||
} else {
|
||||
addDep := func(mod *ModuleJSON) {
|
||||
if mod.Replace == nil {
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough.
|
||||
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||
}
|
||||
}
|
||||
// Walk dependent modules before scanning the full mod cache, direct deps first.
|
||||
for _, mod := range r.modsByModPath {
|
||||
if !mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
for _, mod := range r.modsByModPath {
|
||||
if mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
r.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) initAllMods() error {
|
||||
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||
mod := &ModuleJSON{}
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
// golang/go#36193: the go command doesn't always clean paths.
|
||||
mod.Dir = filepath.Clean(mod.Dir)
|
||||
r.modsByModPath = append(r.modsByModPath, mod)
|
||||
r.modsByDir = append(r.modsByDir, mod)
|
||||
if mod.Main {
|
||||
r.main = mod
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
<-r.scanSema
|
||||
*r = ModuleResolver{
|
||||
env: r.env,
|
||||
moduleCacheCache: r.moduleCacheCache,
|
||||
otherCache: r.otherCache,
|
||||
scanSema: r.scanSema,
|
||||
}
|
||||
r.init()
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
for _, m := range r.modsByModPath {
|
||||
if !strings.HasPrefix(importPath, m.Path) {
|
||||
continue
|
||||
}
|
||||
pathInModule := importPath[len(m.Path):]
|
||||
pkgDir := filepath.Join(m.Dir, pathInModule)
|
||||
if r.dirIsNestedModule(pkgDir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
if info, ok := r.cacheLoad(pkgDir); ok {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
if err != nil {
|
||||
continue // No package in this dir.
|
||||
}
|
||||
return m, pkgDir
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
|
||||
continue // Dir is unreadable, etc.
|
||||
}
|
||||
// This is slightly wrong: a directory doesn't have to have an
|
||||
// importable package to count as a package for package-to-module
|
||||
// resolution. package main or _test files should count but
|
||||
// don't.
|
||||
// TODO(heschi): fix this.
|
||||
if _, err := r.cachePackageName(info); err == nil {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
|
||||
// Not cached. Read the filesystem.
|
||||
pkgFiles, err := ioutil.ReadDir(pkgDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// A module only contains a package if it has buildable go
|
||||
// files in that directory. If not, it could be provided by an
|
||||
// outer module. See #29736.
|
||||
for _, fi := range pkgFiles {
|
||||
if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, ""
|
||||
}
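Because init sorts modsByModPath with the most path components first, the prefix loop above naturally prefers the most specific module. A small self-contained illustration of that rule, with invented module paths:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Ordered longest-first, as init arranges modsByModPath.
	modPaths := []string{"example.com/outer/inner", "example.com/outer"}
	importPath := "example.com/outer/inner/pkg"

	for _, m := range modPaths {
		if strings.HasPrefix(importPath, m) {
			fmt.Println("resolved by module:", m) // prints example.com/outer/inner
			break
		}
	}
}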
|
||||
|
||||
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
|
||||
if info, ok := r.moduleCacheCache.Load(dir); ok {
|
||||
return info, ok
|
||||
}
|
||||
return r.otherCache.Load(dir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
r.moduleCacheCache.Store(info.dir, info)
|
||||
} else {
|
||||
r.otherCache.Store(info.dir, info)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CachePackageName(info)
|
||||
}
|
||||
return r.otherCache.CachePackageName(info)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
return r.otherCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
|
||||
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||
// module is in scope.
|
||||
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
|
||||
// This is quite tricky and may not be correct. dir could be:
|
||||
// - a package in the main module.
|
||||
// - a replace target underneath the main module's directory.
|
||||
// - a nested module in the above.
|
||||
// - a replace target somewhere totally random.
|
||||
// - a nested module in the above.
|
||||
// - in the mod cache.
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
for _, m := range r.modsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
}
|
||||
|
||||
if r.dirIsNestedModule(dir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||
// mod, not actually in mod.
|
||||
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool {
|
||||
if !strings.HasPrefix(dir, mod.Dir) {
|
||||
return false
|
||||
}
|
||||
if r.dirInModuleCache(dir) {
|
||||
// Nested modules in the module cache are pruned,
|
||||
// so it cannot be a nested module.
|
||||
return false
|
||||
}
|
||||
if mod != nil && mod == r.dummyVendorMod {
|
||||
// The /vendor pseudomodule is flattened and doesn't actually count.
|
||||
return false
|
||||
}
|
||||
modDir, _ := r.modInfo(dir)
|
||||
if modDir == "" {
|
||||
return false
|
||||
}
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := ioutil.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
if r.dirInModuleCache(dir) {
|
||||
matches := modCacheRegexp.FindStringSubmatch(dir)
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
|
||||
return modDir, readModName(filepath.Join(modDir, "go.mod"))
|
||||
}
|
||||
for {
|
||||
if info, ok := r.cacheLoad(dir); ok {
|
||||
return info.moduleDir, info.moduleName
|
||||
}
|
||||
f := filepath.Join(dir, "go.mod")
|
||||
info, err := os.Stat(f)
|
||||
if err == nil && !info.IsDir() {
|
||||
return dir, readModName(f)
|
||||
}
|
||||
|
||||
d := filepath.Dir(dir)
|
||||
if len(d) >= len(dir) {
|
||||
return "", "" // reached top of file system, no go.mod
|
||||
}
|
||||
dir = d
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
if r.moduleCacheDir == "" {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(dir, r.moduleCacheDir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
}
|
||||
name, err := packageDirToName(packageDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
names[path] = name
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
|
||||
if err := r.init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return
|
||||
}
|
||||
pkg, err := r.canonicalize(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
callback.exportsLoaded(pkg, exports)
|
||||
}
|
||||
|
||||
// Start processing everything in the cache, and listen for the new stuff
|
||||
// we discover in the walk below.
|
||||
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
|
||||
defer stop1()
|
||||
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
|
||||
defer stop2()
|
||||
|
||||
// We assume cached directories are fully cached, including all their
|
||||
// children, and have not changed. We can skip them.
|
||||
skip := func(root gopathwalk.Root, dir string) bool {
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// This directory can be skipped as long as we have already scanned it.
|
||||
// Packages with errors will continue to have errors, so there is no need
|
||||
// to rescan them.
|
||||
packageScanned, _ := info.reachedStatus(directoryScanned)
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
|
||||
// r.roots and the callback are not necessarily safe to use in the
|
||||
// goroutine below. Process them eagerly.
|
||||
roots := filterRoots(r.roots, callback.rootFound)
|
||||
// We can't cancel walks, because we need them to finish to have a usable
|
||||
// cache. Instead, run them in a separate goroutine and detach.
|
||||
scanDone := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.scannedRoots[root] {
|
||||
continue
|
||||
}
|
||||
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
|
||||
r.scannedRoots[root] = true
|
||||
}
|
||||
close(scanDone)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-scanDone:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int {
|
||||
if _, ok := stdlib[path]; ok {
|
||||
return MaxRelevance
|
||||
}
|
||||
mod, _ := r.findPackage(path)
|
||||
return modRelevance(mod)
|
||||
}
|
||||
|
||||
func modRelevance(mod *ModuleJSON) int {
|
||||
switch {
|
||||
case mod == nil: // out of scope
|
||||
return MaxRelevance - 4
|
||||
case mod.Indirect:
|
||||
return MaxRelevance - 3
|
||||
case !mod.Main:
|
||||
return MaxRelevance - 2
|
||||
default:
|
||||
return MaxRelevance - 1 // main module ties with stdlib
|
||||
}
|
||||
}
|
||||
|
||||
// canonicalize gets the result of canonicalizing the packages using the results
|
||||
// of initializing the resolver from 'go list -m'.
|
||||
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
|
||||
if info.rootType == gopathwalk.RootGOROOT {
|
||||
return &pkg{
|
||||
importPathShort: info.nonCanonicalImportPath,
|
||||
dir: info.dir,
|
||||
packageName: path.Base(info.nonCanonicalImportPath),
|
||||
relevance: MaxRelevance,
|
||||
}, nil
|
||||
}
|
||||
|
||||
importPath := info.nonCanonicalImportPath
|
||||
mod := r.findModuleByDir(info.dir)
|
||||
// Check if the directory is underneath a module that's in scope.
|
||||
if mod != nil {
|
||||
// It is. If dir is the target of a replace directive,
|
||||
// our guessed import path is wrong. Use the real one.
|
||||
if mod.Dir == info.dir {
|
||||
importPath = mod.Path
|
||||
} else {
|
||||
dirInMod := info.dir[len(mod.Dir)+len("/"):]
|
||||
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||
}
|
||||
} else if !strings.HasPrefix(importPath, info.moduleName) {
|
||||
// The module's name doesn't match the package's import path. It
|
||||
// probably needs a replace directive we don't have.
|
||||
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
|
||||
}
|
||||
|
||||
res := &pkg{
|
||||
importPathShort: importPath,
|
||||
dir: info.dir,
|
||||
relevance: modRelevance(mod),
|
||||
}
|
||||
// We may have discovered a package that has a different version
|
||||
// in scope already. Canonicalize to that one if possible.
|
||||
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
|
||||
res.dir = canonicalDir
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
importPath := filepath.ToSlash(subdir)
|
||||
if strings.HasPrefix(importPath, "vendor/") {
|
||||
// Only enter vendor directories if they're explicitly requested as a root.
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("unwanted vendor directory"),
|
||||
}
|
||||
}
|
||||
switch root.Type {
|
||||
case gopathwalk.RootCurrentModule:
|
||||
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
}
|
||||
}
|
||||
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
}
|
||||
|
||||
modDir, modName := r.modInfo(dir)
|
||||
result := directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
if root.Type == gopathwalk.RootGOROOT {
|
||||
// stdlib packages are always in scope, despite the confusing go.mod
|
||||
return result
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
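As a quick illustration of what the three capture groups hold, here is the same regexp applied to an invented module cache path:

package main

import (
	"fmt"
	"regexp"
)

var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

func main() {
	m := modCacheRegexp.FindStringSubmatch("github.com/foo/bar@v1.2.3/baz")
	fmt.Println(m[1]) // github.com/foo/bar   (escaped module path)
	fmt.Println(m[2]) // v1.2.3               (module version)
	fmt.Println(m[3]) // /baz                 (package directory within the module)
}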
|
||||
|
||||
var (
|
||||
slashSlash = []byte("//")
|
||||
moduleStr = []byte("module")
|
||||
)
|
||||
|
||||
// modulePath returns the module path from the gomod file text.
|
||||
// If it cannot find a module path, it returns an empty string.
|
||||
// It is tolerant of unrelated problems in the go.mod file.
|
||||
//
|
||||
// Copied from cmd/go/internal/modfile.
|
||||
func modulePath(mod []byte) string {
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
return "" // missing module path
|
||||
}
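A quick same-package check of modulePath against an invented go.mod body; this test is a sketch only and not part of the vendored file.

package imports

import "testing"

func TestModulePathSketch(t *testing.T) {
	// Comment lines and blank lines are tolerated; quoted module paths are unquoted.
	gomod := []byte("// illustrative go.mod\nmodule \"example.com/mymod\"\n\ngo 1.13\n")
	if got, want := modulePath(gomod), "example.com/mymod"; got != want {
		t.Fatalf("modulePath = %q, want %q", got, want)
	}
}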
|
||||
|
||||
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
|
||||
|
||||
// vendorEnabled indicates if vendoring is enabled.
|
||||
// Inspired by setDefaultBuildMod in modload/init.go
|
||||
func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
mainMod, go114, err := getMainModuleAnd114(env)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS)
|
||||
var modFlag string
|
||||
if len(matches) != 0 {
|
||||
modFlag = matches[1]
|
||||
}
|
||||
if modFlag != "" {
|
||||
// Don't override an explicit '-mod=' argument.
|
||||
return mainMod, modFlag == "vendor", nil
|
||||
}
|
||||
if mainMod == nil || !go114 {
|
||||
return mainMod, false, nil
|
||||
}
|
||||
// Check 1.14's automatic vendor mode.
|
||||
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
|
||||
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
|
||||
// The Go version is at least 1.14, and a vendor directory exists.
|
||||
// Set -mod=vendor by default.
|
||||
return mainMod, true, nil
|
||||
}
|
||||
}
|
||||
return mainMod, false, nil
|
||||
}
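In the GOFLAGS check above, only an explicit -mod setting short-circuits the Go 1.14 automatic vendor detection. A standalone sketch of what modFlagRegexp extracts from a few invented GOFLAGS values:

package main

import (
	"fmt"
	"regexp"
)

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

func main() {
	for _, goflags := range []string{"-mod=vendor", "-trimpath -mod=mod", ""} {
		if m := modFlagRegexp.FindStringSubmatch(goflags); len(m) != 0 {
			fmt.Printf("GOFLAGS=%q -> explicit -mod=%s\n", goflags, m[1])
		} else {
			fmt.Printf("GOFLAGS=%q -> fall back to the vendor-directory check\n", goflags)
		}
	}
}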
|
||||
|
||||
// getMainModuleAnd114 gets the main module's information and whether the
|
||||
// go command in use is 1.14+. This is the information needed to figure out
|
||||
// if vendoring should be enabled.
|
||||
func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
const format = `{{.Path}}
|
||||
{{.Dir}}
|
||||
{{.GoMod}}
|
||||
{{.GoVersion}}
|
||||
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
|
||||
`
|
||||
stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format)
|
||||
if err != nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
if len(lines) < 5 {
|
||||
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout)
|
||||
}
|
||||
mod := &ModuleJSON{
|
||||
Path: lines[0],
|
||||
Dir: lines[1],
|
||||
GoMod: lines[2],
|
||||
GoVersion: lines[3],
|
||||
Main: true,
|
||||
}
|
||||
return mod, lines[4] == "go1.14", nil
|
||||
}
|
||||
232
vendor/golang.org/x/tools/internal/imports/mod_cache.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
package imports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// To find packages to import, the resolver needs to know about all of the
|
||||
// packages that could be imported. This includes packages that are
|
||||
// already in modules that are in (1) the current module, (2) replace targets,
|
||||
// and (3) packages in the module cache. Packages in (1) and (2) may change over
|
||||
// time, as the client may edit the current module and locally replaced modules.
|
||||
// The module cache (which includes all of the packages in (3)) can only
|
||||
// ever be added to.
|
||||
//
|
||||
// The resolver can thus save state about packages in the module cache
|
||||
// and guarantee that this will not change over time. To obtain information
|
||||
// about new modules added to the module cache, the module cache should be
|
||||
// rescanned.
|
||||
//
|
||||
// It is OK to serve information about modules that have been deleted,
|
||||
// as they do still exist.
|
||||
// TODO(suzmue): can we share information with the caller about
|
||||
// what module needs to be downloaded to import this package?
|
||||
|
||||
type directoryPackageStatus int
|
||||
|
||||
const (
|
||||
_ directoryPackageStatus = iota
|
||||
directoryScanned
|
||||
nameLoaded
|
||||
exportsLoaded
|
||||
)
|
||||
|
||||
type directoryPackageInfo struct {
|
||||
// status indicates the extent to which this struct has been filled in.
|
||||
status directoryPackageStatus
|
||||
// err is non-nil when there was an error trying to reach status.
|
||||
err error
|
||||
|
||||
// Set when status >= directoryScanned.
|
||||
|
||||
// dir is the absolute directory of this package.
|
||||
dir string
|
||||
rootType gopathwalk.RootType
|
||||
// nonCanonicalImportPath is the package's expected import path. It may
|
||||
// not actually be importable at that path.
|
||||
nonCanonicalImportPath string
|
||||
|
||||
// Module-related information.
|
||||
moduleDir string // The directory that is the module root of this dir.
|
||||
moduleName string // The module name that contains this dir.
|
||||
|
||||
// Set when status >= nameLoaded.
|
||||
|
||||
packageName string // the package name, as declared in the source.
|
||||
|
||||
// Set when status >= exportsLoaded.
|
||||
|
||||
exports []string
|
||||
}
|
||||
|
||||
// reachedStatus returns true when info has a status at least target and any error associated with
|
||||
// an attempt to reach target.
|
||||
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
|
||||
if info.err == nil {
|
||||
return info.status >= target, nil
|
||||
}
|
||||
if info.status == target {
|
||||
return true, info.err
|
||||
}
|
||||
return true, nil
|
||||
}
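A same-package sketch of how callers read this state machine: scanning has completed, but the package name has not been loaded yet. Illustrative only, not part of the vendored file.

package imports

// exampleReachedStatus is an illustrative sketch, not part of the real package.
func exampleReachedStatus() (bool, bool) {
	info := directoryPackageInfo{status: directoryScanned}

	scanned, _ := info.reachedStatus(directoryScanned) // true: status >= directoryScanned
	named, _ := info.reachedStatus(nameLoaded)         // false: name not loaded yet
	return scanned, named
}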
|
||||
|
||||
// dirInfoCache is a concurrency safe map for storing information about
|
||||
// directories that may contain packages.
|
||||
//
|
||||
// The information in this cache is built incrementally. Entries are initialized in scan.
|
||||
// No new keys should be added in any other functions, as all directories containing
|
||||
// packages are identified in scan.
|
||||
//
|
||||
// Other functions, including loadExports and findPackage, may update entries in this cache
|
||||
// as they discover new things about the directory.
|
||||
//
|
||||
// The information in the cache is not expected to change for the cache's
|
||||
// lifetime, so there is no protection against competing writes. Users should
|
||||
// take care not to hold the cache across changes to the underlying files.
|
||||
//
|
||||
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
|
||||
type dirInfoCache struct {
|
||||
mu sync.Mutex
|
||||
// dirs stores information about packages in directories, keyed by absolute path.
|
||||
dirs map[string]*directoryPackageInfo
|
||||
listeners map[*int]cacheListener
|
||||
}
|
||||
|
||||
type cacheListener func(directoryPackageInfo)
|
||||
|
||||
// ScanAndListen calls listener on all the items in the cache, and on anything
|
||||
// newly added. The returned stop function waits for all in-flight callbacks to
|
||||
// finish and blocks new ones.
|
||||
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Flushing out all the callbacks is tricky without knowing how many there
|
||||
// are going to be. Setting an arbitrary limit makes it much easier.
|
||||
const maxInFlight = 10
|
||||
sema := make(chan struct{}, maxInFlight)
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
sema <- struct{}{}
|
||||
}
|
||||
|
||||
cookie := new(int) // A unique ID we can use for the listener.
|
||||
|
||||
// We can't hold mu while calling the listener.
|
||||
d.mu.Lock()
|
||||
var keys []string
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
d.listeners[cookie] = func(info directoryPackageInfo) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sema:
|
||||
}
|
||||
listener(info)
|
||||
sema <- struct{}{}
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
stop := func() {
|
||||
cancel()
|
||||
d.mu.Lock()
|
||||
delete(d.listeners, cookie)
|
||||
d.mu.Unlock()
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
<-sema
|
||||
}
|
||||
}
|
||||
|
||||
// Process the pre-existing keys.
|
||||
for _, k := range keys {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return stop
|
||||
default:
|
||||
}
|
||||
if v, ok := d.Load(k); ok {
|
||||
listener(v)
|
||||
}
|
||||
}
|
||||
|
||||
return stop
|
||||
}
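A same-package sketch of the ScanAndListen contract as scan uses it above: register a listener, let walks Store new directories, then call stop. The function name and callback body are illustrative.

package imports

import "context"

// exampleScanAndListen is an illustrative sketch, not part of the vendored file.
func exampleScanAndListen(ctx context.Context, cache *dirInfoCache) {
	stop := cache.ScanAndListen(ctx, func(info directoryPackageInfo) {
		// Invoked once for every already-cached directory and once per new Store.
		_ = info.dir
	})
	defer stop() // waits for in-flight callbacks and unregisters the listener

	// Walks triggered here would call cache.Store, fanning out to the listener.
}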
|
||||
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
_, old := d.dirs[dir]
|
||||
d.dirs[dir] = &info
|
||||
var listeners []cacheListener
|
||||
for _, l := range d.listeners {
|
||||
listeners = append(listeners, l)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
if !old {
|
||||
for _, l := range listeners {
|
||||
l(info)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
info, ok := d.dirs[dir]
|
||||
if !ok {
|
||||
return directoryPackageInfo{}, false
|
||||
}
|
||||
return *info, true
|
||||
}
|
||||
|
||||
// Keys returns the keys currently present in d.
|
||||
func (d *dirInfoCache) Keys() (keys []string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info.packageName, err
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
|
||||
}
|
||||
info.packageName, info.err = packageDirToName(info.dir)
|
||||
info.status = nameLoaded
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if reached, _ := info.reachedStatus(exportsLoaded); reached {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
|
||||
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
// The cache structure wants things to proceed linearly. We can skip a
|
||||
// step here, but only if we succeed.
|
||||
if info.status == nameLoaded || info.err == nil {
|
||||
info.status = exportsLoaded
|
||||
} else {
|
||||
info.status = nameLoaded
|
||||
}
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
// sortImports sorts runs of consecutive import lines in import blocks in f.
|
||||
// It also removes duplicate imports when it is possible to do so without data loss.
|
||||
func sortImports(fset *token.FileSet, f *ast.File) {
|
||||
func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
|
||||
for i, d := range f.Decls {
|
||||
d, ok := d.(*ast.GenDecl)
|
||||
if !ok || d.Tok != token.IMPORT {
|
||||
@@ -40,11 +40,11 @@ func sortImports(fset *token.FileSet, f *ast.File) {
|
||||
for j, s := range d.Specs {
|
||||
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
|
||||
// j begins a new run. End this one.
|
||||
specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
|
||||
specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...)
|
||||
i = j
|
||||
}
|
||||
}
|
||||
specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
|
||||
specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...)
|
||||
d.Specs = specs
|
||||
|
||||
// Deduping can leave a blank line before the rparen; clean that up.
|
||||
@@ -58,6 +58,53 @@ func sortImports(fset *token.FileSet, f *ast.File) {
|
||||
}
|
||||
}
|
||||
|
||||
// mergeImports merges all the import declarations into the first one.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) {
|
||||
if len(f.Decls) <= 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Merge all the import declarations into the first one.
|
||||
var first *ast.GenDecl
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
if first == nil {
|
||||
first = gen
|
||||
continue // Don't touch the first one.
|
||||
}
|
||||
// We now know there is more than one package in this import
|
||||
// declaration. Ensure that it ends up parenthesized.
|
||||
first.Lparen = first.Pos()
|
||||
// Move the imports of the other import declaration to the first one.
|
||||
for _, spec := range gen.Specs {
|
||||
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
|
||||
first.Specs = append(first.Specs, spec)
|
||||
}
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
|
||||
// declImports reports whether gen contains an import of path.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func importPath(s ast.Spec) string {
|
||||
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
|
||||
if err == nil {
|
||||
@@ -95,7 +142,7 @@ type posSpan struct {
|
||||
End token.Pos
|
||||
}
|
||||
|
||||
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
// Can't short-circuit here even if specs are already sorted,
|
||||
// since they might yet need deduplication.
|
||||
// A lone import, however, may be safely ignored.
|
||||
@@ -144,7 +191,7 @@ func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
// Reassign the import paths to have the same position sequence.
|
||||
// Reassign each comment to abut the end of its spec.
|
||||
// Sort the comments by new position.
|
||||
sort.Sort(byImportSpec(specs))
|
||||
sort.Sort(byImportSpec{env, specs})
|
||||
|
||||
// Dedup. Thanks to our sorting, we can just consider
|
||||
// adjacent pairs of imports.
|
||||
@@ -197,16 +244,19 @@ func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
return specs
|
||||
}
|
||||
|
||||
type byImportSpec []ast.Spec // slice of *ast.ImportSpec
|
||||
type byImportSpec struct {
|
||||
env *ProcessEnv
|
||||
specs []ast.Spec // slice of *ast.ImportSpec
|
||||
}
|
||||
|
||||
func (x byImportSpec) Len() int { return len(x) }
|
||||
func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byImportSpec) Len() int { return len(x.specs) }
|
||||
func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] }
|
||||
func (x byImportSpec) Less(i, j int) bool {
|
||||
ipath := importPath(x[i])
|
||||
jpath := importPath(x[j])
|
||||
ipath := importPath(x.specs[i])
|
||||
jpath := importPath(x.specs[j])
|
||||
|
||||
igroup := importGroup(ipath)
|
||||
jgroup := importGroup(jpath)
|
||||
igroup := importGroup(x.env, ipath)
|
||||
jgroup := importGroup(x.env, jpath)
|
||||
if igroup != jgroup {
|
||||
return igroup < jgroup
|
||||
}
|
||||
@@ -214,13 +264,13 @@ func (x byImportSpec) Less(i, j int) bool {
|
||||
if ipath != jpath {
|
||||
return ipath < jpath
|
||||
}
|
||||
iname := importName(x[i])
|
||||
jname := importName(x[j])
|
||||
iname := importName(x.specs[i])
|
||||
jname := importName(x.specs[j])
|
||||
|
||||
if iname != jname {
|
||||
return iname < jname
|
||||
}
|
||||
return importComment(x[i]) < importComment(x[j])
|
||||
return importComment(x.specs[i]) < importComment(x.specs[j])
|
||||
}
|
||||
|
||||
type byCommentPos []*ast.CommentGroup
|
||||
10377
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// Package packagesinternal exposes internal-only fields from go/packages.
|
||||
package packagesinternal
|
||||
|
||||
import "time"
|
||||
|
||||
// Fields must match go list.
|
||||
type Module struct {
|
||||
Path string // module path
|
||||
Version string // module version
|
||||
Versions []string // available module versions (with -versions)
|
||||
Replace *Module // replaced by this module
|
||||
Time *time.Time // time version was created
|
||||
Update *Module // available update, if any (with -u)
|
||||
Main bool // is this the main module?
|
||||
Indirect bool // is this module only an indirect dependency of main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file used when loading this module, if any
|
||||
GoVersion string // go version used in module
|
||||
Error *ModuleError // error loading module
|
||||
}
|
||||
type ModuleError struct {
|
||||
Err string // the error itself
|
||||
}
|
||||
|
||||
var GetForTest = func(p interface{}) string { return "" }
|
||||
|
||||
var GetModule = func(p interface{}) *Module { return nil }
|
||||
27
vendor/golang.org/x/xerrors/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2019 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22
vendor/golang.org/x/xerrors/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
2
vendor/golang.org/x/xerrors/README
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
This repository holds the transition packages for the new Go 1.13 error values.
|
||||
See golang.org/design/29934-error-values.
|
||||
193
vendor/golang.org/x/xerrors/adaptor.go
generated
vendored
Normal file
@@ -0,0 +1,193 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xerrors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// FormatError calls the FormatError method of f with an errors.Printer
|
||||
// configured according to s and verb, and writes the result to s.
|
||||
func FormatError(f Formatter, s fmt.State, verb rune) {
|
||||
// Assuming this function is only called from the Format method, and given
|
||||
// that FormatError takes precedence over Format, it cannot be called from
|
||||
// any package that supports errors.Formatter. It is therefore safe to
|
||||
// disregard that State may be a specific printer implementation and use one
|
||||
// of our choice instead.
|
||||
|
||||
// limitations: does not support printing error as Go struct.
|
||||
|
||||
var (
|
||||
sep = " " // separator before next error
|
||||
p = &state{State: s}
|
||||
direct = true
|
||||
)
|
||||
|
||||
var err error = f
|
||||
|
||||
switch verb {
|
||||
// Note that this switch must match the preference order
|
||||
// for ordinary string printing (%#v before %+v, and so on).
|
||||
|
||||
case 'v':
|
||||
if s.Flag('#') {
|
||||
if stringer, ok := err.(fmt.GoStringer); ok {
|
||||
io.WriteString(&p.buf, stringer.GoString())
|
||||
goto exit
|
||||
}
|
||||
// proceed as if it were %v
|
||||
} else if s.Flag('+') {
|
||||
p.printDetail = true
|
||||
sep = "\n - "
|
||||
}
|
||||
case 's':
|
||||
case 'q', 'x', 'X':
|
||||
// Use an intermediate buffer in the rare cases that precision,
|
||||
// truncation, or one of the alternative verbs (q, x, and X) are
|
||||
// specified.
|
||||
direct = false
|
||||
|
||||
default:
|
||||
p.buf.WriteString("%!")
|
||||
p.buf.WriteRune(verb)
|
||||
p.buf.WriteByte('(')
|
||||
switch {
|
||||
case err != nil:
|
||||
p.buf.WriteString(reflect.TypeOf(f).String())
|
||||
default:
|
||||
p.buf.WriteString("<nil>")
|
||||
}
|
||||
p.buf.WriteByte(')')
|
||||
io.Copy(s, &p.buf)
|
||||
return
|
||||
}
|
||||
|
||||
loop:
|
||||
for {
|
||||
switch v := err.(type) {
|
||||
case Formatter:
|
||||
err = v.FormatError((*printer)(p))
|
||||
case fmt.Formatter:
|
||||
v.Format(p, 'v')
|
||||
break loop
|
||||
default:
|
||||
io.WriteString(&p.buf, v.Error())
|
||||
break loop
|
||||
}
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if p.needColon || !p.printDetail {
|
||||
p.buf.WriteByte(':')
|
||||
p.needColon = false
|
||||
}
|
||||
p.buf.WriteString(sep)
|
||||
p.inDetail = false
|
||||
p.needNewline = false
|
||||
}
|
||||
|
||||
exit:
|
||||
width, okW := s.Width()
|
||||
prec, okP := s.Precision()
|
||||
|
||||
if !direct || (okW && width > 0) || okP {
|
||||
// Construct format string from State s.
|
||||
format := []byte{'%'}
|
||||
if s.Flag('-') {
|
||||
format = append(format, '-')
|
||||
}
|
||||
if s.Flag('+') {
|
||||
format = append(format, '+')
|
||||
}
|
||||
if s.Flag(' ') {
|
||||
format = append(format, ' ')
|
||||
}
|
||||
if okW {
|
||||
format = strconv.AppendInt(format, int64(width), 10)
|
||||
}
|
||||
if okP {
|
||||
format = append(format, '.')
|
||||
format = strconv.AppendInt(format, int64(prec), 10)
|
||||
}
|
||||
format = append(format, string(verb)...)
|
||||
fmt.Fprintf(s, string(format), p.buf.String())
|
||||
} else {
|
||||
io.Copy(s, &p.buf)
|
||||
}
|
||||
}
|
||||
|
||||
var detailSep = []byte("\n ")
|
||||
|
||||
// state tracks error printing state. It implements fmt.State.
|
||||
type state struct {
|
||||
fmt.State
|
||||
buf bytes.Buffer
|
||||
|
||||
printDetail bool
|
||||
inDetail bool
|
||||
needColon bool
|
||||
needNewline bool
|
||||
}
|
||||
|
||||
func (s *state) Write(b []byte) (n int, err error) {
|
||||
if s.printDetail {
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if s.inDetail && s.needColon {
|
||||
s.needNewline = true
|
||||
if b[0] == '\n' {
|
||||
b = b[1:]
|
||||
}
|
||||
}
|
||||
k := 0
|
||||
for i, c := range b {
|
||||
if s.needNewline {
|
||||
if s.inDetail && s.needColon {
|
||||
s.buf.WriteByte(':')
|
||||
s.needColon = false
|
||||
}
|
||||
s.buf.Write(detailSep)
|
||||
s.needNewline = false
|
||||
}
|
||||
if c == '\n' {
|
||||
s.buf.Write(b[k:i])
|
||||
k = i + 1
|
||||
s.needNewline = true
|
||||
}
|
||||
}
|
||||
s.buf.Write(b[k:])
|
||||
if !s.inDetail {
|
||||
s.needColon = true
|
||||
}
|
||||
} else if !s.inDetail {
|
||||
s.buf.Write(b)
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// printer wraps a state to implement an xerrors.Printer.
|
||||
type printer state
|
||||
|
||||
func (s *printer) Print(args ...interface{}) {
|
||||
if !s.inDetail || s.printDetail {
|
||||
fmt.Fprint((*state)(s), args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *printer) Printf(format string, args ...interface{}) {
|
||||
if !s.inDetail || s.printDetail {
|
||||
fmt.Fprintf((*state)(s), format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *printer) Detail() bool {
|
||||
s.inDetail = true
|
||||
return s.printDetail
|
||||
}
|
||||
1
vendor/golang.org/x/xerrors/codereview.cfg
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
issuerepo: golang/go
|
||||
22
vendor/golang.org/x/xerrors/doc.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package xerrors implements functions to manipulate errors.
|
||||
//
|
||||
// This package is based on the Go 2 proposal for error values:
|
||||
// https://golang.org/design/29934-error-values
|
||||
//
|
||||
// These functions were incorporated into the standard library's errors package
|
||||
// in Go 1.13:
|
||||
// - Is
|
||||
// - As
|
||||
// - Unwrap
|
||||
//
|
||||
// Also, Errorf's %w verb was incorporated into fmt.Errorf.
|
||||
//
|
||||
// Use this package to get equivalent behavior in all supported Go versions.
|
||||
//
|
||||
// No other features of this package were included in Go 1.13, and at present
|
||||
// there are no plans to include any of them.
|
||||
package xerrors // import "golang.org/x/xerrors"
|
||||
33
vendor/golang.org/x/xerrors/errors.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xerrors
|
||||
|
||||
import "fmt"
|
||||
|
||||
// errorString is a trivial implementation of error.
|
||||
type errorString struct {
|
||||
s string
|
||||
frame Frame
|
||||
}
|
||||
|
||||
// New returns an error that formats as the given text.
|
||||
//
|
||||
// The returned error contains a Frame set to the caller's location and
|
||||
// implements Formatter to show this information when printed with details.
|
||||
func New(text string) error {
|
||||
return &errorString{text, Caller(1)}
|
||||
}
|
||||
|
||||
func (e *errorString) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) }
|
||||
|
||||
func (e *errorString) FormatError(p Printer) (next error) {
|
||||
p.Print(e.s)
|
||||
e.frame.Format(p)
|
||||
return nil
|
||||
}
|
||||
187
vendor/golang.org/x/xerrors/fmt.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xerrors

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"

	"golang.org/x/xerrors/internal"
)

const percentBangString = "%!"

// Errorf formats according to a format specifier and returns the string as a
// value that satisfies error.
//
// The returned error includes the file and line number of the caller when
// formatted with additional detail enabled. If the last argument is an error
// the returned error's Format method will return it if the format string ends
// with ": %s", ": %v", or ": %w". If the last argument is an error and the
// format string ends with ": %w", the returned error implements an Unwrap
// method returning it.
//
// If the format specifier includes a %w verb with an error operand in a
// position other than at the end, the returned error will still implement an
// Unwrap method returning the operand, but the error's Format method will not
// return the wrapped error.
//
// It is invalid to include more than one %w verb or to supply it with an
// operand that does not implement the error interface. The %w verb is otherwise
// a synonym for %v.
func Errorf(format string, a ...interface{}) error {
	format = formatPlusW(format)
	// Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
	wrap := strings.HasSuffix(format, ": %w")
	idx, format2, ok := parsePercentW(format)
	percentWElsewhere := !wrap && idx >= 0
	if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) {
		err := errorAt(a, len(a)-1)
		if err == nil {
			return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
		}
		// TODO: this is not entirely correct. The error value could be
		// printed elsewhere in format if it mixes numbered with unnumbered
		// substitutions. With relatively small changes to doPrintf we can
		// have it optionally ignore extra arguments and pass the argument
		// list in its entirety.
		msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
		frame := Frame{}
		if internal.EnableTrace {
			frame = Caller(1)
		}
		if wrap {
			return &wrapError{msg, err, frame}
		}
		return &noWrapError{msg, err, frame}
	}
	// Support %w anywhere.
	// TODO: don't repeat the wrapped error's message when %w occurs in the middle.
	msg := fmt.Sprintf(format2, a...)
	if idx < 0 {
		return &noWrapError{msg, nil, Caller(1)}
	}
	err := errorAt(a, idx)
	if !ok || err == nil {
		// Too many %ws or argument of %w is not an error. Approximate the Go
		// 1.13 fmt.Errorf message.
		return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)}
	}
	frame := Frame{}
	if internal.EnableTrace {
		frame = Caller(1)
	}
	return &wrapError{msg, err, frame}
}

func errorAt(args []interface{}, i int) error {
	if i < 0 || i >= len(args) {
		return nil
	}
	err, ok := args[i].(error)
	if !ok {
		return nil
	}
	return err
}

// formatPlusW is used to avoid the vet check that will barf at %w.
func formatPlusW(s string) string {
	return s
}

// Return the index of the only %w in format, or -1 if none.
// Also return a rewritten format string with %w replaced by %v, and
// false if there is more than one %w.
// TODO: handle "%[N]w".
func parsePercentW(format string) (idx int, newFormat string, ok bool) {
	// Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go.
	idx = -1
	ok = true
	n := 0
	sz := 0
	var isW bool
	for i := 0; i < len(format); i += sz {
		if format[i] != '%' {
			sz = 1
			continue
		}
		// "%%" is not a format directive.
		if i+1 < len(format) && format[i+1] == '%' {
			sz = 2
			continue
		}
		sz, isW = parsePrintfVerb(format[i:])
		if isW {
			if idx >= 0 {
				ok = false
			} else {
				idx = n
			}
			// "Replace" the last character, the 'w', with a 'v'.
			p := i + sz - 1
			format = format[:p] + "v" + format[p+1:]
		}
		n++
	}
	return idx, format, ok
}

// Parse the printf verb starting with a % at s[0].
// Return how many bytes it occupies and whether the verb is 'w'.
func parsePrintfVerb(s string) (int, bool) {
	// Assume only that the directive is a sequence of non-letters followed by a single letter.
	sz := 0
	var r rune
	for i := 1; i < len(s); i += sz {
		r, sz = utf8.DecodeRuneInString(s[i:])
		if unicode.IsLetter(r) {
			return i + sz, r == 'w'
		}
	}
	return len(s), false
}

type noWrapError struct {
	msg   string
	err   error
	frame Frame
}

func (e *noWrapError) Error() string {
	return fmt.Sprint(e)
}

func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }

func (e *noWrapError) FormatError(p Printer) (next error) {
	p.Print(e.msg)
	e.frame.Format(p)
	return e.err
}

type wrapError struct {
	msg   string
	err   error
	frame Frame
}

func (e *wrapError) Error() string {
	return fmt.Sprint(e)
}

func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }

func (e *wrapError) FormatError(p Printer) (next error) {
	p.Print(e.msg)
	e.frame.Format(p)
	return e.err
}

func (e *wrapError) Unwrap() error {
	return e.err
}
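Editorial note: a hedged usage sketch of the Errorf wrapping rules documented above. The sentinel errNotFound is a hypothetical name for illustration; a trailing ": %w" yields an error with an Unwrap method, while ": %v" records the cause for formatting only.

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

var errNotFound = xerrors.New("not found") // hypothetical sentinel

func main() {
	// ": %w" suffix: the result implements Unwrap and matches via xerrors.Is.
	wrapped := xerrors.Errorf("loading config: %w", errNotFound)
	fmt.Println(xerrors.Is(wrapped, errNotFound))       // true
	fmt.Println(xerrors.Unwrap(wrapped) == errNotFound) // true

	// ": %v" suffix: the cause is kept for formatting but is not unwrappable.
	annotated := xerrors.Errorf("loading config: %v", errNotFound)
	fmt.Println(xerrors.Unwrap(annotated) == nil) // true
}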
34
vendor/golang.org/x/xerrors/format.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xerrors

// A Formatter formats error messages.
type Formatter interface {
	error

	// FormatError prints the receiver's first error and returns the next error in
	// the error chain, if any.
	FormatError(p Printer) (next error)
}

// A Printer formats error messages.
//
// The most common implementation of Printer is the one provided by package fmt
// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
// typically provide their own implementations.
type Printer interface {
	// Print appends args to the message output.
	Print(args ...interface{})

	// Printf writes a formatted string.
	Printf(format string, args ...interface{})

	// Detail reports whether error detail is requested.
	// After the first call to Detail, all text written to the Printer
	// is formatted as additional detail, or ignored when
	// detail has not been requested.
	// If Detail returns false, the caller can avoid printing the detail at all.
	Detail() bool
}
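Editorial note: a minimal sketch of the Formatter/Printer contract defined above, assuming a caller-defined queryError type (hypothetical name). FormatError prints this error's own message, emits extra text only when Detail reports true, and returns the next error in the chain for the framework to format.

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

// queryError is a hypothetical caller-defined type implementing xerrors.Formatter.
type queryError struct {
	query string
	err   error
	frame xerrors.Frame
}

func (e *queryError) Error() string { return fmt.Sprint(e) }

// Format delegates to the package's exported FormatError adapter so %v and %+v both work.
func (e *queryError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) }

func (e *queryError) FormatError(p xerrors.Printer) (next error) {
	p.Print("query failed")
	if p.Detail() {
		p.Printf("query: %q\n", e.query)
	}
	e.frame.Format(p)
	return e.err // formatted next in the chain
}

func main() {
	err := &queryError{query: "SELECT 1", err: xerrors.New("timeout"), frame: xerrors.Caller(0)}
	fmt.Printf("%v\n", err)  // roughly "query failed: timeout"
	fmt.Printf("%+v\n", err) // adds the query text and file:line detail
}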
56
vendor/golang.org/x/xerrors/frame.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xerrors

import (
	"runtime"
)

// A Frame contains part of a call stack.
type Frame struct {
	// Make room for three PCs: the one we were asked for, what it called,
	// and possibly a PC for skipPleaseUseCallersFrames. See:
	// https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169
	frames [3]uintptr
}

// Caller returns a Frame that describes a frame on the caller's stack.
// The argument skip is the number of frames to skip over.
// Caller(0) returns the frame for the caller of Caller.
func Caller(skip int) Frame {
	var s Frame
	runtime.Callers(skip+1, s.frames[:])
	return s
}

// location reports the file, line, and function of a frame.
//
// The returned function may be "" even if file and line are not.
func (f Frame) location() (function, file string, line int) {
	frames := runtime.CallersFrames(f.frames[:])
	if _, ok := frames.Next(); !ok {
		return "", "", 0
	}
	fr, ok := frames.Next()
	if !ok {
		return "", "", 0
	}
	return fr.Function, fr.File, fr.Line
}

// Format prints the stack as error detail.
// It should be called from an error's Format implementation
// after printing any other error detail.
func (f Frame) Format(p Printer) {
	if p.Detail() {
		function, file, line := f.location()
		if function != "" {
			p.Printf("%s\n    ", function)
		}
		if file != "" {
			p.Printf("%s:%d\n", file, line)
		}
	}
}
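Editorial note: a small sketch of the skip argument documented on Caller above; opError and newOpError are hypothetical names. A constructor that wants the recorded frame to point at its own caller passes Caller(1), skipping the constructor's frame; Caller(0) would record the constructor itself.

package main

import "golang.org/x/xerrors"

// opError is a hypothetical error type carrying a Frame; its FormatError
// implementation would call e.frame.Format(p), as in the sketch after format.go.
type opError struct {
	op    string
	err   error
	frame xerrors.Frame
}

func (e *opError) Error() string { return e.op + ": " + e.err.Error() }

// newOpError records its caller, not itself: Caller(1) skips this helper's frame.
func newOpError(op string, err error) error {
	return &opError{op: op, err: err, frame: xerrors.Caller(1)}
}

func main() {
	_ = newOpError("read settings", xerrors.New("timeout"))
}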
3
vendor/golang.org/x/xerrors/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module golang.org/x/xerrors

go 1.11
8
vendor/golang.org/x/xerrors/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

// EnableTrace indicates whether stack information should be recorded in errors.
var EnableTrace = true
106
vendor/golang.org/x/xerrors/wrap.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xerrors

import (
	"reflect"
)

// A Wrapper provides context around another error.
type Wrapper interface {
	// Unwrap returns the next error in the error chain.
	// If there is no next error, Unwrap returns nil.
	Unwrap() error
}

// Opaque returns an error with the same error formatting as err
// but that does not match err and cannot be unwrapped.
func Opaque(err error) error {
	return noWrapper{err}
}

type noWrapper struct {
	error
}

func (e noWrapper) FormatError(p Printer) (next error) {
	if f, ok := e.error.(Formatter); ok {
		return f.FormatError(p)
	}
	p.Print(e.error)
	return nil
}

// Unwrap returns the result of calling the Unwrap method on err, if err implements
// Unwrap. Otherwise, Unwrap returns nil.
func Unwrap(err error) error {
	u, ok := err.(Wrapper)
	if !ok {
		return nil
	}
	return u.Unwrap()
}

// Is reports whether any error in err's chain matches target.
//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
func Is(err, target error) bool {
	if target == nil {
		return err == target
	}

	isComparable := reflect.TypeOf(target).Comparable()
	for {
		if isComparable && err == target {
			return true
		}
		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
			return true
		}
		// TODO: consider supporting target.Is(err). This would allow
		// user-definable predicates, but also may allow for coping with sloppy
		// APIs, thereby making it easier to get away with them.
		if err = Unwrap(err); err == nil {
			return false
		}
	}
}

// As finds the first error in err's chain that matches the type to which target
// points, and if so, sets the target to its value and returns true. An error
// matches a type if it is assignable to the target type, or if it has a method
// As(interface{}) bool such that As(target) returns true. As will panic if target
// is not a non-nil pointer to a type which implements error or is of interface type.
//
// The As method should set the target to its value and return true if err
// matches the type to which target points.
func As(err error, target interface{}) bool {
	if target == nil {
		panic("errors: target cannot be nil")
	}
	val := reflect.ValueOf(target)
	typ := val.Type()
	if typ.Kind() != reflect.Ptr || val.IsNil() {
		panic("errors: target must be a non-nil pointer")
	}
	if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
		panic("errors: *target must be interface or implement error")
	}
	targetType := typ.Elem()
	for err != nil {
		if reflect.TypeOf(err).AssignableTo(targetType) {
			val.Elem().Set(reflect.ValueOf(err))
			return true
		}
		if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) {
			return true
		}
		err = Unwrap(err)
	}
	return false
}

var errorType = reflect.TypeOf((*error)(nil)).Elem()
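Editorial note: a hedged usage sketch of the traversal helpers above, using os.ErrNotExist and *os.PathError as stand-in members of an error chain. Is walks the chain via Unwrap, As matches on assignability and sets its target, and Opaque hides the chain so neither helper can see past it.

package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

func main() {
	_, err := os.Open("no-such-file") // expected to fail with *os.PathError
	wrapped := xerrors.Errorf("loading config: %w", err)

	// Is unwraps until it finds a match or runs out of chain.
	fmt.Println(xerrors.Is(wrapped, os.ErrNotExist)) // true

	// As finds the first error assignable to *os.PathError and sets the target.
	var pathErr *os.PathError
	if xerrors.As(wrapped, &pathErr) {
		fmt.Println(pathErr.Path) // "no-such-file"
	}

	// Opaque keeps err's formatting but cuts the chain for Is/As/Unwrap.
	opaque := xerrors.Opaque(err)
	fmt.Println(xerrors.Is(opaque, os.ErrNotExist)) // false
}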