no message

143	Godeps/Godeps.json	generated
@@ -3,148 +3,13 @@
	"GoVersion": "go1.10",
	"GodepVersion": "v80",
	"Packages": [
		"./..."
		"github.com/pmylund/go-cache"
	],
	"Deps": [
		{
			"ImportPath": "github.com/Yawning/chacha20",
			"Rev": "e3b1f968fc6397b51d963fee8ec8711a47bc0ce8"
		},
		{
			"ImportPath": "github.com/alecthomas/template",
			"Rev": "a0175ee3bccc567396460bf5acd36800cb10c49c"
		},
		{
			"ImportPath": "github.com/alecthomas/template/parse",
			"Rev": "a0175ee3bccc567396460bf5acd36800cb10c49c"
		},
		{
			"ImportPath": "github.com/alecthomas/units",
			"Rev": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
		},
		{
			"ImportPath": "github.com/golang/snappy",
			"Rev": "553a641470496b2327abcac10b36396bd98e45c9"
		},
		{
			"ImportPath": "github.com/hashicorp/yamux",
			"Rev": "3520598351bb3500a49ae9563f5539666ae0a27c"
		},
		{
			"ImportPath": "github.com/miekg/dns",
			"Comment": "v1.0.4-1-g40b5202",
			"Rev": "40b520211179dbf7eaafaa7fe1ffaa1b7d929ee0"
		},
		{
			"ImportPath": "github.com/pkg/errors",
			"Comment": "v0.8.0-6-g602255c",
			"Rev": "602255cdb6deaf1523ea53ac30eae5554ba7bee9"
		},
		{
			"ImportPath": "github.com/templexxx/cpufeat",
			"Rev": "3794dfbfb04749f896b521032f69383f24c3687e"
		},
		{
			"ImportPath": "github.com/templexxx/reedsolomon",
			"Comment": "0.1.1-4-g7092926",
			"Rev": "7092926d7d05c415fabb892b1464a03f8228ab80"
		},
		{
			"ImportPath": "github.com/templexxx/xor",
			"Comment": "0.1.2",
			"Rev": "0af8e873c554da75f37f2049cdffda804533d44c"
		},
		{
			"ImportPath": "github.com/tjfoc/gmsm/sm4",
			"Comment": "v1.0.1-3-g9d99fac",
			"Rev": "9d99face20b0dd300b7db50b3f69758de41c096a"
		},
		{
			"ImportPath": "github.com/xtaci/kcp-go",
			"Comment": "v3.19-6-g21da33a",
			"Rev": "21da33a6696d67c1bffb3c954366499d613097a6"
		},
		{
			"ImportPath": "github.com/xtaci/smux",
			"Comment": "v1.0.6",
			"Rev": "ebec7ef2574b42a7088cd7751176483e0a27d458"
		},
		{
			"ImportPath": "golang.org/x/crypto/blowfish",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/cast5",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/curve25519",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/ed25519",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/pbkdf2",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/salsa20",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/salsa20/salsa",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/ssh",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/tea",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/twofish",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/crypto/xtea",
			"Rev": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
		},
		{
			"ImportPath": "golang.org/x/net/bpf",
			"Rev": "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
		},
		{
			"ImportPath": "golang.org/x/net/internal/iana",
			"Rev": "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
		},
		{
			"ImportPath": "golang.org/x/net/internal/socket",
			"Rev": "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
		},
		{
			"ImportPath": "golang.org/x/net/ipv4",
			"Rev": "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
		},
		{
			"ImportPath": "golang.org/x/net/ipv6",
			"Rev": "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
		},
		{
			"ImportPath": "golang.org/x/time/rate",
			"Rev": "6dc17368e09b0e8634d71cac8168d853e869a0c7"
		},
		{
			"ImportPath": "gopkg.in/alecthomas/kingpin.v2",
			"Comment": "v2.2.5",
			"Rev": "1087e65c9441605df944fb12c33f0fe7072d18ca"
			"ImportPath": "github.com/pmylund/go-cache",
			"Comment": "v2.1.0-1-g9f6ff22",
			"Rev": "9f6ff22cfff829561052f5886ec80a2ac148b4eb"
		}
	]
}
122	vendor/github.com/Yawning/chacha20/LICENSE	generated vendored
@@ -1,122 +0,0 @@
Creative Commons Legal Code

CC0 1.0 Universal

CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.

Statement of Purpose

The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").

Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.

For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.

1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:

i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.

2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.

3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.

4. Limitations and Disclaimers.

a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.
14	vendor/github.com/Yawning/chacha20/README.md	generated vendored
@@ -1,14 +0,0 @@
### chacha20 - ChaCha20
#### Yawning Angel (yawning at schwanenlied dot me)

Yet another Go ChaCha20 implementation. Everything else I found was slow,
didn't support all the variants I need to use, or relied on cgo to go fast.

Features:

 * 20 round, 256 bit key only. Everything else is pointless and stupid.
 * IETF 96 bit nonce variant.
 * XChaCha 24 byte nonce variant.
 * SSE2 and AVX2 support on amd64 targets.
 * Incremental encrypt/decrypt support, unlike golang.org/x/crypto/salsa20.
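The feature list above corresponds directly to the API of the package removed below (NewCipher, XORKeyStream, and the KeySize/INonceSize constants). The following minimal sketch, which is not part of the commit, shows how the vendored package was used with the IETF 96-bit nonce variant; the all-zero key and nonce are placeholders for illustration only.

```go
package main

import (
	"fmt"

	"github.com/Yawning/chacha20"
)

func main() {
	// Placeholder values: real callers must use a secret key and a unique nonce.
	key := make([]byte, chacha20.KeySize)      // 32-byte key
	nonce := make([]byte, chacha20.INonceSize) // 12-byte IETF nonce

	c, err := chacha20.NewCipher(key, nonce)
	if err != nil {
		panic(err)
	}

	plaintext := []byte("hello")
	ciphertext := make([]byte, len(plaintext))
	c.XORKeyStream(ciphertext, plaintext)

	// Decryption is the same XOR with a cipher initialized to the same key/nonce.
	d, _ := chacha20.NewCipher(key, nonce)
	recovered := make([]byte, len(ciphertext))
	d.XORKeyStream(recovered, ciphertext)
	fmt.Printf("%s\n", recovered)
}
```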
273	vendor/github.com/Yawning/chacha20/chacha20.go	generated vendored
@@ -1,273 +0,0 @@
// chacha20.go - A ChaCha stream cipher implementation.
//
// To the extent possible under law, Yawning Angel has waived all copyright
// and related or neighboring rights to chacha20, using the Creative
// Commons "CC0" public domain dedication. See LICENSE or
// <http://creativecommons.org/publicdomain/zero/1.0/> for full details.

package chacha20

import (
	"crypto/cipher"
	"encoding/binary"
	"errors"
	"math"
	"runtime"
)

const (
	// KeySize is the ChaCha20 key size in bytes.
	KeySize = 32

	// NonceSize is the ChaCha20 nonce size in bytes.
	NonceSize = 8

	// INonceSize is the IETF ChaCha20 nonce size in bytes.
	INonceSize = 12

	// XNonceSize is the XChaCha20 nonce size in bytes.
	XNonceSize = 24

	// HNonceSize is the HChaCha20 nonce size in bytes.
	HNonceSize = 16

	// BlockSize is the ChaCha20 block size in bytes.
	BlockSize = 64

	stateSize    = 16
	chachaRounds = 20

	// The constant "expand 32-byte k" as little endian uint32s.
	sigma0 = uint32(0x61707865)
	sigma1 = uint32(0x3320646e)
	sigma2 = uint32(0x79622d32)
	sigma3 = uint32(0x6b206574)
)

var (
	// ErrInvalidKey is the error returned when the key is invalid.
	ErrInvalidKey = errors.New("key length must be KeySize bytes")

	// ErrInvalidNonce is the error returned when the nonce is invalid.
	ErrInvalidNonce = errors.New("nonce length must be NonceSize/INonceSize/XNonceSize bytes")

	// ErrInvalidCounter is the error returned when the counter is invalid.
	ErrInvalidCounter = errors.New("block counter is invalid (out of range)")

	useUnsafe    = false
	usingVectors = false
	blocksFn     = blocksRef
)

// A Cipher is an instance of ChaCha20/XChaCha20 using a particular key and
// nonce.
type Cipher struct {
	state [stateSize]uint32

	buf  [BlockSize]byte
	off  int
	ietf bool
}

// Reset zeros the key data so that it will no longer appear in the process's
// memory.
func (c *Cipher) Reset() {
	for i := range c.state {
		c.state[i] = 0
	}
	for i := range c.buf {
		c.buf[i] = 0
	}
}

// XORKeyStream sets dst to the result of XORing src with the key stream. Dst
// and src may be the same slice but otherwise should not overlap.
func (c *Cipher) XORKeyStream(dst, src []byte) {
	if len(dst) < len(src) {
		src = src[:len(dst)]
	}

	for remaining := len(src); remaining > 0; {
		// Process multiple blocks at once.
		if c.off == BlockSize {
			nrBlocks := remaining / BlockSize
			directBytes := nrBlocks * BlockSize
			if nrBlocks > 0 {
				blocksFn(&c.state, src, dst, nrBlocks, c.ietf)
				remaining -= directBytes
				if remaining == 0 {
					return
				}
				dst = dst[directBytes:]
				src = src[directBytes:]
			}

			// If there's a partial block, generate 1 block of keystream into
			// the internal buffer.
			blocksFn(&c.state, nil, c.buf[:], 1, c.ietf)
			c.off = 0
		}

		// Process partial blocks from the buffered keystream.
		toXor := BlockSize - c.off
		if remaining < toXor {
			toXor = remaining
		}
		if toXor > 0 {
			for i, v := range src[:toXor] {
				dst[i] = v ^ c.buf[c.off+i]
			}
			dst = dst[toXor:]
			src = src[toXor:]

			remaining -= toXor
			c.off += toXor
		}
	}
}

// KeyStream sets dst to the raw keystream.
func (c *Cipher) KeyStream(dst []byte) {
	for remaining := len(dst); remaining > 0; {
		// Process multiple blocks at once.
		if c.off == BlockSize {
			nrBlocks := remaining / BlockSize
			directBytes := nrBlocks * BlockSize
			if nrBlocks > 0 {
				blocksFn(&c.state, nil, dst, nrBlocks, c.ietf)
				remaining -= directBytes
				if remaining == 0 {
					return
				}
				dst = dst[directBytes:]
			}

			// If there's a partial block, generate 1 block of keystream into
			// the internal buffer.
			blocksFn(&c.state, nil, c.buf[:], 1, c.ietf)
			c.off = 0
		}

		// Process partial blocks from the buffered keystream.
		toCopy := BlockSize - c.off
		if remaining < toCopy {
			toCopy = remaining
		}
		if toCopy > 0 {
			copy(dst[:toCopy], c.buf[c.off:c.off+toCopy])
			dst = dst[toCopy:]
			remaining -= toCopy
			c.off += toCopy
		}
	}
}

// ReKey reinitializes the ChaCha20/XChaCha20 instance with the provided key
// and nonce.
func (c *Cipher) ReKey(key, nonce []byte) error {
	if len(key) != KeySize {
		return ErrInvalidKey
	}

	switch len(nonce) {
	case NonceSize:
	case INonceSize:
	case XNonceSize:
		var subkey [KeySize]byte
		var subnonce [HNonceSize]byte
		copy(subnonce[:], nonce[0:16])
		HChaCha(key, &subnonce, &subkey)
		key = subkey[:]
		nonce = nonce[16:24]
		defer func() {
			for i := range subkey {
				subkey[i] = 0
			}
		}()
	default:
		return ErrInvalidNonce
	}

	c.Reset()
	c.state[0] = sigma0
	c.state[1] = sigma1
	c.state[2] = sigma2
	c.state[3] = sigma3
	c.state[4] = binary.LittleEndian.Uint32(key[0:4])
	c.state[5] = binary.LittleEndian.Uint32(key[4:8])
	c.state[6] = binary.LittleEndian.Uint32(key[8:12])
	c.state[7] = binary.LittleEndian.Uint32(key[12:16])
	c.state[8] = binary.LittleEndian.Uint32(key[16:20])
	c.state[9] = binary.LittleEndian.Uint32(key[20:24])
	c.state[10] = binary.LittleEndian.Uint32(key[24:28])
	c.state[11] = binary.LittleEndian.Uint32(key[28:32])
	c.state[12] = 0
	if len(nonce) == INonceSize {
		c.state[13] = binary.LittleEndian.Uint32(nonce[0:4])
		c.state[14] = binary.LittleEndian.Uint32(nonce[4:8])
		c.state[15] = binary.LittleEndian.Uint32(nonce[8:12])
		c.ietf = true
	} else {
		c.state[13] = 0
		c.state[14] = binary.LittleEndian.Uint32(nonce[0:4])
		c.state[15] = binary.LittleEndian.Uint32(nonce[4:8])
		c.ietf = false
	}
	c.off = BlockSize
	return nil

}

// Seek sets the block counter to a given offset.
func (c *Cipher) Seek(blockCounter uint64) error {
	if c.ietf {
		if blockCounter > math.MaxUint32 {
			return ErrInvalidCounter
		}
		c.state[12] = uint32(blockCounter)
	} else {
		c.state[12] = uint32(blockCounter)
		c.state[13] = uint32(blockCounter >> 32)
	}
	c.off = BlockSize
	return nil
}

// NewCipher returns a new ChaCha20/XChaCha20 instance.
func NewCipher(key, nonce []byte) (*Cipher, error) {
	c := new(Cipher)
	if err := c.ReKey(key, nonce); err != nil {
		return nil, err
	}
	return c, nil
}

// HChaCha is the HChaCha20 hash function used to make XChaCha.
func HChaCha(key []byte, nonce *[HNonceSize]byte, out *[32]byte) {
	var x [stateSize]uint32 // Last 4 slots unused, sigma hardcoded.
	x[0] = binary.LittleEndian.Uint32(key[0:4])
	x[1] = binary.LittleEndian.Uint32(key[4:8])
	x[2] = binary.LittleEndian.Uint32(key[8:12])
	x[3] = binary.LittleEndian.Uint32(key[12:16])
	x[4] = binary.LittleEndian.Uint32(key[16:20])
	x[5] = binary.LittleEndian.Uint32(key[20:24])
	x[6] = binary.LittleEndian.Uint32(key[24:28])
	x[7] = binary.LittleEndian.Uint32(key[28:32])
	x[8] = binary.LittleEndian.Uint32(nonce[0:4])
	x[9] = binary.LittleEndian.Uint32(nonce[4:8])
	x[10] = binary.LittleEndian.Uint32(nonce[8:12])
	x[11] = binary.LittleEndian.Uint32(nonce[12:16])
	hChaChaRef(&x, out)
}

func init() {
	switch runtime.GOARCH {
	case "386", "amd64":
		// Abuse unsafe to skip calling binary.LittleEndian.PutUint32
		// in the critical path. This is a big boost on systems that are
		// little endian and not overly picky about alignment.
		useUnsafe = true
	}
}

var _ cipher.Stream = (*Cipher)(nil)
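For reference, the XChaCha (24-byte nonce) and Seek paths defined in the file above can be exercised as in the following sketch; it is not part of the commit, and the zero key/nonce are placeholders.

```go
package main

import (
	"fmt"

	"github.com/Yawning/chacha20"
)

func main() {
	// Placeholder values for illustration only.
	key := make([]byte, chacha20.KeySize)
	nonce := make([]byte, chacha20.XNonceSize) // 24 bytes: ReKey derives a subkey via HChaCha

	c, err := chacha20.NewCipher(key, nonce)
	if err != nil {
		panic(err)
	}

	// Seek jumps the keystream to block 4 (byte offset 4*BlockSize),
	// then KeyStream emits raw keystream bytes from that point.
	if err := c.Seek(4); err != nil {
		panic(err)
	}
	buf := make([]byte, 2*chacha20.BlockSize)
	c.KeyStream(buf)
	fmt.Printf("%x\n", buf[:8])
}
```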
95	vendor/github.com/Yawning/chacha20/chacha20_amd64.go	generated vendored
@@ -1,95 +0,0 @@
// chacha20_amd64.go - AMD64 optimized chacha20.
//
// To the extent possible under law, Yawning Angel has waived all copyright
// and related or neighboring rights to chacha20, using the Creative
// Commons "CC0" public domain dedication. See LICENSE or
// <http://creativecommons.org/publicdomain/zero/1.0/> for full details.

// +build amd64,!gccgo,!appengine

package chacha20

import (
	"math"
)

var usingAVX2 = false

func blocksAmd64SSE2(x *uint32, inp, outp *byte, nrBlocks uint)

func blocksAmd64AVX2(x *uint32, inp, outp *byte, nrBlocks uint)

func cpuidAmd64(cpuidParams *uint32)

func xgetbv0Amd64(xcrVec *uint32)

func blocksAmd64(x *[stateSize]uint32, in []byte, out []byte, nrBlocks int, isIetf bool) {
	// Probably unneeded, but stating this explicitly simplifies the assembly.
	if nrBlocks == 0 {
		return
	}

	if isIetf {
		var totalBlocks uint64
		totalBlocks = uint64(x[12]) + uint64(nrBlocks)
		if totalBlocks > math.MaxUint32 {
			panic("chacha20: Exceeded keystream per nonce limit")
		}
	}

	if in == nil {
		for i := range out {
			out[i] = 0
		}
		in = out
	}

	// Pointless to call the AVX2 code for just a single block, since half of
	// the output gets discarded...
	if usingAVX2 && nrBlocks > 1 {
		blocksAmd64AVX2(&x[0], &in[0], &out[0], uint(nrBlocks))
	} else {
		blocksAmd64SSE2(&x[0], &in[0], &out[0], uint(nrBlocks))
	}
}

func supportsAVX2() bool {
	// https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
	const (
		osXsaveBit = 1 << 27
		avx2Bit    = 1 << 5
	)

	// Check to see if CPUID actually supports the leaf that indicates AVX2.
	// CPUID.(EAX=0H, ECX=0H) >= 7
	regs := [4]uint32{0x00}
	cpuidAmd64(&regs[0])
	if regs[0] < 7 {
		return false
	}

	// Check to see if the OS knows how to save/restore XMM/YMM state.
	// CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1
	regs = [4]uint32{0x01}
	cpuidAmd64(&regs[0])
	if regs[2]&osXsaveBit == 0 {
		return false
	}
	xcrRegs := [2]uint32{}
	xgetbv0Amd64(&xcrRegs[0])
	if xcrRegs[0]&6 != 6 {
		return false
	}

	// Check for AVX2 support.
	// CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1
	regs = [4]uint32{0x07}
	cpuidAmd64(&regs[0])
	return regs[1]&avx2Bit != 0
}

func init() {
	blocksFn = blocksAmd64
	usingVectors = true
	usingAVX2 = supportsAVX2()
}
1295	vendor/github.com/Yawning/chacha20/chacha20_amd64.py	generated vendored
(File diff suppressed because it is too large)

1180	vendor/github.com/Yawning/chacha20/chacha20_amd64.s	generated vendored
(File diff suppressed because it is too large)
394	vendor/github.com/Yawning/chacha20/chacha20_ref.go	generated vendored
@@ -1,394 +0,0 @@
// chacha20_ref.go - Reference ChaCha20.
//
// To the extent possible under law, Yawning Angel has waived all copyright
// and related or neighboring rights to chacha20, using the Creative
// Commons "CC0" public domain dedication. See LICENSE or
// <http://creativecommons.org/publicdomain/zero/1.0/> for full details.

// +build !go1.9

package chacha20

import (
	"encoding/binary"
	"math"
	"unsafe"
)

func blocksRef(x *[stateSize]uint32, in []byte, out []byte, nrBlocks int, isIetf bool) {
	if isIetf {
		var totalBlocks uint64
		totalBlocks = uint64(x[12]) + uint64(nrBlocks)
		if totalBlocks > math.MaxUint32 {
			panic("chacha20: Exceeded keystream per nonce limit")
		}
	}

	// This routine ignores x[0]...x[4] in favor the const values since it's
	// ever so slightly faster.

	for n := 0; n < nrBlocks; n++ {
		x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3
		x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]

		for i := chachaRounds; i > 0; i -= 2 {
			// quarterround(x, 0, 4, 8, 12)
			x0 += x4
			x12 ^= x0
			x12 = (x12 << 16) | (x12 >> 16)
			x8 += x12
			x4 ^= x8
			x4 = (x4 << 12) | (x4 >> 20)
			x0 += x4
			x12 ^= x0
			x12 = (x12 << 8) | (x12 >> 24)
			x8 += x12
			x4 ^= x8
			x4 = (x4 << 7) | (x4 >> 25)

			// quarterround(x, 1, 5, 9, 13)
			x1 += x5
			x13 ^= x1
			x13 = (x13 << 16) | (x13 >> 16)
			x9 += x13
			x5 ^= x9
			x5 = (x5 << 12) | (x5 >> 20)
			x1 += x5
			x13 ^= x1
			x13 = (x13 << 8) | (x13 >> 24)
			x9 += x13
			x5 ^= x9
			x5 = (x5 << 7) | (x5 >> 25)

			// quarterround(x, 2, 6, 10, 14)
			x2 += x6
			x14 ^= x2
			x14 = (x14 << 16) | (x14 >> 16)
			x10 += x14
			x6 ^= x10
			x6 = (x6 << 12) | (x6 >> 20)
			x2 += x6
			x14 ^= x2
			x14 = (x14 << 8) | (x14 >> 24)
			x10 += x14
			x6 ^= x10
			x6 = (x6 << 7) | (x6 >> 25)

			// quarterround(x, 3, 7, 11, 15)
			x3 += x7
			x15 ^= x3
			x15 = (x15 << 16) | (x15 >> 16)
			x11 += x15
			x7 ^= x11
			x7 = (x7 << 12) | (x7 >> 20)
			x3 += x7
			x15 ^= x3
			x15 = (x15 << 8) | (x15 >> 24)
			x11 += x15
			x7 ^= x11
			x7 = (x7 << 7) | (x7 >> 25)

			// quarterround(x, 0, 5, 10, 15)
			x0 += x5
			x15 ^= x0
			x15 = (x15 << 16) | (x15 >> 16)
			x10 += x15
			x5 ^= x10
			x5 = (x5 << 12) | (x5 >> 20)
			x0 += x5
			x15 ^= x0
			x15 = (x15 << 8) | (x15 >> 24)
			x10 += x15
			x5 ^= x10
			x5 = (x5 << 7) | (x5 >> 25)

			// quarterround(x, 1, 6, 11, 12)
			x1 += x6
			x12 ^= x1
			x12 = (x12 << 16) | (x12 >> 16)
			x11 += x12
			x6 ^= x11
			x6 = (x6 << 12) | (x6 >> 20)
			x1 += x6
			x12 ^= x1
			x12 = (x12 << 8) | (x12 >> 24)
			x11 += x12
			x6 ^= x11
			x6 = (x6 << 7) | (x6 >> 25)

			// quarterround(x, 2, 7, 8, 13)
			x2 += x7
			x13 ^= x2
			x13 = (x13 << 16) | (x13 >> 16)
			x8 += x13
			x7 ^= x8
			x7 = (x7 << 12) | (x7 >> 20)
			x2 += x7
			x13 ^= x2
			x13 = (x13 << 8) | (x13 >> 24)
			x8 += x13
			x7 ^= x8
			x7 = (x7 << 7) | (x7 >> 25)

			// quarterround(x, 3, 4, 9, 14)
			x3 += x4
			x14 ^= x3
			x14 = (x14 << 16) | (x14 >> 16)
			x9 += x14
			x4 ^= x9
			x4 = (x4 << 12) | (x4 >> 20)
			x3 += x4
			x14 ^= x3
			x14 = (x14 << 8) | (x14 >> 24)
			x9 += x14
			x4 ^= x9
			x4 = (x4 << 7) | (x4 >> 25)
		}

		// On amd64 at least, this is a rather big boost.
		if useUnsafe {
			if in != nil {
				inArr := (*[16]uint32)(unsafe.Pointer(&in[n*BlockSize]))
				outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize]))
				outArr[0] = inArr[0] ^ (x0 + sigma0)
				outArr[1] = inArr[1] ^ (x1 + sigma1)
				outArr[2] = inArr[2] ^ (x2 + sigma2)
				outArr[3] = inArr[3] ^ (x3 + sigma3)
				outArr[4] = inArr[4] ^ (x4 + x[4])
				outArr[5] = inArr[5] ^ (x5 + x[5])
				outArr[6] = inArr[6] ^ (x6 + x[6])
				outArr[7] = inArr[7] ^ (x7 + x[7])
				outArr[8] = inArr[8] ^ (x8 + x[8])
				outArr[9] = inArr[9] ^ (x9 + x[9])
				outArr[10] = inArr[10] ^ (x10 + x[10])
				outArr[11] = inArr[11] ^ (x11 + x[11])
				outArr[12] = inArr[12] ^ (x12 + x[12])
				outArr[13] = inArr[13] ^ (x13 + x[13])
				outArr[14] = inArr[14] ^ (x14 + x[14])
				outArr[15] = inArr[15] ^ (x15 + x[15])
			} else {
				outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize]))
				outArr[0] = x0 + sigma0
				outArr[1] = x1 + sigma1
				outArr[2] = x2 + sigma2
				outArr[3] = x3 + sigma3
				outArr[4] = x4 + x[4]
				outArr[5] = x5 + x[5]
				outArr[6] = x6 + x[6]
				outArr[7] = x7 + x[7]
				outArr[8] = x8 + x[8]
				outArr[9] = x9 + x[9]
				outArr[10] = x10 + x[10]
				outArr[11] = x11 + x[11]
				outArr[12] = x12 + x[12]
				outArr[13] = x13 + x[13]
				outArr[14] = x14 + x[14]
				outArr[15] = x15 + x[15]
			}
		} else {
			// Slow path, either the architecture cares about alignment, or is not little endian.
			x0 += sigma0
			x1 += sigma1
			x2 += sigma2
			x3 += sigma3
			x4 += x[4]
			x5 += x[5]
			x6 += x[6]
			x7 += x[7]
			x8 += x[8]
			x9 += x[9]
			x10 += x[10]
			x11 += x[11]
			x12 += x[12]
			x13 += x[13]
			x14 += x[14]
			x15 += x[15]
			if in != nil {
				binary.LittleEndian.PutUint32(out[0:4], binary.LittleEndian.Uint32(in[0:4])^x0)
				binary.LittleEndian.PutUint32(out[4:8], binary.LittleEndian.Uint32(in[4:8])^x1)
				binary.LittleEndian.PutUint32(out[8:12], binary.LittleEndian.Uint32(in[8:12])^x2)
				binary.LittleEndian.PutUint32(out[12:16], binary.LittleEndian.Uint32(in[12:16])^x3)
				binary.LittleEndian.PutUint32(out[16:20], binary.LittleEndian.Uint32(in[16:20])^x4)
				binary.LittleEndian.PutUint32(out[20:24], binary.LittleEndian.Uint32(in[20:24])^x5)
				binary.LittleEndian.PutUint32(out[24:28], binary.LittleEndian.Uint32(in[24:28])^x6)
				binary.LittleEndian.PutUint32(out[28:32], binary.LittleEndian.Uint32(in[28:32])^x7)
				binary.LittleEndian.PutUint32(out[32:36], binary.LittleEndian.Uint32(in[32:36])^x8)
				binary.LittleEndian.PutUint32(out[36:40], binary.LittleEndian.Uint32(in[36:40])^x9)
				binary.LittleEndian.PutUint32(out[40:44], binary.LittleEndian.Uint32(in[40:44])^x10)
				binary.LittleEndian.PutUint32(out[44:48], binary.LittleEndian.Uint32(in[44:48])^x11)
				binary.LittleEndian.PutUint32(out[48:52], binary.LittleEndian.Uint32(in[48:52])^x12)
				binary.LittleEndian.PutUint32(out[52:56], binary.LittleEndian.Uint32(in[52:56])^x13)
				binary.LittleEndian.PutUint32(out[56:60], binary.LittleEndian.Uint32(in[56:60])^x14)
				binary.LittleEndian.PutUint32(out[60:64], binary.LittleEndian.Uint32(in[60:64])^x15)
				in = in[BlockSize:]
			} else {
				binary.LittleEndian.PutUint32(out[0:4], x0)
				binary.LittleEndian.PutUint32(out[4:8], x1)
				binary.LittleEndian.PutUint32(out[8:12], x2)
				binary.LittleEndian.PutUint32(out[12:16], x3)
				binary.LittleEndian.PutUint32(out[16:20], x4)
				binary.LittleEndian.PutUint32(out[20:24], x5)
				binary.LittleEndian.PutUint32(out[24:28], x6)
				binary.LittleEndian.PutUint32(out[28:32], x7)
				binary.LittleEndian.PutUint32(out[32:36], x8)
				binary.LittleEndian.PutUint32(out[36:40], x9)
				binary.LittleEndian.PutUint32(out[40:44], x10)
				binary.LittleEndian.PutUint32(out[44:48], x11)
				binary.LittleEndian.PutUint32(out[48:52], x12)
				binary.LittleEndian.PutUint32(out[52:56], x13)
				binary.LittleEndian.PutUint32(out[56:60], x14)
				binary.LittleEndian.PutUint32(out[60:64], x15)
			}
			out = out[BlockSize:]
		}

		// Stoping at 2^70 bytes per nonce is the user's responsibility.
		ctr := uint64(x[13])<<32 | uint64(x[12])
		ctr++
		x[12] = uint32(ctr)
		x[13] = uint32(ctr >> 32)
	}
}

func hChaChaRef(x *[stateSize]uint32, out *[32]byte) {
	x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3
	x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11]

	for i := chachaRounds; i > 0; i -= 2 {
		// quarterround(x, 0, 4, 8, 12)
		x0 += x4
		x12 ^= x0
		x12 = (x12 << 16) | (x12 >> 16)
		x8 += x12
		x4 ^= x8
		x4 = (x4 << 12) | (x4 >> 20)
		x0 += x4
		x12 ^= x0
		x12 = (x12 << 8) | (x12 >> 24)
		x8 += x12
		x4 ^= x8
		x4 = (x4 << 7) | (x4 >> 25)

		// quarterround(x, 1, 5, 9, 13)
		x1 += x5
		x13 ^= x1
		x13 = (x13 << 16) | (x13 >> 16)
		x9 += x13
		x5 ^= x9
		x5 = (x5 << 12) | (x5 >> 20)
		x1 += x5
		x13 ^= x1
		x13 = (x13 << 8) | (x13 >> 24)
		x9 += x13
		x5 ^= x9
		x5 = (x5 << 7) | (x5 >> 25)

		// quarterround(x, 2, 6, 10, 14)
		x2 += x6
		x14 ^= x2
		x14 = (x14 << 16) | (x14 >> 16)
		x10 += x14
		x6 ^= x10
		x6 = (x6 << 12) | (x6 >> 20)
		x2 += x6
		x14 ^= x2
		x14 = (x14 << 8) | (x14 >> 24)
		x10 += x14
		x6 ^= x10
		x6 = (x6 << 7) | (x6 >> 25)

		// quarterround(x, 3, 7, 11, 15)
		x3 += x7
		x15 ^= x3
		x15 = (x15 << 16) | (x15 >> 16)
		x11 += x15
		x7 ^= x11
		x7 = (x7 << 12) | (x7 >> 20)
		x3 += x7
		x15 ^= x3
		x15 = (x15 << 8) | (x15 >> 24)
		x11 += x15
		x7 ^= x11
		x7 = (x7 << 7) | (x7 >> 25)

		// quarterround(x, 0, 5, 10, 15)
		x0 += x5
		x15 ^= x0
		x15 = (x15 << 16) | (x15 >> 16)
		x10 += x15
		x5 ^= x10
		x5 = (x5 << 12) | (x5 >> 20)
		x0 += x5
		x15 ^= x0
		x15 = (x15 << 8) | (x15 >> 24)
		x10 += x15
		x5 ^= x10
		x5 = (x5 << 7) | (x5 >> 25)

		// quarterround(x, 1, 6, 11, 12)
		x1 += x6
		x12 ^= x1
		x12 = (x12 << 16) | (x12 >> 16)
		x11 += x12
		x6 ^= x11
		x6 = (x6 << 12) | (x6 >> 20)
		x1 += x6
		x12 ^= x1
		x12 = (x12 << 8) | (x12 >> 24)
		x11 += x12
		x6 ^= x11
		x6 = (x6 << 7) | (x6 >> 25)

		// quarterround(x, 2, 7, 8, 13)
		x2 += x7
		x13 ^= x2
		x13 = (x13 << 16) | (x13 >> 16)
		x8 += x13
		x7 ^= x8
		x7 = (x7 << 12) | (x7 >> 20)
		x2 += x7
		x13 ^= x2
		x13 = (x13 << 8) | (x13 >> 24)
		x8 += x13
		x7 ^= x8
		x7 = (x7 << 7) | (x7 >> 25)

		// quarterround(x, 3, 4, 9, 14)
		x3 += x4
		x14 ^= x3
		x14 = (x14 << 16) | (x14 >> 16)
		x9 += x14
		x4 ^= x9
		x4 = (x4 << 12) | (x4 >> 20)
		x3 += x4
		x14 ^= x3
		x14 = (x14 << 8) | (x14 >> 24)
		x9 += x14
		x4 ^= x9
		x4 = (x4 << 7) | (x4 >> 25)
	}

	// HChaCha returns x0...x3 | x12...x15, which corresponds to the
	// indexes of the ChaCha constant and the indexes of the IV.
	if useUnsafe {
		outArr := (*[16]uint32)(unsafe.Pointer(&out[0]))
		outArr[0] = x0
		outArr[1] = x1
		outArr[2] = x2
		outArr[3] = x3
		outArr[4] = x12
		outArr[5] = x13
		outArr[6] = x14
		outArr[7] = x15
	} else {
		binary.LittleEndian.PutUint32(out[0:4], x0)
		binary.LittleEndian.PutUint32(out[4:8], x1)
		binary.LittleEndian.PutUint32(out[8:12], x2)
		binary.LittleEndian.PutUint32(out[12:16], x3)
		binary.LittleEndian.PutUint32(out[16:20], x12)
		binary.LittleEndian.PutUint32(out[20:24], x13)
		binary.LittleEndian.PutUint32(out[24:28], x14)
		binary.LittleEndian.PutUint32(out[28:32], x15)
	}
	return
}
395	vendor/github.com/Yawning/chacha20/chacha20_ref_go19.go	generated vendored
@@ -1,395 +0,0 @@
// chacha20_ref.go - Reference ChaCha20.
//
// To the extent possible under law, Yawning Angel has waived all copyright
// and related or neighboring rights to chacha20, using the Creative
// Commons "CC0" public domain dedication. See LICENSE or
// <http://creativecommons.org/publicdomain/zero/1.0/> for full details.

// +build go1.9

package chacha20

import (
	"encoding/binary"
	"math"
	"math/bits"
	"unsafe"
)

func blocksRef(x *[stateSize]uint32, in []byte, out []byte, nrBlocks int, isIetf bool) {
	if isIetf {
		var totalBlocks uint64
		totalBlocks = uint64(x[12]) + uint64(nrBlocks)
		if totalBlocks > math.MaxUint32 {
			panic("chacha20: Exceeded keystream per nonce limit")
		}
	}

	// This routine ignores x[0]...x[4] in favor the const values since it's
	// ever so slightly faster.

	for n := 0; n < nrBlocks; n++ {
		x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3
		x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]

		for i := chachaRounds; i > 0; i -= 2 {
			// quarterround(x, 0, 4, 8, 12)
			x0 += x4
			x12 ^= x0
			x12 = bits.RotateLeft32(x12, 16)
			x8 += x12
			x4 ^= x8
			x4 = bits.RotateLeft32(x4, 12)
			x0 += x4
			x12 ^= x0
			x12 = bits.RotateLeft32(x12, 8)
			x8 += x12
			x4 ^= x8
			x4 = bits.RotateLeft32(x4, 7)

			// quarterround(x, 1, 5, 9, 13)
			x1 += x5
			x13 ^= x1
			x13 = bits.RotateLeft32(x13, 16)
			x9 += x13
			x5 ^= x9
			x5 = bits.RotateLeft32(x5, 12)
			x1 += x5
			x13 ^= x1
			x13 = bits.RotateLeft32(x13, 8)
			x9 += x13
			x5 ^= x9
			x5 = bits.RotateLeft32(x5, 7)

			// quarterround(x, 2, 6, 10, 14)
			x2 += x6
			x14 ^= x2
			x14 = bits.RotateLeft32(x14, 16)
			x10 += x14
			x6 ^= x10
			x6 = bits.RotateLeft32(x6, 12)
			x2 += x6
			x14 ^= x2
			x14 = bits.RotateLeft32(x14, 8)
			x10 += x14
			x6 ^= x10
			x6 = bits.RotateLeft32(x6, 7)

			// quarterround(x, 3, 7, 11, 15)
			x3 += x7
			x15 ^= x3
			x15 = bits.RotateLeft32(x15, 16)
			x11 += x15
			x7 ^= x11
			x7 = bits.RotateLeft32(x7, 12)
			x3 += x7
			x15 ^= x3
			x15 = bits.RotateLeft32(x15, 8)
			x11 += x15
			x7 ^= x11
			x7 = bits.RotateLeft32(x7, 7)

			// quarterround(x, 0, 5, 10, 15)
			x0 += x5
			x15 ^= x0
			x15 = bits.RotateLeft32(x15, 16)
			x10 += x15
			x5 ^= x10
			x5 = bits.RotateLeft32(x5, 12)
			x0 += x5
			x15 ^= x0
			x15 = bits.RotateLeft32(x15, 8)
			x10 += x15
			x5 ^= x10
			x5 = bits.RotateLeft32(x5, 7)

			// quarterround(x, 1, 6, 11, 12)
			x1 += x6
			x12 ^= x1
			x12 = bits.RotateLeft32(x12, 16)
			x11 += x12
			x6 ^= x11
			x6 = bits.RotateLeft32(x6, 12)
			x1 += x6
			x12 ^= x1
			x12 = bits.RotateLeft32(x12, 8)
			x11 += x12
			x6 ^= x11
			x6 = bits.RotateLeft32(x6, 7)

			// quarterround(x, 2, 7, 8, 13)
			x2 += x7
			x13 ^= x2
			x13 = bits.RotateLeft32(x13, 16)
			x8 += x13
			x7 ^= x8
			x7 = bits.RotateLeft32(x7, 12)
			x2 += x7
			x13 ^= x2
			x13 = bits.RotateLeft32(x13, 8)
			x8 += x13
			x7 ^= x8
			x7 = bits.RotateLeft32(x7, 7)

			// quarterround(x, 3, 4, 9, 14)
			x3 += x4
			x14 ^= x3
			x14 = bits.RotateLeft32(x14, 16)
			x9 += x14
			x4 ^= x9
			x4 = bits.RotateLeft32(x4, 12)
			x3 += x4
			x14 ^= x3
			x14 = bits.RotateLeft32(x14, 8)
			x9 += x14
			x4 ^= x9
			x4 = bits.RotateLeft32(x4, 7)
		}

		// On amd64 at least, this is a rather big boost.
		if useUnsafe {
			if in != nil {
				inArr := (*[16]uint32)(unsafe.Pointer(&in[n*BlockSize]))
				outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize]))
				outArr[0] = inArr[0] ^ (x0 + sigma0)
				outArr[1] = inArr[1] ^ (x1 + sigma1)
				outArr[2] = inArr[2] ^ (x2 + sigma2)
				outArr[3] = inArr[3] ^ (x3 + sigma3)
				outArr[4] = inArr[4] ^ (x4 + x[4])
				outArr[5] = inArr[5] ^ (x5 + x[5])
				outArr[6] = inArr[6] ^ (x6 + x[6])
				outArr[7] = inArr[7] ^ (x7 + x[7])
				outArr[8] = inArr[8] ^ (x8 + x[8])
				outArr[9] = inArr[9] ^ (x9 + x[9])
				outArr[10] = inArr[10] ^ (x10 + x[10])
				outArr[11] = inArr[11] ^ (x11 + x[11])
				outArr[12] = inArr[12] ^ (x12 + x[12])
				outArr[13] = inArr[13] ^ (x13 + x[13])
				outArr[14] = inArr[14] ^ (x14 + x[14])
				outArr[15] = inArr[15] ^ (x15 + x[15])
			} else {
				outArr := (*[16]uint32)(unsafe.Pointer(&out[n*BlockSize]))
				outArr[0] = x0 + sigma0
				outArr[1] = x1 + sigma1
				outArr[2] = x2 + sigma2
				outArr[3] = x3 + sigma3
				outArr[4] = x4 + x[4]
				outArr[5] = x5 + x[5]
				outArr[6] = x6 + x[6]
				outArr[7] = x7 + x[7]
				outArr[8] = x8 + x[8]
				outArr[9] = x9 + x[9]
				outArr[10] = x10 + x[10]
				outArr[11] = x11 + x[11]
				outArr[12] = x12 + x[12]
				outArr[13] = x13 + x[13]
				outArr[14] = x14 + x[14]
				outArr[15] = x15 + x[15]
			}
		} else {
			// Slow path, either the architecture cares about alignment, or is not little endian.
			x0 += sigma0
			x1 += sigma1
			x2 += sigma2
			x3 += sigma3
			x4 += x[4]
			x5 += x[5]
			x6 += x[6]
			x7 += x[7]
			x8 += x[8]
			x9 += x[9]
			x10 += x[10]
			x11 += x[11]
			x12 += x[12]
			x13 += x[13]
			x14 += x[14]
			x15 += x[15]
			if in != nil {
				binary.LittleEndian.PutUint32(out[0:4], binary.LittleEndian.Uint32(in[0:4])^x0)
				binary.LittleEndian.PutUint32(out[4:8], binary.LittleEndian.Uint32(in[4:8])^x1)
				binary.LittleEndian.PutUint32(out[8:12], binary.LittleEndian.Uint32(in[8:12])^x2)
				binary.LittleEndian.PutUint32(out[12:16], binary.LittleEndian.Uint32(in[12:16])^x3)
				binary.LittleEndian.PutUint32(out[16:20], binary.LittleEndian.Uint32(in[16:20])^x4)
				binary.LittleEndian.PutUint32(out[20:24], binary.LittleEndian.Uint32(in[20:24])^x5)
				binary.LittleEndian.PutUint32(out[24:28], binary.LittleEndian.Uint32(in[24:28])^x6)
				binary.LittleEndian.PutUint32(out[28:32], binary.LittleEndian.Uint32(in[28:32])^x7)
				binary.LittleEndian.PutUint32(out[32:36], binary.LittleEndian.Uint32(in[32:36])^x8)
				binary.LittleEndian.PutUint32(out[36:40], binary.LittleEndian.Uint32(in[36:40])^x9)
				binary.LittleEndian.PutUint32(out[40:44], binary.LittleEndian.Uint32(in[40:44])^x10)
				binary.LittleEndian.PutUint32(out[44:48], binary.LittleEndian.Uint32(in[44:48])^x11)
				binary.LittleEndian.PutUint32(out[48:52], binary.LittleEndian.Uint32(in[48:52])^x12)
				binary.LittleEndian.PutUint32(out[52:56], binary.LittleEndian.Uint32(in[52:56])^x13)
				binary.LittleEndian.PutUint32(out[56:60], binary.LittleEndian.Uint32(in[56:60])^x14)
				binary.LittleEndian.PutUint32(out[60:64], binary.LittleEndian.Uint32(in[60:64])^x15)
				in = in[BlockSize:]
			} else {
				binary.LittleEndian.PutUint32(out[0:4], x0)
				binary.LittleEndian.PutUint32(out[4:8], x1)
				binary.LittleEndian.PutUint32(out[8:12], x2)
				binary.LittleEndian.PutUint32(out[12:16], x3)
				binary.LittleEndian.PutUint32(out[16:20], x4)
				binary.LittleEndian.PutUint32(out[20:24], x5)
				binary.LittleEndian.PutUint32(out[24:28], x6)
				binary.LittleEndian.PutUint32(out[28:32], x7)
				binary.LittleEndian.PutUint32(out[32:36], x8)
				binary.LittleEndian.PutUint32(out[36:40], x9)
				binary.LittleEndian.PutUint32(out[40:44], x10)
				binary.LittleEndian.PutUint32(out[44:48], x11)
				binary.LittleEndian.PutUint32(out[48:52], x12)
				binary.LittleEndian.PutUint32(out[52:56], x13)
				binary.LittleEndian.PutUint32(out[56:60], x14)
				binary.LittleEndian.PutUint32(out[60:64], x15)
			}
			out = out[BlockSize:]
		}

		// Stoping at 2^70 bytes per nonce is the user's responsibility.
		ctr := uint64(x[13])<<32 | uint64(x[12])
		ctr++
		x[12] = uint32(ctr)
		x[13] = uint32(ctr >> 32)
	}
}

func hChaChaRef(x *[stateSize]uint32, out *[32]byte) {
	x0, x1, x2, x3 := sigma0, sigma1, sigma2, sigma3
	x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 := x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11]

	for i := chachaRounds; i > 0; i -= 2 {
		// quarterround(x, 0, 4, 8, 12)
		x0 += x4
		x12 ^= x0
		x12 = bits.RotateLeft32(x12, 16)
		x8 += x12
		x4 ^= x8
		x4 = bits.RotateLeft32(x4, 12)
		x0 += x4
		x12 ^= x0
		x12 = bits.RotateLeft32(x12, 8)
		x8 += x12
		x4 ^= x8
		x4 = bits.RotateLeft32(x4, 7)

		// quarterround(x, 1, 5, 9, 13)
		x1 += x5
		x13 ^= x1
		x13 = bits.RotateLeft32(x13, 16)
		x9 += x13
		x5 ^= x9
		x5 = bits.RotateLeft32(x5, 12)
		x1 += x5
		x13 ^= x1
		x13 = bits.RotateLeft32(x13, 8)
		x9 += x13
		x5 ^= x9
		x5 = bits.RotateLeft32(x5, 7)

		// quarterround(x, 2, 6, 10, 14)
		x2 += x6
		x14 ^= x2
		x14 = bits.RotateLeft32(x14, 16)
		x10 += x14
		x6 ^= x10
		x6 = bits.RotateLeft32(x6, 12)
		x2 += x6
		x14 ^= x2
		x14 = bits.RotateLeft32(x14, 8)
		x10 += x14
		x6 ^= x10
		x6 = bits.RotateLeft32(x6, 7)

		// quarterround(x, 3, 7, 11, 15)
		x3 += x7
		x15 ^= x3
		x15 = bits.RotateLeft32(x15, 16)
		x11 += x15
		x7 ^= x11
		x7 = bits.RotateLeft32(x7, 12)
		x3 += x7
		x15 ^= x3
		x15 = bits.RotateLeft32(x15, 8)
		x11 += x15
		x7 ^= x11
		x7 = bits.RotateLeft32(x7, 7)

		// quarterround(x, 0, 5, 10, 15)
		x0 += x5
		x15 ^= x0
		x15 = bits.RotateLeft32(x15, 16)
		x10 += x15
		x5 ^= x10
		x5 = bits.RotateLeft32(x5, 12)
		x0 += x5
		x15 ^= x0
		x15 = bits.RotateLeft32(x15, 8)
		x10 += x15
		x5 ^= x10
		x5 = bits.RotateLeft32(x5, 7)

		// quarterround(x, 1, 6, 11, 12)
		x1 += x6
		x12 ^= x1
		x12 = bits.RotateLeft32(x12, 16)
		x11 += x12
		x6 ^= x11
		x6 = bits.RotateLeft32(x6, 12)
		x1 += x6
		x12 ^= x1
		x12 = bits.RotateLeft32(x12, 8)
		x11 += x12
		x6 ^= x11
		x6 = bits.RotateLeft32(x6, 7)

		// quarterround(x, 2, 7, 8, 13)
		x2 += x7
		x13 ^= x2
		x13 = bits.RotateLeft32(x13, 16)
		x8 += x13
		x7 ^= x8
		x7 = bits.RotateLeft32(x7, 12)
		x2 += x7
		x13 ^= x2
		x13 = bits.RotateLeft32(x13, 8)
		x8 += x13
		x7 ^= x8
		x7 = bits.RotateLeft32(x7, 7)

		// quarterround(x, 3, 4, 9, 14)
		x3 += x4
		x14 ^= x3
		x14 = bits.RotateLeft32(x14, 16)
		x9 += x14
		x4 ^= x9
		x4 = bits.RotateLeft32(x4, 12)
		x3 += x4
		x14 ^= x3
		x14 = bits.RotateLeft32(x14, 8)
		x9 += x14
		x4 ^= x9
		x4 = bits.RotateLeft32(x4, 7)
	}

	// HChaCha returns x0...x3 | x12...x15, which corresponds to the
	// indexes of the ChaCha constant and the indexes of the IV.
	if useUnsafe {
		outArr := (*[16]uint32)(unsafe.Pointer(&out[0]))
		outArr[0] = x0
		outArr[1] = x1
		outArr[2] = x2
		outArr[3] = x3
		outArr[4] = x12
		outArr[5] = x13
		outArr[6] = x14
		outArr[7] = x15
	} else {
		binary.LittleEndian.PutUint32(out[0:4], x0)
		binary.LittleEndian.PutUint32(out[4:8], x1)
		binary.LittleEndian.PutUint32(out[8:12], x2)
		binary.LittleEndian.PutUint32(out[12:16], x3)
		binary.LittleEndian.PutUint32(out[16:20], x12)
		binary.LittleEndian.PutUint32(out[20:24], x13)
		binary.LittleEndian.PutUint32(out[24:28], x14)
		binary.LittleEndian.PutUint32(out[28:32], x15)
	}
	return
}
27	vendor/github.com/alecthomas/template/LICENSE	generated vendored
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25	vendor/github.com/alecthomas/template/README.md	generated vendored
@@ -1,25 +0,0 @@
# Go's `text/template` package with newline elision

This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.

eg.

```
{{if true}}\
hello
{{end}}\
```

Will result in:

```
hello\n
```

Rather than:

```
\n
hello\n
\n
```
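As a rough usage sketch (not part of the commit), the elision behavior described in the README above can be exercised from Go as follows, assuming the fork exposes the same New/Parse/Must/Execute API as the standard text/template package it states it was forked from.

```go
package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// The "\\" after {{if true}} and {{end}} becomes a literal backslash in the
	// template text, which suppresses the newlines that text/template would emit.
	const src = "{{if true}}\\\nhello\n{{end}}\\\n"
	t := template.Must(template.New("demo").Parse(src))
	_ = t.Execute(os.Stdout, nil) // prints "hello" followed by a single newline
}
```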
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
@ -1,406 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see package html/template, which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
Actions may not span newlines, although comments can.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail below.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation of the value of the pipeline
|
||||
is copied to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order ("comparable"), the
|
||||
elements will be visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
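
A minimal sketch combining the if, range, and with actions above. The Item type, its fields, and the data are invented for illustration, and the import path assumes the package is used as github.com/alecthomas/template:

package main

import (
	"os"

	"github.com/alecthomas/template"
)

type Item struct {
	Name string
	Tags []string
}

func main() {
	const src = `{{if .Tags}}{{.Name}}:{{range .Tags}} {{.}}{{end}}{{else}}{{.Name}} has no tags{{end}}
{{with .Name}}name is {{.}}{{end}}
`
	t := template.Must(template.New("item").Parse(src))
	data := Item{Name: "sweater", Tags: []string{"wool", "warm"}}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}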
|
||||
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants, although raw strings may not span newlines.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
|
||||
field names they do not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of the each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
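
A short sketch of the two-variable range form described above (the template text and data are illustrative):

package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// $i receives the index and $v the element; a single-variable range
	// would bind only the element, as noted above.
	const src = "{{range $i, $v := .}}{{$i}}={{$v}} {{end}}\n"
	t := template.Must(template.New("vars").Parse(src))
	if err := t.Execute(os.Stdout, []string{"a", "b", "c"}); err != nil {
		panic(err)
	}
}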
|
||||
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
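
One of the one-line templates above, executed from Go (a sketch; any of the other examples could be substituted as the template text):

package main

import (
	"os"

	"github.com/alecthomas/template"
)

func main() {
	// Each command's result becomes the final argument of the next,
	// so this prints the quoted word "output".
	const src = `{{"put" | printf "%s%s" "out" | printf "%q"}}`
	t := template.Must(template.New("pipe").Parse(src))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}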
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument, that is,
|
||||
"and x y" behaves as "if x then y else x". All the
|
||||
arguments are evaluated.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y". All the
|
||||
arguments are evaluated.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on basic types only (or named basic
|
||||
types, such as "type Celsius float32"). They implement the Go rules
|
||||
for comparison of values, except that size and exact type are
|
||||
ignored, so any integer value, signed or unsigned, may be compared
|
||||
with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all
|
||||
unsigned integers.) However, as usual, one may not compare an int
|
||||
with a float32 and so on.
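
A sketch that adds a function with the Funcs method and uses the multi-argument form of eq described above; the "upper" name and the data are assumptions made for illustration:

package main

import (
	"os"
	"strings"

	"github.com/alecthomas/template"
)

func main() {
	// Funcs must be called before Parse so the parser accepts the name "upper".
	funcs := template.FuncMap{"upper": strings.ToUpper}
	const src = `{{if eq .Role "admin" "owner"}}{{upper .Name}} may edit{{else}}{{.Name}} is read-only{{end}}
`
	t := template.Must(template.New("perm").Funcs(funcs).Parse(src))
	data := map[string]string{"Name": "alice", "Role": "admin"}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}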
|
||||
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
`{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}`
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with the Clone or AddParseTree method.
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
|
||||
related templates stored in files.
|
||||
|
||||
A template may be executed directly or through ExecuteTemplate, which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
*/
|
||||
package template
845
vendor/github.com/alecthomas/template/exec.go
generated
vendored
@@ -1,845 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
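
A usage sketch of the parallel-execution guarantee in the comment above: the template is parsed once and then executed concurrently, each goroutine writing to its own buffer (illustrative only, not part of exec.go):

package main

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/alecthomas/template"
)

func main() {
	// The parsed tree is shared read-only; per-execution state lives in
	// each call to Execute, so concurrent executions are safe.
	t := template.Must(template.New("n").Parse("worker {{.}}\n"))
	out := make([]bytes.Buffer, 4)
	var wg sync.WaitGroup
	for i := range out {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := t.Execute(&out[i], i); err != nil {
				panic(err)
			}
		}(i)
	}
	wg.Wait()
	for i := range out {
		fmt.Print(out[i].String())
	}
}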
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.
|
||||
|
||||
type rvs []reflect.Value
|
||||
|
||||
func (x rvs) Len() int { return len(x) }
|
||||
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
type rvInts struct{ rvs }
|
||||
|
||||
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
|
||||
|
||||
type rvUints struct{ rvs }
|
||||
|
||||
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
|
||||
|
||||
type rvFloats struct{ rvs }
|
||||
|
||||
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
|
||||
|
||||
type rvStrings struct{ rvs }
|
||||
|
||||
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
|
||||
|
||||
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
|
||||
func sortKeys(v []reflect.Value) []reflect.Value {
|
||||
if len(v) <= 1 {
|
||||
return v
|
||||
}
|
||||
switch v[0].Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sort.Sort(rvFloats{v})
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
sort.Sort(rvInts{v})
|
||||
case reflect.String:
|
||||
sort.Sort(rvStrings{v})
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
sort.Sort(rvUints{v})
|
||||
}
|
||||
return v
|
||||
}
598
vendor/github.com/alecthomas/template/funcs.go
generated
vendored
@@ -1,598 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
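
A sketch of a FuncMap entry with the two-result (value, error) shape required above; the "first" helper is hypothetical:

package main

import (
	"errors"
	"os"

	"github.com/alecthomas/template"
)

// first returns the first element of a slice, or an error when it is empty;
// a non-nil error stops execution and is returned from Execute.
func first(items []string) (string, error) {
	if len(items) == 0 {
		return "", errors.New("first: empty slice")
	}
	return items[0], nil
}

func main() {
	t := template.Must(template.New("f").
		Funcs(template.FuncMap{"first": first}).
		Parse("{{first .}}\n"))
	if err := t.Execute(os.Stdout, []string{"alpha", "beta"}); err != nil {
		panic(err)
	}
}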
|
||||
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
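
A sketch of the call builtin invoking a func-valued field, as handled by the function above; the page type and its Slug field are invented for illustration:

package main

import (
	"os"
	"strings"

	"github.com/alecthomas/template"
)

type page struct {
	Title string
	Slug  func(string) string // func-valued field, invoked via the call builtin
}

func main() {
	const src = "{{call .Slug .Title}}\n"
	t := template.Must(template.New("call").Parse(src))
	data := page{
		Title: "Hello World",
		Slug: func(s string) string {
			return strings.ToLower(strings.Replace(s, " ", "-", -1))
		},
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}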
|
||||
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
|
||||
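For context, the comparison builtins defined above behave as follows when invoked from a template. This is a small sketch against the standard text/template package rather than this vendored copy, but the semantics (eq taking multiple comparands; lt/le/gt/ge rejecting bool and complex operands) are the same:

package main

import (
	"os"
	"text/template"
)

func main() {
	// eq accepts several comparands: true if .N equals any of 1, 2 or 3.
	// lt/le/gt/ge compare numbers and strings; bool and complex operands
	// produce the "invalid type for comparison" error defined above.
	tmpl := template.Must(template.New("cmp").Parse(
		`{{if eq .N 1 2 3}}small{{else if lt .N 10}}medium{{else}}large{{end}}`))
	_ = tmpl.Execute(os.Stdout, map[string]int{"N": 7})
	// Prints: medium
}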
// HTML escaping.
|
||||
|
||||
var (
|
||||
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp = []byte("&amp;")
htmlLt = []byte("&lt;")
htmlGt = []byte("&gt;")
|
||||
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
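HTMLEscapeString above short-circuits when none of the five special characters are present; otherwise it funnels through HTMLEscape. A quick usage sketch using the standard library's identical export:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	// The five characters " ' & < > are replaced with entities; everything
	// else passes through untouched, and clean strings are returned as-is.
	fmt.Println(template.HTMLEscapeString(`<a href="x">Tom & Jerry</a>`))
	// Prints: &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&lt;/a&gt;
}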
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
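JSEscapeString and URLQueryEscaper above follow the same pattern as the HTML escapers. A usage sketch with the standard text/template exports; note that this vendored copy emits \x3C/\x3E for angle brackets (jsLt/jsGt above), while newer standard-library releases emit \u003C/\u003E:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	// Quotes and backslashes inside a JavaScript string literal get backslash-escaped.
	fmt.Println(template.JSEscapeString(`say "hello" to O'Neill`))
	// Prints: say \"hello\" to O\'Neill

	// URLQueryEscaper formats its arguments (like fmt.Sprint) and then applies
	// url.QueryEscape to the result.
	fmt.Println(template.URLQueryEscaper("a&b c"))
	// Prints: a%26b+c
}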
108 vendor/github.com/alecthomas/template/helper.go (generated, vendored)
@@ -1,108 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
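helper.go above is the usual Must/ParseFiles/ParseGlob trio. A hedged usage sketch; the directory and template names are hypothetical:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Must panics on a parse error, which suits package-level initialization.
	// ParseGlob must match at least one file; the set takes the first file's
	// base name, and each file is addressable by its own base name.
	t := template.Must(template.ParseGlob("templates/*.tmpl")) // hypothetical path
	if err := t.ExecuteTemplate(os.Stdout, "index.tmpl", nil); err != nil {
		// e.g. "index.tmpl" is not one of the parsed templates
		panic(err)
	}
}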
556 vendor/github.com/alecthomas/template/parse/lex.go (generated, vendored)
@@ -1,556 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
l.emit(key[word])
|
||||
case word[0] == '.':
|
||||
l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
l.emit(itemBool)
|
||||
default:
|
||||
l.emit(itemIdentifier)
|
||||
}
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
l.emit(itemVariable)
|
||||
return lexInsideAction
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
l.emit(itemVariable)
|
||||
} else {
|
||||
l.emit(itemDot)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
l.emit(typ)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at a valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
|
||||
// succeed but should fail) but only in extremely rare cases caused by willfully
|
||||
// bad choice of delimiter.
|
||||
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemCharConstant)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(itemComplex)
|
||||
} else {
|
||||
l.emit(itemNumber)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789"
|
||||
if l.accept("0") && l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemRawString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
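lex.go above is the classic state-function lexer: every state is a func(*lexer) stateFn, and run loops until a state returns nil. A toy, self-contained illustration of that control flow (not part of the vendored code; all names here are invented for the example):

package main

import (
	"fmt"
	"strings"
)

type toyLexer struct {
	input string
	pos   int
	words []string
}

// Each state consumes some input and returns the next state; nil stops the machine.
type toyState func(*toyLexer) toyState

func lexSpaces(l *toyLexer) toyState {
	for l.pos < len(l.input) && l.input[l.pos] == ' ' {
		l.pos++
	}
	if l.pos == len(l.input) {
		return nil
	}
	return lexWord
}

func lexWord(l *toyLexer) toyState {
	rest := l.input[l.pos:]
	if end := strings.IndexByte(rest, ' '); end >= 0 {
		l.words = append(l.words, rest[:end])
		l.pos += end
		return lexSpaces
	}
	l.words = append(l.words, rest)
	return nil
}

func main() {
	l := &toyLexer{input: "lex me  please"}
	// Mirrors (*lexer).run: drive the machine until a state returns nil.
	for state := toyState(lexSpaces); state != nil; {
		state = state(l)
	}
	fmt.Println(l.words) // [lex me please]
}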
834 vendor/github.com/alecthomas/template/parse/node.go (generated, vendored)
@@ -1,834 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Parse nodes.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
|
||||
|
||||
// A Node is an element in the parse tree. The interface is trivial.
|
||||
// The interface contains an unexported method so that only
|
||||
// types local to this package can satisfy it.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
// Copy does a deep copy of the Node and all its components.
|
||||
// To avoid type assertions, some XxxNodes also have specialized
|
||||
// CopyXxx methods that return *XxxNode.
|
||||
Copy() Node
|
||||
Position() Pos // byte position of start of node in full original input string
|
||||
// tree returns the containing *Tree.
|
||||
// It is unexported so all implementations of Node are in this package.
|
||||
tree() *Tree
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Pos represents a byte position in the original input text from which
|
||||
// this template was parsed.
|
||||
type Pos int
|
||||
|
||||
func (p Pos) Position() Pos {
|
||||
return p
|
||||
}
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // Plain text.
|
||||
NodeAction // A non-control action such as a field evaluation.
|
||||
NodeBool // A boolean constant.
|
||||
NodeChain // A sequence of field accesses.
|
||||
NodeCommand // An element of a pipeline.
|
||||
NodeDot // The cursor, dot.
|
||||
nodeElse // An else action. Not added to tree.
|
||||
nodeEnd // An end action. Not added to tree.
|
||||
NodeField // A field or method name.
|
||||
NodeIdentifier // An identifier; always a function name.
|
||||
NodeIf // An if action.
|
||||
NodeList // A list of Nodes.
|
||||
NodeNil // An untyped nil constant.
|
||||
NodeNumber // A numerical constant.
|
||||
NodePipe // A pipeline of commands.
|
||||
NodeRange // A range action.
|
||||
NodeString // A string constant.
|
||||
NodeTemplate // A template invocation action.
|
||||
NodeVariable // A $ variable.
|
||||
NodeWith // A with action.
|
||||
)
|
||||
|
||||
// Nodes.
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newList(pos Pos) *ListNode {
|
||||
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) tree() *Tree {
|
||||
return l.tr
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, n := range l.Nodes {
|
||||
fmt.Fprint(b, n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (l *ListNode) CopyList() *ListNode {
|
||||
if l == nil {
|
||||
return l
|
||||
}
|
||||
n := l.tr.newList(l.Pos)
|
||||
for _, elem := range l.Nodes {
|
||||
n.append(elem.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (l *ListNode) Copy() Node {
|
||||
return l.CopyList()
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Text []byte // The text; may span newlines.
|
||||
}
|
||||
|
||||
func (t *Tree) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf(textFormat, t.Text)
|
||||
}
|
||||
|
||||
func (t *TextNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TextNode) Copy() Node {
|
||||
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
|
||||
}
|
||||
|
||||
// PipeNode holds a pipeline with optional declaration
|
||||
type PipeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Decl []*VariableNode // Variable declarations in lexical order.
|
||||
Cmds []*CommandNode // The commands in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
|
||||
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
|
||||
}
|
||||
|
||||
func (p *PipeNode) append(command *CommandNode) {
|
||||
p.Cmds = append(p.Cmds, command)
|
||||
}
|
||||
|
||||
func (p *PipeNode) String() string {
|
||||
s := ""
|
||||
if len(p.Decl) > 0 {
|
||||
for i, v := range p.Decl {
|
||||
if i > 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += v.String()
|
||||
}
|
||||
s += " := "
|
||||
}
|
||||
for i, c := range p.Cmds {
|
||||
if i > 0 {
|
||||
s += " | "
|
||||
}
|
||||
s += c.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *PipeNode) tree() *Tree {
|
||||
return p.tr
|
||||
}
|
||||
|
||||
func (p *PipeNode) CopyPipe() *PipeNode {
|
||||
if p == nil {
|
||||
return p
|
||||
}
|
||||
var decl []*VariableNode
|
||||
for _, d := range p.Decl {
|
||||
decl = append(decl, d.Copy().(*VariableNode))
|
||||
}
|
||||
n := p.tr.newPipeline(p.Pos, p.Line, decl)
|
||||
for _, c := range p.Cmds {
|
||||
n.append(c.Copy().(*CommandNode))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *PipeNode) Copy() Node {
|
||||
return p.CopyPipe()
|
||||
}
|
||||
|
||||
// ActionNode holds an action (something bounded by delimiters).
|
||||
// Control actions have their own nodes; ActionNode represents simple
|
||||
// ones such as field evaluations and parenthesized pipelines.
|
||||
type ActionNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline in the action.
|
||||
}
|
||||
|
||||
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
|
||||
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (a *ActionNode) String() string {
|
||||
return fmt.Sprintf("{{%s}}", a.Pipe)
|
||||
|
||||
}
|
||||
|
||||
func (a *ActionNode) tree() *Tree {
|
||||
return a.tr
|
||||
}
|
||||
|
||||
func (a *ActionNode) Copy() Node {
|
||||
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
|
||||
|
||||
}
|
||||
|
||||
// CommandNode holds a command (a pipeline inside an evaluating action).
|
||||
type CommandNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Args []Node // Arguments in lexical order: Identifier, field, or constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newCommand(pos Pos) *CommandNode {
|
||||
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
|
||||
}
|
||||
|
||||
func (c *CommandNode) append(arg Node) {
|
||||
c.Args = append(c.Args, arg)
|
||||
}
|
||||
|
||||
func (c *CommandNode) String() string {
|
||||
s := ""
|
||||
for i, arg := range c.Args {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
if arg, ok := arg.(*PipeNode); ok {
|
||||
s += "(" + arg.String() + ")"
|
||||
continue
|
||||
}
|
||||
s += arg.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *CommandNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *CommandNode) Copy() Node {
|
||||
if c == nil {
|
||||
return c
|
||||
}
|
||||
n := c.tr.newCommand(c.Pos)
|
||||
for _, c := range c.Args {
|
||||
n.append(c.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
|
||||
type StringNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Quoted string // The original text of the string, with quotes.
|
||||
Text string // The string, after quote processing.
|
||||
}
|
||||
|
||||
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
|
||||
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
|
||||
}
|
||||
|
||||
func (s *StringNode) String() string {
|
||||
return s.Quoted
|
||||
}
|
||||
|
||||
func (s *StringNode) tree() *Tree {
|
||||
return s.tr
|
||||
}
|
||||
|
||||
func (s *StringNode) Copy() Node {
|
||||
return s.tr.newString(s.Pos, s.Quoted, s.Text)
|
||||
}
|
||||
|
||||
// endNode represents an {{end}} action.
|
||||
// It does not appear in the final parse tree.
|
||||
type endNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newEnd(pos Pos) *endNode {
|
||||
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
|
||||
}
|
||||
|
||||
func (e *endNode) String() string {
|
||||
return "{{end}}"
|
||||
}
|
||||
|
||||
func (e *endNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *endNode) Copy() Node {
|
||||
return e.tr.newEnd(e.Pos)
|
||||
}
|
||||
|
||||
// elseNode represents an {{else}} action. Does not appear in the final tree.
|
||||
type elseNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
}
|
||||
|
||||
func (t *Tree) newElse(pos Pos, line int) *elseNode {
|
||||
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
|
||||
}
|
||||
|
||||
func (e *elseNode) Type() NodeType {
|
||||
return nodeElse
|
||||
}
|
||||
|
||||
func (e *elseNode) String() string {
|
||||
return "{{else}}"
|
||||
}
|
||||
|
||||
func (e *elseNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *elseNode) Copy() Node {
|
||||
return e.tr.newElse(e.Pos, e.Line)
|
||||
}
|
||||
|
||||
// BranchNode is the common representation of if, range, and with.
|
||||
type BranchNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline to be evaluated.
|
||||
List *ListNode // What to execute if the value is non-empty.
|
||||
ElseList *ListNode // What to execute if the value is empty (nil if absent).
|
||||
}
|
||||
|
||||
func (b *BranchNode) String() string {
|
||||
name := ""
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
name = "if"
|
||||
case NodeRange:
|
||||
name = "range"
|
||||
case NodeWith:
|
||||
name = "with"
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
if b.ElseList != nil {
|
||||
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
|
||||
}
|
||||
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
|
||||
}
|
||||
|
||||
func (b *BranchNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BranchNode) Copy() Node {
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeRange:
|
||||
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeWith:
|
||||
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
}
|
||||
|
||||
// IfNode represents an {{if}} action and its commands.
|
||||
type IfNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
|
||||
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (i *IfNode) Copy() Node {
|
||||
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// RangeNode represents a {{range}} action and its commands.
|
||||
type RangeNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
|
||||
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (r *RangeNode) Copy() Node {
|
||||
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// WithNode represents a {{with}} action and its commands.
|
||||
type WithNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
|
||||
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (w *WithNode) Copy() Node {
|
||||
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// TemplateNode represents a {{template}} action.
|
||||
type TemplateNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Name string // The name of the template (unquoted).
|
||||
Pipe *PipeNode // The command to evaluate as dot for the template.
|
||||
}
|
||||
|
||||
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
|
||||
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (t *TemplateNode) String() string {
|
||||
if t.Pipe == nil {
|
||||
return fmt.Sprintf("{{template %q}}", t.Name)
|
||||
}
|
||||
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
|
||||
}
|
||||
|
||||
func (t *TemplateNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TemplateNode) Copy() Node {
|
||||
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
|
||||
}
|
||||
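node.go above defines the node kinds the parser produces. A small sketch that walks a parse tree using the standard library's text/template/parse, whose API matches this vendored copy:

package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	// Parse returns a map from template name to *Tree; Root is a *ListNode
	// whose Nodes are TextNode, ActionNode, IfNode, and so on.
	trees, err := parse.Parse("demo", "Hello {{.Name}}!", "{{", "}}")
	if err != nil {
		panic(err)
	}
	for _, n := range trees["demo"].Root.Nodes {
		fmt.Printf("%T %q\n", n, n.String())
	}
	// Prints:
	// *parse.TextNode "Hello "
	// *parse.ActionNode "{{.Name}}"
	// *parse.TextNode "!"
}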
700 vendor/github.com/alecthomas/template/parse/parse.go (generated, vendored)
@@ -1,700 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]interface{}
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
}
|
||||
|
||||
// Copy returns a copy of the Tree. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to parse.Tree, created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
|
||||
treeSet = make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return
|
||||
}
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.backup()
|
||||
return token
|
||||
}
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]interface{}) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
if len(context) > 20 {
|
||||
context = fmt.Sprintf("%.20s...", context)
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...interface{}) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
}
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
|
||||
t.text = text
|
||||
t.parse(treeSet)
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to the treeSet.
|
||||
func (t *Tree) add(treeSet map[string]*Tree) {
|
||||
tree := treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex)
|
||||
newT.parseDefinition(treeSet)
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
n := t.textOrAction()
|
||||
if n.Type() == nodeEnd {
|
||||
t.errorf("unexpected %s", n)
|
||||
}
|
||||
t.Root.append(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in the treeSet map. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
// textOrAction*
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
// text | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElideNewline:
|
||||
return t.elideNewline()
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
return t.action()
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// elideNewline:
|
||||
// Remove newlines trailing rightDelim if \\ is present.
|
||||
func (t *Tree) elideNewline() Node {
|
||||
token := t.peek()
|
||||
if token.typ != itemText {
|
||||
t.unexpected(token, "input")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.next()
|
||||
stripped := strings.TrimLeft(token.val, "\n\r")
|
||||
diff := len(token.val) - len(stripped)
|
||||
if diff > 0 {
|
||||
// This is a bit nasty. We mutate the token in-place to remove
|
||||
// preceding newlines.
|
||||
token.pos += Pos(diff)
|
||||
token.val = stripped
|
||||
}
|
||||
return t.newText(token.pos, token.val)
|
||||
}
|
||||
|
||||
// Action:
|
||||
// control
|
||||
// command ("|" command)*
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
|
||||
var decl []*VariableNode
|
||||
pos := t.peekNonSpace().pos
|
||||
// Are there declarations?
|
||||
for {
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
|
||||
t.nextNonSpace()
|
||||
variable := t.newVariable(v.pos, v.val)
|
||||
decl = append(decl, variable)
|
||||
t.vars = append(t.vars, v.val)
|
||||
if next.typ == itemChar && next.val == "," {
|
||||
if context == "range" && len(decl) < 2 {
|
||||
continue
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
}
|
||||
} else if tokenAfterVariable.typ == itemSpace {
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
} else {
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemRightDelim, itemRightParen:
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
if token.typ == itemRightParen {
|
||||
t.backup()
|
||||
}
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
line = t.lex.lineNumber()
|
||||
pipe = t.pipeline(context)
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
if allowElseIf {
|
||||
// Special case for "else if". If the "else" is followed immediately by an "if",
|
||||
// the elseControl will have left the "if" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
|
||||
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
|
||||
// is assumed. This technique works even for long if-else-if chains.
|
||||
// TODO: Should we allow else-if in with and range?
|
||||
if t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
// Do not consume the next item - only one {{end}} required.
|
||||
break
|
||||
}
|
||||
}
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
return pipe.Position(), line, pipe, list, elseList
|
||||
}
|
||||
|
||||
// If:
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl(true, "if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
return t.newRange(t.parseControl(false, "range"))
|
||||
}
|
||||
|
||||
// With:
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl(false, "with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
// {{end}}
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
// {{else}}
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
// Special case for "else if".
|
||||
peek := t.peekNonSpace()
|
||||
if peek.typ == itemIf {
|
||||
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
|
||||
return t.newElse(peek.pos, t.lex.lineNumber())
|
||||
}
|
||||
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
|
||||
}
|
||||
|
||||
// Template:
|
||||
// {{template stringValue pipeline}}
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
var name string
|
||||
token := t.nextNonSpace()
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, "template invocation")
|
||||
}
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline("template")
|
||||
}
|
||||
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
|
||||
}
|
||||
|
||||
// command:
|
||||
// operand (space operand)*
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// we consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
default:
|
||||
t.errorf("unexpected %s in operand; missing space?", token)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
// term .Field*
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// TODO: Switch to Chains always when we can.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemIdentifier:
|
||||
if !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
pipe := t.pipeline("parenthesized pipeline")
|
||||
if token := t.next(); token.typ != itemRightParen {
|
||||
t.errorf("unclosed right paren: unexpected %s", token)
|
||||
}
|
||||
return pipe
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
||||
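For orientation (this sketch is not part of the commit): a minimal example of driving the removed parse package. The "page"/"footer" names and the template text are invented, and the func map entry is required because the parser rejects identifiers that are not registered functions.

```go
package main

import (
    "fmt"

    "github.com/alecthomas/template/parse"
)

func main() {
    // Parse returns one tree per template name; {{define}} blocks become
    // additional entries alongside the top-level template.
    funcs := map[string]interface{}{"printf": fmt.Sprintf}
    trees, err := parse.Parse("page",
        `Hello {{printf "%s" .Name}}{{define "footer"}}bye{{end}}`,
        "", "", funcs) // empty delimiters mean the defaults, "{{" and "}}"
    if err != nil {
        panic(err)
    }
    for name, tree := range trees {
        fmt.Printf("%s => %s\n", name, tree.Root)
    }
}
```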
218
vendor/github.com/alecthomas/template/template.go
generated
vendored
@@ -1,218 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// common holds the information shared by related templates.
|
||||
type common struct {
|
||||
tmpl map[string]*Template
|
||||
// We use two maps, one for parsing and one for execution.
|
||||
// This separation makes the API cleaner since it doesn't
|
||||
// expose reflection to the client.
|
||||
parseFuncs FuncMap
|
||||
execFuncs map[string]reflect.Value
|
||||
}
|
||||
|
||||
// Template is the representation of a parsed template. The *parse.Tree
|
||||
// field is exported only for use by html/template and should be treated
|
||||
// as unexported by all other clients.
|
||||
type Template struct {
|
||||
name string
|
||||
*parse.Tree
|
||||
*common
|
||||
leftDelim string
|
||||
rightDelim string
|
||||
}
|
||||
|
||||
// New allocates a new template with the given name.
|
||||
func New(name string) *Template {
|
||||
return &Template{
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the template.
|
||||
func (t *Template) Name() string {
|
||||
return t.name
|
||||
}
|
||||
|
||||
// New allocates a new template associated with the given one and with the same
|
||||
// delimiters. The association, which is transitive, allows one template to
|
||||
// invoke another with a {{template}} action.
|
||||
func (t *Template) New(name string) *Template {
|
||||
t.init()
|
||||
return &Template{
|
||||
name: name,
|
||||
common: t.common,
|
||||
leftDelim: t.leftDelim,
|
||||
rightDelim: t.rightDelim,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Template) init() {
|
||||
if t.common == nil {
|
||||
t.common = new(common)
|
||||
t.tmpl = make(map[string]*Template)
|
||||
t.parseFuncs = make(FuncMap)
|
||||
t.execFuncs = make(map[string]reflect.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the template, including all associated
|
||||
// templates. The actual representation is not copied, but the name space of
|
||||
// associated templates is, so further calls to Parse in the copy will add
|
||||
// templates to the copy but not to the original. Clone can be used to prepare
|
||||
// common templates and use them with variant definitions for other templates
|
||||
// by adding the variants after the clone is made.
|
||||
func (t *Template) Clone() (*Template, error) {
|
||||
nt := t.copy(nil)
|
||||
nt.init()
|
||||
nt.tmpl[t.name] = nt
|
||||
for k, v := range t.tmpl {
|
||||
if k == t.name { // Already installed.
|
||||
continue
|
||||
}
|
||||
// The associated templates share nt's common structure.
|
||||
tmpl := v.copy(nt.common)
|
||||
nt.tmpl[k] = tmpl
|
||||
}
|
||||
for k, v := range t.parseFuncs {
|
||||
nt.parseFuncs[k] = v
|
||||
}
|
||||
for k, v := range t.execFuncs {
|
||||
nt.execFuncs[k] = v
|
||||
}
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// copy returns a shallow copy of t, with common set to the argument.
|
||||
func (t *Template) copy(c *common) *Template {
|
||||
nt := New(t.name)
|
||||
nt.Tree = t.Tree
|
||||
nt.common = c
|
||||
nt.leftDelim = t.leftDelim
|
||||
nt.rightDelim = t.rightDelim
|
||||
return nt
|
||||
}
|
||||
|
||||
// AddParseTree creates a new template with the name and parse tree
|
||||
// and associates it with t.
|
||||
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
|
||||
if t.common != nil && t.tmpl[name] != nil {
|
||||
return nil, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
nt := t.New(name)
|
||||
nt.Tree = tree
|
||||
t.tmpl[name] = nt
|
||||
return nt, nil
|
||||
}
|
||||
|
||||
// Templates returns a slice of the templates associated with t, including t
|
||||
// itself.
|
||||
func (t *Template) Templates() []*Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
// Return a slice so we don't expose the map.
|
||||
m := make([]*Template, 0, len(t.tmpl))
|
||||
for _, v := range t.tmpl {
|
||||
m = append(m, v)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Delims sets the action delimiters to the specified strings, to be used in
|
||||
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
|
||||
// definitions will inherit the settings. An empty delimiter stands for the
|
||||
// corresponding default: {{ or }}.
|
||||
// The return value is the template, so calls can be chained.
|
||||
func (t *Template) Delims(left, right string) *Template {
|
||||
t.leftDelim = left
|
||||
t.rightDelim = right
|
||||
return t
|
||||
}
|
||||
|
||||
// Funcs adds the elements of the argument map to the template's function map.
|
||||
// It panics if a value in the map is not a function with appropriate return
|
||||
// type. However, it is legal to overwrite elements of the map. The return
|
||||
// value is the template, so calls can be chained.
|
||||
func (t *Template) Funcs(funcMap FuncMap) *Template {
|
||||
t.init()
|
||||
addValueFuncs(t.execFuncs, funcMap)
|
||||
addFuncs(t.parseFuncs, funcMap)
|
||||
return t
|
||||
}
|
||||
|
||||
// Lookup returns the template with the given name that is associated with t,
|
||||
// or nil if there is no such template.
|
||||
func (t *Template) Lookup(name string) *Template {
|
||||
if t.common == nil {
|
||||
return nil
|
||||
}
|
||||
return t.tmpl[name]
|
||||
}
|
||||
|
||||
// Parse parses a string into a template. Nested template definitions will be
|
||||
// associated with the top-level template t. Parse may be called multiple times
|
||||
// to parse definitions of templates to associate with t. It is an error if a
|
||||
// resulting template is non-empty (contains content other than template
|
||||
// definitions) and would replace a non-empty template with the same name.
|
||||
// (In multiple calls to Parse with the same receiver template, only one call
|
||||
// can contain text other than space, comments, and template definitions.)
|
||||
func (t *Template) Parse(text string) (*Template, error) {
|
||||
t.init()
|
||||
trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add the newly parsed trees, including the one for t, into our common structure.
|
||||
for name, tree := range trees {
|
||||
// If the name we parsed is the name of this template, overwrite this template.
|
||||
// The associate method checks it's not a redefinition.
|
||||
tmpl := t
|
||||
if name != t.name {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
// Even if t == tmpl, we need to install it in the common.tmpl map.
|
||||
if replace, err := t.associate(tmpl, tree); err != nil {
|
||||
return nil, err
|
||||
} else if replace {
|
||||
tmpl.Tree = tree
|
||||
}
|
||||
tmpl.leftDelim = t.leftDelim
|
||||
tmpl.rightDelim = t.rightDelim
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// associate installs the new template into the group of templates associated
|
||||
// with t. It is an error to reuse a name except to overwrite an empty
|
||||
// template. The two are already known to share the common structure.
|
||||
// The boolean return value reports whether to store this tree as t.Tree.
|
||||
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
|
||||
if new.common != t.common {
|
||||
panic("internal error: associate not common")
|
||||
}
|
||||
name := new.name
|
||||
if old := t.tmpl[name]; old != nil {
|
||||
oldIsEmpty := parse.IsEmptyTree(old.Root)
|
||||
newIsEmpty := parse.IsEmptyTree(tree.Root)
|
||||
if newIsEmpty {
|
||||
// Whether old is empty or not, new is empty; no reason to replace old.
|
||||
return false, nil
|
||||
}
|
||||
if !oldIsEmpty {
|
||||
return false, fmt.Errorf("template: redefinition of template %q", name)
|
||||
}
|
||||
}
|
||||
t.tmpl[name] = new
|
||||
return true, nil
|
||||
}
|
||||
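For orientation (not part of the commit): a hedged usage sketch of the removed template package, exercising New, Parse, and the {{define}}/{{template}} association the doc comments above describe. Execute is assumed from the package's exec half, which is not in this hunk; the greeting text is invented.

```go
package main

import (
    "os"

    "github.com/alecthomas/template"
)

func main() {
    // The nested {{define}} becomes an associated template that the
    // {{template}} action can invoke, as in the standard text/template.
    t, err := template.New("letter").Parse(
        `{{template "greeting" .}} - thanks for reading.{{define "greeting"}}Hello, {{.}}!{{end}}`)
    if err != nil {
        panic(err)
    }
    if err := t.Execute(os.Stdout, "Gopher"); err != nil {
        panic(err)
    }
    // Prints: Hello, Gopher! - thanks for reading.
}
```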
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
@@ -1,19 +0,0 @@
|
||||
Copyright (C) 2014 Alec Thomas
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
11
vendor/github.com/alecthomas/units/README.md
generated
vendored
@@ -1,11 +0,0 @@
# Units - Helpful unit multipliers and functions for Go

The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.

It allows for code like this:

```go
n, err := ParseBase2Bytes("1KB")
// n == 1024
n = units.Mebibyte * 512
```
83
vendor/github.com/alecthomas/units/bytes.go
generated
vendored
@@ -1,83 +0,0 @@
package units

// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
// etc.).
type Base2Bytes int64

// Base-2 byte units.
const (
    Kibibyte Base2Bytes = 1024
    KiB = Kibibyte
    Mebibyte = Kibibyte * 1024
    MiB = Mebibyte
    Gibibyte = Mebibyte * 1024
    GiB = Gibibyte
    Tebibyte = Gibibyte * 1024
    TiB = Tebibyte
    Pebibyte = Tebibyte * 1024
    PiB = Pebibyte
    Exbibyte = Pebibyte * 1024
    EiB = Exbibyte
)

var (
    bytesUnitMap = MakeUnitMap("iB", "B", 1024)
    oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
)

// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
// and KiB are both 1024.
func ParseBase2Bytes(s string) (Base2Bytes, error) {
    n, err := ParseUnit(s, bytesUnitMap)
    if err != nil {
        n, err = ParseUnit(s, oldBytesUnitMap)
    }
    return Base2Bytes(n), err
}

func (b Base2Bytes) String() string {
    return ToString(int64(b), 1024, "iB", "B")
}

var (
    metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
)

// MetricBytes are SI byte units (1000 bytes in a kilobyte).
type MetricBytes SI

// SI base-10 byte units.
const (
    Kilobyte MetricBytes = 1000
    KB = Kilobyte
    Megabyte = Kilobyte * 1000
    MB = Megabyte
    Gigabyte = Megabyte * 1000
    GB = Gigabyte
    Terabyte = Gigabyte * 1000
    TB = Terabyte
    Petabyte = Terabyte * 1000
    PB = Petabyte
    Exabyte = Petabyte * 1000
    EB = Exabyte
)

// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
func ParseMetricBytes(s string) (MetricBytes, error) {
    n, err := ParseUnit(s, metricBytesUnitMap)
    return MetricBytes(n), err
}

func (m MetricBytes) String() string {
    return ToString(int64(m), 1000, "B", "B")
}

// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
// respectively. That is, KiB represents 1024 and KB represents 1000.
func ParseStrictBytes(s string) (int64, error) {
    n, err := ParseUnit(s, bytesUnitMap)
    if err != nil {
        n, err = ParseUnit(s, metricBytesUnitMap)
    }
    return int64(n), err
}
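For orientation (not part of the commit): a small sketch of the byte-unit parsers defined above. The input strings are arbitrary; the expected values follow from the unit maps in bytes.go (the iB family is base 1024, the plain-B family is base 1000 under strict parsing).

```go
package main

import (
    "fmt"

    "github.com/alecthomas/units"
)

func main() {
    // Base-2 parsing: "MiB" and plain "MB" both mean 1024*1024 here.
    n, err := units.ParseBase2Bytes("1.5MiB")
    if err != nil {
        panic(err)
    }
    fmt.Println(int64(n)) // 1572864

    // Strict parsing keeps the families apart: KiB is 1024, KB is 1000.
    m, err := units.ParseStrictBytes("2KB")
    if err != nil {
        panic(err)
    }
    fmt.Println(m) // 2000
}
```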
13
vendor/github.com/alecthomas/units/doc.go
generated
vendored
@@ -1,13 +0,0 @@
// Package units provides helpful unit multipliers and functions for Go.
//
// The goal of this package is to have functionality similar to the time [1] package.
//
//
// [1] http://golang.org/pkg/time/
//
// It allows for code like this:
//
// n, err := ParseBase2Bytes("1KB")
// // n == 1024
// n = units.Mebibyte * 512
package units
26
vendor/github.com/alecthomas/units/si.go
generated
vendored
@@ -1,26 +0,0 @@
package units

// SI units.
type SI int64

// SI unit multiples.
const (
    Kilo SI = 1000
    Mega = Kilo * 1000
    Giga = Mega * 1000
    Tera = Giga * 1000
    Peta = Tera * 1000
    Exa = Peta * 1000
)

func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
    return map[string]float64{
        shortSuffix: 1,
        "K" + suffix: float64(scale),
        "M" + suffix: float64(scale * scale),
        "G" + suffix: float64(scale * scale * scale),
        "T" + suffix: float64(scale * scale * scale * scale),
        "P" + suffix: float64(scale * scale * scale * scale * scale),
        "E" + suffix: float64(scale * scale * scale * scale * scale * scale),
    }
}
138
vendor/github.com/alecthomas/units/util.go
generated
vendored
@@ -1,138 +0,0 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
|
||||
)
|
||||
|
||||
func ToString(n int64, scale int64, suffix, baseSuffix string) string {
|
||||
mn := len(siUnits)
|
||||
out := make([]string, mn)
|
||||
for i, m := range siUnits {
|
||||
if n%scale != 0 || i == 0 && n == 0 {
|
||||
s := suffix
|
||||
if i == 0 {
|
||||
s = baseSuffix
|
||||
}
|
||||
out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
|
||||
}
|
||||
n /= scale
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return strings.Join(out, "")
|
||||
}
|
||||
|
||||
// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
|
||||
var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
|
||||
|
||||
// leadingInt consumes the leading [0-9]* from s.
|
||||
func leadingInt(s string) (x int64, rem string, err error) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
if x >= (1<<63-10)/10 {
|
||||
// overflow
|
||||
return 0, "", errLeadingInt
|
||||
}
|
||||
x = x*10 + int64(c) - '0'
|
||||
}
|
||||
return x, s[i:], nil
|
||||
}
|
||||
|
||||
func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
|
||||
// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
|
||||
orig := s
|
||||
f := float64(0)
|
||||
neg := false
|
||||
|
||||
// Consume [-+]?
|
||||
if s != "" {
|
||||
c := s[0]
|
||||
if c == '-' || c == '+' {
|
||||
neg = c == '-'
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
// Special case: if all that is left is "0", this is zero.
|
||||
if s == "0" {
|
||||
return 0, nil
|
||||
}
|
||||
if s == "" {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
for s != "" {
|
||||
g := float64(0) // this element of the sequence
|
||||
|
||||
var x int64
|
||||
var err error
|
||||
|
||||
// The next character must be [0-9.]
|
||||
if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
// Consume [0-9]*
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
g = float64(x)
|
||||
pre := pl != len(s) // whether we consumed anything before a period
|
||||
|
||||
// Consume (\.[0-9]*)?
|
||||
post := false
|
||||
if s != "" && s[0] == '.' {
|
||||
s = s[1:]
|
||||
pl := len(s)
|
||||
x, s, err = leadingInt(s)
|
||||
if err != nil {
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
scale := 1.0
|
||||
for n := pl - len(s); n > 0; n-- {
|
||||
scale *= 10
|
||||
}
|
||||
g += float64(x) / scale
|
||||
post = pl != len(s)
|
||||
}
|
||||
if !pre && !post {
|
||||
// no digits (e.g. ".s" or "-.s")
|
||||
return 0, errors.New("units: invalid " + orig)
|
||||
}
|
||||
|
||||
// Consume unit.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if c == '.' || ('0' <= c && c <= '9') {
|
||||
break
|
||||
}
|
||||
}
|
||||
u := s[:i]
|
||||
s = s[i:]
|
||||
unit, ok := unitMap[u]
|
||||
if !ok {
|
||||
return 0, errors.New("units: unknown unit " + u + " in " + orig)
|
||||
}
|
||||
|
||||
f += g * unit
|
||||
}
|
||||
|
||||
if neg {
|
||||
f = -f
|
||||
}
|
||||
if f < float64(-1<<63) || f > float64(1<<63-1) {
|
||||
return 0, errors.New("units: overflow parsing unit")
|
||||
}
|
||||
return int64(f), nil
|
||||
}
|
||||
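For orientation (not part of the commit): MakeUnitMap (si.go) and ParseUnit (util.go) are the building blocks behind the byte parsers above. A sketch with a hypothetical "bps" scale, chosen only to illustrate the suffix expansion:

```go
package main

import (
    "fmt"

    "github.com/alecthomas/units"
)

func main() {
    // MakeUnitMap expands a suffix into the K/M/G/T/P/E multiples that
    // ParseUnit understands; the short (base) suffix maps to 1.
    bps := units.MakeUnitMap("bps", "bps", 1000)
    n, err := units.ParseUnit("2.5Mbps", bps)
    if err != nil {
        panic(err)
    }
    fmt.Println(n) // 2500000
}
```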
16
vendor/github.com/golang/snappy/.gitignore
generated
vendored
@@ -1,16 +0,0 @@
cmd/snappytool/snappytool
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K
15
vendor/github.com/golang/snappy/AUTHORS
generated
vendored
@@ -1,15 +0,0 @@
|
||||
# This is the official list of Snappy-Go authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Google Inc.
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
||||
37
vendor/github.com/golang/snappy/CONTRIBUTORS
generated
vendored
@@ -1,37 +0,0 @@
|
||||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Snappy-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Kai Backman <kaib@golang.org>
|
||||
Marc-Antoine Ruel <maruel@chromium.org>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rob Pike <r@golang.org>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
||||
27
vendor/github.com/golang/snappy/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
107
vendor/github.com/golang/snappy/README
generated
vendored
@@ -1,107 +0,0 @@
|
||||
The Snappy compression format in the Go programming language.
|
||||
|
||||
To download and install from source:
|
||||
$ go get github.com/golang/snappy
|
||||
|
||||
Unless otherwise noted, the Snappy-Go source files are distributed
|
||||
under the BSD-style license found in the LICENSE file.
|
||||
|
||||
|
||||
|
||||
Benchmarks.
|
||||
|
||||
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
|
||||
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
|
||||
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
|
||||
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
|
||||
|
||||
"go test -test.bench=."
|
||||
|
||||
_UFlat0-8 2.19GB/s ± 0% html
|
||||
_UFlat1-8 1.41GB/s ± 0% urls
|
||||
_UFlat2-8 23.5GB/s ± 2% jpg
|
||||
_UFlat3-8 1.91GB/s ± 0% jpg_200
|
||||
_UFlat4-8 14.0GB/s ± 1% pdf
|
||||
_UFlat5-8 1.97GB/s ± 0% html4
|
||||
_UFlat6-8 814MB/s ± 0% txt1
|
||||
_UFlat7-8 785MB/s ± 0% txt2
|
||||
_UFlat8-8 857MB/s ± 0% txt3
|
||||
_UFlat9-8 719MB/s ± 1% txt4
|
||||
_UFlat10-8 2.84GB/s ± 0% pb
|
||||
_UFlat11-8 1.05GB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 1.04GB/s ± 0% html
|
||||
_ZFlat1-8 534MB/s ± 0% urls
|
||||
_ZFlat2-8 15.7GB/s ± 1% jpg
|
||||
_ZFlat3-8 740MB/s ± 3% jpg_200
|
||||
_ZFlat4-8 9.20GB/s ± 1% pdf
|
||||
_ZFlat5-8 991MB/s ± 0% html4
|
||||
_ZFlat6-8 379MB/s ± 0% txt1
|
||||
_ZFlat7-8 352MB/s ± 0% txt2
|
||||
_ZFlat8-8 396MB/s ± 1% txt3
|
||||
_ZFlat9-8 327MB/s ± 1% txt4
|
||||
_ZFlat10-8 1.33GB/s ± 1% pb
|
||||
_ZFlat11-8 605MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
"go test -test.bench=. -tags=noasm"
|
||||
|
||||
_UFlat0-8 621MB/s ± 2% html
|
||||
_UFlat1-8 494MB/s ± 1% urls
|
||||
_UFlat2-8 23.2GB/s ± 1% jpg
|
||||
_UFlat3-8 1.12GB/s ± 1% jpg_200
|
||||
_UFlat4-8 4.35GB/s ± 1% pdf
|
||||
_UFlat5-8 609MB/s ± 0% html4
|
||||
_UFlat6-8 296MB/s ± 0% txt1
|
||||
_UFlat7-8 288MB/s ± 0% txt2
|
||||
_UFlat8-8 309MB/s ± 1% txt3
|
||||
_UFlat9-8 280MB/s ± 1% txt4
|
||||
_UFlat10-8 753MB/s ± 0% pb
|
||||
_UFlat11-8 400MB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 409MB/s ± 1% html
|
||||
_ZFlat1-8 250MB/s ± 1% urls
|
||||
_ZFlat2-8 12.3GB/s ± 1% jpg
|
||||
_ZFlat3-8 132MB/s ± 0% jpg_200
|
||||
_ZFlat4-8 2.92GB/s ± 0% pdf
|
||||
_ZFlat5-8 405MB/s ± 1% html4
|
||||
_ZFlat6-8 179MB/s ± 1% txt1
|
||||
_ZFlat7-8 170MB/s ± 1% txt2
|
||||
_ZFlat8-8 189MB/s ± 1% txt3
|
||||
_ZFlat9-8 164MB/s ± 1% txt4
|
||||
_ZFlat10-8 479MB/s ± 1% pb
|
||||
_ZFlat11-8 270MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
|
||||
are the numbers from C++ Snappy's
|
||||
|
||||
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
|
||||
|
||||
BM_UFlat/0 2.4GB/s html
|
||||
BM_UFlat/1 1.4GB/s urls
|
||||
BM_UFlat/2 21.8GB/s jpg
|
||||
BM_UFlat/3 1.5GB/s jpg_200
|
||||
BM_UFlat/4 13.3GB/s pdf
|
||||
BM_UFlat/5 2.1GB/s html4
|
||||
BM_UFlat/6 1.0GB/s txt1
|
||||
BM_UFlat/7 959.4MB/s txt2
|
||||
BM_UFlat/8 1.0GB/s txt3
|
||||
BM_UFlat/9 864.5MB/s txt4
|
||||
BM_UFlat/10 2.9GB/s pb
|
||||
BM_UFlat/11 1.2GB/s gaviota
|
||||
|
||||
BM_ZFlat/0 944.3MB/s html (22.31 %)
|
||||
BM_ZFlat/1 501.6MB/s urls (47.78 %)
|
||||
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
|
||||
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
|
||||
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
|
||||
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
|
||||
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
|
||||
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
|
||||
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
|
||||
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
|
||||
BM_ZFlat/10 1.2GB/s pb (19.68 %)
|
||||
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
|
||||
237
vendor/github.com/golang/snappy/decode.go
generated
vendored
@@ -1,237 +0,0 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
ErrTooLarge = errors.New("snappy: decoded block is too large")
|
||||
// ErrUnsupported reports that the input isn't supported.
|
||||
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||
|
||||
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
|
||||
)
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n <= 0 || v > 0xffffffff {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
|
||||
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||
if wordSize == 32 && v > 0x7fffffff {
|
||||
return 0, 0, ErrTooLarge
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
const (
|
||||
decodeErrCodeCorrupt = 1
|
||||
decodeErrCodeUnsupportedLiteralLength = 2
|
||||
)
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dLen <= len(dst) {
|
||||
dst = dst[:dLen]
|
||||
} else {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
switch decode(dst, src[s:]) {
|
||||
case 0:
|
||||
return dst, nil
|
||||
case decodeErrCodeUnsupportedLiteralLength:
|
||||
return nil, errUnsupportedLiteralLength
|
||||
}
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
// format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
decoded: make([]byte, maxBlockSize),
|
||||
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
decoded []byte
|
||||
buf []byte
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
i, j int
|
||||
readHeader bool
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
// a new one.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
r.r = reader
|
||||
r.err = nil
|
||||
r.i = 0
|
||||
r.j = 0
|
||||
r.readHeader = false
|
||||
}
|
||||
|
||||
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for {
|
||||
if r.i < r.j {
|
||||
n := copy(p, r.decoded[r.i:r.j])
|
||||
r.i += n
|
||||
return n, nil
|
||||
}
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
return 0, r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
if chunkLen > len(r.buf) {
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
n, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n := chunkLen - checksumSize
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.decoded[:n], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
for i := 0; i < len(magicBody); i++ {
|
||||
if r.buf[i] != magicBody[i] {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if !r.readFull(r.buf[:chunkLen], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
}
|
||||
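For orientation (not part of the commit): a sketch of the two snappy entry points whose decode halves appear above. Decode and NewReader are defined in this hunk; Encode and NewWriter are assumed from the package's encode files, which this hunk does not show.

```go
package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/golang/snappy"
)

func main() {
    src := []byte("hello, hello, hello, snappy")

    // Block format: Decode is the inverse of Encode and may reuse dst.
    block := snappy.Encode(nil, src)
    decoded, err := snappy.Decode(nil, block)
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(src, decoded)) // true

    // Stream format: NewReader understands the framing format referenced in
    // decode.go (stream identifier, CRC-checked compressed/uncompressed chunks).
    var buf bytes.Buffer
    w := snappy.NewWriter(&buf)
    if _, err := w.Write(src); err != nil {
        panic(err)
    }
    out := make([]byte, len(src))
    if _, err := io.ReadFull(snappy.NewReader(&buf), out); err != nil {
        panic(err)
    }
    fmt.Println(string(out) == string(src)) // true
}
```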
14
vendor/github.com/golang/snappy/decode_amd64.go
generated
vendored
@@ -1,14 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
490
vendor/github.com/golang/snappy/decode_amd64.s
generated
vendored
@@ -1,490 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - AX scratch
|
||||
// - BX scratch
|
||||
// - CX length or x
|
||||
// - DX offset
|
||||
// - SI &src[s]
|
||||
// - DI &dst[d]
|
||||
// + R8 dst_base
|
||||
// + R9 dst_len
|
||||
// + R10 dst_base + dst_len
|
||||
// + R11 src_base
|
||||
// + R12 src_len
|
||||
// + R13 src_base + src_len
|
||||
// - R14 used by doCopy
|
||||
// - R15 used by doCopy
|
||||
//
|
||||
// The registers R8-R13 (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
|
||||
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
|
||||
TEXT ·decode(SB), NOSPLIT, $48-56
|
||||
// Initialize SI, DI and R8-R13.
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, DI
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, SI
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMPQ SI, R13
|
||||
JEQ end
|
||||
|
||||
// CX = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBLZX (SI), CX
|
||||
MOVL CX, BX
|
||||
ANDL $3, BX
|
||||
CMPL BX, $1
|
||||
JAE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
SHRL $2, CX
|
||||
CMPL CX, $60
|
||||
JAE tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
INCQ SI
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that CX == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// CX can hold 64 bits, so the increment cannot overflow.
|
||||
INCQ CX
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// AX = len(dst) - d
|
||||
// BX = len(src) - s
|
||||
MOVQ R10, AX
|
||||
SUBQ DI, AX
|
||||
MOVQ R13, BX
|
||||
SUBQ SI, BX
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMPQ CX, $16
|
||||
JGT callMemmove
|
||||
CMPQ AX, $16
|
||||
JLT callMemmove
|
||||
CMPQ BX, $16
|
||||
JLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(SI), X0
|
||||
MOVOU X0, 0(DI)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMPQ CX, AX
|
||||
JGT errCorrupt
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ SI, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R8-R13.
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADDQ CX, SI
|
||||
SUBQ $58, SI
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// case x == 60:
|
||||
CMPL CX, $61
|
||||
JEQ tagLit61
|
||||
JA tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBLZX -1(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVWLZX -2(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPL CX, $62
|
||||
JA tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
MOVWLZX -3(SI), CX
|
||||
MOVBLZX -1(SI), BX
|
||||
SHLL $16, BX
|
||||
ORL BX, CX
|
||||
JMP doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVL -4(SI), CX
|
||||
JMP doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADDQ $5, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVLQZX -4(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADDQ $3, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVWQZX -2(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - BX == src[s] & 0x03
|
||||
// - CX == src[s]
|
||||
CMPQ BX, $2
|
||||
JEQ tagCopy2
|
||||
JA tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADDQ $2, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
MOVQ CX, DX
|
||||
ANDQ $0xe0, DX
|
||||
SHLQ $3, DX
|
||||
MOVBQZX -1(SI), BX
|
||||
ORQ BX, DX
|
||||
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
SHRQ $2, CX
|
||||
ANDQ $7, CX
|
||||
ADDQ $4, CX
|
||||
|
||||
doCopy:
|
||||
// This is the end of the outer "switch", when we have a copy tag.
|
||||
//
|
||||
// We assume that:
|
||||
// - CX == length && CX > 0
|
||||
// - DX == offset
|
||||
|
||||
// if offset <= 0 { etc }
|
||||
CMPQ DX, $0
|
||||
JLE errCorrupt
|
||||
|
||||
// if d < offset { etc }
|
||||
MOVQ DI, BX
|
||||
SUBQ R8, BX
|
||||
CMPQ BX, DX
|
||||
JLT errCorrupt
|
||||
|
||||
// if length > len(dst)-d { etc }
|
||||
MOVQ R10, BX
|
||||
SUBQ DI, BX
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||
//
|
||||
// Set:
|
||||
// - R14 = len(dst)-d
|
||||
// - R15 = &dst[d-offset]
|
||||
MOVQ R10, R14
|
||||
SUBQ DI, R14
|
||||
MOVQ DI, R15
|
||||
SUBQ DX, R15
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||
//
|
||||
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||
// and not one 16-byte load/store, and the first store has to be before the
|
||||
// second load, due to the overlap if offset is in the range [8, 16).
|
||||
//
|
||||
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||
// goto slowForwardCopy
|
||||
// }
|
||||
// copy 16 bytes
|
||||
// d += length
|
||||
CMPQ CX, $16
|
||||
JGT slowForwardCopy
|
||||
CMPQ DX, $8
|
||||
JLT slowForwardCopy
|
||||
CMPQ R14, $16
|
||||
JLT slowForwardCopy
|
||||
MOVQ 0(R15), AX
|
||||
MOVQ AX, 0(DI)
|
||||
MOVQ 8(R15), BX
|
||||
MOVQ BX, 8(DI)
|
||||
ADDQ CX, DI
|
||||
JMP loop
|
||||
|
||||
slowForwardCopy:
|
||||
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||
// of the outermost loop.
|
||||
//
|
||||
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||
// commentary says:
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// The main part of this loop is a simple copy of eight bytes at a time
|
||||
// until we've copied (at least) the requested amount of bytes. However,
|
||||
// if d and d-offset are less than eight bytes apart (indicating a
|
||||
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||
// order to get the correct results. For instance, if the buffer looks like
|
||||
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||
// intervals:
|
||||
//
|
||||
// abxxxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||
//
|
||||
// ababxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// and repeat the exercise until the two no longer overlap.
|
||||
//
|
||||
// This allows us to do very well in the special case of one single byte
|
||||
// repeated many times, without taking a big hit for more general cases.
|
||||
//
|
||||
// The worst case of extra writing past the end of the match occurs when
|
||||
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||
// position 1. Thus, ten excess bytes.
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// That "10 byte overrun" worst case is confirmed by Go's
|
||||
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||
// and finishSlowForwardCopy algorithm.
|
||||
//
|
||||
// if length > len(dst)-d-10 {
|
||||
// goto verySlowForwardCopy
|
||||
// }
|
||||
SUBQ $10, R14
|
||||
CMPQ CX, R14
|
||||
JGT verySlowForwardCopy
|
||||
|
||||
makeOffsetAtLeast8:
|
||||
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||
// 8-byte load/stores.
|
||||
//
|
||||
// for offset < 8 {
|
||||
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||
// length -= offset
|
||||
// d += offset
|
||||
// offset += offset
|
||||
// // The two previous lines together means that d-offset, and therefore
|
||||
// // R15, is unchanged.
|
||||
// }
|
||||
CMPQ DX, $8
|
||||
JGE fixUpSlowForwardCopy
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (DI)
|
||||
SUBQ DX, CX
|
||||
ADDQ DX, DI
|
||||
ADDQ DX, DX
|
||||
JMP makeOffsetAtLeast8
|
||||
|
||||
fixUpSlowForwardCopy:
|
||||
// !!! Add length (which might be negative now) to d (implied by DI being
|
||||
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||
// top of the loop. Before we do that, though, we save DI to AX so that, if
|
||||
// length is positive, copying the remaining length bytes will write to the
|
||||
// right place.
|
||||
MOVQ DI, AX
|
||||
ADDQ CX, DI
|
||||
|
||||
finishSlowForwardCopy:
|
||||
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||
// length means that we overrun, but as above, that will be fixed up by
|
||||
// subsequent iterations of the outermost loop.
|
||||
CMPQ CX, $0
|
||||
JLE loop
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (AX)
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, AX
|
||||
SUBQ $8, CX
|
||||
JMP finishSlowForwardCopy
|
||||
|
||||
verySlowForwardCopy:
|
||||
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||
// that length > 0. In Go syntax:
|
||||
//
|
||||
// for {
|
||||
// dst[d] = dst[d - offset]
|
||||
// d++
|
||||
// length--
|
||||
// if length == 0 {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
MOVB (R15), BX
|
||||
MOVB BX, (DI)
|
||||
INCQ R15
|
||||
INCQ DI
|
||||
DECQ CX
|
||||
JNZ verySlowForwardCopy
|
||||
JMP loop
|
||||
|
||||
// The code above handles copy tags.
|
||||
// ----------------------------------------
|
||||
|
||||
end:
|
||||
// This is the end of the "for s < len(src)".
|
||||
//
|
||||
// if d != len(dst) { etc }
|
||||
CMPQ DI, R10
|
||||
JNE errCorrupt
|
||||
|
||||
// return 0
|
||||
MOVQ $0, ret+48(FP)
|
||||
RET
|
||||
|
||||
errCorrupt:
|
||||
// return decodeErrCodeCorrupt
|
||||
MOVQ $1, ret+48(FP)
|
||||
RET
|
||||
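// Go-flavoured sketch, not part of the original diff, of the pattern
// expansion performed by slowForwardCopy/makeOffsetAtLeast8 above: while
// offset < 8, an 8-byte block copy doubles the repeating pattern so that
// later iterations can use plain 8-byte load/stores. It assumes, as the asm
// does, that the caller has already checked length <= len(dst)-d-10, so the
// 8-byte writes cannot run off the end of dst. The function name is
// hypothetical.
func expandShortPattern(dst []byte, d, offset, length int) (int, int, int) {
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:d-offset+8]) // like the MOVQ load/store pair
		length -= offset
		d += offset
		offset += offset // d-offset is unchanged, so the pattern keeps doubling
	}
	return d, offset, length
}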
101 vendor/github.com/golang/snappy/decode_other.go generated vendored
@@ -1,101 +0,0 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
package snappy
|
||||
|
||||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
|
||||
// length of the decompressed bytes has already been read, and that len(dst)
|
||||
// equals that length.
|
||||
//
|
||||
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
|
||||
func decode(dst, src []byte) int {
|
||||
var d, s, offset, length int
|
||||
for s < len(src) {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint32(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s++
|
||||
case x == 60:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
}
|
||||
length = int(x) + 1
|
||||
if length <= 0 {
|
||||
return decodeErrCodeUnsupportedLiteralLength
|
||||
}
|
||||
if length > len(dst)-d || length > len(src)-s {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 4 + int(src[s-2])>>2&0x7
|
||||
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
|
||||
case tagCopy4:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-5])>>2
|
||||
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
}
|
||||
|
||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
|
||||
// the built-in copy function, this byte-by-byte copy always runs
|
||||
// forwards, even if the slices overlap. Conceptually, this is:
|
||||
//
|
||||
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||
for end := d + length; d != end; d++ {
|
||||
dst[d] = dst[d-offset]
|
||||
}
|
||||
}
|
||||
if d != len(dst) {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
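// Illustrative sketch, not part of the original diff: how a caller might
// drive decode per its doc comment above. The varint-encoded decompressed
// length is read first, dst is sized to exactly that length, and a non-zero
// return code is reported as corruption. It assumes "encoding/binary" is
// imported; the wrapper name decodeBlock is hypothetical.
func decodeBlock(src []byte) ([]byte, error) {
	dLen, n := binary.Uvarint(src)
	if n <= 0 || dLen > 0xffffffff {
		return nil, ErrCorrupt
	}
	dst := make([]byte, dLen) // len(dst) must equal the decoded length
	if decode(dst, src[n:]) != 0 {
		return nil, ErrCorrupt
	}
	return dst, nil
}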
285 vendor/github.com/golang/snappy/encode.go generated vendored
@@ -1,285 +0,0 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Encode(dst, src []byte) []byte {
|
||||
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||
panic(ErrTooLarge)
|
||||
} else if len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
for len(src) > 0 {
|
||||
p := src
|
||||
src = nil
|
||||
if len(p) > maxBlockSize {
|
||||
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||
}
|
||||
if len(p) < minNonLiteralBlockSize {
|
||||
d += emitLiteral(dst[d:], p)
|
||||
} else {
|
||||
d += encodeBlock(dst[d:], p)
|
||||
}
|
||||
}
|
||||
return dst[:d]
|
||||
}
|
||||
|
||||
// inputMargin is the minimum number of extra input bytes to keep, inside
|
||||
// encodeBlock's inner loop. On some architectures, this margin lets us
|
||||
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
|
||||
// literals can be implemented as a single load to and store from a 16-byte
|
||||
// register. That literal's actual length can be as short as 1 byte, so this
|
||||
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
|
||||
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
|
||||
// that we don't overrun the dst and src buffers.
|
||||
const inputMargin = 16 - 1
|
||||
|
||||
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
|
||||
// could be encoded with a copy tag. This is the minimum with respect to the
|
||||
// algorithm used by encodeBlock, not a minimum enforced by the file format.
|
||||
//
|
||||
// The encoded output must start with at least a 1 byte literal, as there are
|
||||
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
|
||||
// from an emitCopy call in encodeBlock's main loop, would require at least
|
||||
// another inputMargin bytes, for the reason above: we want any emitLiteral
|
||||
// calls inside encodeBlock's main loop to use the fast path if possible, which
|
||||
// requires being able to overrun by inputMargin bytes. Thus,
|
||||
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
|
||||
//
|
||||
// The C++ code doesn't use this exact threshold, but it could, as discussed at
|
||||
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
|
||||
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
|
||||
// optimization. It should not affect the encoded form. This is tested by
|
||||
// TestSameEncodingAsCppShortCopies.
|
||||
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
|
||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||
// uncompressed length.
|
||||
//
|
||||
// It will return a negative value if srcLen is too large to encode.
|
||||
func MaxEncodedLen(srcLen int) int {
|
||||
n := uint64(srcLen)
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
// Compressed data can be defined as:
|
||||
// compressed := item* literal*
|
||||
// item := literal* copy
|
||||
//
|
||||
// The trailing literal sequence has a space blowup of at most 62/60
|
||||
// since a literal of length 60 needs one tag byte + one extra byte
|
||||
// for length information.
|
||||
//
|
||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||
// 4 bytes of data. Because of a special check in the encoding code,
|
||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||
// to at most the 62/60 blowup for representing literals.
|
||||
//
|
||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||
//
|
||||
// This last factor dominates the blowup, so the final estimate is:
|
||||
n = 32 + n + n/6
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
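// Worked example, not part of the original diff: the bound above is
// 32 + n + n/6, so for a full 64 KiB block (maxBlockSize == 65536) it is
// 32 + 65536 + 10922 = 76490 bytes. A caller can therefore size dst once up
// front; the helper name encodeWithPreallocatedDst is hypothetical.
func encodeWithPreallocatedDst(src []byte) ([]byte, error) {
	n := MaxEncodedLen(len(src))
	if n < 0 {
		return nil, ErrTooLarge // srcLen too large to encode
	}
	dst := make([]byte, n) // Encode reuses dst instead of allocating
	return Encode(dst, src), nil
}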
|
||||
var errClosed = errors.New("snappy: Writer is closed")
|
||||
|
||||
// NewWriter returns a new Writer that compresses to w.
|
||||
//
|
||||
// The Writer returned does not buffer writes. There is no need to Flush or
|
||||
// Close such a Writer.
|
||||
//
|
||||
// Deprecated: the Writer returned is not suitable for many small writes, only
|
||||
// for few large writes. Use NewBufferedWriter instead, which is efficient
|
||||
// regardless of the frequency and shape of the writes, and remember to Close
|
||||
// that Writer when done.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// NewBufferedWriter returns a new Writer that compresses to w, using the
|
||||
// framing format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
//
|
||||
// The Writer returned buffers writes. Users must call Close to guarantee all
|
||||
// data has been forwarded to the underlying io.Writer. They may also call
|
||||
// Flush zero or more times before calling Close.
|
||||
func NewBufferedWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
ibuf: make([]byte, 0, maxBlockSize),
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// Writer is an io.Writer that can write Snappy-compressed bytes.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
|
||||
// ibuf is a buffer for the incoming (uncompressed) bytes.
|
||||
//
|
||||
// Its use is optional. For backwards compatibility, Writers created by the
|
||||
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
|
||||
// therefore do not need to be Flush'ed or Close'd.
|
||||
ibuf []byte
|
||||
|
||||
// obuf is a buffer for the outgoing (compressed) bytes.
|
||||
obuf []byte
|
||||
|
||||
// wroteStreamHeader is whether we have written the stream header.
|
||||
wroteStreamHeader bool
|
||||
}
|
||||
|
||||
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||
// w. This permits reusing a Writer rather than allocating a new one.
|
||||
func (w *Writer) Reset(writer io.Writer) {
|
||||
w.w = writer
|
||||
w.err = nil
|
||||
if w.ibuf != nil {
|
||||
w.ibuf = w.ibuf[:0]
|
||||
}
|
||||
w.wroteStreamHeader = false
|
||||
}
|
||||
|
||||
// Write satisfies the io.Writer interface.
|
||||
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||
if w.ibuf == nil {
|
||||
// Do not buffer incoming bytes. This does not perform or compress well
|
||||
// if the caller of Writer.Write writes many small slices. This
|
||||
// behavior is therefore deprecated, but still supported for backwards
|
||||
// compatibility with code that doesn't explicitly Flush or Close.
|
||||
return w.write(p)
|
||||
}
|
||||
|
||||
// The remainder of this method is based on bufio.Writer.Write from the
|
||||
// standard library.
|
||||
|
||||
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
|
||||
var n int
|
||||
if len(w.ibuf) == 0 {
|
||||
// Large write, empty buffer.
|
||||
// Write directly from p to avoid copy.
|
||||
n, _ = w.write(p)
|
||||
} else {
|
||||
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
w.Flush()
|
||||
}
|
||||
nRet += n
|
||||
p = p[n:]
|
||||
}
|
||||
if w.err != nil {
|
||||
return nRet, w.err
|
||||
}
|
||||
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
nRet += n
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
func (w *Writer) write(p []byte) (nRet int, errRet error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
for len(p) > 0 {
|
||||
obufStart := len(magicChunk)
|
||||
if !w.wroteStreamHeader {
|
||||
w.wroteStreamHeader = true
|
||||
copy(w.obuf, magicChunk)
|
||||
obufStart = 0
|
||||
}
|
||||
|
||||
var uncompressed []byte
|
||||
if len(p) > maxBlockSize {
|
||||
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
|
||||
} else {
|
||||
uncompressed, p = p, nil
|
||||
}
|
||||
checksum := crc(uncompressed)
|
||||
|
||||
// Compress the buffer, discarding the result if the improvement
|
||||
// isn't at least 12.5%.
|
||||
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
|
||||
chunkType := uint8(chunkTypeCompressedData)
|
||||
chunkLen := 4 + len(compressed)
|
||||
obufEnd := obufHeaderLen + len(compressed)
|
||||
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
|
||||
chunkType = chunkTypeUncompressedData
|
||||
chunkLen = 4 + len(uncompressed)
|
||||
obufEnd = obufHeaderLen
|
||||
}
|
||||
|
||||
// Fill in the per-chunk header that comes before the body.
|
||||
w.obuf[len(magicChunk)+0] = chunkType
|
||||
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
|
||||
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
|
||||
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
|
||||
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
|
||||
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
|
||||
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
|
||||
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
|
||||
|
||||
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
if chunkType == chunkTypeUncompressedData {
|
||||
if _, err := w.w.Write(uncompressed); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
}
|
||||
nRet += len(uncompressed)
|
||||
}
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
// Flush flushes the Writer to its underlying io.Writer.
|
||||
func (w *Writer) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if len(w.ibuf) == 0 {
|
||||
return nil
|
||||
}
|
||||
w.write(w.ibuf)
|
||||
w.ibuf = w.ibuf[:0]
|
||||
return w.err
|
||||
}
|
||||
|
||||
// Close calls Flush and then closes the Writer.
|
||||
func (w *Writer) Close() error {
|
||||
w.Flush()
|
||||
ret := w.err
|
||||
if w.err == nil {
|
||||
w.err = errClosed
|
||||
}
|
||||
return ret
|
||||
}
|
||||
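// Illustrative usage sketch, not part of the original diff: the buffered
// writer path described above. Close (or at least Flush) must be called so
// the final contents of ibuf reach the underlying io.Writer. It assumes
// "os" is imported; the helper name compressToFile is hypothetical.
func compressToFile(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	w := NewBufferedWriter(f)
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	return w.Close() // flushes buffered bytes and reports any deferred error
}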
29 vendor/github.com/golang/snappy/encode_amd64.go generated vendored
@@ -1,29 +0,0 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
package snappy
|
||||
|
||||
// emitLiteral has the same semantics as in encode_other.go.
|
||||
//
|
||||
//go:noescape
|
||||
func emitLiteral(dst, lit []byte) int
|
||||
|
||||
// emitCopy has the same semantics as in encode_other.go.
|
||||
//
|
||||
//go:noescape
|
||||
func emitCopy(dst []byte, offset, length int) int
|
||||
|
||||
// extendMatch has the same semantics as in encode_other.go.
|
||||
//
|
||||
//go:noescape
|
||||
func extendMatch(src []byte, i, j int) int
|
||||
|
||||
// encodeBlock has the same semantics as in encode_other.go.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlock(dst, src []byte) (d int)
|
||||
730 vendor/github.com/golang/snappy/encode_amd64.s generated vendored
@@ -1,730 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
|
||||
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
|
||||
// https://github.com/golang/snappy/issues/29
|
||||
//
|
||||
// As a workaround, the package was built with a known good assembler, and
|
||||
// those instructions were disassembled by "objdump -d" to yield the
|
||||
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
// style comments, in AT&T asm syntax. Note that rsp here is a physical
|
||||
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
|
||||
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
|
||||
// fine on Go 1.6.
|
||||
|
||||
// The asm code generally follows the pure Go code in encode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitLiteral(dst, lit []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX len(lit)
|
||||
// - BX n
|
||||
// - DX return value
|
||||
// - DI &dst[i]
|
||||
// - R10 &lit[0]
|
||||
//
|
||||
// The 24 bytes of stack space is to call runtime·memmove.
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R10 for the
|
||||
// source pointer, matches the allocation used at the call site in encodeBlock,
|
||||
// which makes it easier to manually inline this function.
|
||||
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ lit_base+24(FP), R10
|
||||
MOVQ lit_len+32(FP), AX
|
||||
MOVQ AX, DX
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT oneByte
|
||||
CMPL BX, $256
|
||||
JLT twoBytes
|
||||
|
||||
threeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
ADDQ $3, DX
|
||||
JMP memmove
|
||||
|
||||
twoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
ADDQ $2, DX
|
||||
JMP memmove
|
||||
|
||||
oneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
ADDQ $1, DX
|
||||
|
||||
memmove:
|
||||
MOVQ DX, ret+48(FP)
|
||||
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitCopy(dst []byte, offset, length int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX length
|
||||
// - SI &dst[0]
|
||||
// - DI &dst[i]
|
||||
// - R11 offset
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R11 for the
|
||||
// offset, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·emitCopy(SB), NOSPLIT, $0-48
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ DI, SI
|
||||
MOVQ offset+24(FP), R11
|
||||
MOVQ length+32(FP), AX
|
||||
|
||||
loop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT step1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP loop0
|
||||
|
||||
step1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE step2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
step2:
|
||||
// if length >= 12 || offset >= 2048 { goto step3 }
|
||||
CMPL AX, $12
|
||||
JGE step3
|
||||
CMPL R11, $2048
|
||||
JGE step3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
step3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func extendMatch(src []byte, i, j int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - DX &src[0]
|
||||
// - SI &src[j]
|
||||
// - R13 &src[len(src) - 8]
|
||||
// - R14 &src[len(src)]
|
||||
// - R15 &src[i]
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R15 for a source
|
||||
// pointer, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·extendMatch(SB), NOSPLIT, $0-48
|
||||
MOVQ src_base+0(FP), DX
|
||||
MOVQ src_len+8(FP), R14
|
||||
MOVQ i+24(FP), R15
|
||||
MOVQ j+32(FP), SI
|
||||
ADDQ DX, R14
|
||||
ADDQ DX, R15
|
||||
ADDQ DX, SI
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
cmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA cmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE bsf
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP cmp8
|
||||
|
||||
bsf:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
cmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE extendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE extendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP cmp1
|
||||
|
||||
extendMatchEnd:
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func encodeBlock(dst, src []byte) (d int)
|
||||
//
|
||||
// All local variables fit into registers, other than "var table". The register
|
||||
// allocation:
|
||||
// - AX . .
|
||||
// - BX . .
|
||||
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
|
||||
// - DX 64 &src[0], tableSize
|
||||
// - SI 72 &src[s]
|
||||
// - DI 80 &dst[d]
|
||||
// - R9 88 sLimit
|
||||
// - R10 . &src[nextEmit]
|
||||
// - R11 96 prevHash, currHash, nextHash, offset
|
||||
// - R12 104 &src[base], skip
|
||||
// - R13 . &src[nextS], &src[len(src) - 8]
|
||||
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
|
||||
// - R15 112 candidate
|
||||
//
|
||||
// The second column (56, 64, etc) is the stack offset to spill the registers
|
||||
// when calling other functions. We could pack this slightly tighter, but it's
|
||||
// simpler to have a dedicated spill map independent of the function called.
|
||||
//
|
||||
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
|
||||
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
|
||||
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
|
||||
TEXT ·encodeBlock(SB), 0, $32888-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ src_base+24(FP), SI
|
||||
MOVQ src_len+32(FP), R14
|
||||
|
||||
// shift, tableSize := uint32(32-8), 1<<8
|
||||
MOVQ $24, CX
|
||||
MOVQ $256, DX
|
||||
|
||||
calcShift:
|
||||
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||
// shift--
|
||||
// }
|
||||
CMPQ DX, $16384
|
||||
JGE varTable
|
||||
CMPQ DX, R14
|
||||
JGE varTable
|
||||
SUBQ $1, CX
|
||||
SHLQ $1, DX
|
||||
JMP calcShift
|
||||
|
||||
varTable:
|
||||
// var table [maxTableSize]uint16
|
||||
//
|
||||
// In the asm code, unlike the Go code, we can zero-initialize only the
|
||||
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
|
||||
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
|
||||
// 2048 writes that would zero-initialize all of table's 32768 bytes.
|
||||
SHRQ $3, DX
|
||||
LEAQ table-32768(SP), BX
|
||||
PXOR X0, X0
|
||||
|
||||
memclr:
|
||||
MOVOU X0, 0(BX)
|
||||
ADDQ $16, BX
|
||||
SUBQ $1, DX
|
||||
JNZ memclr
|
||||
|
||||
// !!! DX = &src[0]
|
||||
MOVQ SI, DX
|
||||
|
||||
// sLimit := len(src) - inputMargin
|
||||
MOVQ R14, R9
|
||||
SUBQ $15, R9
|
||||
|
||||
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
|
||||
// change for the rest of the function.
|
||||
MOVQ CX, 56(SP)
|
||||
MOVQ DX, 64(SP)
|
||||
MOVQ R9, 88(SP)
|
||||
|
||||
// nextEmit := 0
|
||||
MOVQ DX, R10
|
||||
|
||||
// s := 1
|
||||
ADDQ $1, SI
|
||||
|
||||
// nextHash := hash(load32(src, s), shift)
|
||||
MOVL 0(SI), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
outer:
|
||||
// for { etc }
|
||||
|
||||
// skip := 32
|
||||
MOVQ $32, R12
|
||||
|
||||
// nextS := s
|
||||
MOVQ SI, R13
|
||||
|
||||
// candidate := 0
|
||||
MOVQ $0, R15
|
||||
|
||||
inner0:
|
||||
// for { etc }
|
||||
|
||||
// s := nextS
|
||||
MOVQ R13, SI
|
||||
|
||||
// bytesBetweenHashLookups := skip >> 5
|
||||
MOVQ R12, R14
|
||||
SHRQ $5, R14
|
||||
|
||||
// nextS = s + bytesBetweenHashLookups
|
||||
ADDQ R14, R13
|
||||
|
||||
// skip += bytesBetweenHashLookups
|
||||
ADDQ R14, R12
|
||||
|
||||
// if nextS > sLimit { goto emitRemainder }
|
||||
MOVQ R13, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JA emitRemainder
|
||||
|
||||
// candidate = int(table[nextHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[nextHash] = uint16(s)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// nextHash = hash(load32(src, nextS), shift)
|
||||
MOVL 0(R13), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// if load32(src, s) != load32(src, candidate) { continue } break
|
||||
MOVL 0(SI), AX
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL AX, BX
|
||||
JNE inner0
|
||||
|
||||
fourByteMatch:
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// A 4-byte match has been found. We'll later see etc.
|
||||
|
||||
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
|
||||
// on inputMargin in encode.go.
|
||||
MOVQ SI, AX
|
||||
SUBQ R10, AX
|
||||
CMPQ AX, $16
|
||||
JLE emitLiteralFastPath
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitLiteral call.
|
||||
//
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT inlineEmitLiteralOneByte
|
||||
CMPL BX, $256
|
||||
JLT inlineEmitLiteralTwoBytes
|
||||
|
||||
inlineEmitLiteralThreeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralTwoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralOneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
inlineEmitLiteralMemmove:
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
//
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
MOVQ SI, 72(SP)
|
||||
MOVQ DI, 80(SP)
|
||||
MOVQ R15, 112(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
MOVQ 56(SP), CX
|
||||
MOVQ 64(SP), DX
|
||||
MOVQ 72(SP), SI
|
||||
MOVQ 80(SP), DI
|
||||
MOVQ 88(SP), R9
|
||||
MOVQ 112(SP), R15
|
||||
JMP inner1
|
||||
|
||||
inlineEmitLiteralEnd:
|
||||
// End inline of the emitLiteral call.
|
||||
// ----------------------------------------
|
||||
|
||||
emitLiteralFastPath:
|
||||
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
|
||||
MOVB AX, BX
|
||||
SUBB $1, BX
|
||||
SHLB $2, BX
|
||||
MOVB BX, (DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
// !!! Implement the copy from lit to dst as a 16-byte load and store.
|
||||
// (Encode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
|
||||
// OK. Subsequent iterations will fix up the overrun.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(R10), X0
|
||||
MOVOU X0, 0(DI)
|
||||
ADDQ AX, DI
|
||||
|
||||
inner1:
|
||||
// for { etc }
|
||||
|
||||
// base := s
|
||||
MOVQ SI, R12
|
||||
|
||||
// !!! offset := base - candidate
|
||||
MOVQ R12, R11
|
||||
SUBQ R15, R11
|
||||
SUBQ DX, R11
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the extendMatch call.
|
||||
//
|
||||
// s = extendMatch(src, candidate+4, s+4)
|
||||
|
||||
// !!! R14 = &src[len(src)]
|
||||
MOVQ src_len+32(FP), R14
|
||||
ADDQ DX, R14
|
||||
|
||||
// !!! R13 = &src[len(src) - 8]
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
// !!! R15 = &src[candidate + 4]
|
||||
ADDQ $4, R15
|
||||
ADDQ DX, R15
|
||||
|
||||
// !!! s += 4
|
||||
ADDQ $4, SI
|
||||
|
||||
inlineExtendMatchCmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA inlineExtendMatchCmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE inlineExtendMatchBSF
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP inlineExtendMatchCmp8
|
||||
|
||||
inlineExtendMatchBSF:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
JMP inlineExtendMatchEnd
|
||||
|
||||
inlineExtendMatchCmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE inlineExtendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE inlineExtendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP inlineExtendMatchCmp1
|
||||
|
||||
inlineExtendMatchEnd:
|
||||
// End inline of the extendMatch call.
|
||||
// ----------------------------------------
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitCopy call.
|
||||
//
|
||||
// d += emitCopy(dst[d:], base-candidate, s-base)
|
||||
|
||||
// !!! length := s - base
|
||||
MOVQ SI, AX
|
||||
SUBQ R12, AX
|
||||
|
||||
inlineEmitCopyLoop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT inlineEmitCopyStep1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP inlineEmitCopyLoop0
|
||||
|
||||
inlineEmitCopyStep1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE inlineEmitCopyStep2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
inlineEmitCopyStep2:
|
||||
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
|
||||
CMPL AX, $12
|
||||
JGE inlineEmitCopyStep3
|
||||
CMPL R11, $2048
|
||||
JGE inlineEmitCopyStep3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitCopyEnd
|
||||
|
||||
inlineEmitCopyStep3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
inlineEmitCopyEnd:
|
||||
// End inline of the emitCopy call.
|
||||
// ----------------------------------------
|
||||
|
||||
// nextEmit = s
|
||||
MOVQ SI, R10
|
||||
|
||||
// if s >= sLimit { goto emitRemainder }
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JAE emitRemainder
|
||||
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// We could immediately etc.
|
||||
|
||||
// x := load64(src, s-1)
|
||||
MOVQ -1(SI), R14
|
||||
|
||||
// prevHash := hash(uint32(x>>0), shift)
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// table[prevHash] = uint16(s-1)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
SUBQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// currHash := hash(uint32(x>>8), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// candidate = int(table[currHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[currHash] = uint16(s)
|
||||
ADDQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// if uint32(x>>8) == load32(src, candidate) { continue }
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL R14, BX
|
||||
JEQ inner1
|
||||
|
||||
// nextHash = hash(uint32(x>>16), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// s++
|
||||
ADDQ $1, SI
|
||||
|
||||
// break out of the inner1 for loop, i.e. continue the outer loop.
|
||||
JMP outer
|
||||
|
||||
emitRemainder:
|
||||
// if nextEmit < len(src) { etc }
|
||||
MOVQ src_len+32(FP), AX
|
||||
ADDQ DX, AX
|
||||
CMPQ R10, AX
|
||||
JEQ encodeBlockEnd
|
||||
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
//
|
||||
// Push args.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ R10, 24(SP)
|
||||
SUBQ R10, AX
|
||||
MOVQ AX, 32(SP)
|
||||
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
MOVQ DI, 80(SP)
|
||||
CALL ·emitLiteral(SB)
|
||||
MOVQ 80(SP), DI
|
||||
|
||||
// Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
ADDQ 48(SP), DI
|
||||
|
||||
encodeBlockEnd:
|
||||
MOVQ dst_base+0(FP), AX
|
||||
SUBQ AX, DI
|
||||
MOVQ DI, d+48(FP)
|
||||
RET
|
||||
238 vendor/github.com/golang/snappy/encode_other.go generated vendored
@@ -1,238 +0,0 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
package snappy
|
||||
|
||||
func load32(b []byte, i int) uint32 {
|
||||
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func load64(b []byte, i int) uint64 {
|
||||
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= len(lit) && len(lit) <= 65536
|
||||
func emitLiteral(dst, lit []byte) int {
|
||||
i, n := 0, uint(len(lit)-1)
|
||||
switch {
|
||||
case n < 60:
|
||||
dst[0] = uint8(n)<<2 | tagLiteral
|
||||
i = 1
|
||||
case n < 1<<8:
|
||||
dst[0] = 60<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
i = 2
|
||||
default:
|
||||
dst[0] = 61<<2 | tagLiteral
|
||||
dst[1] = uint8(n)
|
||||
dst[2] = uint8(n >> 8)
|
||||
i = 3
|
||||
}
|
||||
return i + copy(dst[i:], lit)
|
||||
}
|
||||
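// Worked example, not part of the original diff: a 5-byte literal has
// n == 4 < 60, so the chunk is a single tag byte 4<<2|tagLiteral == 0x10
// followed by the 5 literal bytes, 6 bytes in total:
//
//	var dst [16]byte
//	n := emitLiteral(dst[:], []byte("hello")) // n == 6, dst[0] == 0x10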
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= 65535
|
||||
// 4 <= length && length <= 65535
|
||||
func emitCopy(dst []byte, offset, length int) int {
|
||||
i := 0
|
||||
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
|
||||
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
|
||||
// length emitted down below is a little lower (at 60 = 64 - 4), because
|
||||
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
|
||||
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
|
||||
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
|
||||
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
|
||||
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
|
||||
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
|
||||
for length >= 68 {
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
dst[i+0] = 63<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= 64
|
||||
}
|
||||
if length > 64 {
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
dst[i+0] = 59<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
i += 3
|
||||
length -= 60
|
||||
}
|
||||
if length >= 12 || offset >= 2048 {
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
dst[i+0] = uint8(length-1)<<2 | tagCopy2
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
return i + 3
|
||||
}
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||
dst[i+1] = uint8(offset)
|
||||
return i + 2
|
||||
}
|
||||
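// Worked example, not part of the original diff, of the 68/60 threshold
// reasoning above: emitCopy(dst, 100, 67) skips the >= 68 loop, emits a
// length-60 tagCopy2 (3 bytes), and the remaining length-7 copy with
// offset 100 < 2048 fits a 2-byte tagCopy1, so the call returns 5. Encoding
// the same copy as 64 + 3 would instead need two 3-byte tagCopy2 ops:
//
//	var dst [8]byte
//	n := emitCopy(dst[:], 100, 67) // n == 5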
|
||||
// extendMatch returns the largest k such that k <= len(src) and that
|
||||
// src[i:i+k-j] and src[j:k] have the same contents.
|
||||
//
|
||||
// It assumes that:
|
||||
// 0 <= i && i < j && j <= len(src)
|
||||
func extendMatch(src []byte, i, j int) int {
|
||||
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}

			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
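// load32 and load64 used by hash and encodeBlock above are defined elsewhere
// in the package (not shown in this hunk); a minimal sketch of the assumed
// behaviour (little-endian loads of 4 and 8 bytes starting at src[i]) is
// included here only to make the x>>0 / x>>8 / x>>16 hashing trick concrete.
func load32Sketch(b []byte, i int) uint32 {
	b = b[i : i+4]
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64Sketch(b []byte, i int) uint64 {
	b = b[i : i+8]
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}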
87 vendor/github.com/golang/snappy/snappy.go generated vendored
@@ -1,87 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy

import (
	"hash/crc32"
)
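
// Illustrative example, not part of the original file: the package's public
// Encode and Decode functions (defined in the package's other source files)
// round-trip a block; shown here only to make the format description below
// concrete.
func exampleRoundTrip(data []byte) ([]byte, error) {
	compressed := Encode(nil, data) // Encode appends to dst; nil allocates a new buffer
	return Decode(nil, compressed)  // Decode recovers the original bytes
}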

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)
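// Illustrative sketch, not part of the original file: splitting the first
// byte of a chunk into the tag l (its 2 least significant bits) and the
// value m (its 6 most significant bits) as described above, and applying the
// tagCopy1 length rule as an example.
func splitTagSketch(first byte) (l, m uint32) {
	return uint32(first & 0x03), uint32(first >> 2)
}

// copy1LengthSketch computes the length encoded by a tagCopy1 chunk:
// 4 + the low 3 bits of m, giving lengths in [4, 12).
func copy1LengthSketch(m uint32) uint32 {
	return 4 + (m & 0x07)
}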

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
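// Illustrative check, not part of the original file: MaxEncodedLen(n) for
// this format is, in effect, 32 + n + n/6 with integer division (an
// assumption stated here, not taken from this file). That formula reproduces
// the hard-coded constant above: 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490.
func maxEncodedLenSketch(n int) int {
	return 32 + n + n/6
}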

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
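// Illustrative sketch, not part of the original file: laying out one
// uncompressed-data chunk of the framing format with the constants and crc
// function above. The assumed layout (per the framing format document) is a
// 4-byte chunk header (type byte plus 3-byte little-endian length of the
// remainder), then the masked CRC of the data, then the data itself.
func uncompressedChunkSketch(data []byte) []byte {
	n := checksumSize + len(data)
	buf := make([]byte, chunkHeaderSize, chunkHeaderSize+n)
	buf[0] = chunkTypeUncompressedData
	buf[1] = uint8(n >> 0)
	buf[2] = uint8(n >> 8)
	buf[3] = uint8(n >> 16)
	c := crc(data) // masked CRC-32C of the uncompressed payload
	buf = append(buf, uint8(c>>0), uint8(c>>8), uint8(c>>16), uint8(c>>24))
	return append(buf, data...)
}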
23 vendor/github.com/hashicorp/yamux/.gitignore generated vendored
@@ -1,23 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
362 vendor/github.com/hashicorp/yamux/LICENSE generated vendored
@@ -1,362 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
86 vendor/github.com/hashicorp/yamux/README.md generated vendored
@@ -1,86 +0,0 @@
|
||||
# Yamux
|
||||
|
||||
Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
|
||||
It relies on an underlying connection to provide reliability
|
||||
and ordering, such as TCP or Unix domain sockets, and provides
|
||||
stream-oriented multiplexing. It is inspired by SPDY but is not
|
||||
interoperable with it.
|
||||
|
||||
Yamux features include:
|
||||
|
||||
* Bi-directional streams
|
||||
* Streams can be opened by either client or server
|
||||
* Useful for NAT traversal
|
||||
* Server-side push support
|
||||
* Flow control
|
||||
* Avoid starvation
|
||||
* Back-pressure to prevent overwhelming a receiver
|
||||
* Keep Alives
|
||||
* Enables persistent connections over a load balancer
|
||||
* Efficient
|
||||
* Enables thousands of logical streams with low overhead
|
||||
|
||||
## Documentation
|
||||
|
||||
For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
|
||||
|
||||
## Specification
|
||||
|
||||
The full specification for Yamux is provided in the `spec.md` file.
|
||||
It can be used as a guide to implementors of interoperable libraries.
|
||||
|
||||
## Usage
|
||||
|
||||
Using Yamux is remarkably simple:
|
||||
|
||||
```go
|
||||
|
||||
func client() {
|
||||
// Get a TCP connection
|
||||
conn, err := net.Dial(...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Setup client side of yamux
|
||||
session, err := yamux.Client(conn, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Open a new stream
|
||||
stream, err := session.Open()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Stream implements net.Conn
|
||||
stream.Write([]byte("ping"))
|
||||
}
|
||||
|
||||
func server() {
|
||||
// Accept a TCP connection
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Setup server side of yamux
|
||||
session, err := yamux.Server(conn, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Accept a stream
|
||||
stream, err := session.Accept()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Listen for a message
|
||||
buf := make([]byte, 4)
|
||||
stream.Read(buf)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
60 vendor/github.com/hashicorp/yamux/addr.go generated vendored
@@ -1,60 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// hasAddr is used to get the address from the underlying connection
|
||||
type hasAddr interface {
|
||||
LocalAddr() net.Addr
|
||||
RemoteAddr() net.Addr
|
||||
}
|
||||
|
||||
// yamuxAddr is used when we cannot get the underlying address
|
||||
type yamuxAddr struct {
|
||||
Addr string
|
||||
}
|
||||
|
||||
func (*yamuxAddr) Network() string {
|
||||
return "yamux"
|
||||
}
|
||||
|
||||
func (y *yamuxAddr) String() string {
|
||||
return fmt.Sprintf("yamux:%s", y.Addr)
|
||||
}
|
||||
|
||||
// Addr is used to get the address of the listener.
|
||||
func (s *Session) Addr() net.Addr {
|
||||
return s.LocalAddr()
|
||||
}
|
||||
|
||||
// LocalAddr is used to get the local address of the
|
||||
// underlying connection.
|
||||
func (s *Session) LocalAddr() net.Addr {
|
||||
addr, ok := s.conn.(hasAddr)
|
||||
if !ok {
|
||||
return &yamuxAddr{"local"}
|
||||
}
|
||||
return addr.LocalAddr()
|
||||
}
|
||||
|
||||
// RemoteAddr is used to get the address of remote end
|
||||
// of the underlying connection
|
||||
func (s *Session) RemoteAddr() net.Addr {
|
||||
addr, ok := s.conn.(hasAddr)
|
||||
if !ok {
|
||||
return &yamuxAddr{"remote"}
|
||||
}
|
||||
return addr.RemoteAddr()
|
||||
}
|
||||
|
||||
// LocalAddr returns the local address
|
||||
func (s *Stream) LocalAddr() net.Addr {
|
||||
return s.session.LocalAddr()
|
||||
}
|
||||
|
||||
// LocalAddr returns the remote address
|
||||
func (s *Stream) RemoteAddr() net.Addr {
|
||||
return s.session.RemoteAddr()
|
||||
}
|
||||
157 vendor/github.com/hashicorp/yamux/const.go generated vendored
@@ -1,157 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidVersion means we received a frame with an
|
||||
// invalid version
|
||||
ErrInvalidVersion = fmt.Errorf("invalid protocol version")
|
||||
|
||||
// ErrInvalidMsgType means we received a frame with an
|
||||
// invalid message type
|
||||
ErrInvalidMsgType = fmt.Errorf("invalid msg type")
|
||||
|
||||
// ErrSessionShutdown is used if there is a shutdown during
|
||||
// an operation
|
||||
ErrSessionShutdown = fmt.Errorf("session shutdown")
|
||||
|
||||
// ErrStreamsExhausted is returned if we have no more
|
||||
// stream ids to issue
|
||||
ErrStreamsExhausted = fmt.Errorf("streams exhausted")
|
||||
|
||||
// ErrDuplicateStream is used if a duplicate stream is
|
||||
// opened inbound
|
||||
ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
|
||||
|
||||
// ErrRecvWindowExceeded indicates the receive window was exceeded
|
||||
ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
|
||||
|
||||
// ErrTimeout is used when we reach an IO deadline
|
||||
ErrTimeout = fmt.Errorf("i/o deadline reached")
|
||||
|
||||
// ErrStreamClosed is returned when using a closed stream
|
||||
ErrStreamClosed = fmt.Errorf("stream closed")
|
||||
|
||||
// ErrUnexpectedFlag is set when we get an unexpected flag
|
||||
ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
|
||||
|
||||
// ErrRemoteGoAway is used when we get a go away from the other side
|
||||
ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
|
||||
|
||||
// ErrConnectionReset is sent if a stream is reset. This can happen
|
||||
// if the backlog is exceeded, or if there was a remote GoAway.
|
||||
ErrConnectionReset = fmt.Errorf("connection reset")
|
||||
|
||||
// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
|
||||
// timeout writing to the underlying stream connection.
|
||||
ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
|
||||
|
||||
// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
|
||||
ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
|
||||
)
|
||||
|
||||
const (
|
||||
// protoVersion is the only version we support
|
||||
protoVersion uint8 = 0
|
||||
)
|
||||
|
||||
const (
|
||||
// Data is used for data frames. They are followed
|
||||
// by length bytes worth of payload.
|
||||
typeData uint8 = iota
|
||||
|
||||
// WindowUpdate is used to change the window of
|
||||
// a given stream. The length indicates the delta
|
||||
// update to the window.
|
||||
typeWindowUpdate
|
||||
|
||||
// Ping is sent as a keep-alive or to measure
|
||||
// the RTT. The StreamID and Length value are echoed
|
||||
// back in the response.
|
||||
typePing
|
||||
|
||||
// GoAway is sent to terminate a session. The StreamID
|
||||
// should be 0 and the length is an error code.
|
||||
typeGoAway
|
||||
)
|
||||
|
||||
const (
|
||||
// SYN is sent to signal a new stream. May
|
||||
// be sent with a data payload
|
||||
flagSYN uint16 = 1 << iota
|
||||
|
||||
// ACK is sent to acknowledge a new stream. May
|
||||
// be sent with a data payload
|
||||
flagACK
|
||||
|
||||
// FIN is sent to half-close the given stream.
|
||||
// May be sent with a data payload.
|
||||
flagFIN
|
||||
|
||||
// RST is used to hard close a given stream.
|
||||
flagRST
|
||||
)
|
||||
|
||||
const (
|
||||
// initialStreamWindow is the initial stream window size
|
||||
initialStreamWindow uint32 = 256 * 1024
|
||||
)
|
||||
|
||||
const (
|
||||
// goAwayNormal is sent on a normal termination
|
||||
goAwayNormal uint32 = iota
|
||||
|
||||
// goAwayProtoErr sent on a protocol error
|
||||
goAwayProtoErr
|
||||
|
||||
// goAwayInternalErr sent on an internal error
|
||||
goAwayInternalErr
|
||||
)
|
||||
|
||||
const (
|
||||
sizeOfVersion = 1
|
||||
sizeOfType = 1
|
||||
sizeOfFlags = 2
|
||||
sizeOfStreamID = 4
|
||||
sizeOfLength = 4
|
||||
headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
|
||||
sizeOfStreamID + sizeOfLength
|
||||
)
|
||||
|
||||
type header []byte
|
||||
|
||||
func (h header) Version() uint8 {
|
||||
return h[0]
|
||||
}
|
||||
|
||||
func (h header) MsgType() uint8 {
|
||||
return h[1]
|
||||
}
|
||||
|
||||
func (h header) Flags() uint16 {
|
||||
return binary.BigEndian.Uint16(h[2:4])
|
||||
}
|
||||
|
||||
func (h header) StreamID() uint32 {
|
||||
return binary.BigEndian.Uint32(h[4:8])
|
||||
}
|
||||
|
||||
func (h header) Length() uint32 {
|
||||
return binary.BigEndian.Uint32(h[8:12])
|
||||
}
|
||||
|
||||
func (h header) String() string {
|
||||
return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
|
||||
h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
|
||||
}
|
||||
|
||||
func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
|
||||
h[0] = protoVersion
|
||||
h[1] = msgType
|
||||
binary.BigEndian.PutUint16(h[2:4], flags)
|
||||
binary.BigEndian.PutUint32(h[4:8], streamID)
|
||||
binary.BigEndian.PutUint32(h[8:12], length)
|
||||
}
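// Illustrative sketch, not part of the original file: building a ping header
// with encode and reading the fields back through the accessors above. The
// identifiers used here (header, headerSize, typePing, flagSYN) are the ones
// defined in this file; the helper name is illustrative only.
func examplePingHeader(pingID uint32) header {
	hdr := header(make([]byte, headerSize))
	hdr.encode(typePing, flagSYN, 0, pingID) // StreamID 0; Length carries the opaque ping ID
	_ = hdr.MsgType()                        // typePing
	_ = hdr.Flags()                          // flagSYN
	_ = hdr.Length()                         // pingID, echoed back by the peer in the ACK
	return hdr
}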
|
||||
87 vendor/github.com/hashicorp/yamux/mux.go generated vendored
@@ -1,87 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config is used to tune the Yamux session
|
||||
type Config struct {
|
||||
// AcceptBacklog is used to limit how many streams may be
|
||||
// waiting for an accept.
|
||||
AcceptBacklog int
|
||||
|
||||
// EnableKeepAlive is used to send periodic keep alive
|
||||
// messages using a ping.
|
||||
EnableKeepAlive bool
|
||||
|
||||
// KeepAliveInterval is how often to perform the keep alive
|
||||
KeepAliveInterval time.Duration
|
||||
|
||||
// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
|
||||
// which we will suspect a problem with the underlying connection and
|
||||
// close it. This is only applied to writes, where there's generally
|
||||
// an expectation that things will move along quickly.
|
||||
ConnectionWriteTimeout time.Duration
|
||||
|
||||
// MaxStreamWindowSize is used to control the maximum
|
||||
// window size that we allow for a stream.
|
||||
MaxStreamWindowSize uint32
|
||||
|
||||
// LogOutput is used to control the log destination
|
||||
LogOutput io.Writer
|
||||
}
|
||||
|
||||
// DefaultConfig is used to return a default configuration
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
AcceptBacklog: 256,
|
||||
EnableKeepAlive: true,
|
||||
KeepAliveInterval: 30 * time.Second,
|
||||
ConnectionWriteTimeout: 10 * time.Second,
|
||||
MaxStreamWindowSize: initialStreamWindow,
|
||||
LogOutput: os.Stderr,
|
||||
}
|
||||
}
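// Illustrative sketch, not part of the original file: tuning the default
// configuration before wrapping an existing connection and opening a stream.
// Client, DefaultConfig, and Stream are from this package; the helper name
// and the chosen interval are illustrative only.
func openClientStream(conn io.ReadWriteCloser) (*Stream, error) {
	cfg := DefaultConfig()
	cfg.KeepAliveInterval = 10 * time.Second // ping more often than the 30s default
	session, err := Client(conn, cfg)
	if err != nil {
		return nil, err
	}
	return session.OpenStream()
}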
|
||||
|
||||
// VerifyConfig is used to verify the sanity of configuration
|
||||
func VerifyConfig(config *Config) error {
|
||||
if config.AcceptBacklog <= 0 {
|
||||
return fmt.Errorf("backlog must be positive")
|
||||
}
|
||||
if config.KeepAliveInterval == 0 {
|
||||
return fmt.Errorf("keep-alive interval must be positive")
|
||||
}
|
||||
if config.MaxStreamWindowSize < initialStreamWindow {
|
||||
return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Server is used to initialize a new server-side connection.
|
||||
// There must be at most one server-side connection. If a nil config is
|
||||
// provided, the default configuration will be used.
|
||||
func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
|
||||
if config == nil {
|
||||
config = DefaultConfig()
|
||||
}
|
||||
if err := VerifyConfig(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newSession(config, conn, false), nil
|
||||
}
|
||||
|
||||
// Client is used to initialize a new client-side connection.
|
||||
// There must be at most one client-side connection.
|
||||
func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
|
||||
if config == nil {
|
||||
config = DefaultConfig()
|
||||
}
|
||||
|
||||
if err := VerifyConfig(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newSession(config, conn, true), nil
|
||||
}
|
||||
648 vendor/github.com/hashicorp/yamux/session.go generated vendored
@@ -1,648 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Session is used to wrap a reliable ordered connection and to
|
||||
// multiplex it into multiple streams.
|
||||
type Session struct {
|
||||
// remoteGoAway indicates the remote side does
|
||||
// not want further connections. Must be first for alignment.
|
||||
remoteGoAway int32
|
||||
|
||||
// localGoAway indicates that we should stop
|
||||
// accepting further connections. Must be first for alignment.
|
||||
localGoAway int32
|
||||
|
||||
// nextStreamID is the next stream we should
|
||||
// send. This depends on whether we are a client or server.
|
||||
nextStreamID uint32
|
||||
|
||||
// config holds our configuration
|
||||
config *Config
|
||||
|
||||
// logger is used for our logs
|
||||
logger *log.Logger
|
||||
|
||||
// conn is the underlying connection
|
||||
conn io.ReadWriteCloser
|
||||
|
||||
// bufRead is a buffered reader
|
||||
bufRead *bufio.Reader
|
||||
|
||||
// pings is used to track inflight pings
|
||||
pings map[uint32]chan struct{}
|
||||
pingID uint32
|
||||
pingLock sync.Mutex
|
||||
|
||||
// streams maps a stream id to a stream, and inflight has an entry
|
||||
// for any outgoing stream that has not yet been established. Both are
|
||||
// protected by streamLock.
|
||||
streams map[uint32]*Stream
|
||||
inflight map[uint32]struct{}
|
||||
streamLock sync.Mutex
|
||||
|
||||
// synCh acts like a semaphore. It is sized to the AcceptBacklog which
|
||||
// is assumed to be symmetric between the client and server. This allows
|
||||
// the client to avoid exceeding the backlog and instead blocks the open.
|
||||
synCh chan struct{}
|
||||
|
||||
// acceptCh is used to pass ready streams to the client
|
||||
acceptCh chan *Stream
|
||||
|
||||
// sendCh is used to mark a stream as ready to send,
|
||||
// or to send a header out directly.
|
||||
sendCh chan sendReady
|
||||
|
||||
// recvDoneCh is closed when recv() exits to avoid a race
|
||||
// between stream registration and stream shutdown
|
||||
recvDoneCh chan struct{}
|
||||
|
||||
// shutdown is used to safely close a session
|
||||
shutdown bool
|
||||
shutdownErr error
|
||||
shutdownCh chan struct{}
|
||||
shutdownLock sync.Mutex
|
||||
}
|
||||
|
||||
// sendReady is used to either mark a stream as ready
|
||||
// or to directly send a header
|
||||
type sendReady struct {
|
||||
Hdr []byte
|
||||
Body io.Reader
|
||||
Err chan error
|
||||
}
|
||||
|
||||
// newSession is used to construct a new session
|
||||
func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
|
||||
s := &Session{
|
||||
config: config,
|
||||
logger: log.New(config.LogOutput, "", log.LstdFlags),
|
||||
conn: conn,
|
||||
bufRead: bufio.NewReader(conn),
|
||||
pings: make(map[uint32]chan struct{}),
|
||||
streams: make(map[uint32]*Stream),
|
||||
inflight: make(map[uint32]struct{}),
|
||||
synCh: make(chan struct{}, config.AcceptBacklog),
|
||||
acceptCh: make(chan *Stream, config.AcceptBacklog),
|
||||
sendCh: make(chan sendReady, 64),
|
||||
recvDoneCh: make(chan struct{}),
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
if client {
|
||||
s.nextStreamID = 1
|
||||
} else {
|
||||
s.nextStreamID = 2
|
||||
}
|
||||
go s.recv()
|
||||
go s.send()
|
||||
if config.EnableKeepAlive {
|
||||
go s.keepalive()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsClosed does a safe check to see if we have shutdown
|
||||
func (s *Session) IsClosed() bool {
|
||||
select {
|
||||
case <-s.shutdownCh:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// CloseChan returns a read-only channel which is closed as
|
||||
// soon as the session is closed.
|
||||
func (s *Session) CloseChan() <-chan struct{} {
|
||||
return s.shutdownCh
|
||||
}
|
||||
|
||||
// NumStreams returns the number of currently open streams
|
||||
func (s *Session) NumStreams() int {
|
||||
s.streamLock.Lock()
|
||||
num := len(s.streams)
|
||||
s.streamLock.Unlock()
|
||||
return num
|
||||
}
|
||||
|
||||
// Open is used to create a new stream as a net.Conn
|
||||
func (s *Session) Open() (net.Conn, error) {
|
||||
conn, err := s.OpenStream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// OpenStream is used to create a new stream
|
||||
func (s *Session) OpenStream() (*Stream, error) {
|
||||
if s.IsClosed() {
|
||||
return nil, ErrSessionShutdown
|
||||
}
|
||||
if atomic.LoadInt32(&s.remoteGoAway) == 1 {
|
||||
return nil, ErrRemoteGoAway
|
||||
}
|
||||
|
||||
// Block if we have too many inflight SYNs
|
||||
select {
|
||||
case s.synCh <- struct{}{}:
|
||||
case <-s.shutdownCh:
|
||||
return nil, ErrSessionShutdown
|
||||
}
|
||||
|
||||
GET_ID:
|
||||
// Get an ID, and check for stream exhaustion
|
||||
id := atomic.LoadUint32(&s.nextStreamID)
|
||||
if id >= math.MaxUint32-1 {
|
||||
return nil, ErrStreamsExhausted
|
||||
}
|
||||
if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
|
||||
goto GET_ID
|
||||
}
|
||||
|
||||
// Register the stream
|
||||
stream := newStream(s, id, streamInit)
|
||||
s.streamLock.Lock()
|
||||
s.streams[id] = stream
|
||||
s.inflight[id] = struct{}{}
|
||||
s.streamLock.Unlock()
|
||||
|
||||
// Send the window update to create
|
||||
if err := stream.sendWindowUpdate(); err != nil {
|
||||
select {
|
||||
case <-s.synCh:
|
||||
default:
|
||||
s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// Accept is used to block until the next available stream
|
||||
// is ready to be accepted.
|
||||
func (s *Session) Accept() (net.Conn, error) {
|
||||
conn, err := s.AcceptStream()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
|
||||
// AcceptStream is used to block until the next available stream
|
||||
// is ready to be accepted.
|
||||
func (s *Session) AcceptStream() (*Stream, error) {
|
||||
select {
|
||||
case stream := <-s.acceptCh:
|
||||
if err := stream.sendWindowUpdate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return stream, nil
|
||||
case <-s.shutdownCh:
|
||||
return nil, s.shutdownErr
|
||||
}
|
||||
}
|
||||
|
||||
// Close is used to close the session and all streams.
|
||||
// Attempts to send a GoAway before closing the connection.
|
||||
func (s *Session) Close() error {
|
||||
s.shutdownLock.Lock()
|
||||
defer s.shutdownLock.Unlock()
|
||||
|
||||
if s.shutdown {
|
||||
return nil
|
||||
}
|
||||
s.shutdown = true
|
||||
if s.shutdownErr == nil {
|
||||
s.shutdownErr = ErrSessionShutdown
|
||||
}
|
||||
close(s.shutdownCh)
|
||||
s.conn.Close()
|
||||
<-s.recvDoneCh
|
||||
|
||||
s.streamLock.Lock()
|
||||
defer s.streamLock.Unlock()
|
||||
for _, stream := range s.streams {
|
||||
stream.forceClose()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// exitErr is used to handle an error that is causing the
|
||||
// session to terminate.
|
||||
func (s *Session) exitErr(err error) {
|
||||
s.shutdownLock.Lock()
|
||||
if s.shutdownErr == nil {
|
||||
s.shutdownErr = err
|
||||
}
|
||||
s.shutdownLock.Unlock()
|
||||
s.Close()
|
||||
}
|
||||
|
||||
// GoAway can be used to prevent accepting further
|
||||
// connections. It does not close the underlying conn.
|
||||
func (s *Session) GoAway() error {
|
||||
return s.waitForSend(s.goAway(goAwayNormal), nil)
|
||||
}
|
||||
|
||||
// goAway is used to send a goAway message
|
||||
func (s *Session) goAway(reason uint32) header {
|
||||
atomic.SwapInt32(&s.localGoAway, 1)
|
||||
hdr := header(make([]byte, headerSize))
|
||||
hdr.encode(typeGoAway, 0, 0, reason)
|
||||
return hdr
|
||||
}
|
||||
|
||||
// Ping is used to measure the RTT response time
|
||||
func (s *Session) Ping() (time.Duration, error) {
|
||||
// Get a channel for the ping
|
||||
ch := make(chan struct{})
|
||||
|
||||
// Get a new ping id, mark as pending
|
||||
s.pingLock.Lock()
|
||||
id := s.pingID
|
||||
s.pingID++
|
||||
s.pings[id] = ch
|
||||
s.pingLock.Unlock()
|
||||
|
||||
// Send the ping request
|
||||
hdr := header(make([]byte, headerSize))
|
||||
hdr.encode(typePing, flagSYN, 0, id)
|
||||
if err := s.waitForSend(hdr, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Wait for a response
|
||||
start := time.Now()
|
||||
select {
|
||||
case <-ch:
|
||||
case <-time.After(s.config.ConnectionWriteTimeout):
|
||||
s.pingLock.Lock()
|
||||
delete(s.pings, id) // Ignore it if a response comes later.
|
||||
s.pingLock.Unlock()
|
||||
return 0, ErrTimeout
|
||||
case <-s.shutdownCh:
|
||||
return 0, ErrSessionShutdown
|
||||
}
|
||||
|
||||
// Compute the RTT
|
||||
return time.Now().Sub(start), nil
|
||||
}
|
||||
|
||||
// keepalive is a long running goroutine that periodically does
|
||||
// a ping to keep the connection alive.
|
||||
func (s *Session) keepalive() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(s.config.KeepAliveInterval):
|
||||
_, err := s.Ping()
|
||||
if err != nil {
|
||||
if err != ErrSessionShutdown {
|
||||
s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
|
||||
s.exitErr(ErrKeepAliveTimeout)
|
||||
}
|
||||
return
|
||||
}
|
||||
case <-s.shutdownCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForSend waits to send a header, checking for a potential shutdown
|
||||
func (s *Session) waitForSend(hdr header, body io.Reader) error {
|
||||
errCh := make(chan error, 1)
|
||||
return s.waitForSendErr(hdr, body, errCh)
|
||||
}
|
||||
|
||||
// waitForSendErr waits to send a header with optional data, checking for a
|
||||
// potential shutdown. Since there's the expectation that sends can happen
|
||||
// in a timely manner, we enforce the connection write timeout here.
|
||||
func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
|
||||
t := timerPool.Get()
|
||||
timer := t.(*time.Timer)
|
||||
timer.Reset(s.config.ConnectionWriteTimeout)
|
||||
defer func() {
|
||||
timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
default:
|
||||
}
|
||||
timerPool.Put(t)
|
||||
}()
|
||||
|
||||
ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
|
||||
select {
|
||||
case s.sendCh <- ready:
|
||||
case <-s.shutdownCh:
|
||||
return ErrSessionShutdown
|
||||
case <-timer.C:
|
||||
return ErrConnectionWriteTimeout
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return err
|
||||
case <-s.shutdownCh:
|
||||
return ErrSessionShutdown
|
||||
case <-timer.C:
|
||||
return ErrConnectionWriteTimeout
|
||||
}
|
||||
}
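// timerPool is defined elsewhere in the package; a minimal sketch of such a
// pool is shown here (an assumption, not the original definition) to make the
// Get/Reset/Stop/Put pattern used above concrete: timers are created stopped
// and reused so that each send does not allocate a new timer.
var timerPoolSketch = &sync.Pool{
	New: func() interface{} {
		timer := time.NewTimer(time.Hour) // placed in the pool stopped; callers Reset it after Get
		timer.Stop()
		return timer
	},
}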
|
||||
|
||||
// sendNoWait does a send without waiting. Since there's the expectation that
|
||||
// the send happens right here, we enforce the connection write timeout if we
|
||||
// can't queue the header to be sent.
|
||||
func (s *Session) sendNoWait(hdr header) error {
|
||||
t := timerPool.Get()
|
||||
timer := t.(*time.Timer)
|
||||
timer.Reset(s.config.ConnectionWriteTimeout)
|
||||
defer func() {
|
||||
timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
default:
|
||||
}
|
||||
timerPool.Put(t)
|
||||
}()
|
||||
|
||||
select {
|
||||
case s.sendCh <- sendReady{Hdr: hdr}:
|
||||
return nil
|
||||
case <-s.shutdownCh:
|
||||
return ErrSessionShutdown
|
||||
case <-timer.C:
|
||||
return ErrConnectionWriteTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// send is a long running goroutine that sends data
|
||||
func (s *Session) send() {
|
||||
for {
|
||||
select {
|
||||
case ready := <-s.sendCh:
|
||||
// Send a header if ready
|
||||
if ready.Hdr != nil {
|
||||
sent := 0
|
||||
for sent < len(ready.Hdr) {
|
||||
n, err := s.conn.Write(ready.Hdr[sent:])
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
|
||||
asyncSendErr(ready.Err, err)
|
||||
s.exitErr(err)
|
||||
return
|
||||
}
|
||||
sent += n
|
||||
}
|
||||
}
|
||||
|
||||
// Send data from a body if given
|
||||
if ready.Body != nil {
|
||||
_, err := io.Copy(s.conn, ready.Body)
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
|
||||
asyncSendErr(ready.Err, err)
|
||||
s.exitErr(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// No error, successful send
|
||||
asyncSendErr(ready.Err, nil)
|
||||
case <-s.shutdownCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recv is a long running goroutine that accepts new data
|
||||
func (s *Session) recv() {
|
||||
if err := s.recvLoop(); err != nil {
|
||||
s.exitErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
|
||||
var (
|
||||
handlers = []func(*Session, header) error{
|
||||
typeData: (*Session).handleStreamMessage,
|
||||
typeWindowUpdate: (*Session).handleStreamMessage,
|
||||
typePing: (*Session).handlePing,
|
||||
typeGoAway: (*Session).handleGoAway,
|
||||
}
|
||||
)
|
||||
|
||||
// recvLoop continues to receive data until a fatal error is encountered
|
||||
func (s *Session) recvLoop() error {
|
||||
defer close(s.recvDoneCh)
|
||||
hdr := header(make([]byte, headerSize))
|
||||
for {
|
||||
// Read the header
|
||||
if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
|
||||
if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
|
||||
s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify the version
|
||||
if hdr.Version() != protoVersion {
|
||||
s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
|
||||
return ErrInvalidVersion
|
||||
}
|
||||
|
||||
mt := hdr.MsgType()
|
||||
if mt < typeData || mt > typeGoAway {
|
||||
return ErrInvalidMsgType
|
||||
}
|
||||
|
||||
if err := handlers[mt](s, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleStreamMessage handles either a data or window update frame
|
||||
func (s *Session) handleStreamMessage(hdr header) error {
|
||||
// Check for a new stream creation
|
||||
id := hdr.StreamID()
|
||||
flags := hdr.Flags()
|
||||
if flags&flagSYN == flagSYN {
|
||||
if err := s.incomingStream(id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Get the stream
|
||||
s.streamLock.Lock()
|
||||
stream := s.streams[id]
|
||||
s.streamLock.Unlock()
|
||||
|
||||
// If we do not have a stream, likely we sent a RST
|
||||
if stream == nil {
|
||||
// Drain any data on the wire
|
||||
if hdr.MsgType() == typeData && hdr.Length() > 0 {
|
||||
s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
|
||||
if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
|
||||
s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if this is a window update
|
||||
if hdr.MsgType() == typeWindowUpdate {
|
||||
if err := stream.incrSendWindow(hdr, flags); err != nil {
|
||||
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
|
||||
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the new data
|
||||
if err := stream.readData(hdr, flags, s.bufRead); err != nil {
|
||||
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
|
||||
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handlePing is invoked for a typePing frame
|
||||
func (s *Session) handlePing(hdr header) error {
|
||||
flags := hdr.Flags()
|
||||
pingID := hdr.Length()
|
||||
|
||||
// Check if this is a query, respond back in a separate context so we
|
||||
// don't interfere with the receiving thread blocking for the write.
|
||||
if flags&flagSYN == flagSYN {
|
||||
go func() {
|
||||
hdr := header(make([]byte, headerSize))
|
||||
hdr.encode(typePing, flagACK, 0, pingID)
|
||||
if err := s.sendNoWait(hdr); err != nil {
|
||||
s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle a response
|
||||
s.pingLock.Lock()
|
||||
ch := s.pings[pingID]
|
||||
if ch != nil {
|
||||
delete(s.pings, pingID)
|
||||
close(ch)
|
||||
}
|
||||
s.pingLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleGoAway is invoked for a typeGoAway frame
|
||||
func (s *Session) handleGoAway(hdr header) error {
|
||||
code := hdr.Length()
|
||||
switch code {
|
||||
case goAwayNormal:
|
||||
atomic.SwapInt32(&s.remoteGoAway, 1)
|
||||
case goAwayProtoErr:
|
||||
s.logger.Printf("[ERR] yamux: received protocol error go away")
|
||||
return fmt.Errorf("yamux protocol error")
|
||||
case goAwayInternalErr:
|
||||
s.logger.Printf("[ERR] yamux: received internal error go away")
|
||||
return fmt.Errorf("remote yamux internal error")
|
||||
default:
|
||||
s.logger.Printf("[ERR] yamux: received unexpected go away")
|
||||
return fmt.Errorf("unexpected go away received")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// incomingStream is used to create a new incoming stream
|
||||
func (s *Session) incomingStream(id uint32) error {
|
||||
// Reject immediately if we are doing a go away
|
||||
if atomic.LoadInt32(&s.localGoAway) == 1 {
|
||||
hdr := header(make([]byte, headerSize))
|
||||
hdr.encode(typeWindowUpdate, flagRST, id, 0)
|
||||
return s.sendNoWait(hdr)
|
||||
}
|
||||
|
||||
// Allocate a new stream
|
||||
stream := newStream(s, id, streamSYNReceived)
|
||||
|
||||
s.streamLock.Lock()
|
||||
defer s.streamLock.Unlock()
|
||||
|
||||
// Check if stream already exists
|
||||
if _, ok := s.streams[id]; ok {
|
||||
s.logger.Printf("[ERR] yamux: duplicate stream declared")
|
||||
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
|
||||
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
|
||||
}
|
||||
return ErrDuplicateStream
|
||||
}
|
||||
|
||||
// Register the stream
|
||||
s.streams[id] = stream
|
||||
|
||||
// Check if we've exceeded the backlog
|
||||
select {
|
||||
case s.acceptCh <- stream:
|
||||
return nil
|
||||
default:
|
||||
// Backlog exceeded! RST the stream
|
||||
s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
|
||||
delete(s.streams, id)
|
||||
stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
|
||||
return s.sendNoWait(stream.sendHdr)
|
||||
}
|
||||
}
|
||||
|
||||
// closeStream is used to close a stream once both sides have
|
||||
// issued a close. If there was an in-flight SYN and the stream
|
||||
// was not yet established, then this will give the credit back.
|
||||
func (s *Session) closeStream(id uint32) {
|
||||
s.streamLock.Lock()
|
||||
if _, ok := s.inflight[id]; ok {
|
||||
select {
|
||||
case <-s.synCh:
|
||||
default:
|
||||
s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
|
||||
}
|
||||
}
|
||||
delete(s.streams, id)
|
||||
s.streamLock.Unlock()
|
||||
}
|
||||
|
||||
// establishStream is used to mark a stream that was in the
|
||||
// SYN Sent state as established.
|
||||
func (s *Session) establishStream(id uint32) {
|
||||
s.streamLock.Lock()
|
||||
if _, ok := s.inflight[id]; ok {
|
||||
delete(s.inflight, id)
|
||||
} else {
|
||||
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
|
||||
}
|
||||
select {
|
||||
case <-s.synCh:
|
||||
default:
|
||||
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
|
||||
}
|
||||
s.streamLock.Unlock()
|
||||
}
|
||||
140 vendor/github.com/hashicorp/yamux/spec.md generated vendored
@@ -1,140 +0,0 @@
# Specification

We use this document to detail the internal specification of Yamux.
It serves both as a guide for implementing Yamux and as a reference for
building alternative interoperable libraries.

# Framing

Yamux uses a streaming connection underneath, but imposes a message
framing so that it can be shared between many logical streams. Each
frame contains a header with the following fields:

* Version (8 bits)
* Type (8 bits)
* Flags (16 bits)
* StreamID (32 bits)
* Length (32 bits)

This means that each header has a 12 byte overhead.
All fields are encoded in network order (big endian).
Each field is described below:

## Version Field

The version field is used for future backward compatibility. At the
current time, the field is always set to 0, to indicate the initial
version.

## Type Field

The type field is used to switch the frame message type. The following
message types are supported:

* 0x0 Data - Used to transmit data. May transmit zero length payloads
  depending on the flags.

* 0x1 Window Update - Used to update the sender's receive window size.
  This is used to implement per-session flow control.

* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
  and do keep-alives over TCP.

* 0x3 Go Away - Used to close a session.

## Flag Field

The flags field is used to provide additional information related
to the message type. The following flags are supported:

* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
  window update message. Also sent with a ping to indicate outbound.

* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
  or window update message. Also sent with a ping to indicate response.

* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
  message or window update.

* 0x8 RST - Reset a stream immediately. May be sent with a data or
  window update message.

## StreamID Field

The StreamID field is used to identify the logical stream the frame
is addressing. The client side should use odd IDs and the server even IDs.
This prevents any collisions. Additionally, the 0 ID is reserved to represent
the session.

Both Ping and Go Away messages should always use the 0 StreamID.

## Length Field

The meaning of the length field depends on the message type:

* Data - provides the length of bytes following the header
* Window update - provides a delta update to the window size
* Ping - Contains an opaque value, echoed back
* Go Away - Contains an error code
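The 12-byte layout above maps directly onto a fixed-size byte slice. The following is a minimal sketch of encoding such a header in Go using only the big-endian layout defined here; `headerSize` matches the constant the vendored library uses, but the helper itself is illustrative and not the library's own `header.encode`.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const headerSize = 12 // 1 version + 1 type + 2 flags + 4 stream ID + 4 length

// encodeHeader packs the five fields into the 12-byte wire format
// described above, using network (big endian) byte order.
func encodeHeader(msgType uint8, flags uint16, streamID, length uint32) [headerSize]byte {
	var h [headerSize]byte
	h[0] = 0 // version, always 0 for now
	h[1] = msgType
	binary.BigEndian.PutUint16(h[2:4], flags)
	binary.BigEndian.PutUint32(h[4:8], streamID)
	binary.BigEndian.PutUint32(h[8:12], length)
	return h
}

func main() {
	// A Window Update (type 0x1) with the SYN flag (0x1) on stream 1,
	// carrying a 256KB window delta.
	h := encodeHeader(0x1, 0x1, 1, 256*1024)
	fmt.Printf("% x\n", h[:])
	// Decoding is the mirror image of the encoding above.
	fmt.Println("stream:", binary.BigEndian.Uint32(h[4:8]),
		"length:", binary.BigEndian.Uint32(h[8:12]))
}
```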
# Message Flow

There is no explicit connection setup, as Yamux relies on an underlying
transport to be provided. However, there is a distinction between the client
and server side of the connection.

## Opening a stream

To open a stream, an initial data or window update frame is sent
with a new StreamID. The SYN flag should be set to signal a new stream.

The receiver must then reply with either a data or window update frame
with the StreamID along with the ACK flag to accept the stream or with
the RST flag to reject the stream.

Because we are relying on the reliable stream underneath, a connection
can begin sending data once the SYN flag is sent. The corresponding
ACK does not need to be received. This is particularly well suited
for an RPC system where a client wants to open a stream and immediately
fire a request without waiting for the RTT of the ACK.

This does introduce the possibility of a connection being rejected
after data has already been sent. This is a slight semantic difference
from TCP, where the connection cannot be refused after it is opened.
Clients should be prepared to handle this by checking for an error
that indicates a RST was received.

## Closing a stream

To close a stream, either side sends a data or window update frame
along with the FIN flag. This does a half-close indicating the sender
will send no further data.

Once both sides have closed the connection, the stream is closed.

Alternatively, if an error occurs, the RST flag can be used to
hard close a stream immediately.
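For readers using the vendored Go library rather than reimplementing the wire format, the open/accept handshake above is driven by the Session API. Here is a hedged usage sketch, assuming a pre-established `net.Conn` on each side; error paths are kept minimal and the payload is illustrative.

```go
package sketch

import (
	"net"

	"github.com/hashicorp/yamux"
)

// dialSide wraps an existing connection and opens one logical stream.
// Opening sends the SYN; data may be written before the ACK arrives.
func dialSide(conn net.Conn) error {
	session, err := yamux.Client(conn, nil) // nil => default config
	if err != nil {
		return err
	}
	stream, err := session.OpenStream()
	if err != nil {
		return err
	}
	defer stream.Close() // sends the FIN half-close described above
	_, err = stream.Write([]byte("ping"))
	return err
}

// listenSide accepts the logical streams initiated by the peer.
func listenSide(conn net.Conn) error {
	session, err := yamux.Server(conn, nil)
	if err != nil {
		return err
	}
	stream, err := session.AcceptStream()
	if err != nil {
		return err
	}
	buf := make([]byte, 4)
	_, err = stream.Read(buf)
	return err
}
```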
## Flow Control

Yamux initially starts each stream with a 256KB window size.
There is no window size for the session.

To prevent the streams from stalling, window update frames should be
sent regularly. Yamux can be configured to provide a larger limit for
window sizes. Both sides assume the initial 256KB window, but can
immediately send a window update as part of the SYN/ACK indicating a
larger window.

Both sides should track the number of bytes sent in Data frames
only, as only those bytes are counted against the window size.

## Session termination

When a session is being terminated, the Go Away message should
be sent. The Length should be set to one of the following to
provide an error code:

* 0x0 Normal termination
* 0x1 Protocol error
* 0x2 Internal error
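In the vendored Go implementation, the normal termination code above corresponds to a graceful shutdown path on the Session. A brief, hedged sketch of terminating a session from the application side follows; the method names exist in hashicorp/yamux, while the helper itself is illustrative.

```go
package sketch

import "github.com/hashicorp/yamux"

// shutdown announces a normal Go Away (error code 0x0) so the peer
// stops opening new streams, then tears the session down.
func shutdown(session *yamux.Session) error {
	if err := session.GoAway(); err != nil {
		return err
	}
	// Close releases all streams and the underlying connection.
	return session.Close()
}
```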
470
vendor/github.com/hashicorp/yamux/stream.go
generated
vendored
470
vendor/github.com/hashicorp/yamux/stream.go
generated
vendored
@ -1,470 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
const (
|
||||
streamInit streamState = iota
|
||||
streamSYNSent
|
||||
streamSYNReceived
|
||||
streamEstablished
|
||||
streamLocalClose
|
||||
streamRemoteClose
|
||||
streamClosed
|
||||
streamReset
|
||||
)
|
||||
|
||||
// Stream is used to represent a logical stream
|
||||
// within a session.
|
||||
type Stream struct {
|
||||
recvWindow uint32
|
||||
sendWindow uint32
|
||||
|
||||
id uint32
|
||||
session *Session
|
||||
|
||||
state streamState
|
||||
stateLock sync.Mutex
|
||||
|
||||
recvBuf *bytes.Buffer
|
||||
recvLock sync.Mutex
|
||||
|
||||
controlHdr header
|
||||
controlErr chan error
|
||||
controlHdrLock sync.Mutex
|
||||
|
||||
sendHdr header
|
||||
sendErr chan error
|
||||
sendLock sync.Mutex
|
||||
|
||||
recvNotifyCh chan struct{}
|
||||
sendNotifyCh chan struct{}
|
||||
|
||||
readDeadline atomic.Value // time.Time
|
||||
writeDeadline atomic.Value // time.Time
|
||||
}
|
||||
|
||||
// newStream is used to construct a new stream within
|
||||
// a given session for an ID
|
||||
func newStream(session *Session, id uint32, state streamState) *Stream {
|
||||
s := &Stream{
|
||||
id: id,
|
||||
session: session,
|
||||
state: state,
|
||||
controlHdr: header(make([]byte, headerSize)),
|
||||
controlErr: make(chan error, 1),
|
||||
sendHdr: header(make([]byte, headerSize)),
|
||||
sendErr: make(chan error, 1),
|
||||
recvWindow: initialStreamWindow,
|
||||
sendWindow: initialStreamWindow,
|
||||
recvNotifyCh: make(chan struct{}, 1),
|
||||
sendNotifyCh: make(chan struct{}, 1),
|
||||
}
|
||||
s.readDeadline.Store(time.Time{})
|
||||
s.writeDeadline.Store(time.Time{})
|
||||
return s
|
||||
}
|
||||
|
||||
// Session returns the associated stream session
|
||||
func (s *Stream) Session() *Session {
|
||||
return s.session
|
||||
}
|
||||
|
||||
// StreamID returns the ID of this stream
|
||||
func (s *Stream) StreamID() uint32 {
|
||||
return s.id
|
||||
}
|
||||
|
||||
// Read is used to read from the stream
|
||||
func (s *Stream) Read(b []byte) (n int, err error) {
|
||||
defer asyncNotify(s.recvNotifyCh)
|
||||
START:
|
||||
s.stateLock.Lock()
|
||||
switch s.state {
|
||||
case streamLocalClose:
|
||||
fallthrough
|
||||
case streamRemoteClose:
|
||||
fallthrough
|
||||
case streamClosed:
|
||||
s.recvLock.Lock()
|
||||
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
|
||||
s.recvLock.Unlock()
|
||||
s.stateLock.Unlock()
|
||||
return 0, io.EOF
|
||||
}
|
||||
s.recvLock.Unlock()
|
||||
case streamReset:
|
||||
s.stateLock.Unlock()
|
||||
return 0, ErrConnectionReset
|
||||
}
|
||||
s.stateLock.Unlock()
|
||||
|
||||
// If there is no data available, block
|
||||
s.recvLock.Lock()
|
||||
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
|
||||
s.recvLock.Unlock()
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Read any bytes
|
||||
n, _ = s.recvBuf.Read(b)
|
||||
s.recvLock.Unlock()
|
||||
|
||||
// Send a window update potentially
|
||||
err = s.sendWindowUpdate()
|
||||
return n, err
|
||||
|
||||
WAIT:
|
||||
var timeout <-chan time.Time
|
||||
var timer *time.Timer
|
||||
readDeadline := s.readDeadline.Load().(time.Time)
|
||||
if !readDeadline.IsZero() {
|
||||
delay := readDeadline.Sub(time.Now())
|
||||
timer = time.NewTimer(delay)
|
||||
timeout = timer.C
|
||||
}
|
||||
select {
|
||||
case <-s.recvNotifyCh:
|
||||
if timer != nil {
|
||||
timer.Stop()
|
||||
}
|
||||
goto START
|
||||
case <-timeout:
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
}
|
||||
|
||||
// Write is used to write to the stream
|
||||
func (s *Stream) Write(b []byte) (n int, err error) {
|
||||
s.sendLock.Lock()
|
||||
defer s.sendLock.Unlock()
|
||||
total := 0
|
||||
for total < len(b) {
|
||||
n, err := s.write(b[total:])
|
||||
total += n
|
||||
if err != nil {
|
||||
return total, err
|
||||
}
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// write is used to write to the stream, may return on
|
||||
// a short write.
|
||||
func (s *Stream) write(b []byte) (n int, err error) {
|
||||
var flags uint16
|
||||
var max uint32
|
||||
var body io.Reader
|
||||
START:
|
||||
s.stateLock.Lock()
|
||||
switch s.state {
|
||||
case streamLocalClose:
|
||||
fallthrough
|
||||
case streamClosed:
|
||||
s.stateLock.Unlock()
|
||||
return 0, ErrStreamClosed
|
||||
case streamReset:
|
||||
s.stateLock.Unlock()
|
||||
return 0, ErrConnectionReset
|
||||
}
|
||||
s.stateLock.Unlock()
|
||||
|
||||
// If there is no data available, block
|
||||
window := atomic.LoadUint32(&s.sendWindow)
|
||||
if window == 0 {
|
||||
goto WAIT
|
||||
}
|
||||
|
||||
// Determine the flags if any
|
||||
flags = s.sendFlags()
|
||||
|
||||
// Send up to our send window
|
||||
max = min(window, uint32(len(b)))
|
||||
body = bytes.NewReader(b[:max])
|
||||
|
||||
// Send the header
|
||||
s.sendHdr.encode(typeData, flags, s.id, max)
|
||||
if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Reduce our send window
|
||||
atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
|
||||
|
||||
// Unlock
|
||||
return int(max), err
|
||||
|
||||
WAIT:
|
||||
var timeout <-chan time.Time
|
||||
writeDeadline := s.writeDeadline.Load().(time.Time)
|
||||
if !writeDeadline.IsZero() {
|
||||
delay := writeDeadline.Sub(time.Now())
|
||||
timeout = time.After(delay)
|
||||
}
|
||||
select {
|
||||
case <-s.sendNotifyCh:
|
||||
goto START
|
||||
case <-timeout:
|
||||
return 0, ErrTimeout
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// sendFlags determines any flags that are appropriate
|
||||
// based on the current stream state
|
||||
func (s *Stream) sendFlags() uint16 {
|
||||
s.stateLock.Lock()
|
||||
defer s.stateLock.Unlock()
|
||||
var flags uint16
|
||||
switch s.state {
|
||||
case streamInit:
|
||||
flags |= flagSYN
|
||||
s.state = streamSYNSent
|
||||
case streamSYNReceived:
|
||||
flags |= flagACK
|
||||
s.state = streamEstablished
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
// sendWindowUpdate potentially sends a window update enabling
|
||||
// further writes to take place. Must be invoked with the lock.
|
||||
func (s *Stream) sendWindowUpdate() error {
|
||||
s.controlHdrLock.Lock()
|
||||
defer s.controlHdrLock.Unlock()
|
||||
|
||||
// Determine the delta update
|
||||
max := s.session.config.MaxStreamWindowSize
|
||||
var bufLen uint32
|
||||
s.recvLock.Lock()
|
||||
if s.recvBuf != nil {
|
||||
bufLen = uint32(s.recvBuf.Len())
|
||||
}
|
||||
delta := (max - bufLen) - s.recvWindow
|
||||
|
||||
// Determine the flags if any
|
||||
flags := s.sendFlags()
|
||||
|
||||
// Check if we can omit the update
|
||||
if delta < (max/2) && flags == 0 {
|
||||
s.recvLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update our window
|
||||
s.recvWindow += delta
|
||||
s.recvLock.Unlock()
|
||||
|
||||
// Send the header
|
||||
s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
|
||||
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendClose is used to send a FIN
|
||||
func (s *Stream) sendClose() error {
|
||||
s.controlHdrLock.Lock()
|
||||
defer s.controlHdrLock.Unlock()
|
||||
|
||||
flags := s.sendFlags()
|
||||
flags |= flagFIN
|
||||
s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
|
||||
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close is used to close the stream
|
||||
func (s *Stream) Close() error {
|
||||
closeStream := false
|
||||
s.stateLock.Lock()
|
||||
switch s.state {
|
||||
// Opened means we need to signal a close
|
||||
case streamSYNSent:
|
||||
fallthrough
|
||||
case streamSYNReceived:
|
||||
fallthrough
|
||||
case streamEstablished:
|
||||
s.state = streamLocalClose
|
||||
goto SEND_CLOSE
|
||||
|
||||
case streamLocalClose:
|
||||
case streamRemoteClose:
|
||||
s.state = streamClosed
|
||||
closeStream = true
|
||||
goto SEND_CLOSE
|
||||
|
||||
case streamClosed:
|
||||
case streamReset:
|
||||
default:
|
||||
panic("unhandled state")
|
||||
}
|
||||
s.stateLock.Unlock()
|
||||
return nil
|
||||
SEND_CLOSE:
|
||||
s.stateLock.Unlock()
|
||||
s.sendClose()
|
||||
s.notifyWaiting()
|
||||
if closeStream {
|
||||
s.session.closeStream(s.id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// forceClose is used for when the session is exiting
|
||||
func (s *Stream) forceClose() {
|
||||
s.stateLock.Lock()
|
||||
s.state = streamClosed
|
||||
s.stateLock.Unlock()
|
||||
s.notifyWaiting()
|
||||
}
|
||||
|
||||
// processFlags is used to update the state of the stream
|
||||
// based on set flags, if any. Lock must be held
|
||||
func (s *Stream) processFlags(flags uint16) error {
|
||||
// Close the stream without holding the state lock
|
||||
closeStream := false
|
||||
defer func() {
|
||||
if closeStream {
|
||||
s.session.closeStream(s.id)
|
||||
}
|
||||
}()
|
||||
|
||||
s.stateLock.Lock()
|
||||
defer s.stateLock.Unlock()
|
||||
if flags&flagACK == flagACK {
|
||||
if s.state == streamSYNSent {
|
||||
s.state = streamEstablished
|
||||
}
|
||||
s.session.establishStream(s.id)
|
||||
}
|
||||
if flags&flagFIN == flagFIN {
|
||||
switch s.state {
|
||||
case streamSYNSent:
|
||||
fallthrough
|
||||
case streamSYNReceived:
|
||||
fallthrough
|
||||
case streamEstablished:
|
||||
s.state = streamRemoteClose
|
||||
s.notifyWaiting()
|
||||
case streamLocalClose:
|
||||
s.state = streamClosed
|
||||
closeStream = true
|
||||
s.notifyWaiting()
|
||||
default:
|
||||
s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
|
||||
return ErrUnexpectedFlag
|
||||
}
|
||||
}
|
||||
if flags&flagRST == flagRST {
|
||||
s.state = streamReset
|
||||
closeStream = true
|
||||
s.notifyWaiting()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// notifyWaiting notifies all the waiting channels
|
||||
func (s *Stream) notifyWaiting() {
|
||||
asyncNotify(s.recvNotifyCh)
|
||||
asyncNotify(s.sendNotifyCh)
|
||||
}
|
||||
|
||||
// incrSendWindow updates the size of our send window
|
||||
func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
|
||||
if err := s.processFlags(flags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Increase window, unblock a sender
|
||||
atomic.AddUint32(&s.sendWindow, hdr.Length())
|
||||
asyncNotify(s.sendNotifyCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
// readData is used to handle a data frame
|
||||
func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
|
||||
if err := s.processFlags(flags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check that our recv window is not exceeded
|
||||
length := hdr.Length()
|
||||
if length == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wrap in a limited reader
|
||||
conn = &io.LimitedReader{R: conn, N: int64(length)}
|
||||
|
||||
// Copy into buffer
|
||||
s.recvLock.Lock()
|
||||
|
||||
if length > s.recvWindow {
|
||||
s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
|
||||
return ErrRecvWindowExceeded
|
||||
}
|
||||
|
||||
if s.recvBuf == nil {
|
||||
// Allocate the receive buffer just-in-time to fit the full data frame.
|
||||
// This way we can read in the whole packet without further allocations.
|
||||
s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
|
||||
}
|
||||
if _, err := io.Copy(s.recvBuf, conn); err != nil {
|
||||
s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
|
||||
s.recvLock.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// Decrement the receive window
|
||||
s.recvWindow -= length
|
||||
s.recvLock.Unlock()
|
||||
|
||||
// Unblock any readers
|
||||
asyncNotify(s.recvNotifyCh)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDeadline sets the read and write deadlines
|
||||
func (s *Stream) SetDeadline(t time.Time) error {
|
||||
if err := s.SetReadDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.SetWriteDeadline(t); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetReadDeadline sets the deadline for future Read calls.
|
||||
func (s *Stream) SetReadDeadline(t time.Time) error {
|
||||
s.readDeadline.Store(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetWriteDeadline sets the deadline for future Write calls
|
||||
func (s *Stream) SetWriteDeadline(t time.Time) error {
|
||||
s.writeDeadline.Store(t)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shrink is used to compact the amount of buffers utilized
|
||||
// This is useful when using Yamux in a connection pool to reduce
|
||||
// the idle memory utilization.
|
||||
func (s *Stream) Shrink() {
|
||||
s.recvLock.Lock()
|
||||
if s.recvBuf != nil && s.recvBuf.Len() == 0 {
|
||||
s.recvBuf = nil
|
||||
}
|
||||
s.recvLock.Unlock()
|
||||
}
|
||||
43
vendor/github.com/hashicorp/yamux/util.go
generated
vendored
43
vendor/github.com/hashicorp/yamux/util.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
package yamux
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
timerPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
timer := time.NewTimer(time.Hour * 1e6)
|
||||
timer.Stop()
|
||||
return timer
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// asyncSendErr is used to try an async send of an error
|
||||
func asyncSendErr(ch chan error, err error) {
|
||||
if ch == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case ch <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// asyncNotify is used to signal a waiting goroutine
|
||||
func asyncNotify(ch chan struct{}) {
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// min computes the minimum of two values
|
||||
func min(a, b uint32) uint32 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
4
vendor/github.com/miekg/dns/.gitignore
generated
vendored
4
vendor/github.com/miekg/dns/.gitignore
generated
vendored
@ -1,4 +0,0 @@
|
||||
*.6
|
||||
tags
|
||||
test.out
|
||||
a.out
|
||||
1
vendor/github.com/miekg/dns/AUTHORS
generated
vendored
1
vendor/github.com/miekg/dns/AUTHORS
generated
vendored
@ -1 +0,0 @@
|
||||
Miek Gieben <miek@miek.nl>
|
||||
10
vendor/github.com/miekg/dns/CONTRIBUTORS
generated
vendored
10
vendor/github.com/miekg/dns/CONTRIBUTORS
generated
vendored
@ -1,10 +0,0 @@
|
||||
Alex A. Skinner
|
||||
Andrew Tunnell-Jones
|
||||
Ask Bjørn Hansen
|
||||
Dave Cheney
|
||||
Dusty Wilson
|
||||
Marek Majkowski
|
||||
Peter van Dijk
|
||||
Omri Bahumi
|
||||
Alex Sergeyev
|
||||
James Hartig
|
||||
9
vendor/github.com/miekg/dns/COPYRIGHT
generated
vendored
9
vendor/github.com/miekg/dns/COPYRIGHT
generated
vendored
@ -1,9 +0,0 @@
|
||||
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
|
||||
is governed by a BSD-style license that can be found in the LICENSE file.
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
|
||||
|
||||
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
|
||||
governed by a BSD-style license that can be found in the LICENSE file.
|
||||
|
||||
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
|
||||
governed by a BSD-style license that can be found in the LICENSE file.
|
||||
21
vendor/github.com/miekg/dns/Gopkg.lock
generated
vendored
21
vendor/github.com/miekg/dns/Gopkg.lock
generated
vendored
@ -1,21 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ed25519","ed25519/internal/edwards25519"]
|
||||
revision = "b080dc9a8c480b08e698fb1219160d598526310f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
|
||||
revision = "894f8ed5849b15b810ae41e9590a0d05395bba27"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "c4abc38abaeeeeb9be92455c9c02cae32841122b8982aaa067ef25bb8e86ff9d"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
26
vendor/github.com/miekg/dns/Gopkg.toml
generated
vendored
26
vendor/github.com/miekg/dns/Gopkg.toml
generated
vendored
@ -1,26 +0,0 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
32
vendor/github.com/miekg/dns/LICENSE
generated
vendored
32
vendor/github.com/miekg/dns/LICENSE
generated
vendored
@ -1,32 +0,0 @@
|
||||
Extensions of the original work are copyright (c) 2011 Miek Gieben
|
||||
|
||||
As this is fork of the official Go code the same license applies:
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
33
vendor/github.com/miekg/dns/Makefile.fuzz
generated
vendored
33
vendor/github.com/miekg/dns/Makefile.fuzz
generated
vendored
@ -1,33 +0,0 @@
|
||||
# Makefile for fuzzing
|
||||
#
|
||||
# Use go-fuzz and needs the tools installed.
|
||||
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
|
||||
#
|
||||
# Installing go-fuzz:
|
||||
# $ make -f Makefile.fuzz get
|
||||
# Installs:
|
||||
# * github.com/dvyukov/go-fuzz/go-fuzz
|
||||
# * get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
all: build
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
go-fuzz-build -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: build-newrr
|
||||
build-newrr:
|
||||
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
|
||||
|
||||
.PHONY: fuzz
|
||||
fuzz:
|
||||
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
|
||||
|
||||
.PHONY: get
|
||||
get:
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz
|
||||
go get github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm *-fuzz.zip
|
||||
52
vendor/github.com/miekg/dns/Makefile.release
generated
vendored
52
vendor/github.com/miekg/dns/Makefile.release
generated
vendored
@ -1,52 +0,0 @@
|
||||
# Makefile for releasing.
|
||||
#
|
||||
# The release is controlled from version.go. The version found there is
|
||||
# used to tag the git repo, we're not building any artifacts so there is nothing
|
||||
# to upload to github.
|
||||
#
|
||||
# * Up the version in version.go
|
||||
# * Run: make -f Makefile.release release
|
||||
# * will *commit* your change with 'Release $VERSION'
|
||||
# * push to github
|
||||
#
|
||||
|
||||
define GO
|
||||
//+build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println(dns.Version.String())
|
||||
}
|
||||
endef
|
||||
|
||||
$(file > version_release.go,$(GO))
|
||||
VERSION:=$(shell go run version_release.go)
|
||||
TAG="v$(VERSION)"
|
||||
|
||||
all:
|
||||
@echo Use the \'release\' target to start a release $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: release
|
||||
release: commit push
|
||||
@echo Released $(VERSION)
|
||||
rm -f version_release.go
|
||||
|
||||
.PHONY: commit
|
||||
commit:
|
||||
@echo Committing release $(VERSION)
|
||||
git commit -am"Release $(VERSION)"
|
||||
git tag $(TAG)
|
||||
|
||||
.PHONY: push
|
||||
push:
|
||||
@echo Pushing release $(VERSION) to master
|
||||
git push --tags
|
||||
git push
|
||||
168
vendor/github.com/miekg/dns/README.md
generated
vendored
168
vendor/github.com/miekg/dns/README.md
generated
vendored
@ -1,168 +0,0 @@
|
||||
[](https://travis-ci.org/miekg/dns)
|
||||
[](https://codecov.io/github/miekg/dns?branch=master)
|
||||
[](https://goreportcard.com/report/miekg/dns)
|
||||
[](https://godoc.org/github.com/miekg/dns)
|
||||
|
||||
# Alternative (more granular) approach to a DNS library
|
||||
|
||||
> Less is more.
|
||||
|
||||
Complete and usable DNS library. All widely used Resource Records are supported, including the
|
||||
DNSSEC types. It follows a lean and mean philosophy. If there is stuff you should know as a DNS
|
||||
programmer there isn't a convenience function for it. Server side and client side programming is
|
||||
supported, i.e. you can build servers and resolvers with it.
|
||||
|
||||
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
|
||||
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
|
||||
|
||||
# Goals
|
||||
|
||||
* KISS;
|
||||
* Fast;
|
||||
* Small API. If it's easy to code in Go, don't make a function for it.
|
||||
|
||||
# Users
|
||||
|
||||
A not-so-up-to-date-list-that-may-be-actually-current:
|
||||
|
||||
* https://github.com/coredns/coredns
|
||||
* https://cloudflare.com
|
||||
* https://github.com/abh/geodns
|
||||
* http://www.statdns.com/
|
||||
* http://www.dnsinspect.com/
|
||||
* https://github.com/chuangbo/jianbing-dictionary-dns
|
||||
* http://www.dns-lg.com/
|
||||
* https://github.com/fcambus/rrda
|
||||
* https://github.com/kenshinx/godns
|
||||
* https://github.com/skynetservices/skydns
|
||||
* https://github.com/hashicorp/consul
|
||||
* https://github.com/DevelopersPL/godnsagent
|
||||
* https://github.com/duedil-ltd/discodns
|
||||
* https://github.com/StalkR/dns-reverse-proxy
|
||||
* https://github.com/tianon/rawdns
|
||||
* https://mesosphere.github.io/mesos-dns/
|
||||
* https://pulse.turbobytes.com/
|
||||
* https://play.google.com/store/apps/details?id=com.turbobytes.dig
|
||||
* https://github.com/fcambus/statzone
|
||||
* https://github.com/benschw/dns-clb-go
|
||||
* https://github.com/corny/dnscheck for http://public-dns.info/
|
||||
* https://namesmith.io
|
||||
* https://github.com/miekg/unbound
|
||||
* https://github.com/miekg/exdns
|
||||
* https://dnslookup.org
|
||||
* https://github.com/looterz/grimd
|
||||
* https://github.com/phamhongviet/serf-dns
|
||||
* https://github.com/mehrdadrad/mylg
|
||||
* https://github.com/bamarni/dockness
|
||||
* https://github.com/fffaraz/microdns
|
||||
* http://kelda.io
|
||||
* https://github.com/ipdcode/hades (JD.COM)
|
||||
* https://github.com/StackExchange/dnscontrol/
|
||||
* https://www.dnsperf.com/
|
||||
* https://dnssectest.net/
|
||||
* https://dns.apebits.com
|
||||
* https://github.com/oif/apex
|
||||
* https://github.com/jedisct1/dnscrypt-proxy
|
||||
* https://github.com/jedisct1/rpdns
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
||||
# Features
|
||||
|
||||
* UDP/TCP queries, IPv4 and IPv6;
|
||||
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported);
|
||||
* Fast:
|
||||
* Reply speed around ~ 80K qps (faster hardware results in more qps);
|
||||
* Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds;
|
||||
* Server side programming (mimicking the net/http package);
|
||||
* Client side programming;
|
||||
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519;
|
||||
* EDNS0, NSID, Cookies;
|
||||
* AXFR/IXFR;
|
||||
* TSIG, SIG(0);
|
||||
* DNS over TLS: optional encrypted connection between client and server;
|
||||
* DNS name compression;
|
||||
* Depends only on the standard library.
|
||||
|
||||
Have fun!
|
||||
|
||||
Miek Gieben - 2010-2012 - <miek@miek.nl>
|
||||
|
||||
# Building
|
||||
|
||||
Building is done with the `go` tool. If you have set up your GOPATH correctly, the following should
|
||||
work:
|
||||
|
||||
go get github.com/miekg/dns
|
||||
go build github.com/miekg/dns
|
||||
|
||||
## Examples
|
||||
|
||||
A short "how to use the API" is at the beginning of doc.go (this also will show
|
||||
when you call `godoc github.com/miekg/dns`).
|
||||
|
||||
Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||
|
||||
## Supported RFCs
|
||||
|
||||
*all of them*
|
||||
|
||||
* 103{4,5} - DNS standard
|
||||
* 1348 - NSAP record (removed the record)
|
||||
* 1982 - Serial Arithmetic
|
||||
* 1876 - LOC record
|
||||
* 1995 - IXFR
|
||||
* 1996 - DNS notify
|
||||
* 2136 - DNS Update (dynamic updates)
|
||||
* 2181 - RRset definition - there is no RRset type though, just []RR
|
||||
* 2537 - RSAMD5 DNS keys
|
||||
* 2065 - DNSSEC (updated in later RFCs)
|
||||
* 2671 - EDNS record
|
||||
* 2782 - SRV record
|
||||
* 2845 - TSIG record
|
||||
* 2915 - NAPTR record
|
||||
* 2929 - DNS IANA Considerations
|
||||
* 3110 - RSASHA1 DNS keys
|
||||
* 3225 - DO bit (DNSSEC OK)
|
||||
* 340{1,2,3} - NAPTR record
|
||||
* 3445 - Limiting the scope of (DNS)KEY
|
||||
* 3597 - Unknown RRs
|
||||
* 403{3,4,5} - DNSSEC + validation functions
|
||||
* 4255 - SSHFP record
|
||||
* 4343 - Case insensitivity
|
||||
* 4408 - SPF record
|
||||
* 4509 - SHA256 Hash in DS
|
||||
* 4592 - Wildcards in the DNS
|
||||
* 4635 - HMAC SHA TSIG
|
||||
* 4701 - DHCID
|
||||
* 4892 - id.server
|
||||
* 5001 - NSID
|
||||
* 5155 - NSEC3 record
|
||||
* 5205 - HIP record
|
||||
* 5702 - SHA2 in the DNS
|
||||
* 5936 - AXFR
|
||||
* 5966 - TCP implementation recommendations
|
||||
* 6605 - ECDSA
|
||||
* 6725 - IANA Registry Update
|
||||
* 6742 - ILNP DNS
|
||||
* 6840 - Clarifications and Implementation Notes for DNS Security
|
||||
* 6844 - CAA record
|
||||
* 6891 - EDNS0 update
|
||||
* 6895 - DNS IANA considerations
|
||||
* 6975 - Algorithm Understanding in DNSSEC
|
||||
* 7043 - EUI48/EUI64 records
|
||||
* 7314 - DNS (EDNS) EXPIRE Option
|
||||
* 7477 - CSYNC RR
|
||||
* 7828 - edns-tcp-keepalive EDNS0 Option
|
||||
* 7553 - URI record
|
||||
* 7858 - DNS over TLS: Initiation and Performance Considerations
|
||||
* 7871 - EDNS0 Client Subnet
|
||||
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
|
||||
* 8080 - EdDSA for DNSSEC
|
||||
|
||||
## Loosely based upon
|
||||
|
||||
* `ldns`
|
||||
* `NSD`
|
||||
* `Net::DNS`
|
||||
* `GRONG`
|
||||
506
vendor/github.com/miekg/dns/client.go
generated
vendored
506
vendor/github.com/miekg/dns/client.go
generated
vendored
@ -1,506 +0,0 @@
|
||||
package dns
|
||||
|
||||
// A client implementation.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const dnsTimeout time.Duration = 2 * time.Second
|
||||
const tcpIdleTimeout time.Duration = 8 * time.Second
|
||||
|
||||
// A Conn represents a connection to a DNS server.
|
||||
type Conn struct {
|
||||
net.Conn // a net.Conn holding the connection
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
rtt time.Duration
|
||||
t time.Time
|
||||
tsigRequestMAC string
|
||||
}
|
||||
|
||||
// A Client defines parameters for a DNS client.
|
||||
type Client struct {
|
||||
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TLSConfig *tls.Config // TLS connection configuration
|
||||
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
|
||||
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
|
||||
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
|
||||
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||
group singleflight
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous UDP query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
|
||||
// will it fall back to TCP in case of truncation.
|
||||
// See client.Exchange for more information on setting larger buffer sizes.
|
||||
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.Exchange(m, a)
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (c *Client) dialTimeout() time.Duration {
|
||||
if c.Timeout != 0 {
|
||||
return c.Timeout
|
||||
}
|
||||
if c.DialTimeout != 0 {
|
||||
return c.DialTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) readTimeout() time.Duration {
|
||||
if c.ReadTimeout != 0 {
|
||||
return c.ReadTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) writeTimeout() time.Duration {
|
||||
if c.WriteTimeout != 0 {
|
||||
return c.WriteTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func (c *Client) Dial(address string) (conn *Conn, err error) {
|
||||
// create a new dialer with the appropriate timeout
|
||||
var d net.Dialer
|
||||
if c.Dialer == nil {
|
||||
d = net.Dialer{}
|
||||
} else {
|
||||
d = net.Dialer(*c.Dialer)
|
||||
}
|
||||
d.Timeout = c.getTimeoutForRequest(c.writeTimeout())
|
||||
|
||||
network := "udp"
|
||||
useTLS := false
|
||||
|
||||
switch c.Net {
|
||||
case "tcp-tls":
|
||||
network = "tcp"
|
||||
useTLS = true
|
||||
case "tcp4-tls":
|
||||
network = "tcp4"
|
||||
useTLS = true
|
||||
case "tcp6-tls":
|
||||
network = "tcp6"
|
||||
useTLS = true
|
||||
default:
|
||||
if c.Net != "" {
|
||||
network = c.Net
|
||||
}
|
||||
}
|
||||
|
||||
conn = new(Conn)
|
||||
if useTLS {
|
||||
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
|
||||
} else {
|
||||
conn.Conn, err = d.Dial(network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||
//
|
||||
// c := new(dns.Client)
|
||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||
//
|
||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||
// case of truncation.
|
||||
// It is up to the caller to create a message that allows for larger responses to be
|
||||
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||
// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit
|
||||
// of 512 bytes
|
||||
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
|
||||
// attribute appropriately
|
||||
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
return c.exchange(m, address)
|
||||
}
|
||||
|
||||
t := "nop"
|
||||
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
|
||||
t = t1
|
||||
}
|
||||
cl := "nop"
|
||||
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
|
||||
cl = cl1
|
||||
}
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
return c.exchange(m, address)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
return r, rtt, err
|
||||
}
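As the doc comment on Exchange notes, responses larger than the historic 512-byte UDP limit require the caller to advertise a bigger buffer via an EDNS0 OPT RR. A hedged sketch of that pattern is shown below; SetEdns0 is the helper the doc comment points at, and the query type and target address are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Advertise a 4096-byte UDP buffer (and set the DO bit) via EDNS0,
	// so answers larger than 512 bytes are less likely to be truncated.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeDNSKEY)
	m.SetEdns0(4096, true)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, "192.0.2.1:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("truncated:", in.Truncated, "answers:", len(in.Answer))
}
```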
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var co *Conn
|
||||
|
||||
co, err = c.Dial(a)
|
||||
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer co.Close()
|
||||
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
co.UDPSize = opt.UDPSize()
|
||||
}
|
||||
// Otherwise use the client's configured UDP size.
|
||||
if opt == nil && c.UDPSize >= MinMsgSize {
|
||||
co.UDPSize = c.UDPSize
|
||||
}
|
||||
|
||||
co.TsigSecret = c.TsigSecret
|
||||
// write with the appropriate write timeout
|
||||
co.SetWriteDeadline(time.Now().Add(c.getTimeoutForRequest(c.writeTimeout())))
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, co.rtt, err
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the connection co.
|
||||
// If the received message contains a TSIG record the transaction signature
|
||||
// is verified. This method always tries to return the message, however if an
|
||||
// error is returned there are no guarantees that the returned message is a
|
||||
// valid representation of the packet read.
|
||||
func (co *Conn) ReadMsg() (*Msg, error) {
|
||||
p, err := co.ReadMsgHeader(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := new(Msg)
|
||||
if err := m.Unpack(p); err != nil {
|
||||
// If an error was returned, we still want to allow the user to use
|
||||
// the message, but naively they can just check err if they don't want
|
||||
// to use an erroneous message
|
||||
return m, err
|
||||
}
|
||||
if t := m.IsTsig(); t != nil {
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return m, ErrSecret
|
||||
}
|
||||
// Need to work on the original message p, as that was used to calculate the tsig.
|
||||
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
}
|
||||
return m, err
|
||||
}
|
||||
|
||||
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
|
||||
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
|
||||
// Note that error handling on the message body is not possible as only the header is parsed.
|
||||
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
|
||||
var (
|
||||
p []byte
|
||||
n int
|
||||
err error
|
||||
)
|
||||
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
// First two bytes specify the length of the entire message.
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p = make([]byte, l)
|
||||
n, err = tcpRead(r, p)
|
||||
co.rtt = time.Since(co.t)
|
||||
default:
|
||||
if co.UDPSize > MinMsgSize {
|
||||
p = make([]byte, co.UDPSize)
|
||||
} else {
|
||||
p = make([]byte, MinMsgSize)
|
||||
}
|
||||
n, err = co.Read(p)
|
||||
co.rtt = time.Since(co.t)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if n < headerSize {
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
|
||||
p = p[:n]
|
||||
if hdr != nil {
|
||||
dh, _, err := unpackMsgHdr(p, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
*hdr = dh
|
||||
}
|
||||
return p, err
|
||||
}
|
||||
|
||||
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
|
||||
func tcpMsgLen(t io.Reader) (int, error) {
|
||||
p := []byte{0, 0}
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// As seen with my local router/switch, returns 1 byte on the above read,
|
||||
// resulting in a ShortRead. Just write it out (instead of loop) and read the
|
||||
// other byte.
|
||||
if n == 1 {
|
||||
n1, err := t.Read(p[1:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n += n1
|
||||
}
|
||||
|
||||
if n != 2 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
l := binary.BigEndian.Uint16(p)
|
||||
if l == 0 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
return int(l), nil
|
||||
}
|
||||
|
||||
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
|
||||
func tcpRead(t io.Reader, p []byte) (int, error) {
|
||||
n, err := t.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
for n < len(p) {
|
||||
j, err := t.Read(p[n:])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
n += j
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Read implements the net.Conn read method.
|
||||
func (co *Conn) Read(p []byte) (n int, err error) {
|
||||
if co.Conn == nil {
|
||||
return 0, ErrConnEmpty
|
||||
}
|
||||
if len(p) < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
r := t.(io.Reader)
|
||||
|
||||
l, err := tcpMsgLen(r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if l > len(p) {
|
||||
return int(l), io.ErrShortBuffer
|
||||
}
|
||||
return tcpRead(r, p[:l])
|
||||
}
|
||||
// UDP connection
|
||||
n, err = co.Conn.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteMsg sends a message through the connection co.
|
||||
// If the message m contains a TSIG record the transaction
|
||||
// signature is calculated.
|
||||
func (co *Conn) WriteMsg(m *Msg) (err error) {
|
||||
var out []byte
|
||||
if t := m.IsTsig(); t != nil {
|
||||
mac := ""
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
return ErrSecret
|
||||
}
|
||||
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
|
||||
// Set for the next read, although only used in zone transfers
|
||||
co.tsigRequestMAC = mac
|
||||
} else {
|
||||
out, err = m.Pack()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
co.t = time.Now()
|
||||
if _, err = co.Write(out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write implements the net.Conn Write method.
|
||||
func (co *Conn) Write(p []byte) (n int, err error) {
|
||||
switch t := co.Conn.(type) {
|
||||
case *net.TCPConn, *tls.Conn:
|
||||
w := t.(io.Writer)
|
||||
|
||||
lp := len(p)
|
||||
if lp < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
if lp > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
l := make([]byte, 2, lp+2)
|
||||
binary.BigEndian.PutUint16(l, uint16(lp))
|
||||
p = append(l, p...)
|
||||
n, err := io.Copy(w, bytes.NewReader(p))
|
||||
return int(n), err
|
||||
}
|
||||
n, err = co.Conn.Write(p)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Return the appropriate timeout for a specific request
|
||||
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
|
||||
var requestTimeout time.Duration
|
||||
if c.Timeout != 0 {
|
||||
requestTimeout = c.Timeout
|
||||
} else {
|
||||
requestTimeout = timeout
|
||||
}
|
||||
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
|
||||
// far
|
||||
if c.Dialer != nil && c.Dialer.Timeout != 0 {
|
||||
if c.Dialer.Timeout < requestTimeout {
|
||||
requestTimeout = c.Dialer.Timeout
|
||||
}
|
||||
}
|
||||
return requestTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func Dial(network, address string) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = net.Dial(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// ExchangeContext performs a synchronous UDP query, like Exchange. It
|
||||
// additionally obeys deadlines from the passed Context.
|
||||
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.ExchangeContext(ctx, m, a)
|
||||
// ignoring rtt to leave the original ExchangeContext API unchanged, but
|
||||
// this function will go away
|
||||
return r, err
|
||||
}
|
||||
|
||||
// ExchangeConn performs a synchronous query. It sends the message m via the connection
|
||||
// c and waits for a reply. The connection c is not closed by ExchangeConn.
|
||||
// This function is going away, but can easily be mimicked:
|
||||
//
|
||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
||||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: ExchangeConn: this function is deprecated")
|
||||
co := new(Conn)
|
||||
co.Conn = c
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DialTimeout acts like Dial but takes a timeout.
|
||||
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialWithTLS connects to the address on the named network with TLS.
|
||||
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
|
||||
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// ExchangeContext acts like Exchange, but honors the deadline on the provided
|
||||
// context, if present. If there is both a context deadline and a configured
|
||||
// timeout on the client, the earliest of the two takes effect.
|
||||
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var timeout time.Duration
|
||||
if deadline, ok := ctx.Deadline(); !ok {
|
||||
timeout = 0
|
||||
} else {
|
||||
timeout = deadline.Sub(time.Now())
|
||||
}
|
||||
// not passing the context to the underlying calls, as the API does not support
|
||||
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
|
||||
c.Dialer = &net.Dialer{Timeout: timeout}
|
||||
return c.Exchange(m, a)
|
||||
}
|
||||
139
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
139
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
@ -1,139 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ClientConfig wraps the contents of the /etc/resolv.conf file.
|
||||
type ClientConfig struct {
|
||||
Servers []string // servers to use
|
||||
Search []string // suffixes to append to local name
|
||||
Port string // what port to use
|
||||
Ndots int // number of dots in name to trigger absolute lookup
|
||||
Timeout int // seconds before giving up on packet
|
||||
Attempts int // lost packets before giving up on server, not used in the package dns
|
||||
}
|
||||
|
||||
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
|
||||
// a *ClientConfig.
|
||||
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
|
||||
file, err := os.Open(resolvconf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
return ClientConfigFromReader(file)
|
||||
}
|
||||
|
||||
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
|
||||
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
|
||||
c := new(ClientConfig)
|
||||
scanner := bufio.NewScanner(resolvconf)
|
||||
c.Servers = make([]string, 0)
|
||||
c.Search = make([]string, 0)
|
||||
c.Port = "53"
|
||||
c.Ndots = 1
|
||||
c.Timeout = 5
|
||||
c.Attempts = 2
|
||||
|
||||
for scanner.Scan() {
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line := scanner.Text()
|
||||
f := strings.Fields(line)
|
||||
if len(f) < 1 {
|
||||
continue
|
||||
}
|
||||
switch f[0] {
|
||||
case "nameserver": // add one name server
|
||||
if len(f) > 1 {
|
||||
// One more check: make sure server name is
|
||||
// just an IP address. Otherwise we need DNS
|
||||
// to look it up.
|
||||
name := f[1]
|
||||
c.Servers = append(c.Servers, name)
|
||||
}
|
||||
|
||||
case "domain": // set search path to just this domain
|
||||
if len(f) > 1 {
|
||||
c.Search = make([]string, 1)
|
||||
c.Search[0] = f[1]
|
||||
} else {
|
||||
c.Search = make([]string, 0)
|
||||
}
|
||||
|
||||
case "search": // set search path to given servers
|
||||
c.Search = make([]string, len(f)-1)
|
||||
for i := 0; i < len(c.Search); i++ {
|
||||
c.Search[i] = f[i+1]
|
||||
}
|
||||
|
||||
case "options": // magic options
|
||||
for i := 1; i < len(f); i++ {
|
||||
s := f[i]
|
||||
switch {
|
||||
case len(s) >= 6 && s[:6] == "ndots:":
|
||||
n, _ := strconv.Atoi(s[6:])
|
||||
if n < 0 {
|
||||
n = 0
|
||||
} else if n > 15 {
|
||||
n = 15
|
||||
}
|
||||
c.Ndots = n
|
||||
case len(s) >= 8 && s[:8] == "timeout:":
|
||||
n, _ := strconv.Atoi(s[8:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Timeout = n
|
||||
case len(s) >= 9 && s[:9] == "attempts:":
|
||||
n, _ := strconv.Atoi(s[9:])
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
c.Attempts = n
|
||||
case s == "rotate":
|
||||
/* not imp */
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NameList returns all of the names that should be queried based on the
|
||||
// config. It is based off of go's net/dns name building, but it does not
|
||||
// check the length of the resulting names.
|
||||
func (c *ClientConfig) NameList(name string) []string {
|
||||
// if this domain is already fully qualified, no append needed.
|
||||
if IsFqdn(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Check to see if the name has more labels than Ndots. Do this before making
|
||||
// the domain fully qualified.
|
||||
hasNdots := CountLabel(name) > c.Ndots
|
||||
// Make the domain fully qualified.
|
||||
name = Fqdn(name)
|
||||
|
||||
// Make a list of names based off search.
|
||||
names := []string{}
|
||||
|
||||
// If name has enough dots, try that first.
|
||||
if hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
for _, s := range c.Search {
|
||||
names = append(names, Fqdn(name+s))
|
||||
}
|
||||
// If we didn't have enough dots, try after suffixes.
|
||||
if !hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
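Putting ClientConfigFromFile and NameList together, here is a hedged sketch of expanding a short name against the local resolver configuration; the resolv.conf path is just the conventional location, not something the API requires.

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	// NameList applies the ndots rule and the search suffixes, returning
	// the fully qualified names to try, in query order.
	for _, name := range conf.NameList("www") {
		fmt.Println(name)
	}
}
```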
|
||||
188
vendor/github.com/miekg/dns/compress_generate.go
generated
vendored
188
vendor/github.com/miekg/dns/compress_generate.go
generated
vendored
@ -1,188 +0,0 @@
|
||||
//+build ignore
|
||||
|
||||
// compression_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will look to see if there are (compressible) names, if so it will add that
|
||||
// type to compressionLenHelperType and comressionLenSearchType which "fake" the
|
||||
// compression so that Len() is fast.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run compress_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
// getTypeStruct takes a type and the package scope, and returns the
// (innermost) struct if the type is considered an RR type (currently defined as
// those structs beginning with an RR_Header; this could be redefined as implementing
// the RR interface). The bool return value indicates whether embedded structs were
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
var domainTypes []string // Types that have a domain name in them (either compressible or not).
|
||||
	var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainTypes).
|
||||
Names:
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
if st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
if st.Tag(i) == `dns:"domain-name"` {
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
if st.Tag(i) == `dns:"cdomain-name"` {
|
||||
cdomainTypes = append(cdomainTypes, o.Name())
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
cdomainTypes = append(cdomainTypes, o.Name())
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
|
||||
|
||||
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for _, name := range domainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"domain-name"`:
|
||||
fallthrough
|
||||
case `dns:"cdomain-name"`:
|
||||
// For HIP we need to slice over the elements in this slice.
|
||||
fmt.Fprintf(b, `for i := range x.%s {
|
||||
compressionLenHelper(c, x.%s[i])
|
||||
}
|
||||
`, st.Field(i).Name(), st.Field(i).Name())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
out(st.Field(i).Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(b, "}\n}\n\n")
|
||||
|
||||
// compressionLenSearchType - search cdomain-tags types for compressible names.
|
||||
|
||||
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for _, name := range cdomainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
j := 1
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string, j int) {
|
||||
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
|
||||
}
|
||||
|
||||
// There are no slice types with names that can be compressed.
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
out(st.Field(i).Name(), j)
|
||||
j++
|
||||
}
|
||||
}
|
||||
k := "k1"
|
||||
ok := "ok1"
|
||||
for i := 2; i < j; i++ {
|
||||
k += fmt.Sprintf(" + k%d", i)
|
||||
ok += fmt.Sprintf(" && ok%d", i)
|
||||
}
|
||||
fmt.Fprintf(b, "return %s, %s\n", k, ok)
|
||||
}
|
||||
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Create("zcompress.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
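To make the generator's output concrete, here is a hedged sketch of the kind of code it writes into zcompress.go, reconstructed from the Fprintf format strings above; only two example types are shown and the real generated file differs in breadth.

// Code generated by "go run compress_generate.go"; DO NOT EDIT.

package dns

func compressionLenHelperType(c map[string]int, r RR) {
	switch x := r.(type) {
	case *CNAME:
		compressionLenHelper(c, x.Target)
	case *MX:
		compressionLenHelper(c, x.Mx)
	}
}

func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
	switch x := r.(type) {
	case *MX:
		k1, ok1 := compressionLenSearch(c, x.Mx)
		return k1, ok1
	}
	return 0, false
}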
43 vendor/github.com/miekg/dns/dane.go generated vendored
@@ -1,43 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
|
||||
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
|
||||
switch matchingType {
|
||||
case 0:
|
||||
switch selector {
|
||||
case 0:
|
||||
return hex.EncodeToString(cert.Raw), nil
|
||||
case 1:
|
||||
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
|
||||
}
|
||||
case 1:
|
||||
h := sha256.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
case 2:
|
||||
h := sha512.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("dns: bad MatchingType or Selector")
|
||||
}
|
||||
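A minimal sketch of feeding a certificate into CertificateToDANE to build a TLSA digest. The TLS dial, host name, and the chosen selector/matching type (1 = SubjectPublicKeyInfo, 1 = SHA-256) are illustrative.

package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Grab the server certificate from a live TLS connection (illustrative).
	conn, err := tls.Dial("tcp", "example.com:443", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]

	// Selector 1 (SubjectPublicKeyInfo), matching type 1 (SHA-256).
	digest, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("TLSA 3 1 1", digest)
}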
288 vendor/github.com/miekg/dns/defaults.go generated vendored
@@ -1,288 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const hexDigit = "0123456789abcdef"
|
||||
|
||||
// Everything is assumed in ClassINET.
|
||||
|
||||
// SetReply creates a reply message from a request message.
|
||||
func (dns *Msg) SetReply(request *Msg) *Msg {
|
||||
dns.Id = request.Id
|
||||
dns.Response = true
|
||||
dns.Opcode = request.Opcode
|
||||
if dns.Opcode == OpcodeQuery {
|
||||
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
|
||||
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
|
||||
}
|
||||
dns.Rcode = RcodeSuccess
|
||||
if len(request.Question) > 0 {
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = request.Question[0]
|
||||
}
|
||||
return dns
|
||||
}
|
||||
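As a hedged usage sketch, SetReply is what a server handler typically calls before answering; the zone name, the static A record and the listen address below are made up.

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r) // copies Id, Opcode, the RD/CD bits and the question

		// Hypothetical static answer for the demo zone.
		if rr, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1"); err == nil {
			m.Answer = append(m.Answer, rr)
		}
		w.WriteMsg(m)
	})
	log.Fatal(dns.ListenAndServe(":5353", "udp", nil))
}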
|
||||
// SetQuestion creates a question message, it sets the Question
|
||||
// section, generates an Id and sets the RecursionDesired (RD)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.RecursionDesired = true
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, t, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetNotify creates a notify message, it sets the Question
|
||||
// section, generates an Id and sets the Authoritative (AA)
|
||||
// bit to true.
|
||||
func (dns *Msg) SetNotify(z string) *Msg {
|
||||
dns.Opcode = OpcodeNotify
|
||||
dns.Authoritative = true
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcode creates an error message suitable for the request.
|
||||
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
|
||||
dns.SetReply(request)
|
||||
dns.Rcode = rcode
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetRcodeFormatError creates a message with FormError set.
|
||||
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
|
||||
dns.Rcode = RcodeFormatError
|
||||
dns.Opcode = OpcodeQuery
|
||||
dns.Response = true
|
||||
dns.Authoritative = false
|
||||
dns.Id = request.Id
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetUpdate makes the message a dynamic update message. It
|
||||
// sets the ZONE section to: z, TypeSOA, ClassINET.
|
||||
func (dns *Msg) SetUpdate(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Response = false
|
||||
dns.Opcode = OpcodeUpdate
|
||||
dns.Compress = false // BIND9 cannot handle compression
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeSOA, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetIxfr creates message for requesting an IXFR.
|
||||
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Ns = make([]RR, 1)
|
||||
s := new(SOA)
|
||||
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
|
||||
s.Serial = serial
|
||||
s.Ns = ns
|
||||
s.Mbox = mbox
|
||||
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
|
||||
dns.Ns[0] = s
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetAxfr creates message for requesting an AXFR.
|
||||
func (dns *Msg) SetAxfr(z string) *Msg {
|
||||
dns.Id = Id()
|
||||
dns.Question = make([]Question, 1)
|
||||
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
|
||||
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
|
||||
t := new(TSIG)
|
||||
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
|
||||
t.Algorithm = algo
|
||||
t.Fudge = fudge
|
||||
t.TimeSigned = uint64(timesigned)
|
||||
t.OrigId = dns.Id
|
||||
dns.Extra = append(dns.Extra, t)
|
||||
return dns
|
||||
}
|
||||
|
||||
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
|
||||
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
|
||||
e := new(OPT)
|
||||
e.Hdr.Name = "."
|
||||
e.Hdr.Rrtype = TypeOPT
|
||||
e.SetUDPSize(udpsize)
|
||||
if do {
|
||||
e.SetDo()
|
||||
}
|
||||
dns.Extra = append(dns.Extra, e)
|
||||
return dns
|
||||
}
|
||||
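Putting SetQuestion and SetEdns0 together, a hedged sketch of a DNSSEC-aware client query; the resolver address is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)
	m.SetEdns0(4096, true) // 4096-byte UDP buffer, DO bit set

	// Placeholder resolver address.
	in, err := dns.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(in.Answer)
}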
|
||||
// IsTsig checks if the message has a TSIG record as the last record
|
||||
// in the additional section. It returns the TSIG record found or nil.
|
||||
func (dns *Msg) IsTsig() *TSIG {
|
||||
if len(dns.Extra) > 0 {
|
||||
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
|
||||
return dns.Extra[len(dns.Extra)-1].(*TSIG)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
// record in the additional section will do. It returns the OPT record
// found, or nil.
|
||||
func (dns *Msg) IsEdns0() *OPT {
|
||||
// EDNS0 is at the end of the additional section, start there.
|
||||
// We might want to change this to *only* look at the last two
|
||||
	// records. So we see TSIG and/or OPT - this is a slightly bigger
	// change though.
|
||||
for i := len(dns.Extra) - 1; i >= 0; i-- {
|
||||
if dns.Extra[i].Header().Rrtype == TypeOPT {
|
||||
return dns.Extra[i].(*OPT)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDomainName checks if s is a valid domain name; it returns the number of
// labels and true when the domain name is valid. Note that a non-fully-qualified
// domain name is considered valid; in this case the last label is counted in
// the number of labels. When false is returned, the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name, as DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters, but there is no length check for the entire
// string s. I.e. a domain name longer than 255 characters is considered valid.
|
||||
func IsDomainName(s string) (labels int, ok bool) {
|
||||
_, labels, err := packDomainName(s, nil, 0, nil, false)
|
||||
return labels, err == nil
|
||||
}
|
||||
|
||||
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
|
||||
// are the same domain true is returned as well.
|
||||
func IsSubDomain(parent, child string) bool {
|
||||
// Entire child is contained in parent
|
||||
return CompareDomainName(parent, child) == CountLabel(parent)
|
||||
}
|
||||
|
||||
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
|
||||
// The checking is performed on the binary payload.
|
||||
func IsMsg(buf []byte) error {
|
||||
// Header
|
||||
if len(buf) < 12 {
|
||||
return errors.New("dns: bad message header")
|
||||
}
|
||||
// Header: Opcode
|
||||
// TODO(miek): more checks here, e.g. check all header bits.
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsFqdn checks if a domain name is fully qualified.
|
||||
func IsFqdn(s string) bool {
|
||||
l := len(s)
|
||||
if l == 0 {
|
||||
return false
|
||||
}
|
||||
return s[l-1] == '.'
|
||||
}
|
||||
|
||||
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
|
||||
// This means the RRs need to have the same type, name, and class. Returns true
|
||||
// if the RR set is valid, otherwise false.
|
||||
func IsRRset(rrset []RR) bool {
|
||||
if len(rrset) == 0 {
|
||||
return false
|
||||
}
|
||||
if len(rrset) == 1 {
|
||||
return true
|
||||
}
|
||||
rrHeader := rrset[0].Header()
|
||||
rrType := rrHeader.Rrtype
|
||||
rrClass := rrHeader.Class
|
||||
rrName := rrHeader.Name
|
||||
|
||||
for _, rr := range rrset[1:] {
|
||||
curRRHeader := rr.Header()
|
||||
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
|
||||
			// Mismatch between the records, so this is not a valid rrset for
			// signing/verifying.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Fqdn returns the fully qualified domain name from s.
|
||||
// If s is already fully qualified, it behaves as the identity function.
|
||||
func Fqdn(s string) string {
|
||||
if IsFqdn(s) {
|
||||
return s
|
||||
}
|
||||
return s + "."
|
||||
}
|
||||
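A few illustrative calls tying IsFqdn, Fqdn and IsSubDomain together; the names are arbitrary.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.IsFqdn("example.org")) // false: no trailing dot
	fmt.Println(dns.Fqdn("example.org"))   // "example.org."
	fmt.Println(dns.Fqdn("example.org."))  // unchanged: already qualified

	// A zone is considered a subdomain of itself.
	fmt.Println(dns.IsSubDomain("example.org.", "www.example.org.")) // true
	fmt.Println(dns.IsSubDomain("example.org.", "example.org."))     // true
	fmt.Println(dns.IsSubDomain("example.org.", "example.net."))     // false
}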
|
||||
// Copied from the official Go code.
|
||||
|
||||
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
|
||||
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
|
||||
// to parse the IP address.
|
||||
func ReverseAddr(addr string) (arpa string, err error) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return "", &Error{err: "unrecognized address: " + addr}
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
|
||||
strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
|
||||
}
|
||||
// Must be IPv6
|
||||
buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
|
||||
// Add it, in reverse, to the buffer
|
||||
for i := len(ip) - 1; i >= 0; i-- {
|
||||
v := ip[i]
|
||||
buf = append(buf, hexDigit[v&0xF])
|
||||
buf = append(buf, '.')
|
||||
buf = append(buf, hexDigit[v>>4])
|
||||
buf = append(buf, '.')
|
||||
}
|
||||
// Append "ip6.arpa." and return (buf already has the final .)
|
||||
buf = append(buf, "ip6.arpa."...)
|
||||
return string(buf), nil
|
||||
}
|
||||
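A hedged sketch of ReverseAddr for both address families; the addresses are documentation examples.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	v4, err := dns.ReverseAddr("192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v4) // 1.2.0.192.in-addr.arpa.

	v6, err := dns.ReverseAddr("2001:db8::1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v6) // nibble-reversed name ending in ip6.arpa.
}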
|
||||
// String returns the string representation for the type t.
|
||||
func (t Type) String() string {
|
||||
if t1, ok := TypeToString[uint16(t)]; ok {
|
||||
return t1
|
||||
}
|
||||
return "TYPE" + strconv.Itoa(int(t))
|
||||
}
|
||||
|
||||
// String returns the string representation for the class c.
|
||||
func (c Class) String() string {
|
||||
if s, ok := ClassToString[uint16(c)]; ok {
|
||||
		// Only emit mnemonics when they are unambiguous; specifically, ANY is in both.
|
||||
if _, ok := StringToType[s]; !ok {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return "CLASS" + strconv.Itoa(int(c))
|
||||
}
|
||||
|
||||
// String returns the string representation for the name n.
|
||||
func (n Name) String() string {
|
||||
return sprintName(string(n))
|
||||
}
|
||||
107 vendor/github.com/miekg/dns/dns.go generated vendored
@@ -1,107 +0,0 @@
|
||||
package dns
|
||||
|
||||
import "strconv"
|
||||
|
||||
const (
|
||||
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
|
||||
defaultTtl = 3600 // Default internal TTL.
|
||||
|
||||
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
|
||||
DefaultMsgSize = 4096
|
||||
// MinMsgSize is the minimal size of a DNS packet.
|
||||
MinMsgSize = 512
|
||||
// MaxMsgSize is the largest possible DNS packet.
|
||||
MaxMsgSize = 65535
|
||||
)
|
||||
|
||||
// Error represents a DNS error.
|
||||
type Error struct{ err string }
|
||||
|
||||
func (e *Error) Error() string {
|
||||
if e == nil {
|
||||
return "dns: <nil>"
|
||||
}
|
||||
return "dns: " + e.err
|
||||
}
|
||||
|
||||
// An RR represents a resource record.
|
||||
type RR interface {
|
||||
	// Header returns the header of a resource record. The header contains
	// everything up to the rdata.
|
||||
Header() *RR_Header
|
||||
// String returns the text representation of the resource record.
|
||||
String() string
|
||||
|
||||
// copy returns a copy of the RR
|
||||
copy() RR
|
||||
// len returns the length (in octets) of the uncompressed RR in wire format.
|
||||
len() int
|
||||
// pack packs an RR into wire format.
|
||||
pack([]byte, int, map[string]int, bool) (int, error)
|
||||
}
|
||||
|
||||
// RR_Header is the header all DNS resource records share.
|
||||
type RR_Header struct {
|
||||
Name string `dns:"cdomain-name"`
|
||||
Rrtype uint16
|
||||
Class uint16
|
||||
Ttl uint32
|
||||
Rdlength uint16 // Length of data after header.
|
||||
}
|
||||
|
||||
// Header returns itself. This is here to make RR_Header implement the RR interface.
|
||||
func (h *RR_Header) Header() *RR_Header { return h }
|
||||
|
||||
// Just to implement the RR interface.
|
||||
func (h *RR_Header) copy() RR { return nil }
|
||||
|
||||
func (h *RR_Header) copyHeader() *RR_Header {
|
||||
r := new(RR_Header)
|
||||
r.Name = h.Name
|
||||
r.Rrtype = h.Rrtype
|
||||
r.Class = h.Class
|
||||
r.Ttl = h.Ttl
|
||||
r.Rdlength = h.Rdlength
|
||||
return r
|
||||
}
|
||||
|
||||
func (h *RR_Header) String() string {
|
||||
var s string
|
||||
|
||||
if h.Rrtype == TypeOPT {
|
||||
s = ";"
|
||||
// and maybe other things
|
||||
}
|
||||
|
||||
s += sprintName(h.Name) + "\t"
|
||||
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
|
||||
s += Class(h.Class).String() + "\t"
|
||||
s += Type(h.Rrtype).String() + "\t"
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *RR_Header) len() int {
|
||||
l := len(h.Name) + 1
|
||||
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
|
||||
return l
|
||||
}
|
||||
|
||||
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
|
||||
func (rr *RFC3597) ToRFC3597(r RR) error {
|
||||
buf := make([]byte, r.len()*2)
|
||||
off, err := PackRR(r, buf, 0, nil, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:off]
|
||||
if int(r.Header().Rdlength) > off {
|
||||
return ErrBuf
|
||||
}
|
||||
|
||||
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*rr = *rfc3597.(*RFC3597)
|
||||
return nil
|
||||
}
|
||||
784 vendor/github.com/miekg/dns/dnssec.go generated vendored
@@ -1,784 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
_ "crypto/md5"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
_ "crypto/sha1"
|
||||
_ "crypto/sha256"
|
||||
_ "crypto/sha512"
|
||||
"encoding/asn1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// DNSSEC encryption algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
RSAMD5
|
||||
DH
|
||||
DSA
|
||||
_ // Skip 4, RFC 6725, section 2.1
|
||||
RSASHA1
|
||||
DSANSEC3SHA1
|
||||
RSASHA1NSEC3SHA1
|
||||
RSASHA256
|
||||
_ // Skip 9, RFC 6725, section 2.1
|
||||
RSASHA512
|
||||
_ // Skip 11, RFC 6725, section 2.1
|
||||
ECCGOST
|
||||
ECDSAP256SHA256
|
||||
ECDSAP384SHA384
|
||||
ED25519
|
||||
ED448
|
||||
INDIRECT uint8 = 252
|
||||
PRIVATEDNS uint8 = 253 // Private (experimental keys)
|
||||
PRIVATEOID uint8 = 254
|
||||
)
|
||||
|
||||
// AlgorithmToString is a map of algorithm IDs to algorithm names.
|
||||
var AlgorithmToString = map[uint8]string{
|
||||
RSAMD5: "RSAMD5",
|
||||
DH: "DH",
|
||||
DSA: "DSA",
|
||||
RSASHA1: "RSASHA1",
|
||||
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
|
||||
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
|
||||
RSASHA256: "RSASHA256",
|
||||
RSASHA512: "RSASHA512",
|
||||
ECCGOST: "ECC-GOST",
|
||||
ECDSAP256SHA256: "ECDSAP256SHA256",
|
||||
ECDSAP384SHA384: "ECDSAP384SHA384",
|
||||
ED25519: "ED25519",
|
||||
ED448: "ED448",
|
||||
INDIRECT: "INDIRECT",
|
||||
PRIVATEDNS: "PRIVATEDNS",
|
||||
PRIVATEOID: "PRIVATEOID",
|
||||
}
|
||||
|
||||
// StringToAlgorithm is the reverse of AlgorithmToString.
|
||||
var StringToAlgorithm = reverseInt8(AlgorithmToString)
|
||||
|
||||
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
|
||||
var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
|
||||
RSASHA1: crypto.SHA1,
|
||||
RSASHA1NSEC3SHA1: crypto.SHA1,
|
||||
RSASHA256: crypto.SHA256,
|
||||
ECDSAP256SHA256: crypto.SHA256,
|
||||
ECDSAP384SHA384: crypto.SHA384,
|
||||
RSASHA512: crypto.SHA512,
|
||||
ED25519: crypto.Hash(0),
|
||||
}
|
||||
|
||||
// DNSSEC hashing algorithm codes.
|
||||
const (
|
||||
_ uint8 = iota
|
||||
SHA1 // RFC 4034
|
||||
SHA256 // RFC 4509
|
||||
GOST94 // RFC 5933
|
||||
SHA384 // Experimental
|
||||
SHA512 // Experimental
|
||||
)
|
||||
|
||||
// HashToString is a map of hash IDs to names.
|
||||
var HashToString = map[uint8]string{
|
||||
SHA1: "SHA1",
|
||||
SHA256: "SHA256",
|
||||
GOST94: "GOST94",
|
||||
SHA384: "SHA384",
|
||||
SHA512: "SHA512",
|
||||
}
|
||||
|
||||
// StringToHash is a map of names to hash IDs.
|
||||
var StringToHash = reverseInt8(HashToString)
|
||||
|
||||
// DNSKEY flag values.
|
||||
const (
|
||||
SEP = 1
|
||||
REVOKE = 1 << 7
|
||||
ZONE = 1 << 8
|
||||
)
|
||||
|
||||
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
|
||||
type rrsigWireFmt struct {
|
||||
TypeCovered uint16
|
||||
Algorithm uint8
|
||||
Labels uint8
|
||||
OrigTtl uint32
|
||||
Expiration uint32
|
||||
Inception uint32
|
||||
KeyTag uint16
|
||||
SignerName string `dns:"domain-name"`
|
||||
/* No Signature */
|
||||
}
|
||||
|
||||
// Used for converting DNSKEY's rdata to wirefmt.
|
||||
type dnskeyWireFmt struct {
|
||||
Flags uint16
|
||||
Protocol uint8
|
||||
Algorithm uint8
|
||||
PublicKey string `dns:"base64"`
|
||||
/* Nothing is left out */
|
||||
}
|
||||
|
||||
func divRoundUp(a, b int) int {
|
||||
return (a + b - 1) / b
|
||||
}
|
||||
|
||||
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
|
||||
func (k *DNSKEY) KeyTag() uint16 {
|
||||
if k == nil {
|
||||
return 0
|
||||
}
|
||||
var keytag int
|
||||
switch k.Algorithm {
|
||||
case RSAMD5:
|
||||
		// Look at the bottom two bytes of the modulus, which is the last
		// item in the pubkey. We could do this faster by looking directly
		// at the base64 values. But I'm lazy.
|
||||
modulus, _ := fromBase64([]byte(k.PublicKey))
|
||||
if len(modulus) > 1 {
|
||||
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
|
||||
keytag = int(x)
|
||||
}
|
||||
default:
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
wire = wire[:n]
|
||||
for i, v := range wire {
|
||||
if i&1 != 0 {
|
||||
keytag += int(v) // must be larger than uint32
|
||||
} else {
|
||||
keytag += int(v) << 8
|
||||
}
|
||||
}
|
||||
keytag += (keytag >> 16) & 0xFFFF
|
||||
keytag &= 0xFFFF
|
||||
}
|
||||
return uint16(keytag)
|
||||
}
|
||||
|
||||
// ToDS converts a DNSKEY record to a DS record.
|
||||
func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||
if k == nil {
|
||||
return nil
|
||||
}
|
||||
ds := new(DS)
|
||||
ds.Hdr.Name = k.Hdr.Name
|
||||
ds.Hdr.Class = k.Hdr.Class
|
||||
ds.Hdr.Rrtype = TypeDS
|
||||
ds.Hdr.Ttl = k.Hdr.Ttl
|
||||
ds.Algorithm = k.Algorithm
|
||||
ds.DigestType = h
|
||||
ds.KeyTag = k.KeyTag()
|
||||
|
||||
keywire := new(dnskeyWireFmt)
|
||||
keywire.Flags = k.Flags
|
||||
keywire.Protocol = k.Protocol
|
||||
keywire.Algorithm = k.Algorithm
|
||||
keywire.PublicKey = k.PublicKey
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packKeyWire(keywire, wire)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
wire = wire[:n]
|
||||
|
||||
owner := make([]byte, 255)
|
||||
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil
|
||||
}
|
||||
owner = owner[:off]
|
||||
// RFC4034:
|
||||
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
|
||||
// "|" denotes concatenation
|
||||
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
|
||||
|
||||
var hash crypto.Hash
|
||||
switch h {
|
||||
case SHA1:
|
||||
hash = crypto.SHA1
|
||||
case SHA256:
|
||||
hash = crypto.SHA256
|
||||
case SHA384:
|
||||
hash = crypto.SHA384
|
||||
case SHA512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
s := hash.New()
|
||||
s.Write(owner)
|
||||
s.Write(wire)
|
||||
ds.Digest = hex.EncodeToString(s.Sum(nil))
|
||||
return ds
|
||||
}
|
||||
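A hedged sketch of deriving a DS record from a DNSKEY; the owner name, flags and algorithm are placeholders, and in practice the key material would come from Generate (defined in dnssec_keygen.go) or from a zone file.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Placeholder key parameters.
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	if _, err := key.Generate(256); err != nil {
		log.Fatal(err)
	}

	ds := key.ToDS(dns.SHA256)
	fmt.Println("key tag:", key.KeyTag())
	fmt.Println(ds.String())
}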
|
||||
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
|
||||
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
|
||||
c := &CDNSKEY{DNSKEY: *k}
|
||||
c.Hdr = *k.Hdr.copyHeader()
|
||||
c.Hdr.Rrtype = TypeCDNSKEY
|
||||
return c
|
||||
}
|
||||
|
||||
// ToCDS converts a DS record to a CDS record.
|
||||
func (d *DS) ToCDS() *CDS {
|
||||
c := &CDS{DS: *d}
|
||||
c.Hdr = *d.Hdr.copyHeader()
|
||||
c.Hdr.Rrtype = TypeCDS
|
||||
return c
|
||||
}
|
||||
|
||||
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error when the signing fails.
// There is no check if the RRSet is a proper (RFC 2181) RRSet. If OrigTTL is
// non-zero, it is used as-is; otherwise the TTL of the RRset is used as the
// OrigTTL.
|
||||
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
if k == nil {
|
||||
return ErrPrivKey
|
||||
}
|
||||
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
rr.Hdr.Rrtype = TypeRRSIG
|
||||
rr.Hdr.Name = rrset[0].Header().Name
|
||||
rr.Hdr.Class = rrset[0].Header().Class
|
||||
if rr.OrigTtl == 0 { // If set don't override
|
||||
rr.OrigTtl = rrset[0].Header().Ttl
|
||||
}
|
||||
rr.TypeCovered = rrset[0].Header().Rrtype
|
||||
rr.Labels = uint8(CountLabel(rrset[0].Header().Name))
|
||||
|
||||
if strings.HasPrefix(rrset[0].Header().Name, "*") {
|
||||
rr.Labels-- // wildcard, remove from label count
|
||||
}
|
||||
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
// For signing, lowercase this name
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
|
||||
// Create the desired binary blob
|
||||
signdata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signdata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signdata = signdata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case ED25519:
|
||||
// ed25519 signs the raw message and performs hashing internally.
|
||||
// All other supported signature schemes operate over the pre-hashed
|
||||
// message, and thus ed25519 must be handled separately here.
|
||||
//
|
||||
// The raw message is passed directly into sign and crypto.Hash(0) is
|
||||
// used to signal to the crypto.Signer that the data has not been hashed.
|
||||
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
default:
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
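A hedged end-to-end sketch of Sign: generate a key, build the RRSIG skeleton with the required fields, then sign a one-record RRset. The zone, record, timestamps and key parameters are placeholders.

package main

import (
	"crypto/ecdsa"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		log.Fatal(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(30 * 24 * time.Hour).Unix()),
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
	}
	// Generate returned an *ecdsa.PrivateKey, which satisfies crypto.Signer.
	if err := sig.Sign(priv.(*ecdsa.PrivateKey), rrset); err != nil {
		log.Fatal(err)
	}
	fmt.Println(sig.String())
}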
|
||||
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
|
||||
signature, err := k.Sign(rand.Reader, hashed, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
|
||||
return signature, nil
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
ecdsaSignature := &struct {
|
||||
R, S *big.Int
|
||||
}{}
|
||||
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var intlen int
|
||||
switch alg {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
|
||||
signature := intToBytes(ecdsaSignature.R, intlen)
|
||||
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
|
||||
return signature, nil
|
||||
|
||||
// There is no defined interface for what a DSA backed crypto.Signer returns
|
||||
case DSA, DSANSEC3SHA1:
|
||||
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
|
||||
// signature := []byte{byte(t)}
|
||||
// signature = append(signature, intToBytes(r1, 20)...)
|
||||
// signature = append(signature, intToBytes(s1, 20)...)
|
||||
// rr.Signature = signature
|
||||
|
||||
case ED25519:
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
return nil, ErrAlg
|
||||
}
|
||||
|
||||
// Verify validates an RRSet with the signature and key. This is only the
|
||||
// cryptographic test, the signature validity period must be checked separately.
|
||||
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
|
||||
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
// First the easy checks
|
||||
if !IsRRset(rrset) {
|
||||
return ErrRRset
|
||||
}
|
||||
if rr.KeyTag != k.KeyTag() {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Hdr.Class != k.Hdr.Class {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.Algorithm != k.Algorithm {
|
||||
return ErrKey
|
||||
}
|
||||
if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
|
||||
return ErrKey
|
||||
}
|
||||
if k.Protocol != 3 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// IsRRset checked that we have at least one RR and that the RRs in
|
||||
// the set have consistent type, class, and name. Also check that type and
|
||||
	// class match the RRSIG record.
|
||||
if rrset[0].Header().Class != rr.Hdr.Class {
|
||||
return ErrRRset
|
||||
}
|
||||
if rrset[0].Header().Rrtype != rr.TypeCovered {
|
||||
return ErrRRset
|
||||
}
|
||||
|
||||
// RFC 4035 5.3.2. Reconstructing the Signed Data
|
||||
// Copy the sig, except the rrsig data
|
||||
sigwire := new(rrsigWireFmt)
|
||||
sigwire.TypeCovered = rr.TypeCovered
|
||||
sigwire.Algorithm = rr.Algorithm
|
||||
sigwire.Labels = rr.Labels
|
||||
sigwire.OrigTtl = rr.OrigTtl
|
||||
sigwire.Expiration = rr.Expiration
|
||||
sigwire.Inception = rr.Inception
|
||||
sigwire.KeyTag = rr.KeyTag
|
||||
sigwire.SignerName = strings.ToLower(rr.SignerName)
|
||||
// Create the desired binary blob
|
||||
signeddata := make([]byte, DefaultMsgSize)
|
||||
n, err := packSigWire(sigwire, signeddata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signeddata = signeddata[:n]
|
||||
wire, err := rawSignatureData(rrset, rr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
// TODO(miek)
|
||||
// remove the domain name and assume its ours?
|
||||
}
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
switch rr.Algorithm {
|
||||
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
|
||||
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
|
||||
pubkey := k.publicKeyRSA() // Get the key
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
pubkey := k.publicKeyECDSA()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
// Split sigbuf into the r and s coordinates
|
||||
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
|
||||
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
case ED25519:
|
||||
pubkey := k.publicKeyED25519()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
}
|
||||
|
||||
// ValidityPeriod uses RFC 1982 serial arithmetic to calculate
// whether a signature period is valid. If t is the zero time, the
// current time is used; otherwise t is. Returns true if the signature
// is valid at the given time, otherwise false.
|
||||
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
|
||||
var utc int64
|
||||
if t.IsZero() {
|
||||
utc = time.Now().UTC().Unix()
|
||||
} else {
|
||||
utc = t.UTC().Unix()
|
||||
}
|
||||
modi := (int64(rr.Inception) - utc) / year68
|
||||
mode := (int64(rr.Expiration) - utc) / year68
|
||||
ti := int64(rr.Inception) + (modi * year68)
|
||||
te := int64(rr.Expiration) + (mode * year68)
|
||||
return ti <= utc && utc <= te
|
||||
}
|
||||
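Continuing the signing sketch above, verification is the mirror image: the cryptographic check via Verify and the temporal check via ValidityPeriod are separate steps. key, sig and rrset are assumed to be the values built there; imports "errors", "time" and "github.com/miekg/dns" are assumed.

// checkSig reports whether sig cryptographically covers rrset under key
// and is currently within its validity period.
func checkSig(key *dns.DNSKEY, sig *dns.RRSIG, rrset []dns.RR) error {
	if err := sig.Verify(key, rrset); err != nil {
		return err
	}
	if !sig.ValidityPeriod(time.Time{}) { // zero time means "now"
		return errors.New("rrsig outside its validity period")
	}
	return nil
}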
|
||||
// Return the signature data, decoded from its base64 representation, as a byte slice.
|
||||
func (rr *RRSIG) sigBuf() []byte {
|
||||
sigbuf, err := fromBase64([]byte(rr.Signature))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return sigbuf
|
||||
}
|
||||
|
||||
// publicKeyRSA returns the RSA public key from a DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
	// RFC 2537/3110, section 2. RSA Public KEY Resource Records
	// The exponent length is in the 0th byte, unless it is zero; then it
	// is in bytes 1 and 2 and it is a 16-bit number.
|
||||
explen := uint16(keybuf[0])
|
||||
keyoff := 1
|
||||
if explen == 0 {
|
||||
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
|
||||
keyoff = 3
|
||||
}
|
||||
pubkey := new(rsa.PublicKey)
|
||||
|
||||
pubkey.N = big.NewInt(0)
|
||||
shift := uint64((explen - 1) * 8)
|
||||
expo := uint64(0)
|
||||
for i := int(explen - 1); i > 0; i-- {
|
||||
expo += uint64(keybuf[keyoff+i]) << shift
|
||||
shift -= 8
|
||||
}
|
||||
// Remainder
|
||||
expo += uint64(keybuf[keyoff])
|
||||
if expo > (2<<31)+1 {
|
||||
// Larger expo than supported.
|
||||
// println("dns: F5 primes (or larger) are not supported")
|
||||
return nil
|
||||
}
|
||||
pubkey.E = int(expo)
|
||||
|
||||
pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
|
||||
return pubkey
|
||||
}
|
||||
|
||||
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
|
||||
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
pubkey := new(ecdsa.PublicKey)
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
pubkey.Curve = elliptic.P256()
|
||||
if len(keybuf) != 64 {
|
||||
// wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
pubkey.Curve = elliptic.P384()
|
||||
if len(keybuf) != 96 {
|
||||
// Wrongly encoded key
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pubkey.X = big.NewInt(0)
|
||||
pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
|
||||
pubkey.Y = big.NewInt(0)
|
||||
pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) < 22 {
|
||||
return nil
|
||||
}
|
||||
t, keybuf := int(keybuf[0]), keybuf[1:]
|
||||
size := 64 + t*8
|
||||
q, keybuf := keybuf[:20], keybuf[20:]
|
||||
if len(keybuf) != 3*size {
|
||||
return nil
|
||||
}
|
||||
p, keybuf := keybuf[:size], keybuf[size:]
|
||||
g, y := keybuf[:size], keybuf[size:]
|
||||
pubkey := new(dsa.PublicKey)
|
||||
pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
|
||||
pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
|
||||
pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
|
||||
pubkey.Y = big.NewInt(0).SetBytes(y)
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) != ed25519.PublicKeySize {
|
||||
return nil
|
||||
}
|
||||
return keybuf
|
||||
}
|
||||
|
||||
type wireSlice [][]byte
|
||||
|
||||
func (p wireSlice) Len() int { return len(p) }
|
||||
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p wireSlice) Less(i, j int) bool {
|
||||
_, ioff, _ := UnpackDomainName(p[i], 0)
|
||||
_, joff, _ := UnpackDomainName(p[j], 0)
|
||||
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
|
||||
}
|
||||
|
||||
// Return the raw signature data.
|
||||
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||
wires := make(wireSlice, len(rrset))
|
||||
for i, r := range rrset {
|
||||
r1 := r.copy()
|
||||
r1.Header().Ttl = s.OrigTtl
|
||||
labels := SplitDomainName(r1.Header().Name)
|
||||
// 6.2. Canonical RR Form. (4) - wildcards
|
||||
if len(labels) > int(s.Labels) {
|
||||
// Wildcard
|
||||
r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
|
||||
}
|
||||
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
|
||||
r1.Header().Name = strings.ToLower(r1.Header().Name)
|
||||
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
|
||||
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
|
||||
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
|
||||
// SRV, DNAME, A6
|
||||
//
|
||||
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
|
||||
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
|
||||
// that needs conversion to lowercase, and twice at that. Since HINFO
|
||||
// records contain no domain names, they are not subject to case
|
||||
// conversion.
|
||||
switch x := r1.(type) {
|
||||
case *NS:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
case *MD:
|
||||
x.Md = strings.ToLower(x.Md)
|
||||
case *MF:
|
||||
x.Mf = strings.ToLower(x.Mf)
|
||||
case *CNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *SOA:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
case *MB:
|
||||
x.Mb = strings.ToLower(x.Mb)
|
||||
case *MG:
|
||||
x.Mg = strings.ToLower(x.Mg)
|
||||
case *MR:
|
||||
x.Mr = strings.ToLower(x.Mr)
|
||||
case *PTR:
|
||||
x.Ptr = strings.ToLower(x.Ptr)
|
||||
case *MINFO:
|
||||
x.Rmail = strings.ToLower(x.Rmail)
|
||||
x.Email = strings.ToLower(x.Email)
|
||||
case *MX:
|
||||
x.Mx = strings.ToLower(x.Mx)
|
||||
case *RP:
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
x.Txt = strings.ToLower(x.Txt)
|
||||
case *AFSDB:
|
||||
x.Hostname = strings.ToLower(x.Hostname)
|
||||
case *RT:
|
||||
x.Host = strings.ToLower(x.Host)
|
||||
case *SIG:
|
||||
x.SignerName = strings.ToLower(x.SignerName)
|
||||
case *PX:
|
||||
x.Map822 = strings.ToLower(x.Map822)
|
||||
x.Mapx400 = strings.ToLower(x.Mapx400)
|
||||
case *NAPTR:
|
||||
x.Replacement = strings.ToLower(x.Replacement)
|
||||
case *KX:
|
||||
x.Exchanger = strings.ToLower(x.Exchanger)
|
||||
case *SRV:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *DNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
}
|
||||
// 6.2. Canonical RR Form. (5) - origTTL
|
||||
wire := make([]byte, r1.len()+1) // +1 to be safe(r)
|
||||
off, err1 := PackRR(r1, wire, 0, nil, false)
|
||||
if err1 != nil {
|
||||
return nil, err1
|
||||
}
|
||||
wire = wire[:off]
|
||||
wires[i] = wire
|
||||
}
|
||||
sort.Sort(wires)
|
||||
for i, wire := range wires {
|
||||
if i > 0 && bytes.Equal(wire, wires[i-1]) {
|
||||
continue
|
||||
}
|
||||
buf = append(buf, wire...)
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go RRSIG packing
|
||||
off, err := packUint16(sw.TypeCovered, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(sw.Labels, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.OrigTtl, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Expiration, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(sw.Inception, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(sw.KeyTag, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go DNSKEY packing
|
||||
off, err := packUint16(dw.Flags, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Protocol, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint8(dw.Algorithm, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringBase64(dw.PublicKey, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
178 vendor/github.com/miekg/dns/dnssec_keygen.go generated vendored
@@ -1,178 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// Generate generates a DNSKEY of the given bit size.
// The public part is put inside the DNSKEY record.
// The Algorithm in the key must be set, as this defines
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed key size; in that case
// bits should be set to the size of the algorithm.
|
||||
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
if bits != 1024 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
|
||||
if bits < 512 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case RSASHA512:
|
||||
if bits < 1024 || bits > 4096 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP256SHA256:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ECDSAP384SHA384:
|
||||
if bits != 384 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
case ED25519:
|
||||
if bits != 256 {
|
||||
return nil, ErrKeySize
|
||||
}
|
||||
}
|
||||
|
||||
switch k.Algorithm {
|
||||
case DSA, DSANSEC3SHA1:
|
||||
params := new(dsa.Parameters)
|
||||
if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
priv := new(dsa.PrivateKey)
|
||||
priv.PublicKey.Parameters = *params
|
||||
err := dsa.GenerateKey(priv, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
|
||||
priv, err := rsa.GenerateKey(rand.Reader, bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
|
||||
return priv, nil
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
var c elliptic.Curve
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
c = elliptic.P256()
|
||||
case ECDSAP384SHA384:
|
||||
c = elliptic.P384()
|
||||
}
|
||||
priv, err := ecdsa.GenerateKey(c, rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
pub, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.setPublicKeyED25519(pub)
|
||||
return priv, nil
|
||||
default:
|
||||
return nil, ErrAlg
|
||||
}
|
||||
}
|
||||
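A hedged sketch of Generate for an RSA key; the owner name and flags are placeholders, and PrivateKeyString (defined further below in dnssec_privkey.go) is used to emit the BIND-style private key.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE,
		Protocol:  3,
		Algorithm: dns.RSASHA256,
	}
	priv, err := key.Generate(2048)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.String())               // public half as a DNSKEY RR
	fmt.Println(key.PrivateKeyString(priv)) // BIND v1.3 private key format
}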
|
||||
// Set the public key (the value E and N)
|
||||
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
|
||||
if _E == 0 || _N == nil {
|
||||
return false
|
||||
}
|
||||
buf := exponentToBuf(_E)
|
||||
buf = append(buf, _N.Bytes()...)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Elliptic Curves
|
||||
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
|
||||
if _X == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
var intlen int
|
||||
switch k.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for DSA
|
||||
func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
|
||||
if _Q == nil || _P == nil || _G == nil || _Y == nil {
|
||||
return false
|
||||
}
|
||||
buf := dsaToBuf(_Q, _P, _G, _Y)
|
||||
k.PublicKey = toBase64(buf)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key for Ed25519
|
||||
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
|
||||
if _K == nil {
|
||||
return false
|
||||
}
|
||||
k.PublicKey = toBase64(_K)
|
||||
return true
|
||||
}
|
||||
|
||||
// Set the public key (the values E and N) for RSA
|
||||
// RFC 3110: Section 2. RSA Public KEY Resource Records
|
||||
func exponentToBuf(_E int) []byte {
|
||||
var buf []byte
|
||||
i := big.NewInt(int64(_E)).Bytes()
|
||||
if len(i) < 256 {
|
||||
buf = make([]byte, 1, 1+len(i))
|
||||
buf[0] = uint8(len(i))
|
||||
} else {
|
||||
buf = make([]byte, 3, 3+len(i))
|
||||
buf[0] = 0
|
||||
buf[1] = uint8(len(i) >> 8)
|
||||
buf[2] = uint8(len(i))
|
||||
}
|
||||
buf = append(buf, i...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Set the public key for X and Y for Curve. The two
|
||||
// values are just concatenated.
|
||||
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
|
||||
buf := intToBytes(_X, intlen)
|
||||
buf = append(buf, intToBytes(_Y, intlen)...)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Set the public key for DSA. The values Q, P, G and Y are
// concatenated after the size octet T.
|
||||
func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte {
|
||||
t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8)
|
||||
buf := []byte{byte(t)}
|
||||
buf = append(buf, intToBytes(_Q, 20)...)
|
||||
buf = append(buf, intToBytes(_P, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_G, 64+t*8)...)
|
||||
buf = append(buf, intToBytes(_Y, 64+t*8)...)
|
||||
return buf
|
||||
}
|
||||
297 vendor/github.com/miekg/dns/dnssec_keyscan.go generated vendored
@@ -1,297 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form as the BIND private key files.
|
||||
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
|
||||
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
|
||||
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
|
||||
}
|
||||
return k.ReadPrivateKey(strings.NewReader(s), "")
|
||||
}
|
||||
|
||||
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
// only used in error reporting.
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
|
||||
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
|
||||
m, err := parseKey(q, file)
|
||||
if m == nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, ok := m["private-key-format"]; !ok {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
switch uint8(algo) {
|
||||
case DSA:
|
||||
priv, err := readPrivateKeyDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case RSAMD5:
|
||||
fallthrough
|
||||
case RSASHA1:
|
||||
fallthrough
|
||||
case RSASHA1NSEC3SHA1:
|
||||
fallthrough
|
||||
case RSASHA256:
|
||||
fallthrough
|
||||
case RSASHA512:
|
||||
priv, err := readPrivateKeyRSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyRSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ECCGOST:
|
||||
return nil, ErrPrivKey
|
||||
case ECDSAP256SHA256:
|
||||
fallthrough
|
||||
case ECDSAP384SHA384:
|
||||
priv, err := readPrivateKeyECDSA(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pub := k.publicKeyECDSA()
|
||||
if pub == nil {
|
||||
return nil, ErrKey
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
return readPrivateKeyED25519(m)
|
||||
default:
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
}
|
||||
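A hedged sketch of loading a key pair back from disk. The file names follow BIND's K&lt;name&gt;+&lt;alg&gt;+&lt;keytag&gt; convention but are placeholders, and the .key file is assumed to contain a single DNSKEY RR.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/miekg/dns"
)

func main() {
	// Hypothetical file names.
	pubBytes, err := ioutil.ReadFile("Kexample.org.+008+12345.key")
	if err != nil {
		log.Fatal(err)
	}
	rr, err := dns.NewRR(string(pubBytes))
	if err != nil {
		log.Fatal(err)
	}
	key := rr.(*dns.DNSKEY)

	privFile, err := os.Open("Kexample.org.+008+12345.private")
	if err != nil {
		log.Fatal(err)
	}
	defer privFile.Close()

	priv, err := key.ReadPrivateKey(privFile, "Kexample.org.+008+12345.private")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %T for key tag %d\n", priv, key.KeyTag())
}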
|
||||
// Read a private key (file) string and create a public key. Return the private key.
|
||||
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
|
||||
p := new(rsa.PrivateKey)
|
||||
p.Primes = []*big.Int{nil, nil}
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch k {
|
||||
case "modulus":
|
||||
p.PublicKey.N = big.NewInt(0)
|
||||
p.PublicKey.N.SetBytes(v1)
|
||||
case "publicexponent":
|
||||
i := big.NewInt(0)
|
||||
i.SetBytes(v1)
|
||||
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
|
||||
case "privateexponent":
|
||||
p.D = big.NewInt(0)
|
||||
p.D.SetBytes(v1)
|
||||
case "prime1":
|
||||
p.Primes[0] = big.NewInt(0)
|
||||
p.Primes[0].SetBytes(v1)
|
||||
case "prime2":
|
||||
p.Primes[1] = big.NewInt(0)
|
||||
p.Primes[1].SetBytes(v1)
|
||||
}
|
||||
case "exponent1", "exponent2", "coefficient":
|
||||
// not used in Go (yet)
|
||||
case "created", "publish", "activate":
|
||||
// not used in Go (yet)
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
|
||||
p := new(dsa.PrivateKey)
|
||||
p.X = big.NewInt(0)
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "private_value(x)":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.X.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
|
||||
p := new(ecdsa.PrivateKey)
|
||||
p.D = big.NewInt(0)
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
v1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.D.SetBytes(v1)
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
|
||||
var p ed25519.PrivateKey
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
p1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p1) != 32 {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// RFC 8080 and Golang's x/crypto/ed25519 differ as to how the
|
||||
// private keys are represented. RFC 8080 specifies that private
|
||||
// keys be stored solely as the seed value (p1 above) while the
|
||||
// ed25519 package represents them as the seed value concatenated
|
||||
// to the public key, which is derived from the seed value.
|
||||
//
|
||||
// ed25519.GenerateKey reads exactly 32 bytes from the passed in
|
||||
// io.Reader and uses them as the seed. It also derives the
|
||||
// public key and produces a compatible private key.
|
||||
_, p, err = ed25519.GenerateKey(bytes.NewReader(p1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseKey reads a private key from r. It returns a map[string]string,
|
||||
// with the key-value pairs, or an error when the file is not correct.
|
||||
func parseKey(r io.Reader, file string) (map[string]string, error) {
|
||||
s, cancel := scanInit(r)
|
||||
m := make(map[string]string)
|
||||
c := make(chan lex)
|
||||
k := ""
|
||||
defer func() {
|
||||
cancel()
|
||||
		// zlexer can send up to two tokens: the next one and possibly one remainder.
		// Drain them from the channel.
|
||||
_, ok := <-c
|
||||
_, ok = <-c
|
||||
if !ok {
|
||||
// too bad
|
||||
}
|
||||
}()
|
||||
// Start the lexer
|
||||
go klexer(s, c)
|
||||
for l := range c {
|
||||
// It should alternate
|
||||
switch l.value {
|
||||
case zKey:
|
||||
k = l.token
|
||||
case zValue:
|
||||
if k == "" {
|
||||
return nil, &ParseError{file, "no private key seen", l}
|
||||
}
|
||||
//println("Setting", strings.ToLower(k), "to", l.token, "b")
|
||||
m[strings.ToLower(k)] = l.token
|
||||
k = ""
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// klexer scans the sourcefile and returns tokens on the channel c.
|
||||
func klexer(s *scan, c chan lex) {
|
||||
var l lex
|
||||
str := "" // Hold the current read text
|
||||
commt := false
|
||||
key := true
|
||||
x, err := s.tokenText()
|
||||
defer close(c)
|
||||
for err == nil {
|
||||
l.column = s.position.Column
|
||||
l.line = s.position.Line
|
||||
switch x {
|
||||
case ':':
|
||||
if commt {
|
||||
break
|
||||
}
|
||||
l.token = str
|
||||
if key {
|
||||
l.value = zKey
|
||||
c <- l
|
||||
// Next token is a space, eat it
|
||||
s.tokenText()
|
||||
key = false
|
||||
str = ""
|
||||
} else {
|
||||
l.value = zValue
|
||||
}
|
||||
case ';':
|
||||
commt = true
|
||||
case '\n':
|
||||
if commt {
|
||||
// Reset a comment
|
||||
commt = false
|
||||
}
|
||||
l.value = zValue
|
||||
l.token = str
|
||||
c <- l
|
||||
str = ""
|
||||
commt = false
|
||||
key = true
|
||||
default:
|
||||
if commt {
|
||||
break
|
||||
}
|
||||
str += string(x)
|
||||
}
|
||||
x, err = s.tokenText()
|
||||
}
|
||||
if len(str) > 0 {
|
||||
// Send remainder
|
||||
l.token = str
|
||||
l.value = zValue
|
||||
c <- l
|
||||
}
|
||||
}
|
||||
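
// The input accepted by parseKey/klexer is the BIND9 private-key file layout,
// for example (values truncated/hypothetical):
//
//	Private-key-format: v1.3
//	Algorithm: 13 (ECDSAP256SHA256)
//	PrivateKey: <base64 data>
//	Created: 20180101000000
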
93 vendor/github.com/miekg/dns/dnssec_privkey.go (generated, vendored)
@@ -1,93 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
const format = "Private-key-format: v1.3\n"
|
||||
|
||||
// PrivateKeyString converts a PrivateKey to a string. This string has the same
|
||||
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
|
||||
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey.
|
||||
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
|
||||
algorithm := strconv.Itoa(int(r.Algorithm))
|
||||
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
|
||||
|
||||
switch p := p.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
modulus := toBase64(p.PublicKey.N.Bytes())
|
||||
e := big.NewInt(int64(p.PublicKey.E))
|
||||
publicExponent := toBase64(e.Bytes())
|
||||
privateExponent := toBase64(p.D.Bytes())
|
||||
prime1 := toBase64(p.Primes[0].Bytes())
|
||||
prime2 := toBase64(p.Primes[1].Bytes())
|
||||
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
|
||||
// and from: http://code.google.com/p/go/issues/detail?id=987
|
||||
one := big.NewInt(1)
|
||||
p1 := big.NewInt(0).Sub(p.Primes[0], one)
|
||||
q1 := big.NewInt(0).Sub(p.Primes[1], one)
|
||||
exp1 := big.NewInt(0).Mod(p.D, p1)
|
||||
exp2 := big.NewInt(0).Mod(p.D, q1)
|
||||
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
|
||||
|
||||
exponent1 := toBase64(exp1.Bytes())
|
||||
exponent2 := toBase64(exp2.Bytes())
|
||||
coefficient := toBase64(coeff.Bytes())
|
||||
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Modulus: " + modulus + "\n" +
|
||||
"PublicExponent: " + publicExponent + "\n" +
|
||||
"PrivateExponent: " + privateExponent + "\n" +
|
||||
"Prime1: " + prime1 + "\n" +
|
||||
"Prime2: " + prime2 + "\n" +
|
||||
"Exponent1: " + exponent1 + "\n" +
|
||||
"Exponent2: " + exponent2 + "\n" +
|
||||
"Coefficient: " + coefficient + "\n"
|
||||
|
||||
case *ecdsa.PrivateKey:
|
||||
var intlen int
|
||||
switch r.Algorithm {
|
||||
case ECDSAP256SHA256:
|
||||
intlen = 32
|
||||
case ECDSAP384SHA384:
|
||||
intlen = 48
|
||||
}
|
||||
private := toBase64(intToBytes(p.D, intlen))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
case *dsa.PrivateKey:
|
||||
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
|
||||
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
|
||||
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
|
||||
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
|
||||
priv := toBase64(intToBytes(p.X, 20))
|
||||
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"Prime(p): " + prime + "\n" +
|
||||
"Subprime(q): " + subprime + "\n" +
|
||||
"Base(g): " + base + "\n" +
|
||||
"Private_value(x): " + priv + "\n" +
|
||||
"Public_value(y): " + pub + "\n"
|
||||
|
||||
case ed25519.PrivateKey:
|
||||
private := toBase64(p[:32])
|
||||
return format +
|
||||
"Algorithm: " + algorithm + "\n" +
|
||||
"PrivateKey: " + private + "\n"
|
||||
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
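
// A minimal usage sketch (owner name and key size are illustrative; Generate
// is assumed to come from the same package):
//
//	k := new(dns.DNSKEY)
//	k.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
//	k.Flags, k.Protocol, k.Algorithm = 257, 3, dns.ECDSAP256SHA256
//	priv, err := k.Generate(256) // bit size; must match the algorithm
//	if err == nil {
//		fmt.Println(k.String())               // public DNSKEY RR
//		fmt.Println(k.PrivateKeyString(priv)) // BIND9-style private key text
//	}
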
|
||||
272 vendor/github.com/miekg/dns/doc.go (generated, vendored)
@@ -1,272 +0,0 @@
|
||||
/*
|
||||
Package dns implements a full featured interface to the Domain Name System.
|
||||
Server- and client-side programming is supported.
|
||||
The package allows complete control over what is sent out to the DNS. The package
|
||||
API follows the less-is-more principle, by presenting a small, clean interface.
|
||||
|
||||
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
|
||||
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
|
||||
Note that domain names MUST be fully qualified before sending them; unqualified
names in a message will result in a packing failure.
|
||||
|
||||
Resource records are native types. They are not stored in wire format.
|
||||
Basic usage pattern for creating a new resource record:
|
||||
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
|
||||
Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
|
||||
Or directly from a string:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
|
||||
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
|
||||
Or even:
|
||||
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
|
||||
In the DNS, messages are exchanged; these messages contain resource
records (sets). Basic use pattern for creating a message:
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
|
||||
Or when not certain if the domain name is fully qualified:
|
||||
|
||||
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
|
||||
|
||||
The message m is now a message with the question section set to ask
|
||||
the MX records for the miek.nl. zone.
|
||||
|
||||
The following is slightly more verbose, but more flexible:
|
||||
|
||||
m1 := new(dns.Msg)
|
||||
m1.Id = dns.Id()
|
||||
m1.RecursionDesired = true
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
|
||||
After creating a message it can be sent.
|
||||
Basic use pattern for synchronous querying the DNS at a
|
||||
server configured on 127.0.0.1 and port 53:
|
||||
|
||||
c := new(dns.Client)
|
||||
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
Suppressing multiple outstanding queries (with the same question, type and
|
||||
class) is as easy as setting:
|
||||
|
||||
c.SingleInflight = true
|
||||
|
||||
More advanced options are available using a net.Dialer and the corresponding API.
|
||||
For example it is possible to set a timeout, or to specify a source IP address
|
||||
and port to use for the connection:
|
||||
|
||||
c := new(dns.Client)
|
||||
laddr := net.UDPAddr{
|
||||
IP: net.ParseIP("[::1]"),
|
||||
Port: 12345,
|
||||
Zone: "",
|
||||
}
|
||||
d := net.Dialer{
|
||||
Timeout: 200 * time.Millisecond,
|
||||
LocalAddr: &laddr,
|
||||
}
|
||||
in, rtt, err := c.ExchangeWithDialer(&d, m1, "8.8.8.8:53")
|
||||
|
||||
If these "advanced" features are not needed, a simple UDP query can be sent,
|
||||
with:
|
||||
|
||||
in, err := dns.Exchange(m1, "127.0.0.1:53")
|
||||
|
||||
When this function returns you will get a DNS message. A DNS message consists
of four sections.
|
||||
The question section: in.Question, the answer section: in.Answer,
|
||||
the authority section: in.Ns and the additional section: in.Extra.
|
||||
|
||||
Each of these sections (except the Question section) contain a []RR. Basic
|
||||
use pattern for accessing the rdata of a TXT RR as the first RR in
|
||||
the Answer section:
|
||||
|
||||
if t, ok := in.Answer[0].(*dns.TXT); ok {
|
||||
// do something with t.Txt
|
||||
}
|
||||
|
||||
Domain Name and TXT Character String Representations
|
||||
|
||||
Both domain names and TXT character strings are converted to presentation
|
||||
form both when unpacked and when converted to strings.
|
||||
|
||||
For TXT character strings, tabs, carriage returns and line feeds will be
|
||||
converted to \t, \r and \n respectively. Back slashes and quotations marks
|
||||
will be escaped. Bytes below 32 and above 127 will be converted to \DDD
|
||||
form.
|
||||
|
||||
For domain names, in addition to the above rules brackets, periods,
|
||||
spaces, semicolons and the at symbol are escaped.
|
||||
|
||||
DNSSEC
|
||||
|
||||
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It
|
||||
uses public key cryptography to sign resource records. The
|
||||
public keys are stored in DNSKEY records and the signatures in RRSIG records.
|
||||
|
||||
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
|
||||
to a request.
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetEdns0(4096, true)
|
||||
|
||||
Signature generation, signature verification and key generation are all supported.
|
||||
|
||||
DYNAMIC UPDATES
|
||||
|
||||
Dynamic updates reuse the DNS message format, but rename three of
the sections. Question is Zone, Answer is Prerequisite, Authority is
Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
|
||||
|
||||
You can set a rather complex set of rules for the existence or absence of
|
||||
certain resource records or names in a zone to specify if resource records
|
||||
should be added or removed. The table from RFC 2136 supplemented with the Go
|
||||
DNS function shows which functions exist to specify the prerequisites.
|
||||
|
||||
3.2.4 - Table Of Metavalues Used In Prerequisite Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
--------------------------------------------------------------
|
||||
ANY ANY empty Name is in use dns.NameUsed
|
||||
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
|
||||
NONE ANY empty Name is not in use dns.NameNotUsed
|
||||
NONE rrset empty RRset does not exist dns.RRsetNotUsed
|
||||
zone rrset rr RRset exists (value dep) dns.Used
|
||||
|
||||
The prerequisite section can also be left empty.
|
||||
If you have decided on the prerequisites you can tell what RRs should
|
||||
be added or deleted. The next table shows the options you have and
|
||||
what functions to call.
|
||||
|
||||
3.4.2.6 - Table Of Metavalues Used In Update Section
|
||||
|
||||
CLASS TYPE RDATA Meaning Function
|
||||
---------------------------------------------------------------
|
||||
ANY ANY empty Delete all RRsets from name dns.RemoveName
|
||||
ANY rrset empty Delete an RRset dns.RemoveRRset
|
||||
NONE rrset rr Delete an RR from RRset dns.Remove
|
||||
zone rrset rr Add to an RRset dns.Insert
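
A minimal sketch of an update that adds one RR (the zone, record and server
address are illustrative):

	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	rr, _ := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
	m.Insert([]dns.RR{rr})
	c := new(dns.Client)
	in, _, err := c.Exchange(m, "192.0.2.53:53")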
|
||||
|
||||
TRANSACTION SIGNATURE
|
||||
|
||||
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
|
||||
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
|
||||
|
||||
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
|
||||
must be fully qualified - as they are domain names) and the base64 secret
|
||||
"so6ZGir4GPAqINNh9U5c3A==":
|
||||
|
||||
If an incoming message contains a TSIG record it MUST be the last record in
|
||||
the additional section (RFC2845 3.2). This means that you should make the
|
||||
call to SetTsig last, right before executing the query. If you make any
|
||||
changes to the RRset after calling SetTsig() the signature will be incorrect.
|
||||
|
||||
c := new(dns.Client)
|
||||
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
...
|
||||
// When sending the TSIG RR is calculated and filled in before sending
|
||||
|
||||
When requesting a zone transfer (almost all TSIG usage is when requesting zone transfers), with
|
||||
TSIG, this is the basic use pattern. In this example we request an AXFR for
|
||||
miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
|
||||
and using the server 176.58.119.54:
|
||||
|
||||
t := new(dns.Transfer)
|
||||
m := new(dns.Msg)
|
||||
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m.SetAxfr("miek.nl.")
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
c, err := t.In(m, "176.58.119.54:53")
|
||||
for r := range c { ... }
|
||||
|
||||
You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
|
||||
If something is not correct an error is returned.
|
||||
|
||||
Basic use pattern validating and replying to a message that has TSIG set.
|
||||
|
||||
server := &dns.Server{Addr: ":53", Net: "udp"}
|
||||
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
go server.ListenAndServe()
|
||||
dns.HandleFunc(".", handleRequest)
|
||||
|
||||
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
if r.IsTsig() != nil {
|
||||
if w.TsigStatus() == nil {
|
||||
// *Msg r has a TSIG record and it was validated
|
||||
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
|
||||
} else {
|
||||
// *Msg r has a TSIG record and it was not validated
|
||||
}
|
||||
}
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
PRIVATE RRS
|
||||
|
||||
RFC 6895 sets aside a range of type codes for private use. This range
|
||||
is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
|
||||
can be used, before requesting an official type code from IANA.
|
||||
|
||||
see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
|
||||
information.
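
An illustrative sketch of registering a private type (the APAIR name, type
code and apairRdata type are made up; apairRdata is assumed to implement
dns.PrivateRdata):

	const typeAPAIR = 0xFF00 // within the private range

	dns.PrivateHandle("APAIR", typeAPAIR, func() dns.PrivateRdata { return new(apairRdata) })
	defer dns.PrivateHandleRemove(typeAPAIR)
	rr, err := dns.NewRR("example.org. 3600 IN APAIR 1.2.3.4 5.6.7.8")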
|
||||
|
||||
EDNS0
|
||||
|
||||
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
|
||||
by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
|
||||
abused.
|
||||
Basic use pattern for creating an (empty) OPT RR:
|
||||
|
||||
o := new(dns.OPT)
|
||||
o.Hdr.Name = "." // MUST be the root zone, per definition.
|
||||
o.Hdr.Rrtype = dns.TypeOPT
|
||||
|
||||
The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891)
|
||||
interfaces. Currently only a few have been standardized: EDNS0_NSID
|
||||
(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
|
||||
that these options may be combined in an OPT RR.
|
||||
Basic use pattern for a server to check if (and which) options are set:
|
||||
|
||||
// o is a dns.OPT
|
||||
for _, s := range o.Option {
|
||||
switch e := s.(type) {
|
||||
case *dns.EDNS0_NSID:
|
||||
// do stuff with e.Nsid
|
||||
case *dns.EDNS0_SUBNET:
|
||||
// access e.Family, e.Address, etc.
|
||||
}
|
||||
}
|
||||
|
||||
SIG(0)
|
||||
|
||||
From RFC 2931:
|
||||
|
||||
SIG(0) provides protection for DNS transactions and requests ....
|
||||
... protection for glue records, DNS requests, protection for message headers
|
||||
on requests and responses, and protection of the overall integrity of a response.
|
||||
|
||||
It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
|
||||
secret approach in TSIG.
|
||||
Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
|
||||
RSASHA512.
|
||||
|
||||
Signing subsequent messages in multi-message sessions is not implemented.
|
||||
*/
|
||||
package dns
|
||||
627 vendor/github.com/miekg/dns/edns.go (generated, vendored)
@@ -1,627 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// EDNS0 Option codes.
|
||||
const (
|
||||
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
EDNS0NSID = 0x3 // nsid (See RFC 5001)
|
||||
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
|
||||
EDNS0DHU = 0x6 // DS Hash Understood
|
||||
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
||||
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
|
||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||
EDNS0COOKIE = 0xa // EDNS0 Cookie
|
||||
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
|
||||
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
|
||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
|
||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
|
||||
_DO = 1 << 15 // DNSSEC OK
|
||||
)
|
||||
|
||||
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
|
||||
// See RFC 6891.
|
||||
type OPT struct {
|
||||
Hdr RR_Header
|
||||
Option []EDNS0 `dns:"opt"`
|
||||
}
|
||||
|
||||
func (rr *OPT) String() string {
|
||||
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
|
||||
if rr.Do() {
|
||||
s += "flags: do; "
|
||||
} else {
|
||||
s += "flags: ; "
|
||||
}
|
||||
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
|
||||
|
||||
for _, o := range rr.Option {
|
||||
switch o.(type) {
|
||||
case *EDNS0_NSID:
|
||||
s += "\n; NSID: " + o.String()
|
||||
h, e := o.pack()
|
||||
var r string
|
||||
if e == nil {
|
||||
for _, c := range h {
|
||||
r += "(" + string(c) + ")"
|
||||
}
|
||||
s += " " + r
|
||||
}
|
||||
case *EDNS0_SUBNET:
|
||||
s += "\n; SUBNET: " + o.String()
|
||||
case *EDNS0_COOKIE:
|
||||
s += "\n; COOKIE: " + o.String()
|
||||
case *EDNS0_UL:
|
||||
s += "\n; UPDATE LEASE: " + o.String()
|
||||
case *EDNS0_LLQ:
|
||||
s += "\n; LONG LIVED QUERIES: " + o.String()
|
||||
case *EDNS0_DAU:
|
||||
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_DHU:
|
||||
s += "\n; DS HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_N3U:
|
||||
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_LOCAL:
|
||||
s += "\n; LOCAL OPT: " + o.String()
|
||||
case *EDNS0_PADDING:
|
||||
s += "\n; PADDING: " + o.String()
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (rr *OPT) len() int {
|
||||
l := rr.Hdr.len()
|
||||
for i := 0; i < len(rr.Option); i++ {
|
||||
l += 4 // Account for 2-byte option code and 2-byte option length.
|
||||
lo, _ := rr.Option[i].pack()
|
||||
l += len(lo)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// return the old value -> delete SetVersion?
|
||||
|
||||
// Version returns the EDNS version used. Only zero is defined.
|
||||
func (rr *OPT) Version() uint8 {
|
||||
return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16)
|
||||
}
|
||||
|
||||
// SetVersion sets the version of EDNS. This is usually zero.
|
||||
func (rr *OPT) SetVersion(v uint8) {
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16)
|
||||
}
|
||||
|
||||
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
||||
func (rr *OPT) ExtendedRcode() int {
|
||||
return int((rr.Hdr.Ttl & 0xFF000000) >> 24)
|
||||
}
|
||||
|
||||
// SetExtendedRcode sets the EDNS extended RCODE field.
|
||||
func (rr *OPT) SetExtendedRcode(v uint8) {
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
|
||||
}
|
||||
|
||||
// UDPSize returns the UDP buffer size.
|
||||
func (rr *OPT) UDPSize() uint16 {
|
||||
return rr.Hdr.Class
|
||||
}
|
||||
|
||||
// SetUDPSize sets the UDP buffer size.
|
||||
func (rr *OPT) SetUDPSize(size uint16) {
|
||||
rr.Hdr.Class = size
|
||||
}
|
||||
|
||||
// Do returns the value of the DO (DNSSEC OK) bit.
|
||||
func (rr *OPT) Do() bool {
|
||||
return rr.Hdr.Ttl&_DO == _DO
|
||||
}
|
||||
|
||||
// SetDo sets the DO (DNSSEC OK) bit.
|
||||
// If we pass an argument, set the DO bit to that value.
|
||||
// It is possible to pass 2 or more arguments. Any arguments after the first are silently ignored.
|
||||
func (rr *OPT) SetDo(do ...bool) {
|
||||
if len(do) == 1 {
|
||||
if do[0] {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
} else {
|
||||
rr.Hdr.Ttl &^= _DO
|
||||
}
|
||||
} else {
|
||||
rr.Hdr.Ttl |= _DO
|
||||
}
|
||||
}
|
||||
|
||||
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
|
||||
type EDNS0 interface {
|
||||
// Option returns the option code for the option.
|
||||
Option() uint16
|
||||
// pack returns the bytes of the option data.
|
||||
pack() ([]byte, error)
|
||||
// unpack sets the data as found in the buffer. It also sets
|
||||
// the length of the slice as the length of the option data.
|
||||
unpack([]byte) error
|
||||
// String returns the string representation of the option.
|
||||
String() string
|
||||
}
|
||||
|
||||
// EDNS0_NSID option is used to retrieve a nameserver
|
||||
// identifier. When sending a request Nsid must be set to the empty string.
|
||||
// The identifier is an opaque string encoded as hex.
|
||||
// Basic use pattern for creating an nsid option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_NSID)
|
||||
// e.Code = dns.EDNS0NSID
|
||||
// e.Nsid = "AA"
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_NSID struct {
|
||||
Code uint16 // Always EDNS0NSID
|
||||
Nsid string // This string needs to be hex encoded
|
||||
}
|
||||
|
||||
func (e *EDNS0_NSID) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Nsid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
|
||||
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
|
||||
|
||||
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
|
||||
// an idea of where the client lives. See RFC 7871. It can then give back a different
|
||||
// answer depending on the location or network topology.
|
||||
// Basic use pattern for creating a subnet option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
|
||||
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// This code will parse all the available bits when unpacking (up to optlen).
|
||||
// When packing it will apply SourceNetmask. If you need more advanced logic,
|
||||
// patches welcome and good luck.
|
||||
type EDNS0_SUBNET struct {
|
||||
Code uint16 // Always EDNS0SUBNET
|
||||
Family uint16 // 1 for IP, 2 for IP6
|
||||
SourceNetmask uint8
|
||||
SourceScope uint8
|
||||
Address net.IP
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
|
||||
|
||||
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Family)
|
||||
b[2] = e.SourceNetmask
|
||||
b[3] = e.SourceScope
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// We might not need to complain either
|
||||
if e.SourceNetmask != 0 {
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address.To4()) != net.IPv4len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
}
|
||||
if len(e.Address) != net.IPv6len {
|
||||
return nil, errors.New("dns: bad address")
|
||||
}
|
||||
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
|
||||
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
|
||||
b = append(b, ip[:needLength]...)
|
||||
default:
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Family = binary.BigEndian.Uint16(b)
|
||||
e.SourceNetmask = b[2]
|
||||
e.SourceScope = b[3]
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// It's okay to accept such a packet
|
||||
if e.SourceNetmask != 0 {
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
e.Address = net.IPv4(0, 0, 0, 0)
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make([]byte, net.IPv4len)
|
||||
for i := 0; i < net.IPv4len && 4+i < len(b); i++ {
|
||||
addr[i] = b[4+i]
|
||||
}
|
||||
e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3])
|
||||
case 2:
|
||||
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
}
|
||||
addr := make([]byte, net.IPv6len)
|
||||
for i := 0; i < net.IPv6len && 4+i < len(b); i++ {
|
||||
addr[i] = b[4+i]
|
||||
}
|
||||
e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4],
|
||||
addr[5], addr[6], addr[7], addr[8], addr[9], addr[10],
|
||||
addr[11], addr[12], addr[13], addr[14], addr[15]}
|
||||
default:
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) String() (s string) {
|
||||
if e.Address == nil {
|
||||
s = "<nil>"
|
||||
} else if e.Address.To4() != nil {
|
||||
s = e.Address.String()
|
||||
} else {
|
||||
s = "[" + e.Address.String() + "]"
|
||||
}
|
||||
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
|
||||
return
|
||||
}
|
||||
|
||||
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_COOKIE)
|
||||
// e.Code = dns.EDNS0COOKIE
|
||||
// e.Cookie = "24a5ac.."
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
|
||||
// always 8 bytes. It may then optionally be followed by the server cookie. The server
|
||||
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
|
||||
//
|
||||
// cCookie := o.Cookie[:16]
|
||||
// sCookie := o.Cookie[16:]
|
||||
//
|
||||
// There is no guarantee that the Cookie string has a specific length.
|
||||
type EDNS0_COOKIE struct {
|
||||
Code uint16 // Always EDNS0COOKIE
|
||||
Cookie string // Hex-encoded cookie data
|
||||
}
|
||||
|
||||
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
|
||||
h, err := hex.DecodeString(e.Cookie)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
|
||||
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
|
||||
|
||||
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
|
||||
// an expiration on an update RR. This is helpful for clients that cannot clean
|
||||
// up after themselves. This is a draft RFC and more information can be found at
|
||||
// http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_UL)
|
||||
// e.Code = dns.EDNS0UL
|
||||
// e.Lease = 120 // in seconds
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_UL struct {
|
||||
Code uint16 // Always EDNS0UL
|
||||
Lease uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
|
||||
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
|
||||
|
||||
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
|
||||
func (e *EDNS0_UL) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(b, e.Lease)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_UL) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Lease = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
// Implemented for completeness, as the EDNS0 type code is assigned.
|
||||
type EDNS0_LLQ struct {
|
||||
Code uint16 // Always EDNS0LLQ
|
||||
Version uint16
|
||||
Opcode uint16
|
||||
Error uint16
|
||||
Id uint64
|
||||
LeaseLife uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
|
||||
|
||||
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
||||
b := make([]byte, 18)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Version)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Opcode)
|
||||
binary.BigEndian.PutUint16(b[4:], e.Error)
|
||||
binary.BigEndian.PutUint64(b[6:], e.Id)
|
||||
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) unpack(b []byte) error {
|
||||
if len(b) < 18 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Version = binary.BigEndian.Uint16(b[0:])
|
||||
e.Opcode = binary.BigEndian.Uint16(b[2:])
|
||||
e.Error = binary.BigEndian.Uint16(b[4:])
|
||||
e.Id = binary.BigEndian.Uint64(b[6:])
|
||||
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LLQ) String() string {
|
||||
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
|
||||
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) +
|
||||
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
|
||||
type EDNS0_DAU struct {
|
||||
Code uint16 // Always EDNS0DAU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DAU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := AlgorithmToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_DHU struct {
|
||||
Code uint16 // Always EDNS0DHU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_DHU) String() string {
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_N3U struct {
|
||||
Code uint16 // Always EDNS0N3U
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
|
||||
func (e *EDNS0_N3U) String() string {
|
||||
// Re-use the hash map
|
||||
s := ""
|
||||
for i := 0; i < len(e.AlgCode); i++ {
|
||||
if a, ok := HashToString[e.AlgCode[i]]; ok {
|
||||
s += " " + a
|
||||
} else {
|
||||
s += " " + strconv.Itoa(int(e.AlgCode[i]))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
|
||||
type EDNS0_EXPIRE struct {
|
||||
Code uint16 // Always EDNS0EXPIRE
|
||||
Expire uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
|
||||
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
|
||||
|
||||
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
b[0] = byte(e.Expire >> 24)
|
||||
b[1] = byte(e.Expire >> 16)
|
||||
b[2] = byte(e.Expire >> 8)
|
||||
b[3] = byte(e.Expire)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Expire = binary.BigEndian.Uint32(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
|
||||
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
|
||||
// (RFC6891), although any unassigned code can actually be used. The content of
|
||||
// the option is made available in Data, unaltered.
|
||||
// Basic use pattern for creating a local option:
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
// o.Hdr.Rrtype = dns.TypeOPT
|
||||
// e := new(dns.EDNS0_LOCAL)
|
||||
// e.Code = dns.EDNS0LOCALSTART
|
||||
// e.Data = []byte{72, 82, 74}
|
||||
// o.Option = append(o.Option, e)
|
||||
type EDNS0_LOCAL struct {
|
||||
Code uint16
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||
func (e *EDNS0_LOCAL) String() string {
|
||||
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
|
||||
b := make([]byte, len(e.Data))
|
||||
copied := copy(b, e.Data)
|
||||
if copied != len(e.Data) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
e.Data = make([]byte, len(b))
|
||||
copied := copy(e.Data, b)
|
||||
if copied != len(b) {
|
||||
return ErrBuf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
|
||||
// the TCP connection alive. See RFC 7828.
|
||||
type EDNS0_TCP_KEEPALIVE struct {
|
||||
Code uint16 // Always EDNSTCPKEEPALIVE
|
||||
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
|
||||
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
|
||||
if e.Timeout != 0 && e.Length != 2 {
|
||||
return nil, errors.New("dns: timeout specified but length is not 2")
|
||||
}
|
||||
if e.Timeout == 0 && e.Length != 0 {
|
||||
return nil, errors.New("dns: timeout not specified but length is not 0")
|
||||
}
|
||||
b := make([]byte, 4+e.Length)
|
||||
binary.BigEndian.PutUint16(b[0:], e.Code)
|
||||
binary.BigEndian.PutUint16(b[2:], e.Length)
|
||||
if e.Length == 2 {
|
||||
binary.BigEndian.PutUint16(b[4:], e.Timeout)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
|
||||
if len(b) < 4 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Length = binary.BigEndian.Uint16(b[2:4])
|
||||
if e.Length != 0 && e.Length != 2 {
|
||||
return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
|
||||
}
|
||||
if e.Length == 2 {
|
||||
if len(b) < 6 {
|
||||
return ErrBuf
|
||||
}
|
||||
e.Timeout = binary.BigEndian.Uint16(b[4:6])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
|
||||
s = "use tcp keep-alive"
|
||||
if e.Length == 0 {
|
||||
s += ", timeout omitted"
|
||||
} else {
|
||||
s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
|
||||
}
|
||||
return
|
||||
}
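
// An illustrative sketch of attaching this option to an OPT RR (the 200 ms
// timeout is an arbitrary example value):
//
//	o := new(dns.OPT)
//	o.Hdr.Name = "."
//	o.Hdr.Rrtype = dns.TypeOPT
//	e := new(dns.EDNS0_TCP_KEEPALIVE)
//	e.Code = dns.EDNS0TCPKEEPALIVE
//	e.Length = 2
//	e.Timeout = 2 // units of 100 ms, i.e. 200 ms
//	o.Option = append(o.Option, e)
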
|
||||
|
||||
// EDNS0_PADDING option is used to add padding to a request/response. The default
|
||||
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
|
||||
// compression is applied before encryption which may break signatures.
|
||||
type EDNS0_PADDING struct {
|
||||
Padding []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
|
||||
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
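
// An illustrative sketch of adding 64 bytes of zero padding (the size is
// arbitrary; zero bytes follow the SHOULD above):
//
//	o := new(dns.OPT)
//	o.Hdr.Name = "."
//	o.Hdr.Rrtype = dns.TypeOPT
//	p := new(dns.EDNS0_PADDING)
//	p.Padding = make([]byte, 64)
//	o.Option = append(o.Option, p)
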
|
||||
87 vendor/github.com/miekg/dns/format.go (generated, vendored)
@@ -1,87 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// NumField returns the number of rdata fields r has.
|
||||
func NumField(r RR) int {
|
||||
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
|
||||
}
|
||||
|
||||
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
|
||||
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
// string where the types are concatenated using a space.
// Accessing non-existing fields will cause a panic.
|
||||
func Field(r RR, i int) string {
|
||||
if i == 0 {
|
||||
return ""
|
||||
}
|
||||
d := reflect.ValueOf(r).Elem().Field(i)
|
||||
switch k := d.Kind(); k {
|
||||
case reflect.String:
|
||||
return d.String()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(d.Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(d.Uint(), 10)
|
||||
case reflect.Slice:
|
||||
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
|
||||
case `dns:"a"`:
|
||||
// TODO(miek): Hmm store this as 16 bytes
|
||||
if d.Len() < net.IPv6len {
|
||||
return net.IPv4(byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint())).String()
|
||||
}
|
||||
return net.IPv4(byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint())).String()
|
||||
case `dns:"aaaa"`:
|
||||
return net.IP{
|
||||
byte(d.Index(0).Uint()),
|
||||
byte(d.Index(1).Uint()),
|
||||
byte(d.Index(2).Uint()),
|
||||
byte(d.Index(3).Uint()),
|
||||
byte(d.Index(4).Uint()),
|
||||
byte(d.Index(5).Uint()),
|
||||
byte(d.Index(6).Uint()),
|
||||
byte(d.Index(7).Uint()),
|
||||
byte(d.Index(8).Uint()),
|
||||
byte(d.Index(9).Uint()),
|
||||
byte(d.Index(10).Uint()),
|
||||
byte(d.Index(11).Uint()),
|
||||
byte(d.Index(12).Uint()),
|
||||
byte(d.Index(13).Uint()),
|
||||
byte(d.Index(14).Uint()),
|
||||
byte(d.Index(15).Uint()),
|
||||
}.String()
|
||||
case `dns:"nsec"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := Type(d.Index(0).Uint()).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + Type(d.Index(i).Uint()).String()
|
||||
}
|
||||
return s
|
||||
default:
|
||||
// if it does not have a tag it's a string slice
|
||||
fallthrough
|
||||
case `dns:"txt"`:
|
||||
if d.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
s := d.Index(0).String()
|
||||
for i := 1; i < d.Len(); i++ {
|
||||
s += " " + d.Index(i).String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
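
// An illustrative sketch of Field and NumField (the MX record is an example):
//
//	rr, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//	for i := 1; i <= dns.NumField(rr); i++ {
//		fmt.Println(dns.Field(rr, i)) // prints "10", then "mx.miek.nl."
//	}
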
|
||||
23 vendor/github.com/miekg/dns/fuzz.go (generated, vendored)
@@ -1,23 +0,0 @@
|
||||
// +build fuzz
|
||||
|
||||
package dns
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
msg := new(Msg)
|
||||
|
||||
if err := msg.Unpack(data); err != nil {
|
||||
return 0
|
||||
}
|
||||
if _, err := msg.Pack(); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
func FuzzNewRR(data []byte) int {
|
||||
if _, err := NewRR(string(data)); err != nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
159 vendor/github.com/miekg/dns/generate.go (generated, vendored)
@@ -1,159 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse the $GENERATE statement as used in BIND9 zones.
|
||||
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
|
||||
// We are called after '$GENERATE '. After which we expect:
|
||||
// * the range (12-24/2)
|
||||
// * lhs (ownername)
|
||||
// * [[ttl][class]]
|
||||
// * type
|
||||
// * rhs (rdata)
|
||||
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
// Any errors are returned as a string value; the empty string signals
// "no error".
|
||||
func generate(l lex, c chan lex, t chan *Token, o string) string {
|
||||
step := 1
|
||||
if i := strings.IndexAny(l.token, "/"); i != -1 {
|
||||
if i+1 == len(l.token) {
|
||||
return "bad step in $GENERATE range"
|
||||
}
|
||||
if s, err := strconv.Atoi(l.token[i+1:]); err == nil {
|
||||
if s < 0 {
|
||||
return "bad step in $GENERATE range"
|
||||
}
|
||||
step = s
|
||||
} else {
|
||||
return "bad step in $GENERATE range"
|
||||
}
|
||||
l.token = l.token[:i]
|
||||
}
|
||||
sx := strings.SplitN(l.token, "-", 2)
|
||||
if len(sx) != 2 {
|
||||
return "bad start-stop in $GENERATE range"
|
||||
}
|
||||
start, err := strconv.Atoi(sx[0])
|
||||
if err != nil {
|
||||
return "bad start in $GENERATE range"
|
||||
}
|
||||
end, err := strconv.Atoi(sx[1])
|
||||
if err != nil {
|
||||
return "bad stop in $GENERATE range"
|
||||
}
|
||||
if end < 0 || start < 0 || end < start {
|
||||
return "bad range in $GENERATE range"
|
||||
}
|
||||
|
||||
<-c // _BLANK
|
||||
// Create a complete new string, which we then parse again.
|
||||
s := ""
|
||||
BuildRR:
|
||||
l = <-c
|
||||
if l.value != zNewline && l.value != zEOF {
|
||||
s += l.token
|
||||
goto BuildRR
|
||||
}
|
||||
for i := start; i <= end; i += step {
|
||||
var (
|
||||
escape bool
|
||||
dom bytes.Buffer
|
||||
mod string
|
||||
err error
|
||||
offset int
|
||||
)
|
||||
|
||||
for j := 0; j < len(s); j++ { // No 'range' because we need to jump around
|
||||
switch s[j] {
|
||||
case '\\':
|
||||
if escape {
|
||||
dom.WriteByte('\\')
|
||||
escape = false
|
||||
continue
|
||||
}
|
||||
escape = true
|
||||
case '$':
|
||||
mod = "%d"
|
||||
offset = 0
|
||||
if escape {
|
||||
dom.WriteByte('$')
|
||||
escape = false
|
||||
continue
|
||||
}
|
||||
escape = false
|
||||
if j+1 >= len(s) { // End of the string
|
||||
dom.WriteString(fmt.Sprintf(mod, i+offset))
|
||||
continue
|
||||
} else {
|
||||
if s[j+1] == '$' {
|
||||
dom.WriteByte('$')
|
||||
j++
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Search for { and }
|
||||
if s[j+1] == '{' { // Modifier block
|
||||
sep := strings.Index(s[j+2:], "}")
|
||||
if sep == -1 {
|
||||
return "bad modifier in $GENERATE"
|
||||
}
|
||||
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
j += 2 + sep // Jump to it
|
||||
}
|
||||
dom.WriteString(fmt.Sprintf(mod, i+offset))
|
||||
default:
|
||||
if escape { // Pretty useless here
|
||||
escape = false
|
||||
continue
|
||||
}
|
||||
dom.WriteByte(s[j])
|
||||
}
|
||||
}
|
||||
// Re-parse the RR and send it on the current channel t
|
||||
rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String())
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
t <- &Token{RR: rx}
|
||||
// It's more efficient to first build the rrlist and then parse it in
|
||||
// one go! But is this a problem?
|
||||
}
|
||||
return ""
|
||||
}
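
// An illustrative sketch of a zone fragment that exercises this function;
// ParseZone is assumed from the same package and the names are made up:
//
//	z := "$ORIGIN example.org.\n$GENERATE 1-4 host-$ 3600 IN A 10.0.0.$\n"
//	for t := range dns.ParseZone(strings.NewReader(z), "example.org.", "inline") {
//		if t.Error == nil {
//			fmt.Println(t.RR) // host-1.example.org. ... host-4.example.org.
//		}
//	}
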
|
||||
|
||||
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
|
||||
func modToPrintf(s string) (string, int, error) {
|
||||
xs := strings.SplitN(s, ",", 3)
|
||||
if len(xs) != 3 {
|
||||
return "", 0, errors.New("bad modifier in $GENERATE")
|
||||
}
|
||||
// xs[0] is offset, xs[1] is width, xs[2] is base
|
||||
if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" {
|
||||
return "", 0, errors.New("bad base in $GENERATE")
|
||||
}
|
||||
offset, err := strconv.Atoi(xs[0])
|
||||
if err != nil || offset > 255 {
|
||||
return "", 0, errors.New("bad offset in $GENERATE")
|
||||
}
|
||||
width, err := strconv.Atoi(xs[1])
|
||||
if err != nil || width > 255 {
|
||||
return "", offset, errors.New("bad width in $GENERATE")
|
||||
}
|
||||
switch {
|
||||
case width < 0:
|
||||
return "", offset, errors.New("bad width in $GENERATE")
|
||||
case width == 0:
|
||||
return "%" + xs[1] + xs[2], offset, nil
|
||||
}
|
||||
return "%0" + xs[1] + xs[2], offset, nil
|
||||
}
|
||||
191 vendor/github.com/miekg/dns/labels.go (generated, vendored)
@@ -1,191 +0,0 @@
|
||||
package dns
|
||||
|
||||
// Holds a bunch of helper functions for dealing with labels.
|
||||
|
||||
// SplitDomainName splits a name string into its labels.
|
||||
// www.miek.nl. returns []string{"www", "miek", "nl"}
|
||||
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
|
||||
// The root label (.) returns nil. Note that using
|
||||
// strings.Split(s) will work in most cases, but does not handle
|
||||
// escaped dots (\.) for instance.
|
||||
// s must be a syntactically valid domain name, see IsDomainName.
|
||||
func SplitDomainName(s string) (labels []string) {
|
||||
if len(s) == 0 {
|
||||
return nil
|
||||
}
|
||||
fqdnEnd := 0 // offset of the final '.' or the length of the name
|
||||
idx := Split(s)
|
||||
begin := 0
|
||||
if s[len(s)-1] == '.' {
|
||||
fqdnEnd = len(s) - 1
|
||||
} else {
|
||||
fqdnEnd = len(s)
|
||||
}
|
||||
|
||||
switch len(idx) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
// no-op
|
||||
default:
|
||||
end := 0
|
||||
for i := 1; i < len(idx); i++ {
|
||||
end = idx[i]
|
||||
labels = append(labels, s[begin:end-1])
|
||||
begin = end
|
||||
}
|
||||
}
|
||||
|
||||
labels = append(labels, s[begin:fqdnEnd])
|
||||
return labels
|
||||
}
|
||||
|
||||
// CompareDomainName compares the names s1 and s2 and
|
||||
// returns how many labels they have in common starting from the *right*.
|
||||
// The comparison stops at the first inequality. The names are downcased
|
||||
// before the comparison.
|
||||
//
|
||||
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
|
||||
// www.miek.nl. and www.bla.nl. have one label in common: nl
|
||||
//
|
||||
// s1 and s2 must be syntactically valid domain names.
|
||||
func CompareDomainName(s1, s2 string) (n int) {
|
||||
// the first check: root label
|
||||
if s1 == "." || s2 == "." {
|
||||
return 0
|
||||
}
|
||||
|
||||
l1 := Split(s1)
|
||||
l2 := Split(s2)
|
||||
|
||||
j1 := len(l1) - 1 // end
|
||||
i1 := len(l1) - 2 // start
|
||||
j2 := len(l2) - 1
|
||||
i2 := len(l2) - 2
|
||||
// the second check can be done here: last/only label
|
||||
// before we fall through into the for-loop below
|
||||
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
|
||||
n++
|
||||
} else {
|
||||
return
|
||||
}
|
||||
for {
|
||||
if i1 < 0 || i2 < 0 {
|
||||
break
|
||||
}
|
||||
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
|
||||
n++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
j1--
|
||||
i1--
|
||||
j2--
|
||||
i2--
|
||||
}
|
||||
return
|
||||
}
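
// Illustrative results for the helpers in this file (inputs are examples
// taken from the comments above):
//
//	dns.SplitDomainName("www.miek.nl.")               // []string{"www", "miek", "nl"}
//	dns.CompareDomainName("www.miek.nl.", "miek.nl.") // 2
//	dns.Split("www.miek.nl.")                         // []int{0, 4, 9}
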
|
||||
|
||||
// CountLabel counts the number of labels in the string s.
|
||||
// s must be a syntactically valid domain name.
|
||||
func CountLabel(s string) (labels int) {
|
||||
if s == "." {
|
||||
return
|
||||
}
|
||||
off := 0
|
||||
end := false
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
labels++
|
||||
if end {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Split splits a name s into its label indexes.
|
||||
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
|
||||
// The root name (.) returns nil. Also see SplitDomainName.
|
||||
// s must be a syntactically valid domain name.
|
||||
func Split(s string) []int {
|
||||
if s == "." {
|
||||
return nil
|
||||
}
|
||||
idx := make([]int, 1, 3)
|
||||
off := 0
|
||||
end := false
|
||||
|
||||
for {
|
||||
off, end = NextLabel(s, off)
|
||||
if end {
|
||||
return idx
|
||||
}
|
||||
idx = append(idx, off)
|
||||
}
|
||||
}
|
||||
|
||||
// NextLabel returns the index of the start of the next label in the
|
||||
// string s starting at offset.
|
||||
// The bool end is true when the end of the string has been reached.
|
||||
// Also see PrevLabel.
|
||||
func NextLabel(s string, offset int) (i int, end bool) {
|
||||
quote := false
|
||||
for i = offset; i < len(s)-1; i++ {
|
||||
switch s[i] {
|
||||
case '\\':
|
||||
quote = !quote
|
||||
default:
|
||||
quote = false
|
||||
case '.':
|
||||
if quote {
|
||||
quote = !quote
|
||||
continue
|
||||
}
|
||||
return i + 1, false
|
||||
}
|
||||
}
|
||||
return i + 1, true
|
||||
}
|
||||
|
||||
// PrevLabel returns the index of the label when starting from the right and
|
||||
// jumping n labels to the left.
|
||||
// The bool start is true when the start of the string has been overshot.
|
||||
// Also see NextLabel.
|
||||
func PrevLabel(s string, n int) (i int, start bool) {
|
||||
if n == 0 {
|
||||
return len(s), false
|
||||
}
|
||||
lab := Split(s)
|
||||
if lab == nil {
|
||||
return 0, true
|
||||
}
|
||||
if n > len(lab) {
|
||||
return 0, true
|
||||
}
|
||||
return lab[len(lab)-n], false
|
||||
}
|
||||
|
||||
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
|
||||
func equal(a, b string) bool {
|
||||
// might be lifted into API function.
|
||||
la := len(a)
|
||||
lb := len(b)
|
||||
if la != lb {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := la - 1; i >= 0; i-- {
|
||||
ai := a[i]
|
||||
bi := b[i]
|
||||
if ai >= 'A' && ai <= 'Z' {
|
||||
ai |= ('a' - 'A')
|
||||
}
|
||||
if bi >= 'A' && bi <= 'Z' {
|
||||
bi |= ('a' - 'A')
|
||||
}
|
||||
if ai != bi {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
1154 vendor/github.com/miekg/dns/msg.go (generated, vendored)
File diff suppressed because it is too large
348 vendor/github.com/miekg/dns/msg_generate.go (generated, vendored)
@@ -1,348 +0,0 @@
|
||||
//+build ignore
|
||||
|
||||
// msg_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will generate pack/unpack methods based on the struct tags. The generated source is
|
||||
// written to zmsg.go, and is meant to be checked into git.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run msg_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
// Collect actual types (*X)
|
||||
var namedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if corresponding TypeX exists
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
namedTypes = append(namedTypes, o.Name())
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
fmt.Fprint(b, "// pack*() functions\n\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name)
|
||||
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
headerEnd := off
|
||||
`)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) {
|
||||
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return off, err
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"txt"`:
|
||||
o("off, err = packStringTxt(rr.%s, msg, off)\n")
|
||||
case `dns:"opt"`:
|
||||
o("off, err = packDataOpt(rr.%s, msg, off)\n")
|
||||
case `dns:"nsec"`:
|
||||
o("off, err = packDataNsec(rr.%s, msg, off)\n")
|
||||
case `dns:"domain-name"`:
|
||||
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"-"`: // ignored
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n")
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n")
|
||||
case st.Tag(i) == `dns:"a"`:
|
||||
o("off, err = packDataA(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"aaaa"`:
|
||||
o("off, err = packDataAAAA(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"uint48"`:
|
||||
o("off, err = packUint48(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == `dns:"txt"`:
|
||||
o("off, err = packString(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base32"`:
|
||||
o("off, err = packStringBase32(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base64"`:
|
||||
o("off, err = packStringBase64(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`):
|
||||
// directly write instead of using o() so we get the error check in the correct place
|
||||
field := st.Field(i).Name()
|
||||
fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty
|
||||
if rr.%s != "-" {
|
||||
off, err = packStringHex(rr.%s, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
}
|
||||
`, field, field)
|
||||
continue
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"hex"`:
|
||||
o("off, err = packStringHex(rr.%s, msg, off)\n")
|
||||
|
||||
case st.Tag(i) == `dns:"octet"`:
|
||||
o("off, err = packStringOctet(rr.%s, msg, off)\n")
|
||||
case st.Tag(i) == "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("off, err = packUint8(rr.%s, msg, off)\n")
|
||||
case types.Uint16:
|
||||
o("off, err = packUint16(rr.%s, msg, off)\n")
|
||||
case types.Uint32:
|
||||
o("off, err = packUint32(rr.%s, msg, off)\n")
|
||||
case types.Uint64:
|
||||
o("off, err = packUint64(rr.%s, msg, off)\n")
|
||||
case types.String:
|
||||
o("off, err = packString(rr.%s, msg, off)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
}
|
||||
// We have packed everything, only now we know the rdlength of this RR
|
||||
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
|
||||
fmt.Fprintln(b, "return off, nil }\n")
|
||||
}
|
||||
|
||||
fmt.Fprint(b, "// unpack*() functions\n\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name)
|
||||
fmt.Fprintf(b, "rr := new(%s)\n", name)
|
||||
fmt.Fprint(b, "rr.Hdr = h\n")
|
||||
fmt.Fprint(b, `if noRdata(h) {
|
||||
return rr, off, nil
|
||||
}
|
||||
var err error
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
`)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) {
|
||||
fmt.Fprintf(b, s, st.Field(i).Name())
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return rr, off, err
|
||||
}
|
||||
`)
|
||||
}
|
||||
|
||||
// size-* are special, because they reference a struct member we should use for the length.
|
||||
if strings.HasPrefix(st.Tag(i), `dns:"size-`) {
|
||||
structMember := structMember(st.Tag(i))
|
||||
structTag := structTag(st.Tag(i))
|
||||
switch structTag {
|
||||
case "hex":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
case "base32":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
case "base64":
|
||||
fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember)
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
fmt.Fprint(b, `if err != nil {
|
||||
return rr, off, err
|
||||
}
|
||||
`)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"txt"`:
|
||||
o("rr.%s, off, err = unpackStringTxt(msg, off)\n")
|
||||
case `dns:"opt"`:
|
||||
o("rr.%s, off, err = unpackDataOpt(msg, off)\n")
|
||||
case `dns:"nsec"`:
|
||||
o("rr.%s, off, err = unpackDataNsec(msg, off)\n")
|
||||
case `dns:"domain-name"`:
|
||||
o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`: // ignored
|
||||
case `dns:"cdomain-name"`:
|
||||
fallthrough
|
||||
case `dns:"domain-name"`:
|
||||
o("rr.%s, off, err = UnpackDomainName(msg, off)\n")
|
||||
case `dns:"a"`:
|
||||
o("rr.%s, off, err = unpackDataA(msg, off)\n")
|
||||
case `dns:"aaaa"`:
|
||||
o("rr.%s, off, err = unpackDataAAAA(msg, off)\n")
|
||||
case `dns:"uint48"`:
|
||||
o("rr.%s, off, err = unpackUint48(msg, off)\n")
|
||||
case `dns:"txt"`:
|
||||
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||
case `dns:"base32"`:
|
||||
o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"base64"`:
|
||||
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"hex"`:
|
||||
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
|
||||
case `dns:"octet"`:
|
||||
o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
|
||||
case "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("rr.%s, off, err = unpackUint8(msg, off)\n")
|
||||
case types.Uint16:
|
||||
o("rr.%s, off, err = unpackUint16(msg, off)\n")
|
||||
case types.Uint32:
|
||||
o("rr.%s, off, err = unpackUint32(msg, off)\n")
|
||||
case types.Uint64:
|
||||
o("rr.%s, off, err = unpackUint64(msg, off)\n")
|
||||
case types.String:
|
||||
o("rr.%s, off, err = unpackString(msg, off)\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
// If we've hit len(msg) we return without error.
|
||||
if i < st.NumFields()-1 {
|
||||
fmt.Fprintf(b, `if off == len(msg) {
|
||||
return rr, off, nil
|
||||
}
|
||||
`)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(b, "return rr, off, err }\n\n")
|
||||
}
|
||||
// Generate typeToUnpack map
|
||||
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
|
||||
for _, name := range namedTypes {
|
||||
if name == "RFC3597" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
|
||||
}
|
||||
fmt.Fprintln(b, "}\n")
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// write result
|
||||
f, err := os.Create("zmsg.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string.
|
||||
func structMember(s string) string {
|
||||
fields := strings.Split(s, ":")
|
||||
if len(fields) == 0 {
|
||||
return ""
|
||||
}
|
||||
f := fields[len(fields)-1]
|
||||
// f should have a closing "
|
||||
if len(f) > 1 {
|
||||
return f[:len(f)-1]
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// structTag will take a tag like dns:"size-base32:SaltLength" and return base32.
|
||||
func structTag(s string) string {
|
||||
fields := strings.Split(s, ":")
|
||||
if len(fields) < 2 {
|
||||
return ""
|
||||
}
|
||||
return fields[1][len("\"size-"):]
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
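// For reference, a rough standalone sketch (not part of the generator above) of how
// structMember and structTag behave for a tag such as `dns:"size-base32:SaltLength"`.
// The package name and the printed output are illustrative only.
package main

import (
	"fmt"
	"strings"
)

// tagMember mirrors structMember: take the text after the last ':' and drop the
// trailing quote, e.g. `dns:"size-base32:SaltLength"` -> "SaltLength".
func tagMember(tag string) string {
	fields := strings.Split(tag, ":")
	return strings.TrimSuffix(fields[len(fields)-1], `"`)
}

// tagKind mirrors structTag: take the word between `"size-` and the next ':',
// e.g. `dns:"size-base32:SaltLength"` -> "base32".
func tagKind(tag string) string {
	fields := strings.Split(tag, ":")
	if len(fields) < 2 {
		return ""
	}
	return strings.TrimPrefix(fields[1], `"size-`)
}

func main() {
	tag := `dns:"size-base32:SaltLength"`
	fmt.Println(tagMember(tag)) // SaltLength
	fmt.Println(tagKind(tag))   // base32
}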
637 vendor/github.com/miekg/dns/msg_helpers.go (generated, vendored)
@@ -1,637 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// helper functions called from the generated zmsg.go
|
||||
|
||||
// These functions are named after the tag they help pack/unpack; if there is no tag, it is the name
// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
// packDataDomainName.
|
||||
|
||||
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking a"}
|
||||
}
|
||||
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
|
||||
off += net.IPv4len
|
||||
return a, off, nil
|
||||
}
|
||||
|
||||
func packDataA(a net.IP, msg []byte, off int) (int, error) {
|
||||
// It must be a slice of 4, even if it is 16, we encode only the first 4
|
||||
if off+net.IPv4len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
switch len(a) {
|
||||
case net.IPv4len, net.IPv6len:
|
||||
copy(msg[off:], a.To4())
|
||||
off += net.IPv4len
|
||||
case 0:
|
||||
// Allowed, for dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing a"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
|
||||
}
|
||||
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
|
||||
off += net.IPv6len
|
||||
return aaaa, off, nil
|
||||
}
|
||||
|
||||
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
|
||||
if off+net.IPv6len > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
|
||||
switch len(aaaa) {
|
||||
case net.IPv6len:
|
||||
copy(msg[off:], aaaa)
|
||||
off += net.IPv6len
|
||||
case 0:
|
||||
// Allowed, dynamic updates.
|
||||
default:
|
||||
return len(msg), &Error{err: "overflow packing aaaa"}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
|
||||
// re-sliced msg according to the expected length of the RR.
|
||||
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
|
||||
hdr := RR_Header{}
|
||||
if off == len(msg) {
|
||||
return hdr, off, msg, nil
|
||||
}
|
||||
|
||||
hdr.Name, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rrtype, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Class, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Ttl, off, err = unpackUint32(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
hdr.Rdlength, off, err = unpackUint16(msg, off)
|
||||
if err != nil {
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
|
||||
return hdr, off, msg, err
|
||||
}
|
||||
|
||||
// pack packs an RR header, returning the offset to the end of the header.
|
||||
// See PackDomainName for documentation about the compression.
|
||||
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
|
||||
if off == len(msg) {
|
||||
return off, nil
|
||||
}
|
||||
|
||||
off, err = PackDomainName(hdr.Name, msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Rrtype, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Class, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint32(hdr.Ttl, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off, err = packUint16(hdr.Rdlength, msg, off)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// helper helper functions.
|
||||
|
||||
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
|
||||
// Returns an error if msg is smaller than the expected size.
|
||||
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
|
||||
lenrd := off + int(rdlength)
|
||||
if lenrd > len(msg) {
|
||||
return msg, &Error{err: "overflowing header size"}
|
||||
}
|
||||
return msg[:lenrd], nil
|
||||
}
|
||||
|
||||
func fromBase32(s []byte) (buf []byte, err error) {
|
||||
for i, b := range s {
|
||||
if b >= 'a' && b <= 'z' {
|
||||
s[i] = b - 32
|
||||
}
|
||||
}
|
||||
buflen := base32.HexEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base32.HexEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) }
|
||||
|
||||
func fromBase64(s []byte) (buf []byte, err error) {
|
||||
buflen := base64.StdEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base64.StdEncoding.Decode(buf, s)
|
||||
buf = buf[:n]
|
||||
return
|
||||
}
|
||||
|
||||
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
|
||||
|
||||
// noRdata returns true if the Rdlength is zero (as in a dynamic update).
|
||||
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
|
||||
|
||||
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
|
||||
}
|
||||
return uint8(msg[off]), off + 1, nil
|
||||
}
|
||||
|
||||
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
|
||||
if off+1 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint8"}
|
||||
}
|
||||
msg[off] = byte(i)
|
||||
return off + 1, nil
|
||||
}
|
||||
|
||||
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
|
||||
}
|
||||
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
|
||||
}
|
||||
|
||||
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
|
||||
if off+2 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint16"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], i)
|
||||
return off + 2, nil
|
||||
}
|
||||
|
||||
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
|
||||
}
|
||||
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
|
||||
}
|
||||
|
||||
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
|
||||
if off+4 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint32"}
|
||||
}
|
||||
binary.BigEndian.PutUint32(msg[off:], i)
|
||||
return off + 4, nil
|
||||
}
|
||||
|
||||
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
|
||||
}
|
||||
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
|
||||
i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
|
||||
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
|
||||
off += 6
|
||||
return i, off, nil
|
||||
}
|
||||
|
||||
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+6 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
|
||||
}
|
||||
msg[off] = byte(i >> 40)
|
||||
msg[off+1] = byte(i >> 32)
|
||||
msg[off+2] = byte(i >> 24)
|
||||
msg[off+3] = byte(i >> 16)
|
||||
msg[off+4] = byte(i >> 8)
|
||||
msg[off+5] = byte(i)
|
||||
off += 6
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
|
||||
}
|
||||
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
|
||||
}
|
||||
|
||||
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
|
||||
if off+8 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing uint64"}
|
||||
}
|
||||
binary.BigEndian.PutUint64(msg[off:], i)
|
||||
off += 8
|
||||
return off, nil
|
||||
}
|
||||
|
||||
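// A minimal sketch of the fixed-width packing convention used by the helpers above
// (big endian, bounds checked against len(msg) in the real code). Buffer size and
// values here are arbitrary and only meant to show the byte layout.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	msg := make([]byte, 16)
	off := 0

	// uint16: two bytes, big endian, as packUint16 writes them.
	binary.BigEndian.PutUint16(msg[off:], 0x1234)
	off += 2

	// uint48: six bytes written by shifting the low 48 bits, as packUint48 does.
	v := uint64(0xAABBCCDDEEFF)
	for i := 0; i < 6; i++ {
		msg[off+i] = byte(v >> uint(40-8*i))
	}
	off += 6

	fmt.Printf("% x (next offset %d)\n", msg[:off], off)
}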
func unpackString(msg []byte, off int) (string, int, error) {
|
||||
if off+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
l := int(msg[off])
|
||||
if off+l+1 > len(msg) {
|
||||
return "", off, &Error{err: "overflow unpacking txt"}
|
||||
}
|
||||
s := make([]byte, 0, l)
|
||||
for _, b := range msg[off+1 : off+1+l] {
|
||||
switch b {
|
||||
case '"', '\\':
|
||||
s = append(s, '\\', b)
|
||||
default:
|
||||
if b < 32 || b > 127 { // unprintable
|
||||
var buf [3]byte
|
||||
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
|
||||
s = append(s, '\\')
|
||||
for i := 0; i < 3-len(bufs); i++ {
|
||||
s = append(s, '0')
|
||||
}
|
||||
for _, r := range bufs {
|
||||
s = append(s, r)
|
||||
}
|
||||
} else {
|
||||
s = append(s, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
off += 1 + l
|
||||
return string(s), off, nil
|
||||
}
|
||||
|
||||
func packString(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packTxtString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base32"}
|
||||
}
|
||||
s := toBase32(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase32(s string, msg []byte, off int) (int, error) {
|
||||
b32, err := fromBase32([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b32) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base32"}
|
||||
}
|
||||
copy(msg[off:off+len(b32)], b32)
|
||||
off += len(b32)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is base64 encoded value, so we don't need an explicit length
|
||||
// to be set. Thus far all RR's that have base64 encoded fields have those as their
|
||||
// last one. What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking base64"}
|
||||
}
|
||||
s := toBase64(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringBase64(s string, msg []byte, off int) (int, error) {
|
||||
b64, err := fromBase64([]byte(s))
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+len(b64) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing base64"}
|
||||
}
|
||||
copy(msg[off:off+len(b64)], b64)
|
||||
off += len(b64)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
|
||||
// Rest of the RR is hex encoded value, so we don't need an explicit length
|
||||
// to be set. NSEC and TSIG have hex fields with a length field.
|
||||
// What we do need is the end of the RR!
|
||||
if end > len(msg) {
|
||||
return "", len(msg), &Error{err: "overflow unpacking hex"}
|
||||
}
|
||||
|
||||
s := hex.EncodeToString(msg[off:end])
|
||||
return s, end, nil
|
||||
}
|
||||
|
||||
func packStringHex(s string, msg []byte, off int) (int, error) {
|
||||
h, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
if off+(len(h)) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing hex"}
|
||||
}
|
||||
copy(msg[off:off+len(h)], h)
|
||||
off += len(h)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
|
||||
txt, off, err := unpackTxt(msg, off)
|
||||
if err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
return txt, off, nil
|
||||
}
|
||||
|
||||
func packStringTxt(s []string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
|
||||
off, err := packTxt(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
|
||||
var edns []EDNS0
|
||||
Option:
|
||||
code := uint16(0)
|
||||
if off+4 > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
code = binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
optlen := binary.BigEndian.Uint16(msg[off:])
|
||||
off += 2
|
||||
if off+int(optlen) > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking opt"}
|
||||
}
|
||||
switch code {
|
||||
case EDNS0NSID:
|
||||
e := new(EDNS0_NSID)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0SUBNET:
|
||||
e := new(EDNS0_SUBNET)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0COOKIE:
|
||||
e := new(EDNS0_COOKIE)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0UL:
|
||||
e := new(EDNS0_UL)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0LLQ:
|
||||
e := new(EDNS0_LLQ)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DAU:
|
||||
e := new(EDNS0_DAU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0DHU:
|
||||
e := new(EDNS0_DHU)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0N3U:
|
||||
e := new(EDNS0_N3U)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0PADDING:
|
||||
e := new(EDNS0_PADDING)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
default:
|
||||
e := new(EDNS0_LOCAL)
|
||||
e.Code = code
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
}
|
||||
|
||||
if off < len(msg) {
|
||||
goto Option
|
||||
}
|
||||
|
||||
return edns, off, nil
|
||||
}
|
||||
|
||||
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
|
||||
for _, el := range options {
|
||||
b, err := el.pack()
|
||||
if err != nil || off+3 > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing opt"}
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
|
||||
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
|
||||
off += 4
|
||||
if off+len(b) > len(msg) {
|
||||
copy(msg[off:], b)
|
||||
off = len(msg)
|
||||
continue
|
||||
}
|
||||
// Actual data
|
||||
copy(msg[off:off+len(b)], b)
|
||||
off += len(b)
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackStringOctet(msg []byte, off int) (string, int, error) {
|
||||
s := string(msg[off:])
|
||||
return s, len(msg), nil
|
||||
}
|
||||
|
||||
func packStringOctet(s string, msg []byte, off int) (int, error) {
|
||||
txtTmp := make([]byte, 256*4+1)
|
||||
off, err := packOctetString(s, msg, off, txtTmp)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
|
||||
var nsec []uint16
|
||||
length, window, lastwindow := 0, 0, -1
|
||||
for off < len(msg) {
|
||||
if off+2 > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
|
||||
}
|
||||
window = int(msg[off])
|
||||
length = int(msg[off+1])
|
||||
off += 2
|
||||
if window <= lastwindow {
|
||||
// RFC 4034: Blocks are present in the NSEC RR RDATA in
|
||||
// increasing numerical order.
|
||||
return nsec, len(msg), &Error{err: "out of order NSEC block"}
|
||||
}
|
||||
if length == 0 {
|
||||
// RFC 4034: Blocks with no types present MUST NOT be included.
|
||||
return nsec, len(msg), &Error{err: "empty NSEC block"}
|
||||
}
|
||||
if length > 32 {
|
||||
return nsec, len(msg), &Error{err: "NSEC block too long"}
|
||||
}
|
||||
if off+length > len(msg) {
|
||||
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
|
||||
}
|
||||
|
||||
// Walk the bytes in the window and extract the type bits
|
||||
for j := 0; j < length; j++ {
|
||||
b := msg[off+j]
|
||||
// Check the bits one by one, and set the type
|
||||
if b&0x80 == 0x80 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+0))
|
||||
}
|
||||
if b&0x40 == 0x40 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+1))
|
||||
}
|
||||
if b&0x20 == 0x20 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+2))
|
||||
}
|
||||
if b&0x10 == 0x10 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+3))
|
||||
}
|
||||
if b&0x8 == 0x8 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+4))
|
||||
}
|
||||
if b&0x4 == 0x4 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+5))
|
||||
}
|
||||
if b&0x2 == 0x2 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+6))
|
||||
}
|
||||
if b&0x1 == 0x1 {
|
||||
nsec = append(nsec, uint16(window*256+j*8+7))
|
||||
}
|
||||
}
|
||||
off += length
|
||||
lastwindow = window
|
||||
}
|
||||
return nsec, off, nil
|
||||
}
|
||||
|
||||
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
|
||||
if len(bitmap) == 0 {
|
||||
return off, nil
|
||||
}
|
||||
var lastwindow, lastlength uint16
|
||||
for j := 0; j < len(bitmap); j++ {
|
||||
t := bitmap[j]
|
||||
window := t / 256
|
||||
length := (t-window*256)/8 + 1
|
||||
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
|
||||
off += int(lastlength) + 2
|
||||
lastlength = 0
|
||||
}
|
||||
if window < lastwindow || length < lastlength {
|
||||
return len(msg), &Error{err: "nsec bits out of order"}
|
||||
}
|
||||
if off+2+int(length) > len(msg) {
|
||||
return len(msg), &Error{err: "overflow packing nsec"}
|
||||
}
|
||||
// Setting the window #
|
||||
msg[off] = byte(window)
|
||||
// Setting the octets length
|
||||
msg[off+1] = byte(length)
|
||||
// Setting the bit value for the type in the right octet
|
||||
msg[off+1+int(length)] |= byte(1 << (7 - (t % 8)))
|
||||
lastwindow, lastlength = window, length
|
||||
}
|
||||
off += int(lastlength) + 2
|
||||
return off, nil
|
||||
}
|
||||
|
||||
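// A small sketch of the window/bitmap arithmetic packDataNsec uses above, for a few
// type codes that all fall into window 0 and are sorted ascending. The type codes
// (A=1, MX=15, RRSIG=46) are only examples.
package main

import "fmt"

func main() {
	types := []uint16{1, 15, 46}

	// One bit per type code, most significant bit first within each octet,
	// which is the same layout packDataNsec writes.
	length := types[len(types)-1]/8 + 1
	bitmap := make([]byte, length)
	for _, t := range types {
		bitmap[t/8] |= 1 << (7 - t%8)
	}

	// A window block on the wire is: window number, octet count, bitmap.
	block := append([]byte{0, byte(length)}, bitmap...)
	fmt.Printf("% x\n", block)
}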
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
|
||||
var (
|
||||
servers []string
|
||||
s string
|
||||
err error
|
||||
)
|
||||
if end > len(msg) {
|
||||
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
|
||||
}
|
||||
for off < end {
|
||||
s, off, err = UnpackDomainName(msg, off)
|
||||
if err != nil {
|
||||
return servers, len(msg), err
|
||||
}
|
||||
servers = append(servers, s)
|
||||
}
|
||||
return servers, off, nil
|
||||
}
|
||||
|
||||
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||
var err error
|
||||
for j := 0; j < len(names); j++ {
|
||||
off, err = PackDomainName(names[j], msg, off, compression, false && compress)
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
106 vendor/github.com/miekg/dns/nsecx.go (generated, vendored)
@@ -1,106 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type saltWireFmt struct {
|
||||
Salt string `dns:"size-hex"`
|
||||
}
|
||||
|
||||
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
|
||||
func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||
saltwire := new(saltWireFmt)
|
||||
saltwire.Salt = salt
|
||||
wire := make([]byte, DefaultMsgSize)
|
||||
n, err := packSaltWire(saltwire, wire)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
wire = wire[:n]
|
||||
name := make([]byte, 255)
|
||||
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
name = name[:off]
|
||||
var s hash.Hash
|
||||
switch ha {
|
||||
case SHA1:
|
||||
s = sha1.New()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
// k = 0
|
||||
s.Write(name)
|
||||
s.Write(wire)
|
||||
nsec3 := s.Sum(nil)
|
||||
// k > 0
|
||||
for k := uint16(0); k < iter; k++ {
|
||||
s.Reset()
|
||||
s.Write(nsec3)
|
||||
s.Write(wire)
|
||||
nsec3 = s.Sum(nsec3[:0])
|
||||
}
|
||||
return toBase32(nsec3)
|
||||
}
|
||||
|
||||
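// A hedged usage sketch for HashName as defined above; the owner name, iteration
// count and hex salt are made-up values.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// NSEC3-hash "example.org." with SHA-1, 10 extra iterations and salt "AABB",
	// mirroring the RFC 5155 parameters HashName expects.
	hashed := dns.HashName("example.org.", dns.SHA1, 10, "AABB")
	fmt.Println(hashed) // base32hex, uppercase
}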
// Cover returns true if a name is covered by the NSEC3 record
|
||||
func (rr *NSEC3) Cover(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
|
||||
nextHash := rr.NextDomain
|
||||
if ownerHash == nextHash { // empty interval
|
||||
return false
|
||||
}
|
||||
if ownerHash > nextHash { // end of zone
|
||||
if nameHash > ownerHash { // covered since there is nothing after ownerHash
|
||||
return true
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
|
||||
}
|
||||
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
|
||||
return false
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
|
||||
}
|
||||
|
||||
// Match returns true if a name matches the NSEC3 record
|
||||
func (rr *NSEC3) Match(name string) bool {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
if ownerHash == nameHash {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
|
||||
off, err := packStringHex(sw.Salt, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
149 vendor/github.com/miekg/dns/privaterr.go (generated, vendored)
@@ -1,149 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
|
||||
// RFC 6895. This allows one to experiment with new RR types, without requesting an
|
||||
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
|
||||
type PrivateRdata interface {
|
||||
// String returns the text presentation of the Rdata of the Private RR.
|
||||
String() string
|
||||
// Parse parses the Rdata of the private RR.
|
||||
Parse([]string) error
|
||||
// Pack is used when packing a private RR into a buffer.
|
||||
Pack([]byte) (int, error)
|
||||
// Unpack is used when unpacking a private RR from a buffer.
|
||||
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
|
||||
Unpack([]byte) (int, error)
|
||||
// Copy copies the Rdata.
|
||||
Copy(PrivateRdata) error
|
||||
// Len returns the length in octets of the Rdata.
|
||||
Len() int
|
||||
}
|
||||
|
||||
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
|
||||
// It mocks normal RRs and implements dns.RR interface.
|
||||
type PrivateRR struct {
|
||||
Hdr RR_Header
|
||||
Data PrivateRdata
|
||||
}
|
||||
|
||||
func mkPrivateRR(rrtype uint16) *PrivateRR {
|
||||
// Panics if RR is not an instance of PrivateRR.
|
||||
rrfunc, ok := TypeToRR[rrtype]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype))
|
||||
}
|
||||
|
||||
anyrr := rrfunc()
|
||||
switch rr := anyrr.(type) {
|
||||
case *PrivateRR:
|
||||
return rr
|
||||
}
|
||||
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
|
||||
}
|
||||
|
||||
// Header return the RR header of r.
|
||||
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
|
||||
|
||||
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
|
||||
|
||||
// Private len and copy parts to satisfy RR interface.
|
||||
func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
|
||||
func (r *PrivateRR) copy() RR {
|
||||
// make new RR like this:
|
||||
rr := mkPrivateRR(r.Hdr.Rrtype)
|
||||
newh := r.Hdr.copyHeader()
|
||||
rr.Hdr = *newh
|
||||
|
||||
err := r.Data.Copy(rr.Data)
|
||||
if err != nil {
|
||||
panic("dns: got value that could not be used to copy Private rdata")
|
||||
}
|
||||
return rr
|
||||
}
|
||||
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
|
||||
off, err := r.Hdr.pack(msg, off, compression, compress)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
headerEnd := off
|
||||
n, err := r.Data.Pack(msg[off:])
|
||||
if err != nil {
|
||||
return len(msg), err
|
||||
}
|
||||
off += n
|
||||
r.Header().Rdlength = uint16(off - headerEnd)
|
||||
return off, nil
|
||||
}
|
||||
|
||||
// PrivateHandle registers a private resource record type. It requires the
// string and numeric representation of the private RR type and a generator function as arguments.
|
||||
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
|
||||
rtypestr = strings.ToUpper(rtypestr)
|
||||
|
||||
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
|
||||
TypeToString[rtype] = rtypestr
|
||||
StringToType[rtypestr] = rtype
|
||||
|
||||
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
|
||||
if noRdata(h) {
|
||||
return &h, off, nil
|
||||
}
|
||||
var err error
|
||||
|
||||
rr := mkPrivateRR(h.Rrtype)
|
||||
rr.Hdr = h
|
||||
|
||||
off1, err := rr.Data.Unpack(msg[off:])
|
||||
off += off1
|
||||
if err != nil {
|
||||
return rr, off, err
|
||||
}
|
||||
return rr, off, err
|
||||
}
|
||||
|
||||
setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
rr := mkPrivateRR(h.Rrtype)
|
||||
rr.Hdr = h
|
||||
|
||||
var l lex
|
||||
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
|
||||
Fetch:
|
||||
for {
|
||||
// TODO(miek): we could also be returning _QUOTE, this might or might not
|
||||
// be an issue (basically parsing TXT becomes hard)
|
||||
switch l = <-c; l.value {
|
||||
case zNewline, zEOF:
|
||||
break Fetch
|
||||
case zString:
|
||||
text = append(text, l.token)
|
||||
}
|
||||
}
|
||||
|
||||
err := rr.Data.Parse(text)
|
||||
if err != nil {
|
||||
return nil, &ParseError{f, err.Error(), l}, ""
|
||||
}
|
||||
|
||||
return rr, nil, ""
|
||||
}
|
||||
|
||||
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
|
||||
}
|
||||
|
||||
// PrivateHandleRemove removes the definitions required to support a private RR type.
|
||||
func PrivateHandleRemove(rtype uint16) {
|
||||
rtypestr, ok := TypeToString[rtype]
|
||||
if ok {
|
||||
delete(TypeToRR, rtype)
|
||||
delete(TypeToString, rtype)
|
||||
delete(typeToparserFunc, rtype)
|
||||
delete(StringToType, rtypestr)
|
||||
delete(typeToUnpack, rtype)
|
||||
}
|
||||
return
|
||||
}
|
||||
49 vendor/github.com/miekg/dns/rawmsg.go (generated, vendored)
@@ -1,49 +0,0 @@
|
||||
package dns
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// rawSetRdlength sets the rdlength in the header of
|
||||
// the RR. The offset 'off' must be positioned at the
|
||||
// start of the header of the RR, 'end' must be the
|
||||
// end of the RR.
|
||||
func rawSetRdlength(msg []byte, off, end int) bool {
|
||||
l := len(msg)
|
||||
Loop:
|
||||
for {
|
||||
if off+1 > l {
|
||||
return false
|
||||
}
|
||||
c := int(msg[off])
|
||||
off++
|
||||
switch c & 0xC0 {
|
||||
case 0x00:
|
||||
if c == 0x00 {
|
||||
// End of the domainname
|
||||
break Loop
|
||||
}
|
||||
if off+c > l {
|
||||
return false
|
||||
}
|
||||
off += c
|
||||
|
||||
case 0xC0:
|
||||
// pointer, next byte included, ends domainname
|
||||
off++
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
// The domainname has been seen; we are at the start of the fixed part in the header.
|
||||
// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
|
||||
off += 2 + 2 + 4
|
||||
if off+2 > l {
|
||||
return false
|
||||
}
|
||||
//off+1 is the end of the header, 'end' is the end of the rr
|
||||
//so 'end' - 'off+2' is the length of the rdata
|
||||
rdatalen := end - (off + 2)
|
||||
if rdatalen > 0xFFFF {
|
||||
return false
|
||||
}
|
||||
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
|
||||
return true
|
||||
}
|
||||
38 vendor/github.com/miekg/dns/reverse.go (generated, vendored)
@@ -1,38 +0,0 @@
|
||||
package dns
|
||||
|
||||
// StringToType is the reverse of TypeToString, needed for string parsing.
|
||||
var StringToType = reverseInt16(TypeToString)
|
||||
|
||||
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||
var StringToClass = reverseInt16(ClassToString)
|
||||
|
||||
// StringToOpcode is a map of opcodes to strings.
|
||||
var StringToOpcode = reverseInt(OpcodeToString)
|
||||
|
||||
// StringToRcode is a map of rcodes to strings.
|
||||
var StringToRcode = reverseInt(RcodeToString)
|
||||
|
||||
// Reverse a map
|
||||
func reverseInt8(m map[uint8]string) map[string]uint8 {
|
||||
n := make(map[string]uint8, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt16(m map[uint16]string) map[string]uint16 {
|
||||
n := make(map[string]uint16, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func reverseInt(m map[int]string) map[string]int {
|
||||
n := make(map[string]int, len(m))
|
||||
for u, s := range m {
|
||||
n[s] = u
|
||||
}
|
||||
return n
|
||||
}
|
||||
84 vendor/github.com/miekg/dns/sanitize.go (generated, vendored)
@@ -1,84 +0,0 @@
|
||||
package dns
|
||||
|
||||
// Dedup removes identical RRs from rrs. It preserves the original ordering.
|
||||
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
|
||||
// rrs.
|
||||
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
|
||||
func Dedup(rrs []RR, m map[string]RR) []RR {
|
||||
if m == nil {
|
||||
m = make(map[string]RR)
|
||||
}
|
||||
// Save the keys, so we don't have to call normalizedString twice.
|
||||
keys := make([]*string, 0, len(rrs))
|
||||
|
||||
for _, r := range rrs {
|
||||
key := normalizedString(r)
|
||||
keys = append(keys, &key)
|
||||
if _, ok := m[key]; ok {
|
||||
// Shortest TTL wins.
|
||||
if m[key].Header().Ttl > r.Header().Ttl {
|
||||
m[key].Header().Ttl = r.Header().Ttl
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
m[key] = r
|
||||
}
|
||||
// If the length of the result map equals the amount of RRs we got,
|
||||
// it means they were all different. We can then just return the original rrset.
|
||||
if len(m) == len(rrs) {
|
||||
return rrs
|
||||
}
|
||||
|
||||
j := 0
|
||||
for i, r := range rrs {
|
||||
// If keys[i] lives in the map, we should copy and remove it.
|
||||
if _, ok := m[*keys[i]]; ok {
|
||||
delete(m, *keys[i])
|
||||
rrs[j] = r
|
||||
j++
|
||||
}
|
||||
|
||||
if len(m) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return rrs[:j]
|
||||
}
|
||||
|
||||
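// A small usage sketch for Dedup above. The records are parsed with dns.NewRR and
// the names, TTLs and addresses are made up.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	texts := []string{
		"example.org. 300 IN A 192.0.2.1",
		"example.org. 60 IN A 192.0.2.1", // duplicate of the first, lower TTL
		"example.org. 300 IN A 192.0.2.2",
	}
	rrs := make([]dns.RR, 0, len(texts))
	for _, t := range texts {
		rr, err := dns.NewRR(t)
		if err != nil {
			log.Fatal(err)
		}
		rrs = append(rrs, rr)
	}

	// Identical RRs collapse into one entry; the shortest TTL wins.
	for _, rr := range dns.Dedup(rrs, nil) {
		fmt.Println(rr)
	}
}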
// normalizedString returns a normalized string from r. The TTL
|
||||
// is removed and the domain name is lowercased. We go from this:
|
||||
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
|
||||
// lowercasename<TAB>CLASS<TAB>TYPE...
|
||||
func normalizedString(r RR) string {
|
||||
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
|
||||
b := []byte(r.String())
|
||||
|
||||
// find the first non-escaped tab, then another, so we capture where the TTL lives.
|
||||
esc := false
|
||||
ttlStart, ttlEnd := 0, 0
|
||||
for i := 0; i < len(b) && ttlEnd == 0; i++ {
|
||||
switch {
|
||||
case b[i] == '\\':
|
||||
esc = !esc
|
||||
case b[i] == '\t' && !esc:
|
||||
if ttlStart == 0 {
|
||||
ttlStart = i
|
||||
continue
|
||||
}
|
||||
if ttlEnd == 0 {
|
||||
ttlEnd = i
|
||||
}
|
||||
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
|
||||
b[i] += 32
|
||||
default:
|
||||
esc = false
|
||||
}
|
||||
}
|
||||
|
||||
// remove TTL.
|
||||
copy(b[ttlStart:], b[ttlEnd:])
|
||||
cut := ttlEnd - ttlStart
|
||||
return string(b[:len(b)-cut])
|
||||
}
|
||||
1007 vendor/github.com/miekg/dns/scan.go (generated, vendored)
File diff suppressed because it is too large
2199 vendor/github.com/miekg/dns/scan_rr.go (generated, vendored)
File diff suppressed because it is too large
56 vendor/github.com/miekg/dns/scanner.go (generated, vendored)
@@ -1,56 +0,0 @@
|
||||
package dns
|
||||
|
||||
// Implement a simple scanner, returning a byte stream from an io.Reader.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"text/scanner"
|
||||
)
|
||||
|
||||
type scan struct {
|
||||
src *bufio.Reader
|
||||
position scanner.Position
|
||||
eof bool // Have we just seen an eof
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func scanInit(r io.Reader) (*scan, context.CancelFunc) {
|
||||
s := new(scan)
|
||||
s.src = bufio.NewReader(r)
|
||||
s.position.Line = 1
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.ctx = ctx
|
||||
|
||||
return s, cancel
|
||||
}
|
||||
|
||||
// tokenText returns the next byte from the input
|
||||
func (s *scan) tokenText() (byte, error) {
|
||||
c, err := s.src.ReadByte()
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return c, context.Canceled
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
// delay the newline handling until the next token is delivered,
|
||||
// fixes off-by-one errors when reporting a parse error.
|
||||
if s.eof == true {
|
||||
s.position.Line++
|
||||
s.position.Column = 0
|
||||
s.eof = false
|
||||
}
|
||||
if c == '\n' {
|
||||
s.eof = true
|
||||
return c, nil
|
||||
}
|
||||
s.position.Column++
|
||||
return c, nil
|
||||
}
|
||||
719 vendor/github.com/miekg/dns/server.go (generated, vendored)
@@ -1,719 +0,0 @@
|
||||
// DNS server implementation.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Maximum number of TCP queries before we close the socket.
|
||||
const maxTCPQueries = 128
|
||||
|
||||
// Handler is implemented by any value that implements ServeDNS.
|
||||
type Handler interface {
|
||||
ServeDNS(w ResponseWriter, r *Msg)
|
||||
}
|
||||
|
||||
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
|
||||
type ResponseWriter interface {
|
||||
// LocalAddr returns the net.Addr of the server
|
||||
LocalAddr() net.Addr
|
||||
// RemoteAddr returns the net.Addr of the client that sent the current request.
|
||||
RemoteAddr() net.Addr
|
||||
// WriteMsg writes a reply back to the client.
|
||||
WriteMsg(*Msg) error
|
||||
// Write writes a raw buffer back to the client.
|
||||
Write([]byte) (int, error)
|
||||
// Close closes the connection.
|
||||
Close() error
|
||||
// TsigStatus returns the status of the Tsig.
|
||||
TsigStatus() error
|
||||
// TsigTimersOnly sets the tsig timers only boolean.
|
||||
TsigTimersOnly(bool)
|
||||
// Hijack lets the caller take over the connection.
|
||||
// After a call to Hijack(), the DNS package will not do anything with the connection.
|
||||
Hijack()
|
||||
}
|
||||
|
||||
type response struct {
|
||||
hijacked bool // connection has been hijacked by handler
|
||||
tsigStatus error
|
||||
tsigTimersOnly bool
|
||||
tsigRequestMAC string
|
||||
tsigSecret map[string]string // the tsig secrets
|
||||
udp *net.UDPConn // i/o connection if UDP was used
|
||||
tcp net.Conn // i/o connection if TCP was used
|
||||
udpSession *SessionUDP // oob data to get egress interface right
|
||||
remoteAddr net.Addr // address of the client
|
||||
writer Writer // writer to output the raw DNS bits
|
||||
}
|
||||
|
||||
// ServeMux is a DNS request multiplexer. It matches the
// zone name of each incoming request against a list of
// registered patterns and calls the handler for the pattern
|
||||
// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
|
||||
// that queries for the DS record are redirected to the parent zone (if that
|
||||
// is also registered), otherwise the child gets the query.
|
||||
// ServeMux is also safe for concurrent access from multiple goroutines.
|
||||
type ServeMux struct {
|
||||
z map[string]Handler
|
||||
m *sync.RWMutex
|
||||
}
|
||||
|
||||
// NewServeMux allocates and returns a new ServeMux.
|
||||
func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
|
||||
|
||||
// DefaultServeMux is the default ServeMux used by Serve.
|
||||
var DefaultServeMux = NewServeMux()
|
||||
|
||||
// The HandlerFunc type is an adapter to allow the use of
|
||||
// ordinary functions as DNS handlers. If f is a function
|
||||
// with the appropriate signature, HandlerFunc(f) is a
|
||||
// Handler object that calls f.
|
||||
type HandlerFunc func(ResponseWriter, *Msg)
|
||||
|
||||
// ServeDNS calls f(w, r).
|
||||
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
|
||||
f(w, r)
|
||||
}
|
||||
|
||||
// HandleFailed replies to the request with a SERVFAIL message.
|
||||
func HandleFailed(w ResponseWriter, r *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetRcode(r, RcodeServerFailure)
|
||||
// does not matter if this write fails
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
func failedHandler() Handler { return HandlerFunc(HandleFailed) }
|
||||
|
||||
// ListenAndServe starts a server on the specified address and network and invokes handler
// for incoming queries.
|
||||
func ListenAndServe(addr string, network string, handler Handler) error {
|
||||
server := &Server{Addr: addr, Net: network, Handler: handler}
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
|
||||
// http://golang.org/pkg/net/http/#ListenAndServeTLS
|
||||
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
|
||||
server := &Server{
|
||||
Addr: addr,
|
||||
Net: "tcp-tls",
|
||||
TLSConfig: &config,
|
||||
Handler: handler,
|
||||
}
|
||||
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
// ActivateAndServe activates a server with a listener from systemd,
|
||||
// l and p should not both be non-nil.
|
||||
// If both l and p are not nil only p will be used.
|
||||
// Invoke handler for incoming queries.
|
||||
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
|
||||
server := &Server{Listener: l, PacketConn: p, Handler: handler}
|
||||
return server.ActivateAndServe()
|
||||
}
|
||||
|
||||
func (mux *ServeMux) match(q string, t uint16) Handler {
|
||||
mux.m.RLock()
|
||||
defer mux.m.RUnlock()
|
||||
var handler Handler
|
||||
b := make([]byte, len(q)) // worst case, one label of length q
|
||||
off := 0
|
||||
end := false
|
||||
for {
|
||||
l := len(q[off:])
|
||||
for i := 0; i < l; i++ {
|
||||
b[i] = q[off+i]
|
||||
if b[i] >= 'A' && b[i] <= 'Z' {
|
||||
b[i] |= ('a' - 'A')
|
||||
}
|
||||
}
|
||||
if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key
|
||||
if t != TypeDS {
|
||||
return h
|
||||
}
|
||||
// Continue for DS to see if we have a parent too, if so delegate to the parent
|
||||
handler = h
|
||||
}
|
||||
off, end = NextLabel(q, off)
|
||||
if end {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Wildcard match, if we have found nothing try the root zone as a last resort.
|
||||
if h, ok := mux.z["."]; ok {
|
||||
return h
|
||||
}
|
||||
return handler
|
||||
}
|
||||
|
||||
// Handle adds a handler to the ServeMux for pattern.
|
||||
func (mux *ServeMux) Handle(pattern string, handler Handler) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
mux.z[Fqdn(pattern)] = handler
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// HandleFunc adds a handler function to the ServeMux for pattern.
|
||||
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
mux.Handle(pattern, HandlerFunc(handler))
|
||||
}
|
||||
|
||||
// HandleRemove deregisters the handler for pattern from the ServeMux.
|
||||
func (mux *ServeMux) HandleRemove(pattern string) {
|
||||
if pattern == "" {
|
||||
panic("dns: invalid pattern " + pattern)
|
||||
}
|
||||
mux.m.Lock()
|
||||
delete(mux.z, Fqdn(pattern))
|
||||
mux.m.Unlock()
|
||||
}
|
||||
|
||||
// ServeDNS dispatches the request to the handler whose
|
||||
// pattern most closely matches the request message. If DefaultServeMux
|
||||
// is used the correct thing for DS queries is done: a possible parent
|
||||
// is sought.
|
||||
// If no handler is found a standard SERVFAIL message is returned
|
||||
// If the request message does not have exactly one question in the
|
||||
// question section a SERVFAIL is returned, unless Unsafe is true.
|
||||
func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
|
||||
var h Handler
|
||||
if len(request.Question) < 1 { // allow more than one question
|
||||
h = failedHandler()
|
||||
} else {
|
||||
if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
|
||||
h = failedHandler()
|
||||
}
|
||||
}
|
||||
h.ServeDNS(w, request)
|
||||
}
|
||||
|
||||
// Handle registers the handler with the given pattern
|
||||
// in the DefaultServeMux. The documentation for
|
||||
// ServeMux explains how patterns are matched.
|
||||
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
|
||||
|
||||
// HandleRemove deregisters the handle with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
|
||||
|
||||
// HandleFunc registers the handler function with the given pattern
|
||||
// in the DefaultServeMux.
|
||||
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
|
||||
DefaultServeMux.HandleFunc(pattern, handler)
|
||||
}
|
||||
|
||||
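// A minimal sketch of how Handler, HandleFunc and ListenAndServe above fit together;
// the listen address and the catch-all pattern are arbitrary choices.
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Register a handler for the root zone on the DefaultServeMux and reply
	// with an empty answer to every query.
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	// A nil handler means DefaultServeMux is used, as documented above.
	if err := dns.ListenAndServe(":8053", "udp", nil); err != nil {
		log.Fatal(err)
	}
}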
// Writer writes raw DNS messages; each call to Write should send an entire message.
|
||||
type Writer interface {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
|
||||
type Reader interface {
|
||||
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
|
||||
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
|
||||
// connection properties, for example the read-deadline.
|
||||
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
|
||||
}
|
||||
|
||||
// defaultReader is an adapter for the Server struct that implements the Reader interface
|
||||
// using the readTCP and readUDP func of the embedded Server.
|
||||
type defaultReader struct {
|
||||
*Server
|
||||
}
|
||||
|
||||
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
return dr.readTCP(conn, timeout)
|
||||
}
|
||||
|
||||
func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||
return dr.readUDP(conn, timeout)
|
||||
}
|
||||
|
||||
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
|
||||
// Implementations should never return a nil Reader.
|
||||
type DecorateReader func(Reader) Reader
|
||||
|
||||
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
|
||||
// Implementations should never return a nil Writer.
|
||||
type DecorateWriter func(Writer) Writer
|
||||
|
||||
// A Server defines parameters for running a DNS server.
|
||||
type Server struct {
|
||||
// Address to listen on, ":dns" if empty.
|
||||
Addr string
|
||||
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
|
||||
Net string
|
||||
// TCP Listener to use, this is to aid in systemd's socket activation.
|
||||
Listener net.Listener
|
||||
// TLS connection configuration
|
||||
TLSConfig *tls.Config
|
||||
// UDP "Listener" to use, this is to aid in systemd's socket activation.
|
||||
PacketConn net.PacketConn
|
||||
// Handler to invoke, dns.DefaultServeMux if nil.
|
||||
Handler Handler
|
||||
// Default buffer size to use to read incoming UDP messages. If not set
|
||||
// it defaults to MinMsgSize (512 B).
|
||||
UDPSize int
|
||||
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
|
||||
ReadTimeout time.Duration
|
||||
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
|
||||
WriteTimeout time.Duration
|
||||
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
|
||||
IdleTimeout func() time.Duration
|
||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
|
||||
TsigSecret map[string]string
|
||||
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
|
||||
// the handler. It will specifically not check if the query has the QR bit not set.
|
||||
Unsafe bool
|
||||
// If NotifyStartedFunc is set it is called once the server has started listening.
|
||||
NotifyStartedFunc func()
|
||||
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
|
||||
DecorateReader DecorateReader
|
||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||
DecorateWriter DecorateWriter
|
||||
|
||||
// Shutdown handling
|
||||
lock sync.RWMutex
|
||||
started bool
|
||||
}
|
||||
|
||||
// ListenAndServe starts a nameserver on the configured address in *Server.
|
||||
func (srv *Server) ListenAndServe() error {
|
||||
srv.lock.Lock()
|
||||
defer srv.lock.Unlock()
|
||||
if srv.started {
|
||||
return &Error{err: "server already started"}
|
||||
}
|
||||
addr := srv.Addr
|
||||
if addr == "" {
|
||||
addr = ":domain"
|
||||
}
|
||||
if srv.UDPSize == 0 {
|
||||
srv.UDPSize = MinMsgSize
|
||||
}
|
||||
switch srv.Net {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
a, err := net.ResolveTCPAddr(srv.Net, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l, err := net.ListenTCP(srv.Net, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.Listener = l
|
||||
srv.started = true
|
||||
srv.lock.Unlock()
|
||||
err = srv.serveTCP(l)
|
||||
srv.lock.Lock() // to satisfy the defer at the top
|
||||
return err
|
||||
case "tcp-tls", "tcp4-tls", "tcp6-tls":
|
||||
network := "tcp"
|
||||
if srv.Net == "tcp4-tls" {
|
||||
network = "tcp4"
|
||||
} else if srv.Net == "tcp6-tls" {
|
||||
network = "tcp6"
|
||||
}
|
||||
|
||||
l, err := tls.Listen(network, addr, srv.TLSConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.Listener = l
|
||||
srv.started = true
|
||||
srv.lock.Unlock()
|
||||
err = srv.serveTCP(l)
|
||||
srv.lock.Lock() // to satisfy the defer at the top
|
||||
return err
|
||||
case "udp", "udp4", "udp6":
|
||||
a, err := net.ResolveUDPAddr(srv.Net, addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l, err := net.ListenUDP(srv.Net, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e := setUDPSocketOptions(l); e != nil {
|
||||
return e
|
||||
}
|
||||
srv.PacketConn = l
|
||||
srv.started = true
|
||||
srv.lock.Unlock()
|
||||
err = srv.serveUDP(l)
|
||||
srv.lock.Lock() // to satisfy the defer at the top
|
||||
return err
|
||||
}
|
||||
return &Error{err: "bad network"}
|
||||
}
|
||||
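For orientation, a minimal caller of this API might look like the sketch below: it registers a handler on the default mux and then runs ListenAndServe. The listen address and the answered A record are invented for the example.

package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	// Answer every query for example.org. with a fixed A record.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		m.Answer = append(m.Answer, &dns.A{
			Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
			A:   net.ParseIP("192.0.2.1"),
		})
		w.WriteMsg(m)
	})

	srv := &dns.Server{Addr: ":8053", Net: "udp"}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("failed to start server: %v", err)
	}
}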
|
||||
// ActivateAndServe starts a nameserver with the PacketConn or Listener
|
||||
// configured in *Server. Its main use is to start a server from systemd.
|
||||
func (srv *Server) ActivateAndServe() error {
|
||||
srv.lock.Lock()
|
||||
defer srv.lock.Unlock()
|
||||
if srv.started {
|
||||
return &Error{err: "server already started"}
|
||||
}
|
||||
pConn := srv.PacketConn
|
||||
l := srv.Listener
|
||||
if pConn != nil {
|
||||
if srv.UDPSize == 0 {
|
||||
srv.UDPSize = MinMsgSize
|
||||
}
|
||||
// Check that the PacketConn's concrete type is valid and its value
|
||||
// is not nil
|
||||
if t, ok := pConn.(*net.UDPConn); ok && t != nil {
|
||||
if e := setUDPSocketOptions(t); e != nil {
|
||||
return e
|
||||
}
|
||||
srv.started = true
|
||||
srv.lock.Unlock()
|
||||
e := srv.serveUDP(t)
|
||||
srv.lock.Lock() // to satisfy the defer at the top
|
||||
return e
|
||||
}
|
||||
}
|
||||
if l != nil {
|
||||
srv.started = true
|
||||
srv.lock.Unlock()
|
||||
e := srv.serveTCP(l)
|
||||
srv.lock.Lock() // to satisfy the defer at the top
|
||||
return e
|
||||
}
|
||||
return &Error{err: "bad listeners"}
|
||||
}
|
||||
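ActivateAndServe differs from ListenAndServe only in that the caller supplies the listener or packet connection, which is how socket activation works. A sketch with a self-opened net.PacketConn standing in for a socket inherited from systemd; the address is a placeholder.

package main

import (
	"log"
	"net"

	"github.com/miekg/dns"
)

func main() {
	// Open the UDP socket ourselves and hand it to the server, as a stand-in
	// for a file descriptor inherited via systemd socket activation.
	pc, err := net.ListenPacket("udp", ":8053")
	if err != nil {
		log.Fatal(err)
	}
	srv := &dns.Server{PacketConn: pc, Handler: dns.DefaultServeMux}
	if err := srv.ActivateAndServe(); err != nil {
		log.Fatal(err)
	}
}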
|
||||
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
|
||||
// ActivateAndServe will return.
|
||||
func (srv *Server) Shutdown() error {
|
||||
srv.lock.Lock()
|
||||
if !srv.started {
|
||||
srv.lock.Unlock()
|
||||
return &Error{err: "server not started"}
|
||||
}
|
||||
srv.started = false
|
||||
srv.lock.Unlock()
|
||||
|
||||
if srv.PacketConn != nil {
|
||||
srv.PacketConn.Close()
|
||||
}
|
||||
if srv.Listener != nil {
|
||||
srv.Listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
|
||||
func (srv *Server) getReadTimeout() time.Duration {
|
||||
rtimeout := dnsTimeout
|
||||
if srv.ReadTimeout != 0 {
|
||||
rtimeout = srv.ReadTimeout
|
||||
}
|
||||
return rtimeout
|
||||
}
|
||||
|
||||
// serveTCP starts a TCP listener for the server.
|
||||
// Each request is handled in a separate goroutine.
|
||||
func (srv *Server) serveTCP(l net.Listener) error {
|
||||
defer l.Close()
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
reader := Reader(&defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
|
||||
handler := srv.Handler
|
||||
if handler == nil {
|
||||
handler = DefaultServeMux
|
||||
}
|
||||
rtimeout := srv.getReadTimeout()
|
||||
// deadline is not used here
|
||||
for {
|
||||
rw, err := l.Accept()
|
||||
srv.lock.RLock()
|
||||
if !srv.started {
|
||||
srv.lock.RUnlock()
|
||||
return nil
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
if err != nil {
|
||||
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
m, err := reader.ReadTCP(rw, rtimeout)
|
||||
if err != nil {
|
||||
rw.Close()
|
||||
return
|
||||
}
|
||||
srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// serveUDP starts a UDP listener for the server.
|
||||
// Each request is handled in a separate goroutine.
|
||||
func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||
defer l.Close()
|
||||
|
||||
if srv.NotifyStartedFunc != nil {
|
||||
srv.NotifyStartedFunc()
|
||||
}
|
||||
|
||||
reader := Reader(&defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
|
||||
handler := srv.Handler
|
||||
if handler == nil {
|
||||
handler = DefaultServeMux
|
||||
}
|
||||
rtimeout := srv.getReadTimeout()
|
||||
// deadline is not used here
|
||||
for {
|
||||
m, s, err := reader.ReadUDP(l, rtimeout)
|
||||
srv.lock.RLock()
|
||||
if !srv.started {
|
||||
srv.lock.RUnlock()
|
||||
return nil
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
if err != nil {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(m) < headerSize {
|
||||
continue
|
||||
}
|
||||
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Serve a new connection.
|
||||
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
|
||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
||||
if srv.DecorateWriter != nil {
|
||||
w.writer = srv.DecorateWriter(w)
|
||||
} else {
|
||||
w.writer = w
|
||||
}
|
||||
|
||||
q := 0 // counter for the number of TCP queries we get
|
||||
|
||||
reader := Reader(&defaultReader{srv})
|
||||
if srv.DecorateReader != nil {
|
||||
reader = srv.DecorateReader(reader)
|
||||
}
|
||||
Redo:
|
||||
req := new(Msg)
|
||||
err := req.Unpack(m)
|
||||
if err != nil { // Send a FormatError back
|
||||
x := new(Msg)
|
||||
x.SetRcodeFormatError(req)
|
||||
w.WriteMsg(x)
|
||||
goto Exit
|
||||
}
|
||||
if !srv.Unsafe && req.Response {
|
||||
goto Exit
|
||||
}
|
||||
|
||||
w.tsigStatus = nil
|
||||
if w.tsigSecret != nil {
|
||||
if t := req.IsTsig(); t != nil {
|
||||
secret := t.Hdr.Name
|
||||
if _, ok := w.tsigSecret[secret]; !ok {
|
||||
w.tsigStatus = ErrKeyAlg
|
||||
}
|
||||
w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false)
|
||||
w.tsigTimersOnly = false
|
||||
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
|
||||
}
|
||||
}
|
||||
h.ServeDNS(w, req) // Writes back to the client
|
||||
|
||||
Exit:
|
||||
if w.tcp == nil {
|
||||
return
|
||||
}
|
||||
// TODO(miek): make this number configurable?
|
||||
if q > maxTCPQueries { // close socket after this many queries
|
||||
w.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if w.hijacked {
|
||||
return // client calls Close()
|
||||
}
|
||||
if u != nil { // UDP, "close" and return
|
||||
w.Close()
|
||||
return
|
||||
}
|
||||
idleTimeout := tcpIdleTimeout
|
||||
if srv.IdleTimeout != nil {
|
||||
idleTimeout = srv.IdleTimeout()
|
||||
}
|
||||
m, err = reader.ReadTCP(w.tcp, idleTimeout)
|
||||
if err == nil {
|
||||
q++
|
||||
goto Redo
|
||||
}
|
||||
w.Close()
|
||||
return
|
||||
}
|
||||
|
||||
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
l := make([]byte, 2)
|
||||
n, err := conn.Read(l)
|
||||
if err != nil || n != 2 {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
length := binary.BigEndian.Uint16(l)
|
||||
if length == 0 {
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
m := make([]byte, int(length))
|
||||
n, err = conn.Read(m[:int(length)])
|
||||
if err != nil || n == 0 {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, ErrShortRead
|
||||
}
|
||||
i := n
|
||||
for i < int(length) {
|
||||
j, err := conn.Read(m[i:int(length)])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i += j
|
||||
}
|
||||
n = i
|
||||
m = m[:n]
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
m := make([]byte, srv.UDPSize)
|
||||
n, s, err := ReadFromSessionUDP(conn, m)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
m = m[:n]
|
||||
return m, s, nil
|
||||
}
|
||||
|
||||
// WriteMsg implements the ResponseWriter.WriteMsg method.
|
||||
func (w *response) WriteMsg(m *Msg) (err error) {
|
||||
var data []byte
|
||||
if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
|
||||
if t := m.IsTsig(); t != nil {
|
||||
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.writer.Write(data)
|
||||
return err
|
||||
}
|
||||
}
|
||||
data, err = m.Pack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.writer.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
// Write implements the ResponseWriter.Write method.
|
||||
func (w *response) Write(m []byte) (int, error) {
|
||||
switch {
|
||||
case w.udp != nil:
|
||||
n, err := WriteToSessionUDP(w.udp, m, w.udpSession)
|
||||
return n, err
|
||||
case w.tcp != nil:
|
||||
lm := len(m)
|
||||
if lm < 2 {
|
||||
return 0, io.ErrShortBuffer
|
||||
}
|
||||
if lm > MaxMsgSize {
|
||||
return 0, &Error{err: "message too large"}
|
||||
}
|
||||
l := make([]byte, 2, 2+lm)
|
||||
binary.BigEndian.PutUint16(l, uint16(lm))
|
||||
m = append(l, m...)
|
||||
|
||||
n, err := io.Copy(w.tcp, bytes.NewReader(m))
|
||||
return int(n), err
|
||||
}
|
||||
panic("not reached")
|
||||
}
|
||||
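The TCP branch of Write above prepends the message length as a 16-bit big-endian prefix before copying the payload. A standalone sketch of that framing, useful when speaking DNS over TCP by hand; the package and function names are made up for the example.

package dnsutil

import (
	"encoding/binary"
	"errors"
)

// frameTCP returns msg prefixed with its length in 16-bit big-endian form,
// the framing DNS uses on TCP connections (RFC 1035 section 4.2.2).
func frameTCP(msg []byte) ([]byte, error) {
	if len(msg) > 0xFFFF {
		return nil, errors.New("dns: message too large for TCP framing")
	}
	out := make([]byte, 2+len(msg))
	binary.BigEndian.PutUint16(out, uint16(len(msg)))
	copy(out[2:], msg)
	return out, nil
}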
|
||||
// LocalAddr implements the ResponseWriter.LocalAddr method.
|
||||
func (w *response) LocalAddr() net.Addr {
|
||||
if w.tcp != nil {
|
||||
return w.tcp.LocalAddr()
|
||||
}
|
||||
return w.udp.LocalAddr()
|
||||
}
|
||||
|
||||
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
|
||||
func (w *response) RemoteAddr() net.Addr { return w.remoteAddr }
|
||||
|
||||
// TsigStatus implements the ResponseWriter.TsigStatus method.
|
||||
func (w *response) TsigStatus() error { return w.tsigStatus }
|
||||
|
||||
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
|
||||
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
|
||||
|
||||
// Hijack implements the ResponseWriter.Hijack method.
|
||||
func (w *response) Hijack() { w.hijacked = true }
|
||||
|
||||
// Close implements the ResponseWriter.Close method
|
||||
func (w *response) Close() error {
|
||||
// Can't close the udp conn, as that is actually the listener.
|
||||
if w.tcp != nil {
|
||||
e := w.tcp.Close()
|
||||
w.tcp = nil
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
218 vendor/github.com/miekg/dns/sig0.go (generated, vendored)
@@ -1,218 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
|
||||
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
|
||||
// and Expiration set.
|
||||
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
|
||||
if k == nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return nil, ErrKey
|
||||
}
|
||||
rr.Header().Rrtype = TypeSIG
|
||||
rr.Header().Class = ClassANY
|
||||
rr.Header().Ttl = 0
|
||||
rr.Header().Name = "."
|
||||
rr.OrigTtl = 0
|
||||
rr.TypeCovered = 0
|
||||
rr.Labels = 0
|
||||
|
||||
buf := make([]byte, m.Len()+rr.len())
|
||||
mbuf, err := m.PackBuffer(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if &buf[0] != &mbuf[0] {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
off, err := PackRR(rr, buf, len(mbuf), nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = buf[:off:cap(buf)]
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
return nil, ErrAlg
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
// Write SIG rdata
|
||||
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
|
||||
// Write message
|
||||
hasher.Write(buf[:len(mbuf)])
|
||||
|
||||
signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
|
||||
buf = append(buf, signature...)
|
||||
if len(buf) > int(^uint16(0)) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
// Adjust sig data length
|
||||
rdoff := len(mbuf) + 1 + 2 + 2 + 4
|
||||
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
|
||||
rdlen += uint16(len(signature))
|
||||
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
|
||||
// Adjust additional count
|
||||
adc := binary.BigEndian.Uint16(buf[10:])
|
||||
adc++
|
||||
binary.BigEndian.PutUint16(buf[10:], adc)
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// Verify validates the message buf using the key k.
|
||||
// It's assumed that buf is a valid message from which rr was unpacked.
|
||||
func (rr *SIG) Verify(k *KEY, buf []byte) error {
|
||||
if k == nil {
|
||||
return ErrKey
|
||||
}
|
||||
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
var hash crypto.Hash
|
||||
switch rr.Algorithm {
|
||||
case DSA, RSASHA1:
|
||||
hash = crypto.SHA1
|
||||
case RSASHA256, ECDSAP256SHA256:
|
||||
hash = crypto.SHA256
|
||||
case ECDSAP384SHA384:
|
||||
hash = crypto.SHA384
|
||||
case RSASHA512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
hasher := hash.New()
|
||||
|
||||
buflen := len(buf)
|
||||
qdc := binary.BigEndian.Uint16(buf[4:])
|
||||
anc := binary.BigEndian.Uint16(buf[6:])
|
||||
auc := binary.BigEndian.Uint16(buf[8:])
|
||||
adc := binary.BigEndian.Uint16(buf[10:])
|
||||
offset := 12
|
||||
var err error
|
||||
for i := uint16(0); i < qdc && offset < buflen; i++ {
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip past Type and Class
|
||||
offset += 2 + 2
|
||||
}
|
||||
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip past Type, Class and TTL
|
||||
offset += 2 + 2 + 4
|
||||
if offset+1 >= buflen {
|
||||
continue
|
||||
}
|
||||
var rdlen uint16
|
||||
rdlen = binary.BigEndian.Uint16(buf[offset:])
|
||||
offset += 2
|
||||
offset += int(rdlen)
|
||||
}
|
||||
if offset >= buflen {
|
||||
return &Error{err: "overflowing unpacking signed message"}
|
||||
}
|
||||
|
||||
// offset should be just prior to SIG
|
||||
bodyend := offset
|
||||
// owner name SHOULD be root
|
||||
_, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Skip Type, Class, TTL, RDLen
|
||||
offset += 2 + 2 + 4 + 2
|
||||
sigstart := offset
|
||||
// Skip Type Covered, Algorithm, Labels, Original TTL
|
||||
offset += 2 + 1 + 1 + 4
|
||||
if offset+4+4 >= buflen {
|
||||
return &Error{err: "overflow unpacking signed message"}
|
||||
}
|
||||
expire := binary.BigEndian.Uint32(buf[offset:])
|
||||
offset += 4
|
||||
incept := binary.BigEndian.Uint32(buf[offset:])
|
||||
offset += 4
|
||||
now := uint32(time.Now().Unix())
|
||||
if now < incept || now > expire {
|
||||
return ErrTime
|
||||
}
|
||||
// Skip key tag
|
||||
offset += 2
|
||||
var signername string
|
||||
signername, offset, err = UnpackDomainName(buf, offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the key has come from the DNS, name compression might
|
||||
// have mangled the case of the name
|
||||
if strings.ToLower(signername) != strings.ToLower(k.Header().Name) {
|
||||
return &Error{err: "signer name doesn't match key name"}
|
||||
}
|
||||
sigend := offset
|
||||
hasher.Write(buf[sigstart:sigend])
|
||||
hasher.Write(buf[:10])
|
||||
hasher.Write([]byte{
|
||||
byte((adc - 1) << 8),
|
||||
byte(adc - 1),
|
||||
})
|
||||
hasher.Write(buf[12:bodyend])
|
||||
|
||||
hashed := hasher.Sum(nil)
|
||||
sig := buf[sigend:]
|
||||
switch k.Algorithm {
|
||||
case DSA:
|
||||
pk := k.publicKeyDSA()
|
||||
sig = sig[1:]
|
||||
r := big.NewInt(0)
|
||||
r.SetBytes(sig[:len(sig)/2])
|
||||
s := big.NewInt(0)
|
||||
s.SetBytes(sig[len(sig)/2:])
|
||||
if pk != nil {
|
||||
if dsa.Verify(pk, hashed, r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
}
|
||||
case RSASHA1, RSASHA256, RSASHA512:
|
||||
pk := k.publicKeyRSA()
|
||||
if pk != nil {
|
||||
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
|
||||
}
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
pk := k.publicKeyECDSA()
|
||||
r := big.NewInt(0)
|
||||
r.SetBytes(sig[:len(sig)/2])
|
||||
s := big.NewInt(0)
|
||||
s.SetBytes(sig[len(sig)/2:])
|
||||
if pk != nil {
|
||||
if ecdsa.Verify(pk, hashed, r, s) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
}
|
||||
}
|
||||
return ErrKeyAlg
|
||||
}
|
||||
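To put SIG.Sign in context, a caller would generate a signer, fill in the SIG metadata from the matching KEY record, and sign a packed request. The sketch below uses a freshly generated ECDSA key with a placeholder KeyTag and SignerName, so it is illustrative only; in real use those values come from the published KEY record.

package dnsutil

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"time"

	"github.com/miekg/dns"
)

// signedQuery packs and SIG(0)-signs a SOA query for zone. The key tag and
// signer name below are placeholders.
func signedQuery(zone string) ([]byte, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(zone), dns.TypeSOA)

	sig := new(dns.SIG)
	sig.Algorithm = dns.ECDSAP256SHA256
	sig.KeyTag = 12345 // placeholder
	sig.SignerName = dns.Fqdn(zone)
	sig.Inception = uint32(time.Now().Add(-5 * time.Minute).Unix())
	sig.Expiration = uint32(time.Now().Add(5 * time.Minute).Unix())

	return sig.Sign(key, m)
}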
57 vendor/github.com/miekg/dns/singleinflight.go (generated, vendored)
@@ -1,57 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted for dns package usage by Miek Gieben.
|
||||
|
||||
package dns
|
||||
|
||||
import "sync"
|
||||
import "time"
|
||||
|
||||
// call is an in-flight or completed singleflight.Do call
|
||||
type call struct {
|
||||
wg sync.WaitGroup
|
||||
val *Msg
|
||||
rtt time.Duration
|
||||
err error
|
||||
dups int
|
||||
}
|
||||
|
||||
// singleflight represents a class of work and forms a namespace in
|
||||
// which units of work can be executed with duplicate suppression.
|
||||
type singleflight struct {
|
||||
sync.Mutex // protects m
|
||||
m map[string]*call // lazily initialized
|
||||
}
|
||||
|
||||
// Do executes and returns the results of the given function, making
|
||||
// sure that only one execution is in-flight for a given key at a
|
||||
// time. If a duplicate comes in, the duplicate caller waits for the
|
||||
// original to complete and receives the same results.
|
||||
// The return value shared indicates whether v was given to multiple callers.
|
||||
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
|
||||
g.Lock()
|
||||
if g.m == nil {
|
||||
g.m = make(map[string]*call)
|
||||
}
|
||||
if c, ok := g.m[key]; ok {
|
||||
c.dups++
|
||||
g.Unlock()
|
||||
c.wg.Wait()
|
||||
return c.val, c.rtt, c.err, true
|
||||
}
|
||||
c := new(call)
|
||||
c.wg.Add(1)
|
||||
g.m[key] = c
|
||||
g.Unlock()
|
||||
|
||||
c.val, c.rtt, c.err = fn()
|
||||
c.wg.Done()
|
||||
|
||||
g.Lock()
|
||||
delete(g.m, key)
|
||||
g.Unlock()
|
||||
|
||||
return c.val, c.rtt, c.err, c.dups > 0
|
||||
}
|
||||
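singleflight is unexported, so it is only usable from inside this package, where the client code uses it to collapse concurrent identical queries into one exchange. A rough in-package sketch of that pattern; the key format and upstream address are invented, and the enclosing file is assumed to import strconv and time.

// In-package sketch: deduplicate concurrent identical lookups.
var inflight singleflight

func exchangeOnce(c *Client, m *Msg, upstream string) (*Msg, time.Duration, error) {
	q := m.Question[0]
	key := q.Name + "." + strconv.Itoa(int(q.Qtype)) + "." + strconv.Itoa(int(q.Qclass))
	r, rtt, err, _ := inflight.Do(key, func() (*Msg, time.Duration, error) {
		return c.Exchange(m, upstream)
	})
	return r, rtt, err
}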
47 vendor/github.com/miekg/dns/smimea.go (generated, vendored)
@@ -1,47 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
// Sign creates a SMIMEA record from an SSL certificate.
|
||||
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||
r.Hdr.Rrtype = TypeSMIMEA
|
||||
r.Usage = uint8(usage)
|
||||
r.Selector = uint8(selector)
|
||||
r.MatchingType = uint8(matchingType)
|
||||
|
||||
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
|
||||
// a nil error is returned.
|
||||
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
|
||||
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err // Not also ErrSig?
|
||||
}
|
||||
if r.Certificate == c {
|
||||
return nil
|
||||
}
|
||||
return ErrSig // ErrSig, really?
|
||||
}
|
||||
|
||||
// SMIMEAName returns the owner name of a SMIMEA resource record as per the
|
||||
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
|
||||
func SMIMEAName(email, domain string) (string, error) {
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(email))
|
||||
|
||||
// RFC Section 3: "The local-part is hashed using the SHA2-256
|
||||
// algorithm with the hash truncated to 28 octets and
|
||||
// represented in its hexadecimal representation to become the
|
||||
// left-most label in the prepared domain name"
|
||||
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
|
||||
}
|
||||
47 vendor/github.com/miekg/dns/tlsa.go (generated, vendored)
@@ -1,47 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"net"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Sign creates a TLSA record from an SSL certificate.
|
||||
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
|
||||
r.Hdr.Rrtype = TypeTLSA
|
||||
r.Usage = uint8(usage)
|
||||
r.Selector = uint8(selector)
|
||||
r.MatchingType = uint8(matchingType)
|
||||
|
||||
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies a TLSA record against an SSL certificate. If it is OK
|
||||
// a nil error is returned.
|
||||
func (r *TLSA) Verify(cert *x509.Certificate) error {
|
||||
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
|
||||
if err != nil {
|
||||
return err // Not also ErrSig?
|
||||
}
|
||||
if r.Certificate == c {
|
||||
return nil
|
||||
}
|
||||
return ErrSig // ErrSig, really?
|
||||
}
|
||||
|
||||
// TLSAName returns the owner name of a TLSA resource record as per the
|
||||
// rules specified in RFC 6698, Section 3.
|
||||
func TLSAName(name, service, network string) (string, error) {
|
||||
if !IsFqdn(name) {
|
||||
return "", ErrFqdn
|
||||
}
|
||||
p, err := net.LookupPort(network, service)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
|
||||
}
|
||||
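TLSAName only computes the owner name; the record data itself comes from TLSA.Sign against the server certificate. A small usage sketch, with the host and port invented for the example.

package dnsutil

import "github.com/miekg/dns"

// tlsaOwner returns the TLSA owner name for HTTPS on www.example.com,
// i.e. "_443._tcp.www.example.com.".
func tlsaOwner() (string, error) {
	return dns.TLSAName(dns.Fqdn("www.example.com"), "443", "tcp")
}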
386 vendor/github.com/miekg/dns/tsig.go (generated, vendored)
@@ -1,386 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HMAC hashing codes. These are transmitted as domain names.
|
||||
const (
|
||||
HmacMD5 = "hmac-md5.sig-alg.reg.int."
|
||||
HmacSHA1 = "hmac-sha1."
|
||||
HmacSHA256 = "hmac-sha256."
|
||||
HmacSHA512 = "hmac-sha512."
|
||||
)
|
||||
|
||||
// TSIG is the RR the holds the transaction signature of a message.
|
||||
// See RFC 2845 and RFC 4635.
|
||||
type TSIG struct {
|
||||
Hdr RR_Header
|
||||
Algorithm string `dns:"domain-name"`
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
MACSize uint16
|
||||
MAC string `dns:"size-hex:MACSize"`
|
||||
OrigId uint16
|
||||
Error uint16
|
||||
OtherLen uint16
|
||||
OtherData string `dns:"size-hex:OtherLen"`
|
||||
}
|
||||
|
||||
// TSIG has no official presentation format, but this will suffice.
|
||||
|
||||
func (rr *TSIG) String() string {
|
||||
s := "\n;; TSIG PSEUDOSECTION:\n"
|
||||
s += rr.Hdr.String() +
|
||||
" " + rr.Algorithm +
|
||||
" " + tsigTimeToString(rr.TimeSigned) +
|
||||
" " + strconv.Itoa(int(rr.Fudge)) +
|
||||
" " + strconv.Itoa(int(rr.MACSize)) +
|
||||
" " + strings.ToUpper(rr.MAC) +
|
||||
" " + strconv.Itoa(int(rr.OrigId)) +
|
||||
" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
|
||||
" " + strconv.Itoa(int(rr.OtherLen)) +
|
||||
" " + rr.OtherData
|
||||
return s
|
||||
}
|
||||
|
||||
// The following values must be put in wireformat, so that the MAC can be calculated.
|
||||
// RFC 2845, section 3.4.2. TSIG Variables.
|
||||
type tsigWireFmt struct {
|
||||
// From RR_Header
|
||||
Name string `dns:"domain-name"`
|
||||
Class uint16
|
||||
Ttl uint32
|
||||
// Rdata of the TSIG
|
||||
Algorithm string `dns:"domain-name"`
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
// MACSize, MAC and OrigId excluded
|
||||
Error uint16
|
||||
OtherLen uint16
|
||||
OtherData string `dns:"size-hex:OtherLen"`
|
||||
}
|
||||
|
||||
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
|
||||
type macWireFmt struct {
|
||||
MACSize uint16
|
||||
MAC string `dns:"size-hex:MACSize"`
|
||||
}
|
||||
|
||||
// 3.3. Time values used in TSIG calculations
|
||||
type timerWireFmt struct {
|
||||
TimeSigned uint64 `dns:"uint48"`
|
||||
Fudge uint16
|
||||
}
|
||||
|
||||
// TsigGenerate fills out the TSIG record attached to the message.
|
||||
// The message should contain
|
||||
// a "stub" TSIG RR with the algorithm, key name (owner name of the RR),
|
||||
// time fudge (defaults to 300 seconds) and the current time.
|
||||
// The TSIG MAC is saved in that Tsig RR.
|
||||
// When TsigGenerate is called for the first time requestMAC is set to the empty string and
|
||||
// timersOnly is false.
|
||||
// If something goes wrong an error is returned, otherwise it is nil.
|
||||
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
|
||||
if m.IsTsig() == nil {
|
||||
panic("dns: TSIG not last RR in additional")
|
||||
}
|
||||
// If we barf here, the caller is to blame
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
rr := m.Extra[len(m.Extra)-1].(*TSIG)
|
||||
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
|
||||
mbuf, err := m.Pack()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
|
||||
|
||||
t := new(TSIG)
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(rr.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, []byte(rawsecret))
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, []byte(rawsecret))
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, []byte(rawsecret))
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, []byte(rawsecret))
|
||||
default:
|
||||
return nil, "", ErrKeyAlg
|
||||
}
|
||||
h.Write(buf)
|
||||
t.MAC = hex.EncodeToString(h.Sum(nil))
|
||||
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
|
||||
|
||||
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
|
||||
t.Fudge = rr.Fudge
|
||||
t.TimeSigned = rr.TimeSigned
|
||||
t.Algorithm = rr.Algorithm
|
||||
t.OrigId = m.Id
|
||||
|
||||
tbuf := make([]byte, t.len())
|
||||
if off, err := PackRR(t, tbuf, 0, nil, false); err == nil {
|
||||
tbuf = tbuf[:off] // reset to actual size used
|
||||
} else {
|
||||
return nil, "", err
|
||||
}
|
||||
mbuf = append(mbuf, tbuf...)
|
||||
// Update the ArCount directly in the buffer.
|
||||
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
|
||||
|
||||
return mbuf, t.MAC, nil
|
||||
}
|
||||
|
||||
// TsigVerify verifies the TSIG on a message.
|
||||
// If the signature does not validate err contains the
|
||||
// error, otherwise it is nil.
|
||||
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
|
||||
rawsecret, err := fromBase64([]byte(secret))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Strip the TSIG from the incoming msg
|
||||
stripped, tsig, err := stripTsig(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgMAC, err := hex.DecodeString(tsig.MAC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
|
||||
|
||||
// Fudge factor works both ways. A message can arrive before it was signed because
|
||||
// of clock skew.
|
||||
now := uint64(time.Now().Unix())
|
||||
ti := now - tsig.TimeSigned
|
||||
if now < tsig.TimeSigned {
|
||||
ti = tsig.TimeSigned - now
|
||||
}
|
||||
if uint64(tsig.Fudge) < ti {
|
||||
return ErrTime
|
||||
}
|
||||
|
||||
var h hash.Hash
|
||||
switch strings.ToLower(tsig.Algorithm) {
|
||||
case HmacMD5:
|
||||
h = hmac.New(md5.New, rawsecret)
|
||||
case HmacSHA1:
|
||||
h = hmac.New(sha1.New, rawsecret)
|
||||
case HmacSHA256:
|
||||
h = hmac.New(sha256.New, rawsecret)
|
||||
case HmacSHA512:
|
||||
h = hmac.New(sha512.New, rawsecret)
|
||||
default:
|
||||
return ErrKeyAlg
|
||||
}
|
||||
h.Write(buf)
|
||||
if !hmac.Equal(h.Sum(nil), msgMAC) {
|
||||
return ErrSig
|
||||
}
|
||||
return nil
|
||||
}
|
||||
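TsigGenerate and TsigVerify are normally driven indirectly: a client attaches a stub TSIG RR with Msg.SetTsig and supplies the shared secret, and the library fills in and checks the MAC. A client-side sketch; the key name, base64 secret and server address are placeholders.

package dnsutil

import (
	"time"

	"github.com/miekg/dns"
)

// tsigQuery sends a TSIG-signed SOA query. The base64 secret below is a
// placeholder and must match the key configured on the server.
func tsigQuery() (*dns.Msg, error) {
	c := new(dns.Client)
	c.TsigSecret = map[string]string{"axfr.example.org.": "c2VjcmV0IGtleQ=="}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())

	r, _, err := c.Exchange(m, "192.0.2.1:53")
	return r, err
}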
|
||||
// Create a wiredata buffer for the MAC calculation.
|
||||
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
|
||||
var buf []byte
|
||||
if rr.TimeSigned == 0 {
|
||||
rr.TimeSigned = uint64(time.Now().Unix())
|
||||
}
|
||||
if rr.Fudge == 0 {
|
||||
rr.Fudge = 300 // Standard (RFC) default.
|
||||
}
|
||||
|
||||
// Replace message ID in header with original ID from TSIG
|
||||
binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)
|
||||
|
||||
if requestMAC != "" {
|
||||
m := new(macWireFmt)
|
||||
m.MACSize = uint16(len(requestMAC) / 2)
|
||||
m.MAC = requestMAC
|
||||
buf = make([]byte, len(requestMAC)) // long enough
|
||||
n, _ := packMacWire(m, buf)
|
||||
buf = buf[:n]
|
||||
}
|
||||
|
||||
tsigvar := make([]byte, DefaultMsgSize)
|
||||
if timersOnly {
|
||||
tsig := new(timerWireFmt)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
n, _ := packTimerWire(tsig, tsigvar)
|
||||
tsigvar = tsigvar[:n]
|
||||
} else {
|
||||
tsig := new(tsigWireFmt)
|
||||
tsig.Name = strings.ToLower(rr.Hdr.Name)
|
||||
tsig.Class = ClassANY
|
||||
tsig.Ttl = rr.Hdr.Ttl
|
||||
tsig.Algorithm = strings.ToLower(rr.Algorithm)
|
||||
tsig.TimeSigned = rr.TimeSigned
|
||||
tsig.Fudge = rr.Fudge
|
||||
tsig.Error = rr.Error
|
||||
tsig.OtherLen = rr.OtherLen
|
||||
tsig.OtherData = rr.OtherData
|
||||
n, _ := packTsigWire(tsig, tsigvar)
|
||||
tsigvar = tsigvar[:n]
|
||||
}
|
||||
|
||||
if requestMAC != "" {
|
||||
x := append(buf, msgbuf...)
|
||||
buf = append(x, tsigvar...)
|
||||
} else {
|
||||
buf = append(msgbuf, tsigvar...)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
// Strip the TSIG from the raw message.
|
||||
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
|
||||
// Copied from msg.go's Unpack() Header, but modified.
|
||||
var (
|
||||
dh Header
|
||||
err error
|
||||
)
|
||||
off, tsigoff := 0, 0
|
||||
|
||||
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if dh.Arcount == 0 {
|
||||
return nil, nil, ErrNoSig
|
||||
}
|
||||
|
||||
// Rcode, see msg.go Unpack()
|
||||
if int(dh.Bits&0xF) == RcodeNotAuth {
|
||||
return nil, nil, ErrAuth
|
||||
}
|
||||
|
||||
for i := 0; i < int(dh.Qdcount); i++ {
|
||||
_, off, err = unpackQuestion(msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
rr := new(TSIG)
|
||||
var extra RR
|
||||
for i := 0; i < int(dh.Arcount); i++ {
|
||||
tsigoff = off
|
||||
extra, off, err = UnpackRR(msg, off)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if extra.Header().Rrtype == TypeTSIG {
|
||||
rr = extra.(*TSIG)
|
||||
// Adjust Arcount.
|
||||
arcount := binary.BigEndian.Uint16(msg[10:])
|
||||
binary.BigEndian.PutUint16(msg[10:], arcount-1)
|
||||
break
|
||||
}
|
||||
}
|
||||
if rr == nil {
|
||||
return nil, nil, ErrNoSig
|
||||
}
|
||||
return msg[:tsigoff], rr, nil
|
||||
}
|
||||
|
||||
// Translate the TSIG time signed into a date. There is no
|
||||
// need for RFC1982 calculations as this date is 48 bits.
|
||||
func tsigTimeToString(t uint64) string {
|
||||
ti := time.Unix(int64(t), 0).UTC()
|
||||
return ti.Format("20060102150405")
|
||||
}
|
||||
|
||||
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
|
||||
// copied from zmsg.go TSIG packing
|
||||
// RR_Header
|
||||
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Class, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint32(tw.Ttl, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
|
||||
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint48(tw.TimeSigned, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Fudge, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
|
||||
off, err = packUint16(tw.Error, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.OtherLen, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringHex(tw.OtherData, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
|
||||
off, err := packUint16(mw.MACSize, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packStringHex(mw.MAC, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
|
||||
off, err := packUint48(tw.TimeSigned, msg, 0)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
off, err = packUint16(tw.Fudge, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
1381 vendor/github.com/miekg/dns/types.go (generated, vendored)
File diff suppressed because it is too large
272 vendor/github.com/miekg/dns/types_generate.go (generated, vendored)
@@ -1,272 +0,0 @@
|
||||
//+build ignore
|
||||
|
||||
// types_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will generate conversion tables (TypeToRR and TypeToString) and banal
|
||||
// methods (len, Header, copy) based on the struct tags. The generated source is
|
||||
// written to ztypes.go, and is meant to be checked into git.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var skipLen = map[string]struct{}{
|
||||
"NSEC": {},
|
||||
"NSEC3": {},
|
||||
"OPT": {},
|
||||
"CSYNC": {},
|
||||
}
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run types_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net"
|
||||
)
|
||||
|
||||
`
|
||||
|
||||
var TypeToRR = template.Must(template.New("TypeToRR").Parse(`
|
||||
// TypeToRR is a map of constructors for each RR type.
|
||||
var TypeToRR = map[uint16]func() RR{
|
||||
{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) },
|
||||
{{end}}{{end}} }
|
||||
|
||||
`))
|
||||
|
||||
var typeToString = template.Must(template.New("typeToString").Parse(`
|
||||
// TypeToString is a map of strings for each RR type.
|
||||
var TypeToString = map[uint16]string{
|
||||
{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}",
|
||||
{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR",
|
||||
}
|
||||
|
||||
`))
|
||||
|
||||
var headerFunc = template.Must(template.New("headerFunc").Parse(`
|
||||
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
|
||||
{{end}}
|
||||
|
||||
`))
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
// Collect constants like TypeX
|
||||
var numberedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
b, ok := o.Type().(*types.Basic)
|
||||
if !ok || b.Kind() != types.Uint16 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(o.Name(), "Type") {
|
||||
continue
|
||||
}
|
||||
name := strings.TrimPrefix(o.Name(), "Type")
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
numberedTypes = append(numberedTypes, name)
|
||||
}
|
||||
|
||||
// Collect actual types (*X)
|
||||
var namedTypes []string
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
if st, _ := getTypeStruct(o.Type(), scope); st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if corresponding TypeX exists
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
namedTypes = append(namedTypes, o.Name())
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// Generate TypeToRR
|
||||
fatalIfErr(TypeToRR.Execute(b, namedTypes))
|
||||
|
||||
// Generate typeToString
|
||||
fatalIfErr(typeToString.Execute(b, numberedTypes))
|
||||
|
||||
// Generate headerFunc
|
||||
fatalIfErr(headerFunc.Execute(b, namedTypes))
|
||||
|
||||
// Generate len()
|
||||
fmt.Fprint(b, "// len() functions\n")
|
||||
for _, name := range namedTypes {
|
||||
if _, ok := skipLen[name]; ok {
|
||||
continue
|
||||
}
|
||||
o := scope.Lookup(name)
|
||||
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||
if isEmbedded {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name)
|
||||
fmt.Fprintf(b, "l := rr.Hdr.len()\n")
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"-"`:
|
||||
// ignored
|
||||
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`:
|
||||
o("for _, x := range rr.%s { l += len(x) + 1 }\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"-"`:
|
||||
// ignored
|
||||
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`:
|
||||
o("l += len(rr.%s) + 1\n")
|
||||
case st.Tag(i) == `dns:"octet"`:
|
||||
o("l += len(rr.%s)\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"base64"`:
|
||||
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
|
||||
o("l += len(rr.%s)/2\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"hex"`:
|
||||
o("l += len(rr.%s)/2 + 1\n")
|
||||
case st.Tag(i) == `dns:"a"`:
|
||||
o("l += net.IPv4len // %s\n")
|
||||
case st.Tag(i) == `dns:"aaaa"`:
|
||||
o("l += net.IPv6len // %s\n")
|
||||
case st.Tag(i) == `dns:"txt"`:
|
||||
o("for _, t := range rr.%s { l += len(t) + 1 }\n")
|
||||
case st.Tag(i) == `dns:"uint48"`:
|
||||
o("l += 6 // %s\n")
|
||||
case st.Tag(i) == "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("l++ // %s\n")
|
||||
case types.Uint16:
|
||||
o("l += 2 // %s\n")
|
||||
case types.Uint32:
|
||||
o("l += 4 // %s\n")
|
||||
case types.Uint64:
|
||||
o("l += 8 // %s\n")
|
||||
case types.String:
|
||||
o("l += len(rr.%s) + 1\n")
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name())
|
||||
}
|
||||
default:
|
||||
log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(b, "return l }\n")
|
||||
}
|
||||
|
||||
// Generate copy()
|
||||
fmt.Fprint(b, "// copy() functions\n")
|
||||
for _, name := range namedTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, isEmbedded := getTypeStruct(o.Type(), scope)
|
||||
if isEmbedded {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
|
||||
fields := []string{"*rr.Hdr.copyHeader()"}
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
f := st.Field(i).Name()
|
||||
if sl, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
t := sl.Underlying().String()
|
||||
t = strings.TrimPrefix(t, "[]")
|
||||
if strings.Contains(t, ".") {
|
||||
splits := strings.Split(t, ".")
|
||||
t = splits[len(splits)-1]
|
||||
}
|
||||
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
|
||||
f, t, f, f, f)
|
||||
fields = append(fields, f)
|
||||
continue
|
||||
}
|
||||
if st.Field(i).Type().String() == "net.IP" {
|
||||
fields = append(fields, "copyIP(rr."+f+")")
|
||||
continue
|
||||
}
|
||||
fields = append(fields, "rr."+f)
|
||||
}
|
||||
fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ","))
|
||||
fmt.Fprintf(b, "}\n")
|
||||
}
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// write result
|
||||
f, err := os.Create("ztypes.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
89 vendor/github.com/miekg/dns/udp.go (generated, vendored)
@@ -1,89 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// SessionUDP holds the remote address and the associated
|
||||
// out-of-band data.
|
||||
type SessionUDP struct {
|
||||
raddr *net.UDPAddr
|
||||
context []byte
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
oob := make([]byte, 40)
|
||||
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
|
||||
if err != nil {
|
||||
return n, nil, err
|
||||
}
|
||||
return n, &SessionUDP{raddr, oob[:oobn]}, err
|
||||
}
|
||||
|
||||
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||
oob := correctSource(session.context)
|
||||
n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error {
|
||||
// Try setting the flags for both families and ignore the errors unless they
|
||||
// both error.
|
||||
err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
|
||||
err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
|
||||
if err6 != nil && err4 != nil {
|
||||
return err4
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDstFromOOB takes oob data and returns the destination IP.
|
||||
func parseDstFromOOB(oob []byte) net.IP {
|
||||
// Start with IPv6 and then fallback to IPv4
|
||||
// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
|
||||
// the lvl of the header for a 0 or 41 isn't cross-platform.
|
||||
var dst net.IP
|
||||
cm6 := new(ipv6.ControlMessage)
|
||||
if cm6.Parse(oob) == nil {
|
||||
dst = cm6.Dst
|
||||
}
|
||||
if dst == nil {
|
||||
cm4 := new(ipv4.ControlMessage)
|
||||
if cm4.Parse(oob) == nil {
|
||||
dst = cm4.Dst
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// correctSource takes oob data and returns new oob data with the Src equal to the Dst
|
||||
func correctSource(oob []byte) []byte {
|
||||
dst := parseDstFromOOB(oob)
|
||||
if dst == nil {
|
||||
return nil
|
||||
}
|
||||
// If the dst is definitely an IPv6, then use ipv6's ControlMessage to
|
||||
// respond otherwise use ipv4's because ipv6's marshal ignores ipv4
|
||||
// addresses.
|
||||
if dst.To4() == nil {
|
||||
cm := new(ipv6.ControlMessage)
|
||||
cm.Src = dst
|
||||
oob = cm.Marshal()
|
||||
} else {
|
||||
cm := new(ipv4.ControlMessage)
|
||||
cm.Src = dst
|
||||
oob = cm.Marshal()
|
||||
}
|
||||
return oob
|
||||
}
|
||||
37 vendor/github.com/miekg/dns/udp_windows.go (generated, vendored)
@@ -1,37 +0,0 @@
// +build windows

package dns

import "net"

// SessionUDP holds the remote address
type SessionUDP struct {
	raddr *net.UDPAddr
}

// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
	n, raddr, err := conn.ReadFrom(b)
	if err != nil {
		return n, nil, err
	}
	session := &SessionUDP{raddr.(*net.UDPAddr)}
	return n, session, err
}

// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
	n, err := conn.WriteTo(b, session.raddr)
	return n, err
}

// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
// use the standard method in udp.go for these.
func setUDPSocketOptions(*net.UDPConn) error { return nil }
func parseDstFromOOB([]byte, net.IP) net.IP  { return nil }
106 vendor/github.com/miekg/dns/update.go (generated, vendored)
@@ -1,106 +0,0 @@
|
||||
package dns
|
||||
|
||||
// NameUsed sets the RRs in the prereq section to
|
||||
// "Name is in use" RRs. RFC 2136 section 2.4.4.
|
||||
func (u *Msg) NameUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// NameNotUsed sets the RRs in the prereq section to
|
||||
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
|
||||
func (u *Msg) NameNotUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
|
||||
}
|
||||
}
|
||||
|
||||
// Used sets the RRs in the prereq section to
|
||||
// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
|
||||
func (u *Msg) Used(rr []RR) {
|
||||
if len(u.Question) == 0 {
|
||||
panic("dns: empty question section")
|
||||
}
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
r.Header().Class = u.Question[0].Qclass
|
||||
u.Answer = append(u.Answer, r)
|
||||
}
|
||||
}
|
||||
|
||||
// RRsetUsed sets the RRs in the prereq section to
|
||||
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
|
||||
func (u *Msg) RRsetUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// RRsetNotUsed sets the RRs in the prereq section to
|
||||
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
|
||||
func (u *Msg) RRsetNotUsed(rr []RR) {
|
||||
if u.Answer == nil {
|
||||
u.Answer = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}})
|
||||
}
|
||||
}
|
||||
|
||||
// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
|
||||
func (u *Msg) Insert(rr []RR) {
|
||||
if len(u.Question) == 0 {
|
||||
panic("dns: empty question section")
|
||||
}
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
r.Header().Class = u.Question[0].Qclass
|
||||
u.Ns = append(u.Ns, r)
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
|
||||
func (u *Msg) RemoveRRset(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
|
||||
func (u *Msg) RemoveName(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
|
||||
}
|
||||
}
|
||||
|
||||
// Remove creates a dynamic update packet that deletes RRs from an RRset, see RFC 2136 section 2.5.4.
|
||||
func (u *Msg) Remove(rr []RR) {
|
||||
if u.Ns == nil {
|
||||
u.Ns = make([]RR, 0, len(rr))
|
||||
}
|
||||
for _, r := range rr {
|
||||
r.Header().Class = ClassNONE
|
||||
r.Header().Ttl = 0
|
||||
u.Ns = append(u.Ns, r)
|
||||
}
|
||||
}
|
||||
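Putting the helpers above together, an RFC 2136 update is built by setting the zone with SetUpdate, filling the prerequisite and update sections with the methods above, and exchanging the message (often with a TSIG attached). A sketch; the zone, record and server address are invented.

package dnsutil

import "github.com/miekg/dns"

// addHostRecord inserts an A record for host.example.org into example.org.
func addHostRecord() error {
	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.10")
	if err != nil {
		return err
	}

	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	m.Insert([]dns.RR{rr})

	c := new(dns.Client)
	_, _, err = c.Exchange(m, "192.0.2.1:53")
	return err
}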
15 vendor/github.com/miekg/dns/version.go (generated, vendored)
@@ -1,15 +0,0 @@
package dns

import "fmt"

// Version is the current version of this library.
var Version = V{1, 0, 4}

// V holds the version of this library.
type V struct {
	Major, Minor, Patch int
}

func (v V) String() string {
	return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
260 vendor/github.com/miekg/dns/xfr.go (generated, vendored)
@@ -1,260 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Envelope is used when doing a zone transfer with a remote server.
|
||||
type Envelope struct {
|
||||
RR []RR // The set of RRs in the answer section of the xfr reply message.
|
||||
Error error // If something went wrong, this contains the error.
|
||||
}
|
||||
|
||||
// A Transfer defines parameters that are used during a zone transfer.
|
||||
type Transfer struct {
|
||||
*Conn
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
|
||||
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
tsigTimersOnly bool
|
||||
}
|
||||
|
||||
// Think we need a way to stop the transfer.
|
||||
|
||||
// In performs an incoming transfer with the server in a.
|
||||
// If you would like to set the source IP, or some other attribute
|
||||
// of a Dialer for a Transfer, you can do so by specifying the attributes
|
||||
// in the Transfer.Conn:
|
||||
//
|
||||
// d := net.Dialer{LocalAddr: transfer_source}
|
||||
// con, err := d.Dial("tcp", master)
|
||||
// dnscon := &dns.Conn{Conn:con}
|
||||
// transfer = &dns.Transfer{Conn: dnscon}
|
||||
// channel, err := transfer.In(message, master)
|
||||
//
|
||||
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
|
||||
timeout := dnsTimeout
|
||||
if t.DialTimeout != 0 {
|
||||
timeout = t.DialTimeout
|
||||
}
|
||||
if t.Conn == nil {
|
||||
t.Conn, err = DialTimeout("tcp", a, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := t.WriteMsg(q); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
env = make(chan *Envelope)
|
||||
go func() {
|
||||
if q.Question[0].Qtype == TypeAXFR {
|
||||
go t.inAxfr(q, env)
|
||||
return
|
||||
}
|
||||
if q.Question[0].Qtype == TypeIXFR {
|
||||
go t.inIxfr(q, env)
|
||||
return
|
||||
}
|
||||
}()
|
||||
return env, nil
|
||||
}
|
||||
|
||||
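Besides the dialer example in the comment above, the simplest incoming transfer just builds an AXFR question and drains the envelope channel. A sketch; the zone name and helper are invented, and the server address is supplied by the caller.

package dnsutil

import "github.com/miekg/dns"

// axfr pulls example.org. from the given server and returns all records.
func axfr(server string) ([]dns.RR, error) {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	t := new(dns.Transfer)
	ch, err := t.In(m, server)
	if err != nil {
		return nil, err
	}

	var rrs []dns.RR
	for env := range ch {
		if env.Error != nil {
			return nil, env.Error
		}
		rrs = append(rrs, env.RR...)
	}
	return rrs, nil
}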
func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
|
||||
first := true
|
||||
defer t.Close()
|
||||
defer close(c)
|
||||
timeout := dnsTimeout
|
||||
if t.ReadTimeout != 0 {
|
||||
timeout = t.ReadTimeout
|
||||
}
|
||||
for {
|
||||
t.Conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
in, err := t.ReadMsg()
|
||||
if err != nil {
|
||||
c <- &Envelope{nil, err}
|
||||
return
|
||||
}
|
||||
if q.Id != in.Id {
|
||||
c <- &Envelope{in.Answer, ErrId}
|
||||
return
|
||||
}
|
||||
if first {
|
||||
if in.Rcode != RcodeSuccess {
|
||||
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
|
||||
return
|
||||
}
|
||||
if !isSOAFirst(in) {
|
||||
c <- &Envelope{in.Answer, ErrSoa}
|
||||
return
|
||||
}
|
||||
first = !first
|
||||
// only one answer that is SOA, receive more
|
||||
if len(in.Answer) == 1 {
|
||||
t.tsigTimersOnly = true
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !first {
|
||||
t.tsigTimersOnly = true // Subsequent envelopes use this.
|
||||
if isSOALast(in) {
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
return
|
||||
}
|
||||
c <- &Envelope{in.Answer, nil}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
	serial := uint32(0) // The first serial seen is the current server serial
	axfr := true
	n := 0
	qser := q.Ns[0].(*SOA).Serial
	defer t.Close()
	defer close(c)
	timeout := dnsTimeout
	if t.ReadTimeout != 0 {
		timeout = t.ReadTimeout
	}
	for {
		t.SetReadDeadline(time.Now().Add(timeout))
		in, err := t.ReadMsg()
		if err != nil {
			c <- &Envelope{nil, err}
			return
		}
		if q.Id != in.Id {
			c <- &Envelope{in.Answer, ErrId}
			return
		}
		if in.Rcode != RcodeSuccess {
			c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
			return
		}
		if n == 0 {
			// Check if the returned answer is ok
			if !isSOAFirst(in) {
				c <- &Envelope{in.Answer, ErrSoa}
				return
			}
			// This serial is important
			serial = in.Answer[0].(*SOA).Serial
			// Check if there are no changes in the zone
			if qser >= serial {
				c <- &Envelope{in.Answer, nil}
				return
			}
		}
		// Now we need to check each message for SOA records, to see what we need to do
		t.tsigTimersOnly = true
		for _, rr := range in.Answer {
			if v, ok := rr.(*SOA); ok {
				if v.Serial == serial {
					n++
					// quit if it's a full axfr or the server's SOA is repeated a third time
					if axfr && n == 2 || n == 3 {
						c <- &Envelope{in.Answer, nil}
						return
					}
				} else if axfr {
					// it's an ixfr
					axfr = false
				}
			}
		}
		c <- &Envelope{in.Answer, nil}
	}
}

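// Illustrative sketch (not part of the original xfr.go): building an IXFR
// request so that inIxfr can read the client's serial from q.Ns[0]. The zone
// name, serial, and SOA fields are placeholders.
//
//	m := new(dns.Msg)
//	m.SetIxfr("example.org.", 2018042601, "ns1.example.org.", "hostmaster.example.org.")
//	tr := new(dns.Transfer)
//	env, err := tr.In(m, "ns1.example.org:53")
//	// drain env as in the AXFR sketch above
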
// Out performs an outgoing transfer with the client connecting in w.
// Basic use pattern:
//
//	ch := make(chan *dns.Envelope)
//	tr := new(dns.Transfer)
//	go tr.Out(w, r, ch)
//	ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
//	close(ch)
//	w.Hijack()
//	// w.Close() // Client closes connection
//
// The server is responsible for sending the correct sequence of RRs through the
// channel ch.
func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
	for x := range ch {
		r := new(Msg)
		// Compress?
		r.SetReply(q)
		r.Authoritative = true
		// assume it fits TODO(miek): fix
		r.Answer = append(r.Answer, x.RR...)
		if err := w.WriteMsg(r); err != nil {
			return err
		}
	}
	w.TsigTimersOnly(true)
	return nil
}

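// Illustrative sketch (not part of the original xfr.go): a server-side
// handler that streams a zone through Out, following the use pattern above.
// The zone name and the soa/rrs variables are placeholders.
//
//	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
//		if r.Question[0].Qtype != dns.TypeAXFR {
//			return
//		}
//		ch := make(chan *dns.Envelope)
//		tr := new(dns.Transfer)
//		go tr.Out(w, r, ch)
//		ch <- &dns.Envelope{RR: append(append([]dns.RR{soa}, rrs...), soa)}
//		close(ch)
//		w.Hijack()
//	})
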
// ReadMsg reads a message from the transfer connection t.
func (t *Transfer) ReadMsg() (*Msg, error) {
	m := new(Msg)
	p := make([]byte, MaxMsgSize)
	n, err := t.Read(p)
	if err != nil && n == 0 {
		return nil, err
	}
	p = p[:n]
	if err := m.Unpack(p); err != nil {
		return nil, err
	}
	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
			return m, ErrSecret
		}
		// Need to work on the original message p, as that was used to calculate the tsig.
		err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
		t.tsigRequestMAC = ts.MAC
	}
	return m, err
}

// WriteMsg writes a message through the transfer connection t.
func (t *Transfer) WriteMsg(m *Msg) (err error) {
	var out []byte
	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
			return ErrSecret
		}
		out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
	} else {
		out, err = m.Pack()
	}
	if err != nil {
		return err
	}
	if _, err = t.Write(out); err != nil {
		return err
	}
	return nil
}

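// Illustrative sketch (not part of the original xfr.go): requesting a
// TSIG-signed AXFR so that ReadMsg and WriteMsg exercise the TsigSecret
// path. The key name, algorithm choice, and base64 secret are placeholders;
// the key name must be a lowercase fqdn, matching the TsigSecret map key.
//
//	m := new(dns.Msg)
//	m.SetAxfr("example.org.")
//	m.SetTsig("axfr-key.", dns.HmacSHA256, 300, time.Now().Unix())
//	tr := &dns.Transfer{
//		TsigSecret: map[string]string{"axfr-key.": "c2VjcmV0Cg=="},
//	}
//	env, err := tr.In(m, "ns1.example.org:53")
//	// drain env as in the AXFR sketch above
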
func isSOAFirst(in *Msg) bool {
	if len(in.Answer) > 0 {
		return in.Answer[0].Header().Rrtype == TypeSOA
	}
	return false
}

func isSOALast(in *Msg) bool {
	if len(in.Answer) > 0 {
		return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
	}
	return false
}

const errXFR = "bad xfr rcode: %d"
118
vendor/github.com/miekg/dns/zcompress.go
generated
vendored
@ -1,118 +0,0 @@
// Code generated by "go run compress_generate.go"; DO NOT EDIT.

package dns

func compressionLenHelperType(c map[string]int, r RR) {
	switch x := r.(type) {
	case *AFSDB:
		compressionLenHelper(c, x.Hostname)
	case *CNAME:
		compressionLenHelper(c, x.Target)
	case *DNAME:
		compressionLenHelper(c, x.Target)
	case *HIP:
		for i := range x.RendezvousServers {
			compressionLenHelper(c, x.RendezvousServers[i])
		}
	case *KX:
		compressionLenHelper(c, x.Exchanger)
	case *LP:
		compressionLenHelper(c, x.Fqdn)
	case *MB:
		compressionLenHelper(c, x.Mb)
	case *MD:
		compressionLenHelper(c, x.Md)
	case *MF:
		compressionLenHelper(c, x.Mf)
	case *MG:
		compressionLenHelper(c, x.Mg)
	case *MINFO:
		compressionLenHelper(c, x.Rmail)
		compressionLenHelper(c, x.Email)
	case *MR:
		compressionLenHelper(c, x.Mr)
	case *MX:
		compressionLenHelper(c, x.Mx)
	case *NAPTR:
		compressionLenHelper(c, x.Replacement)
	case *NS:
		compressionLenHelper(c, x.Ns)
	case *NSAPPTR:
		compressionLenHelper(c, x.Ptr)
	case *NSEC:
		compressionLenHelper(c, x.NextDomain)
	case *PTR:
		compressionLenHelper(c, x.Ptr)
	case *PX:
		compressionLenHelper(c, x.Map822)
		compressionLenHelper(c, x.Mapx400)
	case *RP:
		compressionLenHelper(c, x.Mbox)
		compressionLenHelper(c, x.Txt)
	case *RRSIG:
		compressionLenHelper(c, x.SignerName)
	case *RT:
		compressionLenHelper(c, x.Host)
	case *SIG:
		compressionLenHelper(c, x.SignerName)
	case *SOA:
		compressionLenHelper(c, x.Ns)
		compressionLenHelper(c, x.Mbox)
	case *SRV:
		compressionLenHelper(c, x.Target)
	case *TALINK:
		compressionLenHelper(c, x.PreviousName)
		compressionLenHelper(c, x.NextName)
	case *TKEY:
		compressionLenHelper(c, x.Algorithm)
	case *TSIG:
		compressionLenHelper(c, x.Algorithm)
	}
}

func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
	switch x := r.(type) {
	case *AFSDB:
		k1, ok1 := compressionLenSearch(c, x.Hostname)
		return k1, ok1
	case *CNAME:
		k1, ok1 := compressionLenSearch(c, x.Target)
		return k1, ok1
	case *MB:
		k1, ok1 := compressionLenSearch(c, x.Mb)
		return k1, ok1
	case *MD:
		k1, ok1 := compressionLenSearch(c, x.Md)
		return k1, ok1
	case *MF:
		k1, ok1 := compressionLenSearch(c, x.Mf)
		return k1, ok1
	case *MG:
		k1, ok1 := compressionLenSearch(c, x.Mg)
		return k1, ok1
	case *MINFO:
		k1, ok1 := compressionLenSearch(c, x.Rmail)
		k2, ok2 := compressionLenSearch(c, x.Email)
		return k1 + k2, ok1 && ok2
	case *MR:
		k1, ok1 := compressionLenSearch(c, x.Mr)
		return k1, ok1
	case *MX:
		k1, ok1 := compressionLenSearch(c, x.Mx)
		return k1, ok1
	case *NS:
		k1, ok1 := compressionLenSearch(c, x.Ns)
		return k1, ok1
	case *PTR:
		k1, ok1 := compressionLenSearch(c, x.Ptr)
		return k1, ok1
	case *RT:
		k1, ok1 := compressionLenSearch(c, x.Host)
		return k1, ok1
	case *SOA:
		k1, ok1 := compressionLenSearch(c, x.Ns)
		k2, ok2 := compressionLenSearch(c, x.Mbox)
		return k1 + k2, ok1 && ok2
	}
	return 0, false
}
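
// Illustrative sketch (not part of the original zcompress.go): these
// generated helpers appear to feed the name-compression length bookkeeping
// used when a message is measured with compression enabled. The zone name
// below is a placeholder; the "fmt" import is assumed.
//
//	m := new(dns.Msg)
//	m.SetQuestion("www.example.org.", dns.TypeA)
//	m.Compress = true
//	fmt.Println(m.Len()) // compressed length estimate
//	wire, err := m.Pack()
//	// handle wire and err as usual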
Some files were not shown because too many files have changed in this diff.