Update dependencies

This commit is contained in:
Zack Scholl 2017-11-04 04:45:08 -06:00
parent 8b3e7b0605
commit 1ef24a0347
254 changed files with 75372 additions and 22854 deletions

18
Gopkg.lock generated
View File

@ -28,8 +28,8 @@
[[projects]]
name = "github.com/garyburd/redigo"
packages = ["internal","redis"]
revision = "433969511232c397de61b1442f9fd49ec06ae9ba"
version = "v1.1.0"
revision = "34a326de1fea52965fa5ad664d3fc7163dd4b0a1"
version = "v1.2.0"
[[projects]]
branch = "master"
@ -59,7 +59,7 @@
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
[[projects]]
name = "github.com/gorilla/context"
@ -125,7 +125,7 @@
branch = "master"
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
revision = "feef008d51ad2b3778f85d387ccf91735543008d"
revision = "2fc9cd33b5f86077aa3e0f442fa0476a9fa9a1dc"
[[projects]]
branch = "master"
@ -149,7 +149,7 @@
branch = "master"
name = "github.com/shurcooL/graphql"
packages = ["ident"]
revision = "d1a3e018e03c4a6b00bc8641eb272872fff371dd"
revision = "cf6db17b893acfad0ca1929ba6be45bf854790ed"
[[projects]]
branch = "master"
@ -191,25 +191,25 @@
branch = "master"
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "54210f4e076c57f351166f0ed60e67d3fca57a36"
revision = "50189f05eaf5a0c17e5084eb8f7fb91e23699840"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["bcrypt","blowfish"]
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
revision = "bd6f299fb381e4c3393d1c4b1f0b94f5e77650c8"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["html","html/atom"]
revision = "0a9397675ba34b2845f758fe3cd68828369c6517"
revision = "01c190206fbdffa42f334f4b2bf2220f50e64920"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
revision = "4fe5d7925040acd225bf9c7cee65e82d07f06bff"
[[projects]]
name = "gopkg.in/go-playground/validator.v8"

File diff suppressed because one or more lines are too long

View File

@ -9,6 +9,7 @@ go:
- 1.6
- 1.7
- 1.8
- 1.9
- tip
script:

View File

@ -76,7 +76,7 @@ type dialOptions struct {
dial func(network, addr string) (net.Conn, error)
db int
password string
dialTLS bool
useTLS bool
skipVerify bool
tlsConfig *tls.Config
}
@ -135,14 +135,22 @@ func DialTLSConfig(c *tls.Config) DialOption {
}}
}
// DialTLSSkipVerify to disable server name verification when connecting
// over TLS. Has no effect when not dialing a TLS connection.
// DialTLSSkipVerify disables server name verification when connecting over
// TLS. Has no effect when not dialing a TLS connection.
func DialTLSSkipVerify(skip bool) DialOption {
return DialOption{func(do *dialOptions) {
do.skipVerify = skip
}}
}
// DialUseTLS specifies whether TLS should be used when connecting to the
// server. This option is ignored by DialURL.
func DialUseTLS(useTLS bool) DialOption {
return DialOption{func(do *dialOptions) {
do.useTLS = useTLS
}}
}
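As an illustrative aside, a minimal sketch (not part of the commit) of how the new DialUseTLS option composes with the existing DialTLSConfig and DialTLSSkipVerify options when dialing directly rather than through DialURL; the host name is an assumption:

```Go
package main

import (
	"crypto/tls"
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// Dial a TLS-enabled Redis server directly. DialUseTLS replaces the
	// old internal dialTLS flag; DialURL now sets it from the URL scheme.
	c, err := redis.Dial("tcp", "redis.example.com:6380",
		redis.DialUseTLS(true),
		redis.DialTLSConfig(&tls.Config{ServerName: "redis.example.com"}),
		redis.DialTLSSkipVerify(false), // keep server name verification on
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if _, err := c.Do("PING"); err != nil {
		log.Fatal(err)
	}
}
```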
// Dial connects to the Redis server at the given network and
// address using the specified options.
func Dial(network, address string, options ...DialOption) (Conn, error) {
@ -158,7 +166,7 @@ func Dial(network, address string, options ...DialOption) (Conn, error) {
return nil, err
}
if do.dialTLS {
if do.useTLS {
tlsConfig := cloneTLSClientConfig(do.tlsConfig, do.skipVerify)
if tlsConfig.ServerName == "" {
host, _, err := net.SplitHostPort(address)
@ -202,10 +210,6 @@ func Dial(network, address string, options ...DialOption) (Conn, error) {
return c, nil
}
func dialTLS(do *dialOptions) {
do.dialTLS = true
}
var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
// DialURL connects to a Redis server at the given URL using the Redis
@ -257,9 +261,7 @@ func DialURL(rawurl string, options ...DialOption) (Conn, error) {
return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
}
if u.Scheme == "rediss" {
options = append([]DialOption{{dialTLS}}, options...)
}
options = append(options, DialUseTLS(u.Scheme == "rediss"))
return Dial("tcp", address, options...)
}
@ -344,39 +346,55 @@ func (c *conn) writeFloat64(n float64) error {
return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
}
func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
func (c *conn) writeCommand(cmd string, args []interface{}) error {
c.writeLen('*', 1+len(args))
err = c.writeString(cmd)
if err := c.writeString(cmd); err != nil {
return err
}
for _, arg := range args {
if err != nil {
break
}
switch arg := arg.(type) {
case string:
err = c.writeString(arg)
case []byte:
err = c.writeBytes(arg)
case int:
err = c.writeInt64(int64(arg))
case int64:
err = c.writeInt64(arg)
case float64:
err = c.writeFloat64(arg)
case bool:
if arg {
err = c.writeString("1")
} else {
err = c.writeString("0")
}
case nil:
err = c.writeString("")
default:
var buf bytes.Buffer
fmt.Fprint(&buf, arg)
err = c.writeBytes(buf.Bytes())
if err := c.writeArg(arg, true); err != nil {
return err
}
}
return err
return nil
}
func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) {
switch arg := arg.(type) {
case string:
return c.writeString(arg)
case []byte:
return c.writeBytes(arg)
case int:
return c.writeInt64(int64(arg))
case int64:
return c.writeInt64(arg)
case float64:
return c.writeFloat64(arg)
case bool:
if arg {
return c.writeString("1")
} else {
return c.writeString("0")
}
case nil:
return c.writeString("")
case Argument:
if argumentTypeOK {
return c.writeArg(arg.RedisArg(), false)
}
// See comment in default clause below.
var buf bytes.Buffer
fmt.Fprint(&buf, arg)
return c.writeBytes(buf.Bytes())
default:
// This default clause is intended to handle builtin numeric types.
// The function should return an error for other types, but this is not
// done for compatibility with previous versions of the package.
var buf bytes.Buffer
fmt.Fprint(&buf, arg)
return c.writeBytes(buf.Bytes())
}
}
type protocolError string

View File

@ -16,6 +16,9 @@ package redis_test
import (
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"math"
"net"
@ -40,12 +43,49 @@ func (*testConn) SetDeadline(t time.Time) error { return nil }
func (*testConn) SetReadDeadline(t time.Time) error { return nil }
func (*testConn) SetWriteDeadline(t time.Time) error { return nil }
func dialTestConn(r io.Reader, w io.Writer) redis.DialOption {
return redis.DialNetDial(func(net, addr string) (net.Conn, error) {
return &testConn{Reader: r, Writer: w}, nil
func dialTestConn(r string, w io.Writer) redis.DialOption {
return redis.DialNetDial(func(network, addr string) (net.Conn, error) {
return &testConn{Reader: strings.NewReader(r), Writer: w}, nil
})
}
type tlsTestConn struct {
net.Conn
done chan struct{}
}
func (c *tlsTestConn) Close() error {
c.Conn.Close()
<-c.done
return nil
}
func dialTestConnTLS(r string, w io.Writer) redis.DialOption {
return redis.DialNetDial(func(network, addr string) (net.Conn, error) {
client, server := net.Pipe()
tlsServer := tls.Server(server, &serverTLSConfig)
go io.Copy(tlsServer, strings.NewReader(r))
done := make(chan struct{})
go func() {
io.Copy(w, tlsServer)
close(done)
}()
return &tlsTestConn{Conn: client, done: done}, nil
})
}
type durationArg struct {
time.Duration
}
func (t durationArg) RedisArg() interface{} {
return t.Seconds()
}
type recursiveArg int
func (v recursiveArg) RedisArg() interface{} { return v }
var writeTests = []struct {
args []interface{}
expected string
@ -82,6 +122,14 @@ var writeTests = []struct {
[]interface{}{"SET", "key", nil},
"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n",
},
{
[]interface{}{"SET", "key", durationArg{time.Minute}},
"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$2\r\n60\r\n",
},
{
[]interface{}{"SET", "key", recursiveArg(123)},
"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n123\r\n",
},
{
[]interface{}{"ECHO", true, false},
"*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n",
@ -91,7 +139,7 @@ var writeTests = []struct {
func TestWrite(t *testing.T) {
for _, tt := range writeTests {
var buf bytes.Buffer
c, _ := redis.Dial("", "", dialTestConn(nil, &buf))
c, _ := redis.Dial("", "", dialTestConn("", &buf))
err := c.Send(tt.args[0].(string), tt.args[1:]...)
if err != nil {
t.Errorf("Send(%v) returned error %v", tt.args, err)
@ -190,7 +238,7 @@ var readTests = []struct {
func TestRead(t *testing.T) {
for _, tt := range readTests {
c, _ := redis.Dial("", "", dialTestConn(strings.NewReader(tt.reply), nil))
c, _ := redis.Dial("", "", dialTestConn(tt.reply, nil))
actual, err := c.Receive()
if tt.expected == errorSentinel {
if err == nil {
@ -447,7 +495,7 @@ var dialErrors = []struct {
"localhost",
"invalid redis URL scheme",
},
// The error message for invalid hosts is diffferent in different
// The error message for invalid hosts is different in different
// versions of Go, so just check that there is an error message.
{
"redis://weird url",
@ -502,41 +550,85 @@ func TestDialURLHost(t *testing.T) {
}
}
func TestDialURLPassword(t *testing.T) {
var buf bytes.Buffer
_, err := redis.DialURL("redis://x:abc123@localhost", dialTestConn(strings.NewReader("+OK\r\n"), &buf))
if err != nil {
t.Error("dial error:", err)
var dialURLTests = []struct {
description string
url string
r string
w string
}{
{"password", "redis://x:abc123@localhost", "+OK\r\n", "*2\r\n$4\r\nAUTH\r\n$6\r\nabc123\r\n"},
{"database 3", "redis://localhost/3", "+OK\r\n", "*2\r\n$6\r\nSELECT\r\n$1\r\n3\r\n"},
{"database 99", "redis://localhost/99", "+OK\r\n", "*2\r\n$6\r\nSELECT\r\n$2\r\n99\r\n"},
{"no database", "redis://localhost/", "+OK\r\n", ""},
}
func TestDialURL(t *testing.T) {
for _, tt := range dialURLTests {
var buf bytes.Buffer
// UseTLS should be ignored in all of these tests.
_, err := redis.DialURL(tt.url, dialTestConn(tt.r, &buf), redis.DialUseTLS(true))
if err != nil {
t.Errorf("%s dial error: %v", tt.description, err)
continue
}
if w := buf.String(); w != tt.w {
t.Errorf("%s commands = %q, want %q", tt.description, w, tt.w)
}
}
expected := "*2\r\n$4\r\nAUTH\r\n$6\r\nabc123\r\n"
}
func checkPingPong(t *testing.T, buf *bytes.Buffer, c redis.Conn) {
resp, err := c.Do("PING")
if err != nil {
t.Fatal("ping error:", err)
}
// Close connection to ensure that writes to buf are complete.
c.Close()
expected := "*1\r\n$4\r\nPING\r\n"
actual := buf.String()
if actual != expected {
t.Errorf("commands = %q, want %q", actual, expected)
}
if resp != "PONG" {
t.Errorf("resp = %v, want %v", resp, "PONG")
}
}
func TestDialURLDatabase(t *testing.T) {
var buf3 bytes.Buffer
_, err3 := redis.DialURL("redis://localhost/3", dialTestConn(strings.NewReader("+OK\r\n"), &buf3))
if err3 != nil {
t.Error("dial error:", err3)
const pingResponse = "+PONG\r\n"
func TestDialURLTLS(t *testing.T) {
var buf bytes.Buffer
c, err := redis.DialURL("rediss://example.com/",
redis.DialTLSConfig(&clientTLSConfig),
dialTestConnTLS(pingResponse, &buf))
if err != nil {
t.Fatal("dial error:", err)
}
expected3 := "*2\r\n$6\r\nSELECT\r\n$1\r\n3\r\n"
actual3 := buf3.String()
if actual3 != expected3 {
t.Errorf("commands = %q, want %q", actual3, expected3)
checkPingPong(t, &buf, c)
}
func TestDialUseTLS(t *testing.T) {
var buf bytes.Buffer
c, err := redis.Dial("tcp", "example.com:6379",
redis.DialTLSConfig(&clientTLSConfig),
dialTestConnTLS(pingResponse, &buf),
redis.DialUseTLS(true))
if err != nil {
t.Fatal("dial error:", err)
}
// empty DB means 0
var buf0 bytes.Buffer
_, err0 := redis.DialURL("redis://localhost/", dialTestConn(strings.NewReader("+OK\r\n"), &buf0))
if err0 != nil {
t.Error("dial error:", err0)
}
expected0 := ""
actual0 := buf0.String()
if actual0 != expected0 {
t.Errorf("commands = %q, want %q", actual0, expected0)
checkPingPong(t, &buf, c)
}
func TestDialTLSSKipVerify(t *testing.T) {
var buf bytes.Buffer
c, err := redis.Dial("tcp", "example.com:6379",
dialTestConnTLS(pingResponse, &buf),
redis.DialTLSSkipVerify(true),
redis.DialUseTLS(true))
if err != nil {
t.Fatal("dial error:", err)
}
checkPingPong(t, &buf, c)
}
// Connect to local instance of Redis running on the default port.
@ -668,3 +760,65 @@ func BenchmarkDoPing(b *testing.B) {
}
}
}
var clientTLSConfig, serverTLSConfig tls.Config
func init() {
// The certificate and key for testing TLS dial options were created
// using the command
//
// go run GOROOT/src/crypto/tls/generate_cert.go \
// --rsa-bits 1024 \
// --host 127.0.0.1,::1,example.com --ca \
// --start-date "Jan 1 00:00:00 1970" \
// --duration=1000000h
//
// where GOROOT is the value of GOROOT reported by go env.
localhostCert := []byte(`
-----BEGIN CERTIFICATE-----
MIICFDCCAX2gAwIBAgIRAJfBL4CUxkXcdlFurb3K+iowDQYJKoZIhvcNAQELBQAw
EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2
MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
gYkCgYEArizw8WxMUQ3bGHLeuJ4fDrEpy+L2pqrbYRlKk1DasJ/VkB8bImzIpe6+
LGjiYIxvnDCOJ3f3QplcQuiuMyl6f2irJlJsbFT8Lo/3obnuTKAIaqUdJUqBg6y+
JaL8Auk97FvunfKFv8U1AIhgiLzAfQ/3Eaq1yi87Ra6pMjGbTtcCAwEAAaNoMGYw
DgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF
MAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAAAAAAAAAA
AAAAAAEwDQYJKoZIhvcNAQELBQADgYEAdZ8daIVkyhVwflt5I19m0oq1TycbGO1+
ach7T6cZiBQeNR/SJtxr/wKPEpmvUgbv2BfFrKJ8QoIHYsbNSURTWSEa02pfw4k9
6RQhij3ZkG79Ituj5OYRORV6Z0HUW32r670BtcuHuAhq7YA6Nxy4FtSt7bAlVdRt
rrKgNsltzMk=
-----END CERTIFICATE-----`)
localhostKey := []byte(`
-----BEGIN RSA PRIVATE KEY-----
MIICXAIBAAKBgQCuLPDxbExRDdsYct64nh8OsSnL4vamqtthGUqTUNqwn9WQHxsi
bMil7r4saOJgjG+cMI4nd/dCmVxC6K4zKXp/aKsmUmxsVPwuj/ehue5MoAhqpR0l
SoGDrL4lovwC6T3sW+6d8oW/xTUAiGCIvMB9D/cRqrXKLztFrqkyMZtO1wIDAQAB
AoGACrc5G6FOEK6JjDeE/Fa+EmlT6PdNtXNNi+vCas3Opo8u1G8VfEi1D4BgstrB
Eq+RLkrOdB8tVyuYQYWPMhabMqF+hhKJN72j0OwfuPlVvTInwb/cKjo/zbH1IA+Y
HenHNK4ywv7/p/9/MvQPJ3I32cQBCgGUW5chVSH5M1sj5gECQQDabQAI1X0uDqCm
KbX9gXVkAgxkFddrt6LBHt57xujFcqEKFE7nwKhDh7DweVs/VEJ+kpid4z+UnLOw
KjtP9JolAkEAzCNBphQ//IsbH5rNs10wIUw3Ks/Oepicvr6kUFbIv+neRzi1iJHa
m6H7EayK3PWgax6BAsR/t0Jc9XV7r2muSwJAVzN09BHnK+ADGtNEKLTqXMbEk6B0
pDhn7ZmZUOkUPN+Kky+QYM11X6Bob1jDqQDGmymDbGUxGO+GfSofC8inUQJAGfci
Eo3g1a6b9JksMPRZeuLG4ZstGErxJRH6tH1Va5PDwitka8qhk8o2tTjNMO3NSdLH
diKoXBcE2/Pll5pJoQJBAIMiiMIzXJhnN4mX8may44J/HvMlMf2xuVH2gNMwmZuc
Bjqn3yoLHaoZVvbWOi0C2TCN4FjXjaLNZGifQPbIcaA=
-----END RSA PRIVATE KEY-----`)
cert, err := tls.X509KeyPair(localhostCert, localhostKey)
if err != nil {
panic(fmt.Sprintf("error creating key pair: %v", err))
}
serverTLSConfig.Certificates = []tls.Certificate{cert}
certificate, err := x509.ParseCertificate(serverTLSConfig.Certificates[0].Certificate[0])
if err != nil {
panic(fmt.Sprintf("error parsing x509 certificate: %v", err))
}
clientTLSConfig.RootCAs = x509.NewCertPool()
clientTLSConfig.RootCAs.AddCert(certificate)
}

View File

@ -38,7 +38,7 @@
//
// n, err := conn.Do("APPEND", "key", "value")
//
// The Do method converts command arguments to binary strings for transmission
// The Do method converts command arguments to bulk strings for transmission
// to the server as follows:
//
// Go Type Conversion
@ -48,7 +48,7 @@
// float64 strconv.FormatFloat(v, 'g', -1, 64)
// bool true -> "1", false -> "0"
// nil ""
// all other types fmt.Print(v)
// all other types fmt.Fprint(w, v)
//
// Redis command reply types are represented using the following Go types:
//
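As a rough illustration of the conversion table above (a sketch, not part of the commit; the key names and addresses are assumptions), each argument below is converted to a bulk string according to its Go type:

```Go
package main

import (
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// string and []byte are sent as-is; numbers and bools are formatted
	// as described above; nil becomes the empty bulk string.
	if _, err := c.Do("SET", "visits", 42); err != nil { // int -> "42"
		log.Fatal(err)
	}
	if _, err := c.Do("SET", "ratio", 0.75); err != nil { // float64 -> "0.75"
		log.Fatal(err)
	}
	if _, err := c.Do("SET", "enabled", true); err != nil { // bool -> "1"
		log.Fatal(err)
	}
}
```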

View File

@ -181,7 +181,27 @@ func (p *Pool) Get() Conn {
return &pooledConnection{p: p, c: c}
}
// ActiveCount returns the number of active connections in the pool.
// PoolStats contains pool statistics.
type PoolStats struct {
// ActiveCount is the number of connections in the pool. The count includes idle connections and connections in use.
ActiveCount int
// IdleCount is the number of idle connections in the pool.
IdleCount int
}
// Stats returns the pool's statistics.
func (p *Pool) Stats() PoolStats {
p.mu.Lock()
stats := PoolStats{
ActiveCount: p.active,
IdleCount: p.idle.Len(),
}
p.mu.Unlock()
return stats
}
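A minimal sketch of reading the new PoolStats snapshot (the dial target and pool settings are assumptions, not part of the change):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	p := &redis.Pool{
		MaxIdle: 3,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
	}
	defer p.Close()

	c := p.Get()
	if _, err := c.Do("PING"); err != nil {
		log.Fatal(err)
	}

	// While the connection is checked out it counts as active but not idle.
	s := p.Stats()
	fmt.Printf("active=%d idle=%d\n", s.ActiveCount, s.IdleCount)

	c.Close() // returning the connection makes it idle again
}
```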
// ActiveCount returns the number of connections in the pool. The count includes idle connections and connections in use.
func (p *Pool) ActiveCount() int {
p.mu.Lock()
active := p.active
@ -189,6 +209,14 @@ func (p *Pool) ActiveCount() int {
return active
}
// IdleCount returns the number of idle connections in the pool.
func (p *Pool) IdleCount() int {
p.mu.Lock()
idle := p.idle.Len()
p.mu.Unlock()
return idle
}
// Close releases the resources used by the pool.
func (p *Pool) Close() error {
p.mu.Lock()

View File

@ -83,7 +83,7 @@ func (d *poolDialer) dial() (redis.Conn, error) {
return &poolTestConn{d: d, Conn: c}, nil
}
func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) {
func (d *poolDialer) check(message string, p *redis.Pool, dialed, open, inuse int) {
d.mu.Lock()
if d.dialed != dialed {
d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed)
@ -91,9 +91,16 @@ func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) {
if d.open != open {
d.t.Errorf("%s: open=%d, want %d", message, d.open, open)
}
if active := p.ActiveCount(); active != open {
d.t.Errorf("%s: active=%d, want %d", message, active, open)
stats := p.Stats()
if stats.ActiveCount != open {
d.t.Errorf("%s: active=%d, want %d", message, stats.ActiveCount, open)
}
if stats.IdleCount != open-inuse {
d.t.Errorf("%s: idle=%d, want %d", message, stats.IdleCount, open-inuse)
}
d.mu.Unlock()
}
@ -113,9 +120,9 @@ func TestPoolReuse(t *testing.T) {
c2.Close()
}
d.check("before close", p, 2, 2)
d.check("before close", p, 2, 2, 0)
p.Close()
d.check("after close", p, 2, 0)
d.check("after close", p, 2, 0, 0)
}
func TestPoolMaxIdle(t *testing.T) {
@ -137,9 +144,9 @@ func TestPoolMaxIdle(t *testing.T) {
c2.Close()
c3.Close()
}
d.check("before close", p, 12, 2)
d.check("before close", p, 12, 2, 0)
p.Close()
d.check("after close", p, 12, 0)
d.check("after close", p, 12, 0, 0)
}
func TestPoolError(t *testing.T) {
@ -161,7 +168,7 @@ func TestPoolError(t *testing.T) {
c.Do("ERR", io.EOF)
c.Close()
d.check(".", p, 2, 0)
d.check(".", p, 2, 0, 0)
}
func TestPoolClose(t *testing.T) {
@ -189,7 +196,7 @@ func TestPoolClose(t *testing.T) {
p.Close()
d.check("after pool close", p, 3, 1)
d.check("after pool close", p, 3, 1, 1)
if _, err := c1.Do("PING"); err == nil {
t.Errorf("expected error after connection and pool closed")
@ -197,7 +204,7 @@ func TestPoolClose(t *testing.T) {
c3.Close()
d.check("after conn close", p, 3, 0)
d.check("after conn close", p, 3, 0, 0)
c1 = p.Get()
if _, err := c1.Do("PING"); err == nil {
@ -222,7 +229,7 @@ func TestPoolTimeout(t *testing.T) {
c.Do("PING")
c.Close()
d.check("1", p, 1, 1)
d.check("1", p, 1, 1, 0)
now = now.Add(p.IdleTimeout)
@ -230,7 +237,7 @@ func TestPoolTimeout(t *testing.T) {
c.Do("PING")
c.Close()
d.check("2", p, 2, 1)
d.check("2", p, 2, 1, 0)
}
func TestPoolConcurrenSendReceive(t *testing.T) {
@ -272,7 +279,7 @@ func TestPoolBorrowCheck(t *testing.T) {
c.Do("PING")
c.Close()
}
d.check("1", p, 10, 1)
d.check("1", p, 10, 1, 0)
}
func TestPoolMaxActive(t *testing.T) {
@ -289,7 +296,7 @@ func TestPoolMaxActive(t *testing.T) {
c2 := p.Get()
c2.Do("PING")
d.check("1", p, 2, 2)
d.check("1", p, 2, 2, 2)
c3 := p.Get()
if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted {
@ -297,9 +304,9 @@ func TestPoolMaxActive(t *testing.T) {
}
c3.Close()
d.check("2", p, 2, 2)
d.check("2", p, 2, 2, 2)
c2.Close()
d.check("3", p, 2, 2)
d.check("3", p, 2, 2, 1)
c3 = p.Get()
if _, err := c3.Do("PING"); err != nil {
@ -307,7 +314,7 @@ func TestPoolMaxActive(t *testing.T) {
}
c3.Close()
d.check("4", p, 2, 2)
d.check("4", p, 2, 2, 1)
}
func TestPoolMonitorCleanup(t *testing.T) {
@ -323,7 +330,7 @@ func TestPoolMonitorCleanup(t *testing.T) {
c.Send("MONITOR")
c.Close()
d.check("", p, 1, 0)
d.check("", p, 1, 0, 0)
}
func TestPoolPubSubCleanup(t *testing.T) {
@ -433,8 +440,8 @@ func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error
go func() {
c := p.Get()
_, err := c.Do(cmd, args...)
errs <- err
c.Close()
errs <- err
}()
}
@ -456,7 +463,7 @@ func TestWaitPool(t *testing.T) {
c := p.Get()
errs := startGoroutines(p, "PING")
d.check("before close", p, 1, 1)
d.check("before close", p, 1, 1, 1)
c.Close()
timeout := time.After(2 * time.Second)
for i := 0; i < cap(errs); i++ {
@ -469,7 +476,7 @@ func TestWaitPool(t *testing.T) {
t.Fatalf("timeout waiting for blocked goroutine %d", i)
}
}
d.check("done", p, 1, 1)
d.check("done", p, 1, 1, 0)
}
func TestWaitPoolClose(t *testing.T) {
@ -487,7 +494,7 @@ func TestWaitPoolClose(t *testing.T) {
t.Fatal(err)
}
errs := startGoroutines(p, "PING")
d.check("before close", p, 1, 1)
d.check("before close", p, 1, 1, 1)
p.Close()
timeout := time.After(2 * time.Second)
for i := 0; i < cap(errs); i++ {
@ -504,7 +511,7 @@ func TestWaitPoolClose(t *testing.T) {
}
}
c.Close()
d.check("done", p, 1, 0)
d.check("done", p, 1, 0, 0)
}
func TestWaitPoolCommandError(t *testing.T) {
@ -520,7 +527,7 @@ func TestWaitPoolCommandError(t *testing.T) {
c := p.Get()
errs := startGoroutines(p, "ERR", testErr)
d.check("before close", p, 1, 1)
d.check("before close", p, 1, 1, 1)
c.Close()
timeout := time.After(2 * time.Second)
for i := 0; i < cap(errs); i++ {
@ -533,7 +540,7 @@ func TestWaitPoolCommandError(t *testing.T) {
t.Fatalf("timeout waiting for blocked goroutine %d", i)
}
}
d.check("done", p, cap(errs), 0)
d.check("done", p, cap(errs), 0, 0)
}
func TestWaitPoolDialError(t *testing.T) {
@ -549,7 +556,7 @@ func TestWaitPoolDialError(t *testing.T) {
c := p.Get()
errs := startGoroutines(p, "ERR", testErr)
d.check("before close", p, 1, 1)
d.check("before close", p, 1, 1, 1)
d.dialErr = errors.New("dial")
c.Close()
@ -578,7 +585,7 @@ func TestWaitPoolDialError(t *testing.T) {
if errCount != cap(errs)-1 {
t.Errorf("expected %d dial errors, got %d", cap(errs)-1, errCount)
}
d.check("done", p, cap(errs), 0)
d.check("done", p, cap(errs), 0, 0)
}
// Borrowing requires us to iterate over the idle connections, unlock the pool,

View File

@ -94,6 +94,9 @@ func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
}
// Ping sends a PING to the server with the specified data.
//
// The connection must be subscribed to at least one channel or pattern when
// calling this method.
func (c PubSubConn) Ping(data string) error {
c.Conn.Send("PING", data)
return c.Conn.Flush()
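A hedged usage sketch of the documented constraint: Ping is sent on a connection that is already subscribed, and the PONG comes back through Receive. The channel name, address, and the assumption that redis.Pong is among the Receive reply types are illustrative, not part of the diff:

```Go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	psc := redis.PubSubConn{Conn: c}
	if err := psc.Subscribe("events"); err != nil { // must subscribe first
		log.Fatal(err)
	}
	if err := psc.Ping("keepalive"); err != nil {
		log.Fatal(err)
	}

	for {
		switch v := psc.Receive().(type) {
		case redis.Subscription:
			fmt.Println("subscribed to", v.Channel)
		case redis.Pong:
			fmt.Println("pong:", v.Data)
			return
		case redis.Message:
			fmt.Printf("%s: %s\n", v.Channel, v.Data)
		case error:
			log.Fatal(v)
		}
	}
}
```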

View File

@ -39,3 +39,23 @@ type Conn interface {
// Receive receives a single reply from the Redis server
Receive() (reply interface{}, err error)
}
// Argument is the interface implemented by an object which wants to control how
// the object is converted to Redis bulk strings.
type Argument interface {
// RedisArg returns a value to be encoded as a bulk string per the
// conversions listed in the section 'Executing Commands'.
// Implementations should typically return a []byte or string.
RedisArg() interface{}
}
// Scanner is implemented by an object which wants to control how its value is
// interpreted when read from Redis.
type Scanner interface {
// RedisScan assigns a value from a Redis value. The argument src is one of
// the reply types listed in the section `Executing Commands`.
//
// An error should be returned if the value cannot be stored without
// loss of information.
RedisScan(src interface{}) error
}
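The two interfaces pair naturally, much like the durationArg/durationScan helpers in the tests: a type can implement Argument to control how it is written and Scanner to control how it is read back. A minimal round-trip sketch (the wrapper type and usage comments are illustrative assumptions):

```Go
package main

import (
	"fmt"
	"time"
)

// Duration stores a time.Duration in Redis as a string such as "1m30s"
// and parses it back, satisfying both Argument and Scanner.
type Duration struct{ time.Duration }

// RedisArg implements Argument: the returned string is written as the
// bulk string argument.
func (d Duration) RedisArg() interface{} { return d.String() }

// RedisScan implements Scanner: it accepts the bulk string reply and
// parses it back into the wrapper.
func (d *Duration) RedisScan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("unexpected reply type %T", src)
	}
	parsed, err := time.ParseDuration(string(b))
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

func main() {
	// With a live connection this would round-trip via the interfaces:
	//   c.Do("SET", "ttl", Duration{90 * time.Second})
	//   redis.Scan(values, &d)
	fmt.Println(Duration{90 * time.Second}.RedisArg()) // "1m30s"
}
```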

View File

@ -333,7 +333,7 @@ func StringMap(result interface{}, err error) (map[string]string, error) {
key, okKey := values[i].([]byte)
value, okValue := values[i+1].([]byte)
if !okKey || !okValue {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
return nil, errors.New("redigo: StringMap key not a bulk string value")
}
m[string(key)] = string(value)
}
@ -355,7 +355,7 @@ func IntMap(result interface{}, err error) (map[string]int, error) {
for i := 0; i < len(values); i += 2 {
key, ok := values[i].([]byte)
if !ok {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
return nil, errors.New("redigo: IntMap key not a bulk string value")
}
value, err := Int(values[i+1], nil)
if err != nil {
@ -381,7 +381,7 @@ func Int64Map(result interface{}, err error) (map[string]int64, error) {
for i := 0; i < len(values); i += 2 {
key, ok := values[i].([]byte)
if !ok {
return nil, errors.New("redigo: ScanMap key not a bulk string value")
return nil, errors.New("redigo: Int64Map key not a bulk string value")
}
value, err := Int64(values[i+1], nil)
if err != nil {

View File

@ -110,6 +110,25 @@ func convertAssignInt(d reflect.Value, s int64) (err error) {
}
func convertAssignValue(d reflect.Value, s interface{}) (err error) {
if d.Kind() != reflect.Ptr {
if d.CanAddr() {
d2 := d.Addr()
if d2.CanInterface() {
if scanner, ok := d2.Interface().(Scanner); ok {
return scanner.RedisScan(s)
}
}
}
} else if d.CanInterface() {
// Already a reflect.Ptr
if d.IsNil() {
d.Set(reflect.New(d.Type().Elem()))
}
if scanner, ok := d.Interface().(Scanner); ok {
return scanner.RedisScan(s)
}
}
switch s := s.(type) {
case []byte:
err = convertAssignBulkString(d, s)
@ -135,11 +154,15 @@ func convertAssignArray(d reflect.Value, s []interface{}) error {
}
func convertAssign(d interface{}, s interface{}) (err error) {
if scanner, ok := d.(Scanner); ok {
return scanner.RedisScan(s)
}
// Handle the most common destination types using type switches and
// fall back to reflection for all other types.
switch s := s.(type) {
case nil:
// ingore
// ignore
case []byte:
switch d := d.(type) {
case *string:
@ -219,6 +242,8 @@ func convertAssign(d interface{}, s interface{}) (err error) {
// Scan copies from src to the values pointed at by dest.
//
// Scan uses RedisScan if available otherwise:
//
// The values pointed at by dest must be an integer, float, boolean, string,
// []byte, interface{} or slices of these types. Scan uses the standard strconv
// package to convert bulk strings to numeric and boolean types.
@ -359,6 +384,7 @@ var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil po
//
// Fields with the tag redis:"-" are ignored.
//
// Each field uses RedisScan if available otherwise:
// Integer, float, boolean, string and []byte fields are supported. Scan uses the
// standard strconv package to convert bulk string values to numeric and
// boolean types.
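A small sketch of ScanStruct with the struct-tag behavior described above (hash key and field names are assumptions, not part of the change):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

type profile struct {
	Name    string `redis:"name"`
	Age     int    `redis:"age"`
	Ignored string `redis:"-"` // skipped by ScanStruct
}

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// HGETALL returns alternating field names and values, which is the
	// flat form ScanStruct expects.
	values, err := redis.Values(c.Do("HGETALL", "profile:1"))
	if err != nil {
		log.Fatal(err)
	}

	var p profile
	if err := redis.ScanStruct(values, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", p)
}
```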

View File

@ -19,10 +19,32 @@ import (
"math"
"reflect"
"testing"
"time"
"github.com/garyburd/redigo/redis"
)
type durationScan struct {
time.Duration `redis:"sd"`
}
func (t *durationScan) RedisScan(src interface{}) (err error) {
if t == nil {
return fmt.Errorf("nil pointer")
}
switch src := src.(type) {
case string:
t.Duration, err = time.ParseDuration(src)
case []byte:
t.Duration, err = time.ParseDuration(string(src))
case int64:
t.Duration = time.Duration(src)
default:
err = fmt.Errorf("cannot convert from %T to %T", src, t)
}
return err
}
var scanConversionTests = []struct {
src interface{}
dest interface{}
@ -59,6 +81,11 @@ var scanConversionTests = []struct {
{[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}},
{[]interface{}{[]byte("1")}, []byte{1}},
{[]interface{}{[]byte("1")}, []bool{true}},
{"1m", durationScan{Duration: time.Minute}},
{[]byte("1m"), durationScan{Duration: time.Minute}},
{time.Minute.Nanoseconds(), durationScan{Duration: time.Minute}},
{[]interface{}{[]byte("1m")}, []durationScan{durationScan{Duration: time.Minute}}},
{[]interface{}{[]byte("1m")}, []*durationScan{&durationScan{Duration: time.Minute}}},
}
func TestScanConversion(t *testing.T) {
@ -86,6 +113,8 @@ var scanConversionErrorTests = []struct {
{int64(-1), byte(0)},
{[]byte("junk"), false},
{redis.Error("blah"), false},
{redis.Error("blah"), durationScan{Duration: time.Minute}},
{"invalid", durationScan{Duration: time.Minute}},
}
func TestScanConversionError(t *testing.T) {
@ -158,6 +187,8 @@ type s1 struct {
Bt bool
Bf bool
s0
Sd durationScan `redis:"sd"`
Sdp *durationScan `redis:"sdp"`
}
var scanStructTests = []struct {
@ -166,8 +197,31 @@ var scanStructTests = []struct {
value interface{}
}{
{"basic",
[]string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"},
&s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}},
[]string{
"i", "-1234",
"u", "5678",
"s", "hello",
"p", "world",
"b", "t",
"Bt", "1",
"Bf", "0",
"X", "123",
"y", "456",
"sd", "1m",
"sdp", "1m",
},
&s1{
I: -1234,
U: 5678,
S: "hello",
P: []byte("world"),
B: true,
Bt: true,
Bf: false,
s0: s0{X: 123, Y: 456},
Sd: durationScan{Duration: time.Minute},
Sdp: &durationScan{Duration: time.Minute},
},
},
}

View File

@ -55,6 +55,11 @@ func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
return args
}
// Hash returns the script hash.
func (s *Script) Hash() string {
return s.hash
}
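A brief sketch of where the new Hash accessor is handy, e.g. checking whether the script is already cached before relying on EVALSHA; the key name, script body, and use of redis.Ints/Script.Load are illustrative assumptions:

```Go
package main

import (
	"fmt"
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// One KEYS argument; GET its value via Lua.
	s := redis.NewScript(1, `return redis.call("GET", KEYS[1])`)

	// Hash exposes the SHA1 that Do/EVALSHA uses under the covers.
	loaded, err := redis.Ints(c.Do("SCRIPT", "EXISTS", s.Hash()))
	if err != nil {
		log.Fatal(err)
	}
	if len(loaded) == 1 && loaded[0] == 0 {
		if err := s.Load(c); err != nil { // cache it explicitly
			log.Fatal(err)
		}
	}
	fmt.Println(s.Do(c, "greeting"))
}
```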
// Do evaluates the script. Under the covers, Do optimistically evaluates the
// script using the EVALSHA command. If the command fails because the script is
// not loaded, then Do evaluates the script using the EVAL command (thus

View File

@ -96,7 +96,8 @@ func (s *Server) watch(r io.Reader, ready chan error) {
text = scn.Text()
fmt.Fprintf(serverLog, "%s\n", text)
if !listening {
if strings.Contains(text, "The server is now ready to accept connections on port") {
if strings.Contains(text, " * Ready to accept connections") ||
strings.Contains(text, " * The server is now ready to accept connections on port") {
listening = true
ready <- nil
}

View File

@ -16,6 +16,7 @@ package redis_test
import (
"fmt"
"github.com/garyburd/redigo/redis"
)

View File

@ -1,6 +1,7 @@
# Go support for Protocol Buffers
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
Google's data interchange format.
Copyright 2010 The Go Authors.

View File

@ -21,32 +21,32 @@ import (
// Patch represents one patch operation.
type Patch struct {
diffs []Diff
start1 int
start2 int
length1 int
length2 int
Start1 int
Start2 int
Length1 int
Length2 int
}
// String emulates GNU diff's format.
// Header: @@ -382,8 +481,9 @@
// Indicies are printed as 1-based, not 0-based.
// Indices are printed as 1-based, not 0-based.
func (p *Patch) String() string {
var coords1, coords2 string
if p.length1 == 0 {
coords1 = strconv.Itoa(p.start1) + ",0"
} else if p.length1 == 1 {
coords1 = strconv.Itoa(p.start1 + 1)
if p.Length1 == 0 {
coords1 = strconv.Itoa(p.Start1) + ",0"
} else if p.Length1 == 1 {
coords1 = strconv.Itoa(p.Start1 + 1)
} else {
coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1)
coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
}
if p.length2 == 0 {
coords2 = strconv.Itoa(p.start2) + ",0"
} else if p.length2 == 1 {
coords2 = strconv.Itoa(p.start2 + 1)
if p.Length2 == 0 {
coords2 = strconv.Itoa(p.Start2) + ",0"
} else if p.Length2 == 1 {
coords2 = strconv.Itoa(p.Start2 + 1)
} else {
coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2)
coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
}
var text bytes.Buffer
@ -76,37 +76,37 @@ func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
return patch
}
pattern := text[patch.start2 : patch.start2+patch.length1]
pattern := text[patch.Start2 : patch.Start2+patch.Length1]
padding := 0
// Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length.
for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
padding += dmp.PatchMargin
maxStart := max(0, patch.start2-padding)
minEnd := min(len(text), patch.start2+patch.length1+padding)
maxStart := max(0, patch.Start2-padding)
minEnd := min(len(text), patch.Start2+patch.Length1+padding)
pattern = text[maxStart:minEnd]
}
// Add one chunk for good luck.
padding += dmp.PatchMargin
// Add the prefix.
prefix := text[max(0, patch.start2-padding):patch.start2]
prefix := text[max(0, patch.Start2-padding):patch.Start2]
if len(prefix) != 0 {
patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...)
}
// Add the suffix.
suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)]
suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)]
if len(suffix) != 0 {
patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
}
// Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
patch.Start1 -= len(prefix)
patch.Start2 -= len(prefix)
// Extend the lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
patch.Length1 += len(prefix) + len(suffix)
patch.Length2 += len(prefix) + len(suffix)
return patch
}
@ -155,18 +155,18 @@ func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
for i, aDiff := range diffs {
if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
// A new patch starts here.
patch.start1 = charCount1
patch.start2 = charCount2
patch.Start1 = charCount1
patch.Start2 = charCount2
}
switch aDiff.Type {
case DiffInsert:
patch.diffs = append(patch.diffs, aDiff)
patch.length2 += len(aDiff.Text)
patch.Length2 += len(aDiff.Text)
postpatchText = postpatchText[:charCount2] +
aDiff.Text + postpatchText[charCount2:]
case DiffDelete:
patch.length1 += len(aDiff.Text)
patch.Length1 += len(aDiff.Text)
patch.diffs = append(patch.diffs, aDiff)
postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
case DiffEqual:
@ -174,8 +174,8 @@ func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
len(patch.diffs) != 0 && i != len(diffs)-1 {
// Small equality inside a patch.
patch.diffs = append(patch.diffs, aDiff)
patch.length1 += len(aDiff.Text)
patch.length2 += len(aDiff.Text)
patch.Length1 += len(aDiff.Text)
patch.Length2 += len(aDiff.Text)
}
if len(aDiff.Text) >= 2*dmp.PatchMargin {
// Time for a new patch.
@ -219,10 +219,10 @@ func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
aDiff.Text,
})
}
patchCopy.start1 = aPatch.start1
patchCopy.start2 = aPatch.start2
patchCopy.length1 = aPatch.length1
patchCopy.length2 = aPatch.length2
patchCopy.Start1 = aPatch.Start1
patchCopy.Start2 = aPatch.Start2
patchCopy.Length1 = aPatch.Length1
patchCopy.Length2 = aPatch.Length2
patchesCopy = append(patchesCopy, patchCopy)
}
return patchesCopy
@ -246,7 +246,7 @@ func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []b
delta := 0
results := make([]bool, len(patches))
for _, aPatch := range patches {
expectedLoc := aPatch.start2 + delta
expectedLoc := aPatch.Start2 + delta
text1 := dmp.DiffText1(aPatch.diffs)
var startLoc int
endLoc := -1
@ -268,7 +268,7 @@ func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []b
// No match found. :(
results[x] = false
// Subtract the delta for this failed patch from subsequent patches.
delta -= aPatch.length2 - aPatch.length1
delta -= aPatch.Length2 - aPatch.Length1
} else {
// Found a match. :)
results[x] = true
@ -329,26 +329,26 @@ func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
// Bump all the patches forward.
for i := range patches {
patches[i].start1 += paddingLength
patches[i].start2 += paddingLength
patches[i].Start1 += paddingLength
patches[i].Start2 += paddingLength
}
// Add some padding on start of first diff.
if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
// Add nullPadding equality.
patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...)
patches[0].start1 -= paddingLength // Should be 0.
patches[0].start2 -= paddingLength // Should be 0.
patches[0].length1 += paddingLength
patches[0].length2 += paddingLength
patches[0].Start1 -= paddingLength // Should be 0.
patches[0].Start2 -= paddingLength // Should be 0.
patches[0].Length1 += paddingLength
patches[0].Length2 += paddingLength
} else if paddingLength > len(patches[0].diffs[0].Text) {
// Grow first equality.
extraLength := paddingLength - len(patches[0].diffs[0].Text)
patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
patches[0].start1 -= extraLength
patches[0].start2 -= extraLength
patches[0].length1 += extraLength
patches[0].length2 += extraLength
patches[0].Start1 -= extraLength
patches[0].Start2 -= extraLength
patches[0].Length1 += extraLength
patches[0].Length2 += extraLength
}
// Add some padding on end of last diff.
@ -356,15 +356,15 @@ func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual {
// Add nullPadding equality.
patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding})
patches[last].length1 += paddingLength
patches[last].length2 += paddingLength
patches[last].Length1 += paddingLength
patches[last].Length2 += paddingLength
} else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) {
// Grow last equality.
lastDiff := patches[last].diffs[len(patches[last].diffs)-1]
extraLength := paddingLength - len(lastDiff.Text)
patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength]
patches[last].length1 += extraLength
patches[last].length2 += extraLength
patches[last].Length1 += extraLength
patches[last].Length2 += extraLength
}
return nullPadding
@ -375,7 +375,7 @@ func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
patchSize := dmp.MatchMaxBits
for x := 0; x < len(patches); x++ {
if patches[x].length1 <= patchSize {
if patches[x].Length1 <= patchSize {
continue
}
bigpatch := patches[x]
@ -383,46 +383,46 @@ func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
patches = append(patches[:x], patches[x+1:]...)
x--
start1 := bigpatch.start1
start2 := bigpatch.start2
Start1 := bigpatch.Start1
Start2 := bigpatch.Start2
precontext := ""
for len(bigpatch.diffs) != 0 {
// Create one of several smaller patches.
patch := Patch{}
empty := true
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
patch.Start1 = Start1 - len(precontext)
patch.Start2 = Start2 - len(precontext)
if len(precontext) != 0 {
patch.length1 = len(precontext)
patch.length2 = len(precontext)
patch.Length1 = len(precontext)
patch.Length2 = len(precontext)
patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext})
}
for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin {
for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin {
diffType := bigpatch.diffs[0].Type
diffText := bigpatch.diffs[0].Text
if diffType == DiffInsert {
// Insertions are harmless.
patch.length2 += len(diffText)
start2 += len(diffText)
patch.Length2 += len(diffText)
Start2 += len(diffText)
patch.diffs = append(patch.diffs, bigpatch.diffs[0])
bigpatch.diffs = bigpatch.diffs[1:]
empty = false
} else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize {
// This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diffText)
start1 += len(diffText)
patch.Length1 += len(diffText)
Start1 += len(diffText)
empty = false
patch.diffs = append(patch.diffs, Diff{diffType, diffText})
bigpatch.diffs = bigpatch.diffs[1:]
} else {
// Deletion or equality. Only take as much as we can stomach.
diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)]
diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)]
patch.length1 += len(diffText)
start1 += len(diffText)
patch.Length1 += len(diffText)
Start1 += len(diffText)
if diffType == DiffEqual {
patch.length2 += len(diffText)
start2 += len(diffText)
patch.Length2 += len(diffText)
Start2 += len(diffText)
} else {
empty = false
}
@ -448,8 +448,8 @@ func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
}
if len(postcontext) != 0 {
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
patch.Length1 += len(postcontext)
patch.Length2 += len(postcontext)
if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
patch.diffs[len(patch.diffs)-1].Text += postcontext
} else {
@ -496,27 +496,27 @@ func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
patch = Patch{}
m := patchHeader.FindStringSubmatch(text[textPointer])
patch.start1, _ = strconv.Atoi(m[1])
patch.Start1, _ = strconv.Atoi(m[1])
if len(m[2]) == 0 {
patch.start1--
patch.length1 = 1
patch.Start1--
patch.Length1 = 1
} else if m[2] == "0" {
patch.length1 = 0
patch.Length1 = 0
} else {
patch.start1--
patch.length1, _ = strconv.Atoi(m[2])
patch.Start1--
patch.Length1, _ = strconv.Atoi(m[2])
}
patch.start2, _ = strconv.Atoi(m[3])
patch.Start2, _ = strconv.Atoi(m[3])
if len(m[4]) == 0 {
patch.start2--
patch.length2 = 1
patch.Start2--
patch.Length2 = 1
} else if m[4] == "0" {
patch.length2 = 0
patch.Length2 = 0
} else {
patch.start2--
patch.length2, _ = strconv.Atoi(m[4])
patch.Start2--
patch.Length2, _ = strconv.Atoi(m[4])
}
textPointer++

View File

@ -26,10 +26,10 @@ func TestPatchString(t *testing.T) {
for i, tc := range []TestCase{
{
Patch: Patch{
start1: 20,
start2: 21,
length1: 18,
length2: 17,
Start1: 20,
Start2: 21,
Length1: 18,
Length2: 17,
diffs: []Diff{
{DiffEqual, "jump"},

View File

@ -69,7 +69,7 @@ var query struct {
}
```
And call `client.Query`, passing a pointer to it:
Then call `client.Query`, passing a pointer to it:
```Go
err := client.Query(context.Background(), &query, nil)
@ -107,7 +107,7 @@ var q struct {
}
```
And call `client.Query`:
Then call `client.Query`:
```Go
err := client.Query(context.Background(), &q, nil)
@ -151,6 +151,80 @@ if err != nil {
}
```
### Inline Fragments
Some GraphQL queries contain inline fragments. You can use the `graphql` struct field tag to express them.
For example, to make the following GraphQL query:
```GraphQL
{
hero(episode: "JEDI") {
name
... on Droid {
primaryFunction
}
... on Human {
height
}
}
}
```
You can define this variable:
```Go
var q struct {
Hero struct {
Name graphql.String
Droid struct {
PrimaryFunction graphql.String
} `graphql:"... on Droid"`
Human struct {
Height graphql.Float
} `graphql:"... on Human"`
} `graphql:"hero(episode: \"JEDI\")"`
}
```
Alternatively, you can define the struct types corresponding to inline fragments, and use them as embedded fields in your query:
```Go
type (
DroidFragment struct {
PrimaryFunction graphql.String
}
HumanFragment struct {
Height graphql.Float
}
)
var q struct {
Hero struct {
Name graphql.String
DroidFragment `graphql:"... on Droid"`
HumanFragment `graphql:"... on Human"`
} `graphql:"hero(episode: \"JEDI\")"`
}
```
Then call `client.Query`:
```Go
err := client.Query(context.Background(), &q, nil)
if err != nil {
// Handle error.
}
fmt.Println(q.Hero.Name)
fmt.Println(q.Hero.PrimaryFunction)
fmt.Println(q.Hero.Height)
// Output:
// R2-D2
// Astromech
// 0
```
### Mutations
Mutations often require information that you can only find out by performing a query first. Let's suppose you've already done that.
@ -191,7 +265,7 @@ variables := map[string]interface{}{
}
```
And call `client.Mutate`:
Then call `client.Mutate`:
```Go
err := client.Mutate(context.Background(), &m, variables)

10
vendor/github.com/ugorji/go/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,10 @@
language: go
sudo: false
go:
- 1.8.x
- 1.9.x
- tip
script:
- go test -tags "alltests" -run Suite -coverprofile coverage.txt github.com/ugorji/go/codec
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,3 +1,10 @@
[![Sourcegraph](https://sourcegraph.com/github.com/ugorji/go/-/badge.svg)](https://sourcegraph.com/github.com/ugorji/go/-/blob/codec)
[![Build Status](https://travis-ci.org/ugorji/go.svg?branch=master)](https://travis-ci.org/ugorji/go)
[![codecov](https://codecov.io/gh/ugorji/go/branch/master/graph/badge.svg)](https://codecov.io/gh/ugorji/go)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/ugorji/go/codec)
[![rcard](https://goreportcard.com/badge/github.com/ugorji/go/codec)](https://goreportcard.com/report/github.com/ugorji/go/codec)
[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/ugorji/go/master/LICENSE)
# go/codec
This repository contains the `go-codec` library,

View File

@ -31,8 +31,13 @@ the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Support for go1.4 and above, while selectively using newer APIs for later releases
- Good code coverage ( > 70% )
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Carefully selected use of 'unsafe' for targeted performance gains.
100% mode exists where 'unsafe' is not used at all.
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
- Multiple conversions:
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
@ -88,6 +93,27 @@ encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halves.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce the chance of issues that arise when the
encoding and decoding sides are out of sync, e.g. decoded via the very specific
encoding.TextUnmarshaler but encoded via the kind-specific generalized mode.
Consequently, if a type only defines one half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON()),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
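To make the symmetry requirement concrete, a hedged sketch (not from the library) of a type that defines both halves of one branch of the decision tree, encoding.TextMarshaler and encoding.TextUnmarshaler, so that text-based handles treat it the same way in both directions:

```Go
package main

import (
	"fmt"
	"net"
)

// CIDR wraps a network and round-trips as text. Because it implements
// both TextMarshaler and TextUnmarshaler, a text-based format picks the
// same branch of the decision tree when encoding and when decoding.
type CIDR struct{ net.IPNet }

func (c CIDR) MarshalText() ([]byte, error) { return []byte(c.String()), nil }

func (c *CIDR) UnmarshalText(b []byte) error {
	_, n, err := net.ParseCIDR(string(b))
	if err != nil {
		return err
	}
	c.IPNet = *n
	return nil
}

func main() {
	var c CIDR
	if err := c.UnmarshalText([]byte("10.0.0.0/8")); err != nil {
		panic(err)
	}
	out, _ := c.MarshalText()
	// Defining only one of the two methods would push the other direction
	// down to the kind-specific fallback, breaking the symmetry.
	fmt.Println(string(out)) // 10.0.0.0/8
}
```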
RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
@ -156,40 +182,25 @@ Sample usage model:
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
Running Tests
To run tests, use the following:
go test
To run the full suite of tests, use the following:
go test -tags alltests -run Suite
You can run the tag 'safe' to run tests or build in safe mode. e.g.
go test -tags safe -run Json
go test -tags "alltests safe" -run Suite
Running Benchmarks
Please see http://github.com/ugorji/go-codec-bench .
*/
package codec
// Benefits of go-codec:
//
// - encoding/json always reads whole file into memory first.
// This makes it unsuitable for parsing very large files.
// - encoding/xml cannot parse into a map[string]interface{}
// I found this out on reading https://github.com/clbanning/mxj
// TODO:
//
// - optimization for codecgen:
// if len of entity is <= 3 words, then support a value receiver for encode.
// - (En|De)coder should store an error when it occurs.
// Until reset, subsequent calls return that error that was stored.
// This means that free panics must go away.
// All errors must be raised through errorf method.
// - Decoding using a chan is good, but incurs concurrency costs.
// This is because there's no fast way to use a channel without it
// having to switch goroutines constantly.
// Callback pattern is still the best. Maybe consider supporting something like:
// type X struct {
// Name string
// Ys []Y
// Ys chan <- Y
// Ys func(Y) -> call this function for each entry
// }
// - Consider adding a isZeroer interface { isZero() bool }
// It is used within isEmpty, for omitEmpty support.
// - Consider making Handle used AS-IS within the encoding/decoding session.
// This means that we don't cache Handle information within the (En|De)coder,
// except we really need it at Reset(...)
// - Consider adding math/big support
// - Consider reducing the size of the generated functions:
// Maybe use one loop, and put the conditionals in the loop.
// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }

View File

@ -91,6 +91,27 @@ encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
## Custom Encoding and Decoding
This package maintains symmetry in the encoding and decoding halves.
We determine how to encode or decode by walking this decision tree
- is type a codec.Selfer?
- is there an extension registered for the type?
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
- is format text-based, and type an encoding.TextMarshaler?
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
This symmetry is important to reduce the chance of issues that arise when the
encoding and decoding sides are out of sync, e.g. decoded via the very specific
encoding.TextUnmarshaler but encoded via the kind-specific generalized mode.
Consequently, if a type only defines one half of the symmetry
(e.g. it implements UnmarshalJSON() but not MarshalJSON()),
then that type doesn't satisfy the check and we will continue walking down the
decision tree.
## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used

View File

@ -61,7 +61,8 @@ type bincEncDriver struct {
m map[string]uint16 // symbols
b [scratchByteArrayLen]byte
s uint16 // symbols sequencer
encNoSeparator
// encNoSeparator
encDriverNoopContainerWriter
}
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
@ -195,11 +196,11 @@ func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
e.w.writen1(xtag)
}
func (e *bincEncDriver) EncodeArrayStart(length int) {
func (e *bincEncDriver) WriteArrayStart(length int) {
e.encLen(bincVdArray<<4, uint64(length))
}
func (e *bincEncDriver) EncodeMapStart(length int) {
func (e *bincEncDriver) WriteMapStart(length int) {
e.encLen(bincVdMap<<4, uint64(length))
}
@ -332,13 +333,14 @@ type bincDecDriver struct {
bd byte
vd byte
vs byte
noStreamingCodec
decNoSeparator
// noStreamingCodec
// decNoSeparator
b [scratchByteArrayLen]byte
// linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream.
s []bincDecSymbol
decDriverNoopContainerReader
}
func (d *bincDecDriver) readNextBd() {
@ -512,7 +514,7 @@ func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("binc: overflow integer: %v", i)
d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize)
return
}
d.bdRead = false
@ -804,7 +806,7 @@ func (d *bincDecDriver) DecodeNaked() {
d.readNextBd()
}
n := &d.d.n
n := d.d.n
var decodeFurther bool
switch d.vd {
@ -909,6 +911,7 @@ func (d *bincDecDriver) DecodeNaked() {
type BincHandle struct {
BasicHandle
binaryEncodingType
noElemSeparators
}
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
@ -923,6 +926,10 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (_ *BincHandle) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) reset() {
e.w = e.e.w
e.s = 0

View File

@ -61,7 +61,8 @@ const (
type cborEncDriver struct {
noBuiltInTypes
encNoSeparator
encDriverNoopContainerWriter
// encNoSeparator
e *Encoder
w encWriter
h *CborHandle
@ -134,39 +135,89 @@ func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Enco
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
e.encUint(uint64(re.Tag), cborBaseTag)
if re.Data != nil {
if false && re.Data != nil {
en.encode(re.Data)
} else if re.Value == nil {
e.EncodeNil()
} else {
} else if re.Value != nil {
en.encode(re.Value)
} else {
e.EncodeNil()
}
}
func (e *cborEncDriver) EncodeArrayStart(length int) {
e.encLen(cborBaseArray, length)
func (e *cborEncDriver) WriteArrayStart(length int) {
if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteArray)
} else {
e.encLen(cborBaseArray, length)
}
}
func (e *cborEncDriver) EncodeMapStart(length int) {
e.encLen(cborBaseMap, length)
func (e *cborEncDriver) WriteMapStart(length int) {
if e.h.IndefiniteLength {
e.w.writen1(cborBdIndefiniteMap)
} else {
e.encLen(cborBaseMap, length)
}
}
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(cborBaseString, len(v))
e.w.writestr(v)
func (e *cborEncDriver) WriteMapEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
}
func (e *cborEncDriver) WriteArrayEnd() {
if e.h.IndefiniteLength {
e.w.writen1(cborBdBreak)
}
}
func (e *cborEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
e.encStringBytesS(cborBaseString, v)
}
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encStringBytesS(cborBaseString, v)
}
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if c == c_RAW {
e.encLen(cborBaseBytes, len(v))
e.encStringBytesS(cborBaseBytes, stringView(v))
} else {
e.encLen(cborBaseString, len(v))
e.encStringBytesS(cborBaseString, stringView(v))
}
}
func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
if e.h.IndefiniteLength {
if bb == cborBaseBytes {
e.w.writen1(cborBdIndefiniteBytes)
} else {
e.w.writen1(cborBdIndefiniteString)
}
blen := len(v) / 4
if blen == 0 {
blen = 64
} else if blen > 1024 {
blen = 1024
}
for i := 0; i < len(v); {
var v2 string
i2 := i + blen
if i2 < len(v) {
v2 = v[i:i2]
} else {
v2 = v[i:]
}
e.encLen(bb, len(v2))
e.w.writestr(v2)
i = i2
}
e.w.writen1(cborBdBreak)
} else {
e.encLen(bb, len(v))
e.w.writestr(v)
}
e.w.writeb(v)
}
// ----------------------
@ -180,7 +231,8 @@ type cborDecDriver struct {
bdRead bool
bd byte
noBuiltInTypes
decNoSeparator
// decNoSeparator
decDriverNoopContainerReader
}
func (d *cborDecDriver) readNextBd() {
@ -416,8 +468,9 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
return nil
}
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
d.bdRead = false
if bs == nil {
return d.decAppendIndefiniteBytes(nil)
return d.decAppendIndefiniteBytes(zeroByteSlice)
}
return d.decAppendIndefiniteBytes(bs[:0])
}
@ -469,7 +522,7 @@ func (d *cborDecDriver) DecodeNaked() {
d.readNextBd()
}
n := &d.d.n
n := d.d.n
var decodeFurther bool
switch d.bd {
@ -577,7 +630,11 @@ func (d *cborDecDriver) DecodeNaked() {
//
type CborHandle struct {
binaryEncodingType
noElemSeparators
BasicHandle
// IndefiniteLength=true means that we encode using indefinite lengths.
IndefiniteLength bool
}
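A small sketch of the new option (the import path is the library's, assumed here): with IndefiniteLength set, maps, arrays, strings, and byte strings are written with indefinite-length headers and terminated by the CBOR break byte, as implemented in WriteMapStart/WriteArrayStart/encStringBytesS above:

```Go
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	ch := &codec.CborHandle{IndefiniteLength: true}

	var buf []byte
	enc := codec.NewEncoderBytes(&buf, ch)
	if err := enc.Encode(map[string]int{"a": 1, "b": 2}); err != nil {
		panic(err)
	}

	// Decoding does not need the option; indefinite-length items are
	// accepted on the read side regardless.
	var out map[string]int
	if err := codec.NewDecoderBytes(buf, ch).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```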
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {

View File

@ -97,7 +97,7 @@ type testCborGolden struct {
}
// Some tests are skipped because they include numbers outside the range of int64/uint64
func doTestCborGoldens(t *testing.T) {
func TestCborGoldens(t *testing.T) {
oldMapType := testCborH.MapType
defer func() {
testCborH.MapType = oldMapType
@ -200,6 +200,31 @@ func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bo
// fmt.Printf("%v testCborError passed (checks passed)\n", i)
}
func TestCborGoldens(t *testing.T) {
doTestCborGoldens(t)
func TestCborHalfFloat(t *testing.T) {
m := map[uint16]float64{
// using examples from
// https://en.wikipedia.org/wiki/Half-precision_floating-point_format
0x3c00: 1,
0x3c01: 1 + math.Pow(2, -10),
0xc000: -2,
0x7bff: 65504,
0x0400: math.Pow(2, -14),
0x03ff: math.Pow(2, -14) - math.Pow(2, -24),
0x0001: math.Pow(2, -24),
0x0000: 0,
0x8000: -0.0,
}
var ba [3]byte
ba[0] = cborBdFloat16
var res float64
for k, v := range m {
res = 0
bigen.PutUint16(ba[1:], k)
testUnmarshalErr(&res, ba[:3], testCborH, t, "-")
if res == v {
logT(t, "equal floats: from %x %b, %v", k, k, v)
} else {
failT(t, "unequal floats: from %x %b, %v != %v", k, k, res, v)
}
}
}

File diff suppressed because it is too large

View File

@ -62,7 +62,7 @@ func CodecGenTempWrite{{ .RandString }}() {
var t{{ $index }} {{ . }}
typs = append(typs, reflect.TypeOf(t{{ $index }}))
{{ end }}
{{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", "{{ .RandString }}", {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}NewTypeInfos(strings.Split("{{ .StructTags }}", ",")), typs...)
{{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", "{{ .RandString }}", {{ .NoExtensions }}, {{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}NewTypeInfos(strings.Split("{{ .StructTags }}", ",")), typs...)
bout, err := format.Source(out.Bytes())
if err != nil {
fout.Write(out.Bytes())
@ -82,8 +82,12 @@ func CodecGenTempWrite{{ .RandString }}() {
// Tool then executes: "go run __frun__" which creates fout.
// fout contains Codec(En|De)codeSelf implementations for every type T.
//
func Generate(outfile, buildTag, codecPkgPath string, uid int64, goRunTag string,
st string, regexName *regexp.Regexp, notRegexName *regexp.Regexp, deleteTempFile bool, infiles ...string) (err error) {
func Generate(outfile, buildTag, codecPkgPath string,
uid int64,
goRunTag string, st string,
regexName, notRegexName *regexp.Regexp,
deleteTempFile, noExtensions bool,
infiles ...string) (err error) {
// For each file, grab AST, find each type, and write a call to it.
if len(infiles) == 0 {
return
@ -121,6 +125,7 @@ func Generate(outfile, buildTag, codecPkgPath string, uid int64, goRunTag string
StructTags string
Types []string
CodecPkgFiles bool
NoExtensions bool
}
tv := tmplT{
CodecPkgName: genCodecPkg,
@ -129,6 +134,7 @@ func Generate(outfile, buildTag, codecPkgPath string, uid int64, goRunTag string
BuildTag: buildTag,
RandString: strconv.FormatInt(uid, 10),
StructTags: st,
NoExtensions: noExtensions,
}
tv.ImportPath = pkg.ImportPath
if tv.ImportPath == tv.CodecImportPath {
@ -313,9 +319,12 @@ func main() {
x := flag.Bool("x", false, "keep temp file")
_ = flag.Bool("u", false, "*IGNORED - kept for backwards compatibility*: Allow unsafe use")
d := flag.Int64("d", 0, "random identifier for use in generated code")
nx := flag.Bool("nx", false, "no extensions")
flag.Parse()
if err := Generate(*o, *t, *c, *d, *rt, *st,
regexp.MustCompile(*r), regexp.MustCompile(*nr), !*x, flag.Args()...); err != nil {
regexp.MustCompile(*r), regexp.MustCompile(*nr), !*x, *nx,
flag.Args()...); err != nil {
fmt.Fprintf(os.Stderr, "codecgen error: %v\n", err)
os.Exit(1)
}
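A hedged sketch of how the new -nx flag could be wired into a go:generate directive; the package name, file names, and -d identifier are illustrative, and only the flags defined in main above are used:

	// Sketch: generate Selfer methods with extension checks disabled (-nx),
	// so the generated code skips the z.HasExtensions()/z.EncExt branches.
	//go:generate codecgen -nx -d 1978 -o values_codecgen.go values.go
	package values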

View File

@ -1,24 +0,0 @@
// +build x,codecgen
package codec
import (
"fmt"
"testing"
)
func _TestCodecgenJson1(t *testing.T) {
// This is just a simplistic test for codecgen.
// It is typically disabled. We only enable it for debugging purposes.
const callCodecgenDirect bool = true
v := newTestStruc(2, false, !testSkipIntf, false)
var bs []byte
e := NewEncoderBytes(&bs, testJsonH)
if callCodecgenDirect {
v.CodecEncodeSelf(e)
e.w.atEndOfEncode()
} else {
e.MustEncode(v)
}
fmt.Printf("%s\n", bs)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -40,9 +40,6 @@ import (
const fastpathEnabled = true
const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch
type fastpathT struct {}
var fastpathTV fastpathT
@ -50,8 +47,8 @@ var fastpathTV fastpathT
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*encFnInfo, reflect.Value)
decfn func(*decFnInfo, reflect.Value)
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
}
type fastpathA [{{ .FastpathLen }}]fastpathE
@ -83,20 +80,29 @@ var fastpathAV fastpathA
// due to possible initialization loop error, make fastpath in an init()
func init() {
if useLookupRecognizedTypes && recognizedRtidsLoaded {
panic("recognizedRtidsLoaded = true - cannot happen")
}
i := 0
fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
fn := func(v interface{},
fe func(*Encoder, *codecFnInfo, reflect.Value),
fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)
xptr := rt2id(xrt)
if useLookupRecognizedTypes {
recognizedRtids = append(recognizedRtids, xptr)
recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt)))
}
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
i++
return
}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
sort.Sort(fastpathAslice(fastpathAV[:]))
}
@ -109,10 +115,10 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e){{if not .MapKey }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
@ -121,13 +127,14 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
return true
}
{{/* **** removing this block, as they are never called directly ****
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *[]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
@ -140,9 +147,9 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
{{end}}{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
@ -150,72 +157,60 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
}
return true
}
*/}}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
if f.ti.mbs {
fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e)
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e)
}
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
if v == nil { e.e.EncodeNil(); return }
ee, esep := e.e, e.hh.hasElemSeparators()
ee.WriteArrayStart(len(v))
for _, v2 := range v {
if cr != nil { cr.sendContainerState(containerArrayElem) }
if esep { ee.WriteArrayElem() }
{{ encmd .Elem "v2"}}
}
if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}}
ee.WriteArrayEnd()
}
func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
ee, esep := e.e, e.hh.hasElemSeparators()
if len(v)%2 == 1 {
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
return
}
ee.EncodeMapStart(len(v) / 2)
ee.WriteMapStart(len(v) / 2)
for j, v2 := range v {
if cr != nil {
if esep {
if j%2 == 0 {
cr.sendContainerState(containerMapKey)
ee.WriteMapElemKey()
} else {
cr.sendContainerState(containerMapValue)
ee.WriteMapElemValue()
}
}
{{ encmd .Elem "v2"}}
}
if cr != nil { cr.sendContainerState(containerMapEnd) }
ee.WriteMapEnd()
}
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}
func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
cr := e.cr
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeMapStart(len(v))
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
if v == nil { e.e.EncodeNil(); return }
ee, esep := e.e, e.hh.hasElemSeparators()
ee.WriteMapStart(len(v))
{{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
{{end}}if e.h.Canonical {
{{if eq .MapKey "interface{}"}}{{/* out of band
@ -234,9 +229,9 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
}
sort.Sort(bytesISlice(v2))
for j := range v2 {
if cr != nil { cr.sendContainerState(containerMapKey) }
if esep { ee.WriteMapElemKey() }
e.asis(v2[j].v)
if cr != nil { cr.sendContainerState(containerMapValue) }
if esep { ee.WriteMapElemValue() }
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i int
@ -246,28 +241,28 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele
}
sort.Sort({{ sorttype .MapKey false}}(v2))
for _, k2 := range v2 {
if cr != nil { cr.sendContainerState(containerMapKey) }
if esep { ee.WriteMapElemKey() }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}}
if cr != nil { cr.sendContainerState(containerMapValue) }
if esep { ee.WriteMapElemValue() }
{{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }}
} {{end}}
} else {
for k2, v2 := range v {
if cr != nil { cr.sendContainerState(containerMapKey) }
if esep { ee.WriteMapElemKey() }
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
if cr != nil { cr.sendContainerState(containerMapValue) }
if esep { ee.WriteMapElemValue() }
{{ encmd .Elem "v2"}}
}
}
if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}}
ee.WriteMapEnd()
}
{{end}}{{end}}{{end}}
@ -280,11 +275,10 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
if changed2 {
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d){{if not .MapKey }}
case *[]{{ .Elem }}: {{else}}
case *map[{{ .MapKey }}]{{ .Elem }}: {{end}}
if v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d); changed2 {
*v = v2
}
{{end}}{{end}}
@ -295,6 +289,20 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
return true
}
func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
case *[]{{ .Elem }}: {{else}}
case *map[{{ .MapKey }}]{{ .Elem }}: {{end}}
*v = nil
{{end}}{{end}}
default:
_ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release)
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey }}
{{/*
@ -303,33 +311,24 @@ Slices can change if they
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
array := f.seq == seqTypeArray
if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}}
vp := rv2i(rv.Addr()).(*[]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
if changed {
*vp = v
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr {
var vp = rv2i(rv).(*[]{{ .Elem }})
if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d); changed {
*vp = v
}
} else {
v := rv2i(rv).([]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).([]{{ .Elem }}), !array, d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) {
dd := d.d
{{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
if v != nil { changed = true }
return nil, changed
}
slh, containerLenS := d.decSliceHelperStart()
if containerLenS == 0 {
if canChange {
@ -411,33 +410,25 @@ Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
*/}}
func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
if rv.CanAddr() {
vp := rv2i(rv.Addr()).(*map[{{ .MapKey }}]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
if changed {
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
if rv.Kind() == reflect.Ptr {
vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
*vp = v
}
} else {
v := rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
return
}
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d)
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool,
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
dd := d.d
cr := d.cr
dd, esep := d.d, d.hh.hasElemSeparators()
{{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}}
if checkNil && dd.TryDecodeAsNil() {
if v != nil { changed = true }
return nil, changed
}
containerLen := dd.ReadMapStart()
if canChange && v == nil {
xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }})
@ -445,7 +436,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
changed = true
}
if containerLen == 0 {
if cr != nil { cr.sendContainerState(containerMapEnd) }
dd.ReadMapEnd()
return v, changed
}
{{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}}
@ -453,20 +444,24 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Ele
var mv {{ .Elem }}
hasLen := containerLen > 0
for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
if cr != nil { cr.sendContainerState(containerMapKey) }
if esep { dd.ReadMapElemKey() }
{{ if eq .MapKey "interface{}" }}mk = nil
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
}{{ else }}mk = {{ decmd .MapKey }}{{ end }}
if cr != nil { cr.sendContainerState(containerMapValue) }
if esep { dd.ReadMapElemValue() }
if dd.TryDecodeAsNil() {
if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} }
continue
}
{{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil }
d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
if cr != nil { cr.sendContainerState(containerMapEnd) }
dd.ReadMapEnd()
return v, changed
}
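The pattern the regenerated fast paths now follow, sketched as a hand-written map encoder; it assumes the unexported Encoder/encDriver internals shown in this diff (e.e, e.hh.hasElemSeparators, c_UTF8) and is only meaningful inside the codec package:

	// Sketch: the WriteMapStart/WriteMapElemKey/WriteMapElemValue/WriteMapEnd
	// sequence, emitting element separators only when the handle requires them.
	func encodeStringIntMap(e *Encoder, v map[string]int) {
		ee, esep := e.e, e.hh.hasElemSeparators()
		ee.WriteMapStart(len(v))
		for k, n := range v {
			if esep {
				ee.WriteMapElemKey()
			}
			ee.EncodeString(c_UTF8, k)
			if esep {
				ee.WriteMapElemValue()
			}
			ee.EncodeInt(int64(n))
		}
		ee.WriteMapEnd()
	}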

View File

@ -14,17 +14,18 @@ const fastpathEnabled = false
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false }
type fastpathT struct{}
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*encFnInfo, reflect.Value)
decfn func(*decFnInfo, reflect.Value)
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
}
type fastpathA [0]fastpathE
@ -32,3 +33,6 @@ func (x fastpathA) index(rtid uintptr) int { return -1 }
var fastpathAV fastpathA
var fastpathTV fastpathT
// ----
type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode

View File

@ -15,7 +15,7 @@ if {{var "l"}} == 0 {
} {{end}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
var {{var "rl"}} int; _ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
@ -30,9 +30,10 @@ if {{var "l"}} == 0 {
{{var "c"}} = true
}
} {{end}}
{{var "j"}} := 0
var {{var "j"}} int
// var {{var "dn"}} bool
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
{{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
@ -40,20 +41,26 @@ if {{var "l"}} == 0 {
}
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
{{var "c"}} = true
}
}{{end}}
{{var "h"}}.ElemContainerState({{var "j"}})
// {{var "dn"}} = r.TryDecodeAsNil()
{{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
{{ decLineVar $x }}
{{var "v"}} <- {{ $x }}
{{else}}
// if indefinite, etc, then expand the slice if necessary
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true
{{end}} {{if isArray}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end}}
}
{{var "h"}}.ElemContainerState({{var "j"}})
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{end}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]

View File

@ -8,7 +8,7 @@ if {{var "v"}} == nil {
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
@ -17,7 +17,7 @@ if {{var "bh"}}.MapValueReset {
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
@ -29,11 +29,14 @@ if {{var "l"}} != 0 {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
if {{var "mdn"}} {
if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}

View File

@ -16,7 +16,7 @@ import (
)
// GenVersion is the current version of codecgen.
const GenVersion = 6
const GenVersion = 8
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
@ -61,13 +61,14 @@ func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
return f.e.cf.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -90,7 +91,7 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
f.e.rawBytes(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -103,7 +104,7 @@ func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
return f.e.cf.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -125,13 +126,6 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -157,7 +151,12 @@ func (f genHelperDecoder) DecScratchBuffer() []byte {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
rv := reflect.ValueOf(iv)
if chkPtr {
rv = f.d.ensureDecodeable(rv)
}
f.d.decodeValue(rv, nil, false, false)
// f.d.decodeValueFallback(rv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -203,7 +202,7 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
return f.d.rawBytes()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
@ -244,10 +243,3 @@ func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
func (f genHelperDecoder) StringView(v []byte) string {
return stringView(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {
f.d.cr.sendContainerState(c)
}
}

View File

@ -61,12 +61,13 @@ func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
return f.e.cf.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
// f.e.encodeI(iv, false, false)
f.e.encodeValue(reflect.ValueOf(iv), nil, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
@ -85,7 +86,7 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
f.e.rawBytes(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
@ -96,7 +97,7 @@ func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
return f.e.cf.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) HasExtensions() bool {
@ -115,12 +116,6 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
// ---------------- DECODER FOLLOWS -----------------
@ -143,7 +138,12 @@ func (f genHelperDecoder) DecScratchBuffer() []byte {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
rv := reflect.ValueOf(iv)
if chkPtr {
rv = f.d.ensureDecodeable(rv)
}
f.d.decodeValue(rv, nil, false, false)
// f.d.decodeValueFallback(rv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
@ -182,7 +182,7 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
return f.d.rawBytes()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
@ -217,165 +217,4 @@ func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
func (f genHelperDecoder) StringView(v []byte) string {
return stringView(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {
f.d.cr.sendContainerState(c)
}
}
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncDriver() encDriver {
return f.e.e
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDriver() decDriver {
return f.d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncNil() {
f.e.e.EncodeNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBytes(v []byte) {
f.e.e.EncodeStringBytes(c_RAW, v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayStart(length int) {
f.e.e.EncodeArrayStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEnd() {
f.e.e.EncodeArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEntrySeparator() {
f.e.e.EncodeArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapStart(length int) {
f.e.e.EncodeMapStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEnd() {
f.e.e.EncodeMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEntrySeparator() {
f.e.e.EncodeMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapKVSeparator() {
f.e.e.EncodeMapKVSeparator()
}
// ---------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBytes(v *[]byte) {
*v = f.d.d.DecodeBytes(*v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTryNil() bool {
return f.d.d.TryDecodeAsNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsNil() (b bool) {
return f.d.d.IsContainerType(valueTypeNil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsMap() (b bool) {
return f.d.d.IsContainerType(valueTypeMap)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsArray() (b bool) {
return f.d.d.IsContainerType(valueTypeArray)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecCheckBreak() bool {
return f.d.d.CheckBreak()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapStart() int {
return f.d.d.ReadMapStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayStart() int {
return f.d.d.ReadArrayStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEnd() {
f.d.d.ReadMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEnd() {
f.d.d.ReadArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEntrySeparator() {
f.d.d.ReadArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEntrySeparator() {
f.d.d.ReadMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapKVSeparator() {
f.d.d.ReadMapKVSeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
return f.d.d.DecodeStringAsBytes(bs)
}
// -- encode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
ee := f.e.e
{{ encmd .Primitive "v" }}
}
{{ end }}{{ end }}{{ end }}
// -- decode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
dd := f.d.d
*vp = {{ decmd .Primitive }}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
dd := f.d.d
v = {{ decmd .Primitive }}
return
}
{{ end }}{{ end }}{{ end }}
// -- encode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}{{if .Slice }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
}
{{ end }}{{ end }}
// -- decode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
if changed {
*vp = v
}
}
{{ end }}{{ end }}
*/}}

View File

@ -16,7 +16,7 @@ if {{var "v"}} == nil {
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
@ -25,7 +25,7 @@ if {{var "bh"}}.MapValueReset {
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
@ -37,14 +37,17 @@ if {{var "l"}} != 0 {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}}
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }}
if {{var "mdn"}} {
if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} }
} else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}}
`
const genDecListTmpl = `
@ -65,7 +68,7 @@ if {{var "l"}} == 0 {
} {{end}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
var {{var "rl"}} int; _ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
@ -80,9 +83,10 @@ if {{var "l"}} == 0 {
{{var "c"}} = true
}
} {{end}}
{{var "j"}} := 0
var {{var "j"}} int
// var {{var "dn"}} bool
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
{{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
@ -90,20 +94,26 @@ if {{var "l"}} == 0 {
}
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
{{var "c"}} = true
}
}{{end}}
{{var "h"}}.ElemContainerState({{var "j"}})
// {{var "dn"}} = r.TryDecodeAsNil()
{{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }}
{{ decLineVar $x }}
{{var "v"}} <- {{ $x }}
{{else}}
// if indefinite, etc, then expand the slice if necessary
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true
{{end}} {{if isArray}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end}}
}
{{var "h"}}.ElemContainerState({{var "j"}})
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{end}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]

View File

@ -101,7 +101,7 @@ import (
// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
// v6: removed unsafe from gen, and now uses codecgen.exec tag
const genVersion = 6
const genVersion = 8
const (
genCodecPkg = "codec1978"
@ -163,13 +163,16 @@ type genRunner struct {
ti *TypeInfos
// rr *rand.Rand // random generator for file-specific types
nx bool // no extensions
}
// Gen will write a complete go file containing Selfer implementations for each
// type passed. All the types must be in the same package.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func Gen(w io.Writer, buildTags, pkgName, uid string, ti *TypeInfos, typ ...reflect.Type) {
func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
ti *TypeInfos, typ ...reflect.Type) {
// All types passed to this method do not have a codec.Selfer method implemented directly.
// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
// Consequently, there's no need to check and trim them if they implement codec.Selfer
@ -190,6 +193,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, ti *TypeInfos, typ ...refl
bp: genImportPath(typ[0]),
xs: uid,
ti: ti,
nx: noExtensions,
}
if x.ti == nil {
x.ti = defTypeInfos
@ -500,7 +504,7 @@ func (x *genRunner) selfer(encode bool) {
t0 := t
// always make decode use a pointer receiver,
// and structs always use a ptr receiver (encode|decode)
isptr := !encode || t.Kind() == reflect.Struct
isptr := !encode || (t.Kind() == reflect.Struct || t.Kind() == reflect.Array)
x.varsfxreset()
fnSigPfx := "func (x "
if isptr {
@ -713,7 +717,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
}
// only check for extensions if the type is named, and has a packagePath.
if genImportPath(t) != "" && t.Name() != "" {
if !x.nx && genImportPath(t) != "" && t.Name() != "" {
// first check if extensions are configured, before doing the interface conversion
x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
}
@ -773,7 +777,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
} else {
x.xtraSM(varname, true, t)
// x.encListFallback(varname, rtid, t)
@ -787,7 +791,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
// x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)")
} else {
x.xtraSM(varname, true, t)
// x.encMapFallback(varname, rtid, t)
@ -845,55 +849,64 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
// number of non-empty things we write out first.
// This is required as we need to pre-determine the size of the container,
// to support length-prefixing.
x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
if ti.anyOmitEmpty {
x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
x.linef("_ = %s", numfieldsvar)
}
x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
nn := 0
for j, si := range tisfi {
if !si.omitEmpty {
nn++
continue
}
var t2 reflect.StructField
var omitline string
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
t2typ := t
varname3 := varname
for _, ix := range si.is {
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
omitline += varname3 + " != nil && "
var nn int
if ti.anyOmitEmpty {
for j, si := range tisfi {
if !si.omitEmpty {
nn++
continue
}
var t2 reflect.StructField
var omitline string
{
t2typ := t
varname3 := varname
for ij, ix := range si.is {
if uint8(ij) == si.nis {
break
}
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(int(ix))
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
omitline += varname3 + " != nil && "
}
}
}
// never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
switch t2.Type.Kind() {
case reflect.Struct:
omitline += " true"
case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
omitline += "len(" + varname + "." + t2.Name + ") != 0"
default:
omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
}
x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
}
// never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
// also, for maps/slices/arrays, check if len != 0 (not if == zero value)
switch t2.Type.Kind() {
case reflect.Struct:
omitline += " true"
case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
omitline += "len(" + varname + "." + t2.Name + ") != 0"
default:
omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
}
x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
}
x.linef("var %snn%s int", genTempVarPfx, i)
// x.linef("var %snn%s int", genTempVarPfx, i)
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
x.linef("r.WriteArrayStart(%d)", len(tisfi))
x.linef("} else {") // if not ti.toArray
x.linef("%snn%s = %v", genTempVarPfx, i, nn)
x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
x.linef("%snn%s = %v", genTempVarPfx, i, 0)
// x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
if ti.anyOmitEmpty {
x.linef("var %snn%s = %v", genTempVarPfx, i, nn)
x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i)
x.linef("%snn%s = %v", genTempVarPfx, i, 0)
} else {
x.linef("r.WriteMapStart(%d)", len(tisfi))
}
x.line("}") // close if not StructToArray
for j, si := range tisfi {
@ -901,17 +914,18 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
isNilVarName := genTempVarPfx + "n" + i
var labelUsed bool
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
{
t2typ := t
varname3 := varname
for _, ix := range si.is {
for ij, ix := range si.is {
if uint8(ij) == si.nis {
break
}
// fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2 = t2typ.Field(int(ix))
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
@ -934,9 +948,10 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName)
// x.linef("if %s { z.EncSendContainerState(codecSelfer_containerArrayElem%s); r.EncodeNil() } else { ", isNilVarName, x.xs)
}
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
}
@ -955,9 +970,9 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
}
x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.WriteMapElemKey()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.line("r.WriteMapElemValue()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
x.encVar(varname+"."+t2.Name, t2.Type)
@ -971,9 +986,9 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
x.linef("} ") // end if/else ti.toArray
}
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("} else {")
x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("r.WriteMapEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("}")
}
@ -984,41 +999,41 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) {
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname)
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ((*[%d]byte)(%s))[:])", x.xs, t.Len(), varname)
return
}
i := x.varsfx()
g := genTempVarPfx
x.line("r.EncodeArrayStart(len(" + varname + "))")
x.line("r.WriteArrayStart(len(" + varname + "))")
if t.Kind() == reflect.Chan {
x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef("%sv%s := <-%s", g, i, varname)
} else {
// x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
}
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
// TODO: expand this to handle canonical.
i := x.varsfx()
x.line("r.EncodeMapStart(len(" + varname + "))")
x.line("r.WriteMapStart(len(" + varname + "))")
x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
// x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.WriteMapElemKey()") // f("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.encVar(genTempVarPfx+"k"+i, t.Key())
x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.line("r.WriteMapElemValue()") // f("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("r.WriteMapEnd()") // f("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
func (x *genRunner) decVar(varname, decodedNilVarname string, t reflect.Type, canBeNil bool) {
// We only encode as nil if a nillable value.
// This removes some of the wasted checks for TryDecodeAsNil.
// We need to think about this more, to see what happens if omitempty, etc
@ -1031,7 +1046,9 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
}
if canBeNil {
x.line("if r.TryDecodeAsNil() {")
if t.Kind() == reflect.Ptr {
if decodedNilVarname != "" {
x.line(decodedNilVarname + " = true")
} else if t.Kind() == reflect.Ptr {
x.line("if " + varname + " != nil { ")
// if varname is a field of a struct (has a dot in it),
@ -1149,7 +1166,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname)
}
// only check for extensions if the type is named, and has a packagePath.
if genImportPath(t) != "" && t.Name() != "" {
if !x.nx && genImportPath(t) != "" && t.Name() != "" {
// first check if extensions are configured, before doing the interface conversion
x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
}
@ -1227,7 +1244,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)")
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)")
} else {
x.xtraSM(varname, false, t)
// x.decListFallback(varname, rtid, false, t)
@ -1239,7 +1256,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
// - else call Encoder.encode(XXX) on it.
if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)")
} else {
x.xtraSM(varname, false, t)
// x.decMapFallback(varname, rtid, t)
@ -1315,7 +1332,7 @@ func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname)
x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname)
return
}
type tstruc struct {
@ -1333,13 +1350,13 @@ func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type
funcs := make(template.FuncMap)
funcs["decLineVar"] = func(varname string) string {
x.decVar(varname, telem, false)
return ""
}
funcs["decLine"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
x.decVar(varname, "", telem, false)
return ""
}
// funcs["decLine"] = func(pfx string) string {
// x.decVar(ts.TempVar+pfx+ts.Rand, "", reflect.PtrTo(telem), false)
// return ""
// }
funcs["var"] = func(s string) string {
return ts.TempVar + s + ts.Rand
}
@ -1395,21 +1412,21 @@ func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type)
return telem.Kind() == reflect.Interface
}
funcs["decLineVarK"] = func(varname string) string {
x.decVar(varname, tkey, false)
x.decVar(varname, "", tkey, false)
return ""
}
funcs["decLineVar"] = func(varname string) string {
x.decVar(varname, telem, false)
return ""
}
funcs["decLineK"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
return ""
}
funcs["decLine"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
funcs["decLineVar"] = func(varname, decodedNilVarname string) string {
x.decVar(varname, decodedNilVarname, telem, false)
return ""
}
// funcs["decLineK"] = func(pfx string) string {
// x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
// return ""
// }
// funcs["decLine"] = func(pfx string) string {
// x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
// return ""
// }
funcs["var"] = func(s string) string {
return ts.TempVar + s + ts.Rand
}
@ -1430,18 +1447,19 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
for _, si := range tisfi {
x.line("case \"" + si.encName + "\":")
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
{
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
for _, ix := range si.is {
for ij, ix := range si.is {
if uint8(ij) == si.nis {
break
}
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2 = t2typ.Field(int(ix))
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
@ -1449,7 +1467,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
}
}
}
x.decVar(varname+"."+t2.Name, t2.Type, false)
x.decVar(varname+"."+t2.Name, "", t2.Type, false)
}
x.line("default:")
// pass the slice here, so that the string will not escape, and maybe save allocation
@ -1480,15 +1498,15 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
x.line("} else { if r.CheckBreak() { break }; }")
}
x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.ReadMapElemKey()") // f("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line(kName + "Slc = r.DecodeStringAsBytes()")
// let string be scoped to this loop alone, so it doesn't escape.
x.line(kName + " := string(" + kName + "Slc)")
x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.line("r.ReadMapElemValue()") // f("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.decStructMapSwitch(kName, varname, rtid, t)
x.line("} // end for " + tpfx + "j" + i)
x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
@ -1501,18 +1519,19 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
for _, si := range tisfi {
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
{
// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
for _, ix := range si.is {
for ij, ix := range si.is {
if uint8(ij) == si.nis {
break
}
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2 = t2typ.Field(int(ix))
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
@ -1524,10 +1543,10 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
tpfx, i, x.xs, breakString)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.decVar(varname+"."+t2.Name, t2.Type, true)
x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString)
// x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", tpfx, i, x.xs, breakString)
x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.decVar(varname+"."+t2.Name, "", t2.Type, true)
}
// read remaining values and throw away.
x.line("for {")
@ -1535,10 +1554,10 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
x.linef("if %sb%s { break }", tpfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
x.line("}")
x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
@ -1548,7 +1567,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
if genUseOneFunctionForDecStructMap {
x.line("} else { ")
x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
@ -1564,7 +1583,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("} else { ")
x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
x.line("}")
@ -1778,6 +1797,21 @@ func genInternalZeroValue(s string) string {
}
}
func genInternalNonZeroValue(s string) string {
switch s {
case "interface{}", "interface {}":
return `"string-is-an-interface"` // return string, to remove ambiguity
case "bool":
return "true"
case "string":
return `"some-string"`
case "float32", "float64", "float", "double":
return "10.1"
default:
return "10"
}
}
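// Illustrative mapping (not part of the diff): what the new template helper
// returns for a few representative type names, as wired up below via
// funcs["nonzerocmd"].
//
//	genInternalNonZeroValue("string")      // `"some-string"`
//	genInternalNonZeroValue("float64")     // "10.1"
//	genInternalNonZeroValue("interface{}") // `"string-is-an-interface"`
//	genInternalNonZeroValue("uint16")      // "10" (default case)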
func genInternalEncCommandAsString(s string, vname string) string {
switch s {
case "uint", "uint8", "uint16", "uint32", "uint64":
@ -1951,6 +1985,7 @@ func genInternalInit() {
funcs["encmd"] = genInternalEncCommandAsString
funcs["decmd"] = genInternalDecCommandAsString
funcs["zerocmd"] = genInternalZeroValue
funcs["nonzerocmd"] = genInternalNonZeroValue
funcs["hasprefix"] = strings.HasPrefix
funcs["sorttype"] = genInternalSortType

File diff suppressed because it is too large


@ -9,7 +9,6 @@ package codec
import (
"errors"
"fmt"
"math"
"reflect"
)
@ -86,37 +85,6 @@ func pruneSignExt(v []byte, pos bool) (n int) {
return
}
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(iTyp) {
return true, indir
}
if p := rt; p.Kind() == reflect.Ptr {
indir++
if indir >= math.MaxInt8 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Ptr {
// Not a pointer, but does the pointer work?
if reflect.PtrTo(typ).Implements(iTyp) {
return true, -1
}
}
return false, 0
}
// validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)


@ -10,6 +10,8 @@ import (
"sync/atomic"
)
const safeMode = true
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
@ -30,6 +32,21 @@ func bytesView(v string) []byte {
return []byte(v)
}
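// Illustrative contrast (not part of the diff): in this safe build both helpers
// reduce to plain conversions, so each call allocates and copies.
//
//	b := []byte("abc")
//	s := stringView(b) // equivalent to string(b): copies
//	_ = bytesView(s)   // equivalent to []byte(s): copies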
func definitelyNil(v interface{}) bool {
// this is a best-effort option.
// We just return false, so we don't unnecessarily incur the cost of reflection this early.
return false
// rv := reflect.ValueOf(v)
// switch rv.Kind() {
// case reflect.Invalid:
// return true
// case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
// return rv.IsNil()
// default:
// return false
// }
}
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
// //
// // Usage: call this at point where done with the bytes view.
@ -48,14 +65,18 @@ func rt2id(rt reflect.Type) uintptr {
return reflect.ValueOf(rt).Pointer()
}
// --------------------------
type ptrToRvMap struct{}
func (_ *ptrToRvMap) init() {}
func (_ *ptrToRvMap) get(i interface{}) reflect.Value {
return reflect.ValueOf(i).Elem()
func rv2rtid(rv reflect.Value) uintptr {
return reflect.ValueOf(rv.Type()).Pointer()
}
// --------------------------
// type ptrToRvMap struct{}
// func (_ *ptrToRvMap) init() {}
// func (_ *ptrToRvMap) get(i interface{}) reflect.Value {
// return reflect.ValueOf(i).Elem()
// }
// --------------------------
type atomicTypeInfoSlice struct {
v atomic.Value
@ -74,70 +95,128 @@ func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
}
// --------------------------
func (f *decFnInfo) raw(rv reflect.Value) {
rv.SetBytes(f.d.raw())
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
rv.SetBytes(d.rawBytes())
}
func (f *decFnInfo) kString(rv reflect.Value) {
rv.SetString(f.d.d.DecodeString())
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
rv.SetString(d.d.DecodeString())
}
func (f *decFnInfo) kBool(rv reflect.Value) {
rv.SetBool(f.d.d.DecodeBool())
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
rv.SetBool(d.d.DecodeBool())
}
func (f *decFnInfo) kFloat32(rv reflect.Value) {
rv.SetFloat(f.d.d.DecodeFloat(true))
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
rv.SetFloat(d.d.DecodeFloat(true))
}
func (f *decFnInfo) kFloat64(rv reflect.Value) {
rv.SetFloat(f.d.d.DecodeFloat(false))
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
rv.SetFloat(d.d.DecodeFloat(false))
}
func (f *decFnInfo) kInt(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(intBitsize))
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt(intBitsize))
}
func (f *decFnInfo) kInt8(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(8))
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt(8))
}
func (f *decFnInfo) kInt16(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(16))
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt(16))
}
func (f *decFnInfo) kInt32(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(32))
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt(32))
}
func (f *decFnInfo) kInt64(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(64))
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
rv.SetInt(d.d.DecodeInt(64))
}
func (f *decFnInfo) kUint(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(uintBitsize))
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUintptr(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(uintBitsize))
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUint8(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(8))
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(8))
}
func (f *decFnInfo) kUint16(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(16))
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(16))
}
func (f *decFnInfo) kUint32(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(32))
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(32))
}
func (f *decFnInfo) kUint64(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(64))
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
rv.SetUint(d.d.DecodeUint(64))
}
// func i2rv(i interface{}) reflect.Value {
// return reflect.ValueOf(i)
// }
// ----------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeBool(rv.Bool())
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeString(c_UTF8, rv.String())
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat64(rv.Float())
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeFloat32(float32(rv.Float()))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeInt(rv.Int())
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
e.e.EncodeUint(rv.Uint())
}


@ -8,41 +8,9 @@ package codec
import (
"errors"
"fmt"
"reflect"
"testing"
)
// ----- functions below are used only by tests (not benchmarks)
const (
testLogToT = true
failNowOnFail = true
)
func checkErrT(t *testing.T, err error) {
if err != nil {
logT(t, err.Error())
failT(t)
}
}
func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) {
if err = deepEqual(v1, v2); err != nil {
logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2)
failT(t)
}
return
}
func failT(t *testing.T) {
if failNowOnFail {
t.FailNow()
} else {
t.Fail()
}
}
// --- these functions are used by both benchmarks and tests
func deepEqual(v1, v2 interface{}) (err error) {
@ -52,21 +20,6 @@ func deepEqual(v1, v2 interface{}) (err error) {
return
}
func logT(x interface{}, format string, args ...interface{}) {
if t, ok := x.(*testing.T); ok && t != nil && testLogToT {
if testVerbose {
t.Logf(format, args...)
}
} else if b, ok := x.(*testing.B); ok && b != nil && testLogToT {
b.Logf(format, args...)
} else {
if len(format) == 0 || format[len(format)-1] != '\n' {
format = format + "\n"
}
fmt.Printf(format, args...)
}
}
func approxDataSize(rv reflect.Value) (sum int) {
switch rk := rv.Kind(); rk {
case reflect.Invalid:


@ -18,6 +18,9 @@ import (
// var zeroRTv [4]uintptr
const safeMode = false
const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
type unsafeString struct {
Data uintptr
Len int
@ -60,6 +63,19 @@ func bytesView(v string) []byte {
return *(*[]byte)(unsafe.Pointer(&bx))
}
func definitelyNil(v interface{}) bool {
// There is no global way of checking if an interface is nil.
// For true references (map, ptr, func, chan), you can just look
// at the word of the interface. However, for slices, you have to dereference
// the word, and get a pointer to the 3-word interface value.
// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v))
// var word unsafe.Pointer = ui.word
// // fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil)
// return word == nil // || *((*unsafe.Pointer)(word)) == nil
return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
}
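// Worked example (illustrative, not part of the diff): the case this targets is
// a typed nil stored in an interface, which a plain == nil check does not catch.
//
//	var p *int                    // nil pointer
//	var v interface{} = p         // interface now holds (type=*int, word=nil)
//	println(v == nil)             // false: the interface itself is non-nil
//	println(definitelyNil(v))     // true in this unsafe build: the data word is nil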
// func keepAlive4BytesView(v string) {
// runtime.KeepAlive(v)
// }
@ -68,30 +84,35 @@ func bytesView(v string) []byte {
// runtime.KeepAlive(v)
// }
const _unsafe_rv2i_is_safe = false
// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
//
// Currently, we use this fragile method that taps into implementation details from
// the source go stdlib reflect/value.go,
// and trims the implementation.
func rv2i(rv reflect.Value) interface{} {
if _unsafe_rv2i_is_safe {
return rv.Interface()
}
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// references that are single-words (map, ptr) may be double-referenced as flagIndir
kk := urv.flag & (1<<5 - 1)
if (kk == uintptr(reflect.Map) || kk == uintptr(reflect.Ptr)) && urv.flag&(1<<7) != 0 {
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir
var ptr unsafe.Pointer
// kk := reflect.Kind(urv.flag & (1<<5 - 1))
// if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 {
if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 {
ptr = *(*unsafe.Pointer)(urv.ptr)
} else {
ptr = urv.ptr
}
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr}))
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
}
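// Usage sketch (assumption: rv2i is meant to mirror reflect.Value.Interface()
// for the values seen during encode/decode, minus the extra allocation):
//
//	m := map[string]int{"a": 1}
//	rv := reflect.ValueOf(m)
//	if got, ok := rv2i(rv).(map[string]int); ok {
//		_ = got["a"] // 1
//	}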
func rt2id(rt reflect.Type) uintptr {
return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
}
func rv2rtid(rv reflect.Value) uintptr {
return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ)
}
// func rv0t(rt reflect.Type) reflect.Value {
// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
@ -99,48 +120,6 @@ func rt2id(rt reflect.Type) uintptr {
// return *(*reflect.Value)(unsafe.Pointer(&uv})
// }
type ptrToRVKV struct {
k uintptr
v reflect.Value
}
type ptrToRvMap struct {
// m map[uintptr]reflect.Value
a [4]ptrToRVKV
v []ptrToRVKV
}
func (p *ptrToRvMap) init() {
// fmt.Printf(">>>> new ptr to rv map\n")
// p.m = make(map[uintptr]reflect.Value, 32)
p.v = p.a[:0]
}
func (p *ptrToRvMap) get(intf interface{}) (rv reflect.Value) {
word := uintptr(((*unsafeIntf)(unsafe.Pointer(&intf))).word)
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, len(p.v)
for i < j {
h = i + (j-i)/2
if p.v[h].k < word {
i = h + 1
} else {
j = h
}
}
if i < len(p.v) && p.v[i].k == word {
return p.v[i].v
}
// insert into position i
// fmt.Printf(">>>> resetting rv for word: %x, interface: %v\n", word, intf)
rv = reflect.ValueOf(intf).Elem()
p.v = append(p.v, ptrToRVKV{})
copy(p.v[i+1:len(p.v)], p.v[i:len(p.v)-1])
p.v[i].k, p.v[i].v = word, rv
return
}
// --------------------------
type atomicTypeInfoSlice struct {
v unsafe.Pointer
@ -155,95 +134,167 @@ func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
}
// --------------------------
func (f *decFnInfo) raw(rv reflect.Value) {
func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*[]byte)(urv.ptr) = f.d.raw()
// if urv.flag&unsafeFlagIndir != 0 {
// urv.ptr = *(*unsafe.Pointer)(urv.ptr)
// }
*(*[]byte)(urv.ptr) = d.rawBytes()
}
func (f *decFnInfo) kString(rv reflect.Value) {
func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*string)(urv.ptr) = f.d.d.DecodeString()
*(*string)(urv.ptr) = d.d.DecodeString()
}
func (f *decFnInfo) kBool(rv reflect.Value) {
func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*bool)(urv.ptr) = f.d.d.DecodeBool()
*(*bool)(urv.ptr) = d.d.DecodeBool()
}
func (f *decFnInfo) kFloat32(rv reflect.Value) {
func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = float32(f.d.d.DecodeFloat(true))
*(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true))
}
func (f *decFnInfo) kFloat64(rv reflect.Value) {
func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = f.d.d.DecodeFloat(false)
*(*float64)(urv.ptr) = d.d.DecodeFloat(false)
}
func (f *decFnInfo) kInt(rv reflect.Value) {
func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = int(f.d.d.DecodeInt(intBitsize))
*(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize))
}
func (f *decFnInfo) kInt8(rv reflect.Value) {
func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = int8(f.d.d.DecodeInt(8))
*(*int8)(urv.ptr) = int8(d.d.DecodeInt(8))
}
func (f *decFnInfo) kInt16(rv reflect.Value) {
func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = int16(f.d.d.DecodeInt(16))
*(*int16)(urv.ptr) = int16(d.d.DecodeInt(16))
}
func (f *decFnInfo) kInt32(rv reflect.Value) {
func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = int32(f.d.d.DecodeInt(32))
*(*int32)(urv.ptr) = int32(d.d.DecodeInt(32))
}
func (f *decFnInfo) kInt64(rv reflect.Value) {
func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = f.d.d.DecodeInt(64)
*(*int64)(urv.ptr) = d.d.DecodeInt(64)
}
func (f *decFnInfo) kUint(rv reflect.Value) {
func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = uint(f.d.d.DecodeUint(uintBitsize))
*(*uint)(urv.ptr) = uint(d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUintptr(rv reflect.Value) {
func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = uintptr(f.d.d.DecodeUint(uintBitsize))
*(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUint8(rv reflect.Value) {
func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = uint8(f.d.d.DecodeUint(8))
*(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8))
}
func (f *decFnInfo) kUint16(rv reflect.Value) {
func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = uint16(f.d.d.DecodeUint(16))
*(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16))
}
func (f *decFnInfo) kUint32(rv reflect.Value) {
func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = uint32(f.d.d.DecodeUint(32))
}
func (f *decFnInfo) kUint64(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = f.d.d.DecodeUint(64)
*(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32))
}
// func (p *ptrToRvMap) get(i interface{}) (rv reflect.Value) {
// word := uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).word)
// rv, exists := p.m[word]
// if !exists {
// fmt.Printf(">>>> resetting rv for word: %x, interface: %v\n", word, i)
// rv = reflect.ValueOf(i).Elem()
// p.m[word] = rv
// }
// return
// }
func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = d.d.DecodeUint(64)
}
// ------------
func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeBool(*(*bool)(v.ptr))
}
func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeString(c_UTF8, *(*string)(v.ptr))
}
func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat64(*(*float64)(v.ptr))
}
func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeFloat32(*(*float32)(v.ptr))
}
func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int)(v.ptr)))
}
func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int8)(v.ptr)))
}
func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int16)(v.ptr)))
}
func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int32)(v.ptr)))
}
func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeInt(int64(*(*int64)(v.ptr)))
}
func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint)(v.ptr)))
}
func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint8)(v.ptr)))
}
func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint16)(v.ptr)))
}
func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint32)(v.ptr)))
}
func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uint64)(v.ptr)))
}
func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
e.e.EncodeUint(uint64(*(*uintptr)(v.ptr)))
}
// ------------
// func rt2id(rt reflect.Type) uintptr {
// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
@ -455,8 +506,3 @@ func (f *decFnInfo) kUint64(rv reflect.Value) {
// return *(*interface{})(unsafe.Pointer(&ui))
// // return i
// }
// func i2rv(i interface{}) reflect.Value {
// // u := *(*unsafeIntf)(unsafe.Pointer(&i))
// return reflect.ValueOf(i)
// }

File diff suppressed because it is too large


@ -8,11 +8,16 @@
package codec
import "testing"
import "fmt"
// TestMammoth has all the different paths optimized in fast-path
// It has all the primitives, slices and maps.
//
// For each of those types, it has a pointer and a non-pointer field.
func init() { _ = fmt.Printf } // so we can include fmt as needed
type TestMammoth struct {
{{range .Values }}{{if .Primitive }}{{/*
@ -31,3 +36,65 @@ type TestMammoth struct {
{{end}}{{end}}{{end}}
}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}} type {{ .MethodNamePfx "type" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "type" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}
func doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}
for _, v := range [][]{{ .Elem }}{ nil, []{{ .Elem }}{}, []{{ .Elem }}{ {{ nonzerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
var v{{$i}}v1, v{{$i}}v2, v{{$i}}v3, v{{$i}}v4 []{{ .Elem }}
v{{$i}}v1 = v
bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
// ...
v{{$i}}v2 = nil
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
v{{$i}}v3 = {{ .MethodNamePfx "type" false }}(v{{$i}}v1)
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2)
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
v{{$i}}v2 = nil
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
}
{{end}}{{end}}{{end}}
}
func doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, map[{{ .MapKey }}]{{ .Elem }}{}, map[{{ .MapKey }}]{{ .Elem }}{ {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} } } {
// fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
v{{$i}}v1 = v
bs{{$i}} := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
if v != nil { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) }
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p")
}
{{end}}{{end}}{{end}}
}
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
doTestMammothSlices(t, h)
doTestMammothMaps(t, h)
}


@ -0,0 +1,97 @@
// +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from mammoth2-test.go.tmpl
// ************************************************************
package codec
// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go....
//
// Add:
// - test file for creating a mammoth generated file as _mammoth_generated.go
// - generate a second mammoth file in a different file: mammoth2_generated_test.go
// - mammoth-test.go.tmpl will do this
// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
// - as part of TestMammoth, run it also
// - this will cover all the codecgen, gen-helper, etc in one full run
// - check in mammoth* files into github also
// - then
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of them
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
// import "encoding/binary"
import "fmt"
type TestMammoth2 struct {
{{range .Values }}{{if .Primitive }}{{/*
*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
// -----------
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigen.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigen.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth2
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth2
L []TestMammoth2
A [4]int64
}

File diff suppressed because it is too large


@ -0,0 +1,659 @@
// +build !notfastpath
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from mammoth2-test.go.tmpl
// ************************************************************
package codec
// Increase code coverage by covering all the codecgen paths, in fast-path and gen-helper.go....
//
// Add:
// - test file for creating a mammoth generated file as _mammoth_generated.go
// - generate a second mammoth file in a different file: mammoth2_generated_test.go
// - mammoth-test.go.tmpl will do this
// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
// - as part of TestMammoth, run it also
// - this will cover all the codecgen, gen-helper, etc in one full run
// - check in mammoth* files into github also
// - then
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of them
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we encode/decode (so that the codecgen methods are called)
// import "encoding/binary"
import "fmt"
type TestMammoth2 struct {
FIntf interface{}
FptrIntf *interface{}
FString string
FptrString *string
FFloat32 float32
FptrFloat32 *float32
FFloat64 float64
FptrFloat64 *float64
FUint uint
FptrUint *uint
FUint8 uint8
FptrUint8 *uint8
FUint16 uint16
FptrUint16 *uint16
FUint32 uint32
FptrUint32 *uint32
FUint64 uint64
FptrUint64 *uint64
FUintptr uintptr
FptrUintptr *uintptr
FInt int
FptrInt *int
FInt8 int8
FptrInt8 *int8
FInt16 int16
FptrInt16 *int16
FInt32 int32
FptrInt32 *int32
FInt64 int64
FptrInt64 *int64
FBool bool
FptrBool *bool
FSliceIntf []interface{}
FptrSliceIntf *[]interface{}
FSliceString []string
FptrSliceString *[]string
FSliceFloat32 []float32
FptrSliceFloat32 *[]float32
FSliceFloat64 []float64
FptrSliceFloat64 *[]float64
FSliceUint []uint
FptrSliceUint *[]uint
FSliceUint16 []uint16
FptrSliceUint16 *[]uint16
FSliceUint32 []uint32
FptrSliceUint32 *[]uint32
FSliceUint64 []uint64
FptrSliceUint64 *[]uint64
FSliceUintptr []uintptr
FptrSliceUintptr *[]uintptr
FSliceInt []int
FptrSliceInt *[]int
FSliceInt8 []int8
FptrSliceInt8 *[]int8
FSliceInt16 []int16
FptrSliceInt16 *[]int16
FSliceInt32 []int32
FptrSliceInt32 *[]int32
FSliceInt64 []int64
FptrSliceInt64 *[]int64
FSliceBool []bool
FptrSliceBool *[]bool
FMapIntfIntf map[interface{}]interface{}
FptrMapIntfIntf *map[interface{}]interface{}
FMapIntfString map[interface{}]string
FptrMapIntfString *map[interface{}]string
FMapIntfUint map[interface{}]uint
FptrMapIntfUint *map[interface{}]uint
FMapIntfUint8 map[interface{}]uint8
FptrMapIntfUint8 *map[interface{}]uint8
FMapIntfUint16 map[interface{}]uint16
FptrMapIntfUint16 *map[interface{}]uint16
FMapIntfUint32 map[interface{}]uint32
FptrMapIntfUint32 *map[interface{}]uint32
FMapIntfUint64 map[interface{}]uint64
FptrMapIntfUint64 *map[interface{}]uint64
FMapIntfUintptr map[interface{}]uintptr
FptrMapIntfUintptr *map[interface{}]uintptr
FMapIntfInt map[interface{}]int
FptrMapIntfInt *map[interface{}]int
FMapIntfInt8 map[interface{}]int8
FptrMapIntfInt8 *map[interface{}]int8
FMapIntfInt16 map[interface{}]int16
FptrMapIntfInt16 *map[interface{}]int16
FMapIntfInt32 map[interface{}]int32
FptrMapIntfInt32 *map[interface{}]int32
FMapIntfInt64 map[interface{}]int64
FptrMapIntfInt64 *map[interface{}]int64
FMapIntfFloat32 map[interface{}]float32
FptrMapIntfFloat32 *map[interface{}]float32
FMapIntfFloat64 map[interface{}]float64
FptrMapIntfFloat64 *map[interface{}]float64
FMapIntfBool map[interface{}]bool
FptrMapIntfBool *map[interface{}]bool
FMapStringIntf map[string]interface{}
FptrMapStringIntf *map[string]interface{}
FMapStringString map[string]string
FptrMapStringString *map[string]string
FMapStringUint map[string]uint
FptrMapStringUint *map[string]uint
FMapStringUint8 map[string]uint8
FptrMapStringUint8 *map[string]uint8
FMapStringUint16 map[string]uint16
FptrMapStringUint16 *map[string]uint16
FMapStringUint32 map[string]uint32
FptrMapStringUint32 *map[string]uint32
FMapStringUint64 map[string]uint64
FptrMapStringUint64 *map[string]uint64
FMapStringUintptr map[string]uintptr
FptrMapStringUintptr *map[string]uintptr
FMapStringInt map[string]int
FptrMapStringInt *map[string]int
FMapStringInt8 map[string]int8
FptrMapStringInt8 *map[string]int8
FMapStringInt16 map[string]int16
FptrMapStringInt16 *map[string]int16
FMapStringInt32 map[string]int32
FptrMapStringInt32 *map[string]int32
FMapStringInt64 map[string]int64
FptrMapStringInt64 *map[string]int64
FMapStringFloat32 map[string]float32
FptrMapStringFloat32 *map[string]float32
FMapStringFloat64 map[string]float64
FptrMapStringFloat64 *map[string]float64
FMapStringBool map[string]bool
FptrMapStringBool *map[string]bool
FMapFloat32Intf map[float32]interface{}
FptrMapFloat32Intf *map[float32]interface{}
FMapFloat32String map[float32]string
FptrMapFloat32String *map[float32]string
FMapFloat32Uint map[float32]uint
FptrMapFloat32Uint *map[float32]uint
FMapFloat32Uint8 map[float32]uint8
FptrMapFloat32Uint8 *map[float32]uint8
FMapFloat32Uint16 map[float32]uint16
FptrMapFloat32Uint16 *map[float32]uint16
FMapFloat32Uint32 map[float32]uint32
FptrMapFloat32Uint32 *map[float32]uint32
FMapFloat32Uint64 map[float32]uint64
FptrMapFloat32Uint64 *map[float32]uint64
FMapFloat32Uintptr map[float32]uintptr
FptrMapFloat32Uintptr *map[float32]uintptr
FMapFloat32Int map[float32]int
FptrMapFloat32Int *map[float32]int
FMapFloat32Int8 map[float32]int8
FptrMapFloat32Int8 *map[float32]int8
FMapFloat32Int16 map[float32]int16
FptrMapFloat32Int16 *map[float32]int16
FMapFloat32Int32 map[float32]int32
FptrMapFloat32Int32 *map[float32]int32
FMapFloat32Int64 map[float32]int64
FptrMapFloat32Int64 *map[float32]int64
FMapFloat32Float32 map[float32]float32
FptrMapFloat32Float32 *map[float32]float32
FMapFloat32Float64 map[float32]float64
FptrMapFloat32Float64 *map[float32]float64
FMapFloat32Bool map[float32]bool
FptrMapFloat32Bool *map[float32]bool
FMapFloat64Intf map[float64]interface{}
FptrMapFloat64Intf *map[float64]interface{}
FMapFloat64String map[float64]string
FptrMapFloat64String *map[float64]string
FMapFloat64Uint map[float64]uint
FptrMapFloat64Uint *map[float64]uint
FMapFloat64Uint8 map[float64]uint8
FptrMapFloat64Uint8 *map[float64]uint8
FMapFloat64Uint16 map[float64]uint16
FptrMapFloat64Uint16 *map[float64]uint16
FMapFloat64Uint32 map[float64]uint32
FptrMapFloat64Uint32 *map[float64]uint32
FMapFloat64Uint64 map[float64]uint64
FptrMapFloat64Uint64 *map[float64]uint64
FMapFloat64Uintptr map[float64]uintptr
FptrMapFloat64Uintptr *map[float64]uintptr
FMapFloat64Int map[float64]int
FptrMapFloat64Int *map[float64]int
FMapFloat64Int8 map[float64]int8
FptrMapFloat64Int8 *map[float64]int8
FMapFloat64Int16 map[float64]int16
FptrMapFloat64Int16 *map[float64]int16
FMapFloat64Int32 map[float64]int32
FptrMapFloat64Int32 *map[float64]int32
FMapFloat64Int64 map[float64]int64
FptrMapFloat64Int64 *map[float64]int64
FMapFloat64Float32 map[float64]float32
FptrMapFloat64Float32 *map[float64]float32
FMapFloat64Float64 map[float64]float64
FptrMapFloat64Float64 *map[float64]float64
FMapFloat64Bool map[float64]bool
FptrMapFloat64Bool *map[float64]bool
FMapUintIntf map[uint]interface{}
FptrMapUintIntf *map[uint]interface{}
FMapUintString map[uint]string
FptrMapUintString *map[uint]string
FMapUintUint map[uint]uint
FptrMapUintUint *map[uint]uint
FMapUintUint8 map[uint]uint8
FptrMapUintUint8 *map[uint]uint8
FMapUintUint16 map[uint]uint16
FptrMapUintUint16 *map[uint]uint16
FMapUintUint32 map[uint]uint32
FptrMapUintUint32 *map[uint]uint32
FMapUintUint64 map[uint]uint64
FptrMapUintUint64 *map[uint]uint64
FMapUintUintptr map[uint]uintptr
FptrMapUintUintptr *map[uint]uintptr
FMapUintInt map[uint]int
FptrMapUintInt *map[uint]int
FMapUintInt8 map[uint]int8
FptrMapUintInt8 *map[uint]int8
FMapUintInt16 map[uint]int16
FptrMapUintInt16 *map[uint]int16
FMapUintInt32 map[uint]int32
FptrMapUintInt32 *map[uint]int32
FMapUintInt64 map[uint]int64
FptrMapUintInt64 *map[uint]int64
FMapUintFloat32 map[uint]float32
FptrMapUintFloat32 *map[uint]float32
FMapUintFloat64 map[uint]float64
FptrMapUintFloat64 *map[uint]float64
FMapUintBool map[uint]bool
FptrMapUintBool *map[uint]bool
FMapUint8Intf map[uint8]interface{}
FptrMapUint8Intf *map[uint8]interface{}
FMapUint8String map[uint8]string
FptrMapUint8String *map[uint8]string
FMapUint8Uint map[uint8]uint
FptrMapUint8Uint *map[uint8]uint
FMapUint8Uint8 map[uint8]uint8
FptrMapUint8Uint8 *map[uint8]uint8
FMapUint8Uint16 map[uint8]uint16
FptrMapUint8Uint16 *map[uint8]uint16
FMapUint8Uint32 map[uint8]uint32
FptrMapUint8Uint32 *map[uint8]uint32
FMapUint8Uint64 map[uint8]uint64
FptrMapUint8Uint64 *map[uint8]uint64
FMapUint8Uintptr map[uint8]uintptr
FptrMapUint8Uintptr *map[uint8]uintptr
FMapUint8Int map[uint8]int
FptrMapUint8Int *map[uint8]int
FMapUint8Int8 map[uint8]int8
FptrMapUint8Int8 *map[uint8]int8
FMapUint8Int16 map[uint8]int16
FptrMapUint8Int16 *map[uint8]int16
FMapUint8Int32 map[uint8]int32
FptrMapUint8Int32 *map[uint8]int32
FMapUint8Int64 map[uint8]int64
FptrMapUint8Int64 *map[uint8]int64
FMapUint8Float32 map[uint8]float32
FptrMapUint8Float32 *map[uint8]float32
FMapUint8Float64 map[uint8]float64
FptrMapUint8Float64 *map[uint8]float64
FMapUint8Bool map[uint8]bool
FptrMapUint8Bool *map[uint8]bool
FMapUint16Intf map[uint16]interface{}
FptrMapUint16Intf *map[uint16]interface{}
FMapUint16String map[uint16]string
FptrMapUint16String *map[uint16]string
FMapUint16Uint map[uint16]uint
FptrMapUint16Uint *map[uint16]uint
FMapUint16Uint8 map[uint16]uint8
FptrMapUint16Uint8 *map[uint16]uint8
FMapUint16Uint16 map[uint16]uint16
FptrMapUint16Uint16 *map[uint16]uint16
FMapUint16Uint32 map[uint16]uint32
FptrMapUint16Uint32 *map[uint16]uint32
FMapUint16Uint64 map[uint16]uint64
FptrMapUint16Uint64 *map[uint16]uint64
FMapUint16Uintptr map[uint16]uintptr
FptrMapUint16Uintptr *map[uint16]uintptr
FMapUint16Int map[uint16]int
FptrMapUint16Int *map[uint16]int
FMapUint16Int8 map[uint16]int8
FptrMapUint16Int8 *map[uint16]int8
FMapUint16Int16 map[uint16]int16
FptrMapUint16Int16 *map[uint16]int16
FMapUint16Int32 map[uint16]int32
FptrMapUint16Int32 *map[uint16]int32
FMapUint16Int64 map[uint16]int64
FptrMapUint16Int64 *map[uint16]int64
FMapUint16Float32 map[uint16]float32
FptrMapUint16Float32 *map[uint16]float32
FMapUint16Float64 map[uint16]float64
FptrMapUint16Float64 *map[uint16]float64
FMapUint16Bool map[uint16]bool
FptrMapUint16Bool *map[uint16]bool
FMapUint32Intf map[uint32]interface{}
FptrMapUint32Intf *map[uint32]interface{}
FMapUint32String map[uint32]string
FptrMapUint32String *map[uint32]string
FMapUint32Uint map[uint32]uint
FptrMapUint32Uint *map[uint32]uint
FMapUint32Uint8 map[uint32]uint8
FptrMapUint32Uint8 *map[uint32]uint8
FMapUint32Uint16 map[uint32]uint16
FptrMapUint32Uint16 *map[uint32]uint16
FMapUint32Uint32 map[uint32]uint32
FptrMapUint32Uint32 *map[uint32]uint32
FMapUint32Uint64 map[uint32]uint64
FptrMapUint32Uint64 *map[uint32]uint64
FMapUint32Uintptr map[uint32]uintptr
FptrMapUint32Uintptr *map[uint32]uintptr
FMapUint32Int map[uint32]int
FptrMapUint32Int *map[uint32]int
FMapUint32Int8 map[uint32]int8
FptrMapUint32Int8 *map[uint32]int8
FMapUint32Int16 map[uint32]int16
FptrMapUint32Int16 *map[uint32]int16
FMapUint32Int32 map[uint32]int32
FptrMapUint32Int32 *map[uint32]int32
FMapUint32Int64 map[uint32]int64
FptrMapUint32Int64 *map[uint32]int64
FMapUint32Float32 map[uint32]float32
FptrMapUint32Float32 *map[uint32]float32
FMapUint32Float64 map[uint32]float64
FptrMapUint32Float64 *map[uint32]float64
FMapUint32Bool map[uint32]bool
FptrMapUint32Bool *map[uint32]bool
FMapUint64Intf map[uint64]interface{}
FptrMapUint64Intf *map[uint64]interface{}
FMapUint64String map[uint64]string
FptrMapUint64String *map[uint64]string
FMapUint64Uint map[uint64]uint
FptrMapUint64Uint *map[uint64]uint
FMapUint64Uint8 map[uint64]uint8
FptrMapUint64Uint8 *map[uint64]uint8
FMapUint64Uint16 map[uint64]uint16
FptrMapUint64Uint16 *map[uint64]uint16
FMapUint64Uint32 map[uint64]uint32
FptrMapUint64Uint32 *map[uint64]uint32
FMapUint64Uint64 map[uint64]uint64
FptrMapUint64Uint64 *map[uint64]uint64
FMapUint64Uintptr map[uint64]uintptr
FptrMapUint64Uintptr *map[uint64]uintptr
FMapUint64Int map[uint64]int
FptrMapUint64Int *map[uint64]int
FMapUint64Int8 map[uint64]int8
FptrMapUint64Int8 *map[uint64]int8
FMapUint64Int16 map[uint64]int16
FptrMapUint64Int16 *map[uint64]int16
FMapUint64Int32 map[uint64]int32
FptrMapUint64Int32 *map[uint64]int32
FMapUint64Int64 map[uint64]int64
FptrMapUint64Int64 *map[uint64]int64
FMapUint64Float32 map[uint64]float32
FptrMapUint64Float32 *map[uint64]float32
FMapUint64Float64 map[uint64]float64
FptrMapUint64Float64 *map[uint64]float64
FMapUint64Bool map[uint64]bool
FptrMapUint64Bool *map[uint64]bool
FMapUintptrIntf map[uintptr]interface{}
FptrMapUintptrIntf *map[uintptr]interface{}
FMapUintptrString map[uintptr]string
FptrMapUintptrString *map[uintptr]string
FMapUintptrUint map[uintptr]uint
FptrMapUintptrUint *map[uintptr]uint
FMapUintptrUint8 map[uintptr]uint8
FptrMapUintptrUint8 *map[uintptr]uint8
FMapUintptrUint16 map[uintptr]uint16
FptrMapUintptrUint16 *map[uintptr]uint16
FMapUintptrUint32 map[uintptr]uint32
FptrMapUintptrUint32 *map[uintptr]uint32
FMapUintptrUint64 map[uintptr]uint64
FptrMapUintptrUint64 *map[uintptr]uint64
FMapUintptrUintptr map[uintptr]uintptr
FptrMapUintptrUintptr *map[uintptr]uintptr
FMapUintptrInt map[uintptr]int
FptrMapUintptrInt *map[uintptr]int
FMapUintptrInt8 map[uintptr]int8
FptrMapUintptrInt8 *map[uintptr]int8
FMapUintptrInt16 map[uintptr]int16
FptrMapUintptrInt16 *map[uintptr]int16
FMapUintptrInt32 map[uintptr]int32
FptrMapUintptrInt32 *map[uintptr]int32
FMapUintptrInt64 map[uintptr]int64
FptrMapUintptrInt64 *map[uintptr]int64
FMapUintptrFloat32 map[uintptr]float32
FptrMapUintptrFloat32 *map[uintptr]float32
FMapUintptrFloat64 map[uintptr]float64
FptrMapUintptrFloat64 *map[uintptr]float64
FMapUintptrBool map[uintptr]bool
FptrMapUintptrBool *map[uintptr]bool
FMapIntIntf map[int]interface{}
FptrMapIntIntf *map[int]interface{}
FMapIntString map[int]string
FptrMapIntString *map[int]string
FMapIntUint map[int]uint
FptrMapIntUint *map[int]uint
FMapIntUint8 map[int]uint8
FptrMapIntUint8 *map[int]uint8
FMapIntUint16 map[int]uint16
FptrMapIntUint16 *map[int]uint16
FMapIntUint32 map[int]uint32
FptrMapIntUint32 *map[int]uint32
FMapIntUint64 map[int]uint64
FptrMapIntUint64 *map[int]uint64
FMapIntUintptr map[int]uintptr
FptrMapIntUintptr *map[int]uintptr
FMapIntInt map[int]int
FptrMapIntInt *map[int]int
FMapIntInt8 map[int]int8
FptrMapIntInt8 *map[int]int8
FMapIntInt16 map[int]int16
FptrMapIntInt16 *map[int]int16
FMapIntInt32 map[int]int32
FptrMapIntInt32 *map[int]int32
FMapIntInt64 map[int]int64
FptrMapIntInt64 *map[int]int64
FMapIntFloat32 map[int]float32
FptrMapIntFloat32 *map[int]float32
FMapIntFloat64 map[int]float64
FptrMapIntFloat64 *map[int]float64
FMapIntBool map[int]bool
FptrMapIntBool *map[int]bool
FMapInt8Intf map[int8]interface{}
FptrMapInt8Intf *map[int8]interface{}
FMapInt8String map[int8]string
FptrMapInt8String *map[int8]string
FMapInt8Uint map[int8]uint
FptrMapInt8Uint *map[int8]uint
FMapInt8Uint8 map[int8]uint8
FptrMapInt8Uint8 *map[int8]uint8
FMapInt8Uint16 map[int8]uint16
FptrMapInt8Uint16 *map[int8]uint16
FMapInt8Uint32 map[int8]uint32
FptrMapInt8Uint32 *map[int8]uint32
FMapInt8Uint64 map[int8]uint64
FptrMapInt8Uint64 *map[int8]uint64
FMapInt8Uintptr map[int8]uintptr
FptrMapInt8Uintptr *map[int8]uintptr
FMapInt8Int map[int8]int
FptrMapInt8Int *map[int8]int
FMapInt8Int8 map[int8]int8
FptrMapInt8Int8 *map[int8]int8
FMapInt8Int16 map[int8]int16
FptrMapInt8Int16 *map[int8]int16
FMapInt8Int32 map[int8]int32
FptrMapInt8Int32 *map[int8]int32
FMapInt8Int64 map[int8]int64
FptrMapInt8Int64 *map[int8]int64
FMapInt8Float32 map[int8]float32
FptrMapInt8Float32 *map[int8]float32
FMapInt8Float64 map[int8]float64
FptrMapInt8Float64 *map[int8]float64
FMapInt8Bool map[int8]bool
FptrMapInt8Bool *map[int8]bool
FMapInt16Intf map[int16]interface{}
FptrMapInt16Intf *map[int16]interface{}
FMapInt16String map[int16]string
FptrMapInt16String *map[int16]string
FMapInt16Uint map[int16]uint
FptrMapInt16Uint *map[int16]uint
FMapInt16Uint8 map[int16]uint8
FptrMapInt16Uint8 *map[int16]uint8
FMapInt16Uint16 map[int16]uint16
FptrMapInt16Uint16 *map[int16]uint16
FMapInt16Uint32 map[int16]uint32
FptrMapInt16Uint32 *map[int16]uint32
FMapInt16Uint64 map[int16]uint64
FptrMapInt16Uint64 *map[int16]uint64
FMapInt16Uintptr map[int16]uintptr
FptrMapInt16Uintptr *map[int16]uintptr
FMapInt16Int map[int16]int
FptrMapInt16Int *map[int16]int
FMapInt16Int8 map[int16]int8
FptrMapInt16Int8 *map[int16]int8
FMapInt16Int16 map[int16]int16
FptrMapInt16Int16 *map[int16]int16
FMapInt16Int32 map[int16]int32
FptrMapInt16Int32 *map[int16]int32
FMapInt16Int64 map[int16]int64
FptrMapInt16Int64 *map[int16]int64
FMapInt16Float32 map[int16]float32
FptrMapInt16Float32 *map[int16]float32
FMapInt16Float64 map[int16]float64
FptrMapInt16Float64 *map[int16]float64
FMapInt16Bool map[int16]bool
FptrMapInt16Bool *map[int16]bool
FMapInt32Intf map[int32]interface{}
FptrMapInt32Intf *map[int32]interface{}
FMapInt32String map[int32]string
FptrMapInt32String *map[int32]string
FMapInt32Uint map[int32]uint
FptrMapInt32Uint *map[int32]uint
FMapInt32Uint8 map[int32]uint8
FptrMapInt32Uint8 *map[int32]uint8
FMapInt32Uint16 map[int32]uint16
FptrMapInt32Uint16 *map[int32]uint16
FMapInt32Uint32 map[int32]uint32
FptrMapInt32Uint32 *map[int32]uint32
FMapInt32Uint64 map[int32]uint64
FptrMapInt32Uint64 *map[int32]uint64
FMapInt32Uintptr map[int32]uintptr
FptrMapInt32Uintptr *map[int32]uintptr
FMapInt32Int map[int32]int
FptrMapInt32Int *map[int32]int
FMapInt32Int8 map[int32]int8
FptrMapInt32Int8 *map[int32]int8
FMapInt32Int16 map[int32]int16
FptrMapInt32Int16 *map[int32]int16
FMapInt32Int32 map[int32]int32
FptrMapInt32Int32 *map[int32]int32
FMapInt32Int64 map[int32]int64
FptrMapInt32Int64 *map[int32]int64
FMapInt32Float32 map[int32]float32
FptrMapInt32Float32 *map[int32]float32
FMapInt32Float64 map[int32]float64
FptrMapInt32Float64 *map[int32]float64
FMapInt32Bool map[int32]bool
FptrMapInt32Bool *map[int32]bool
FMapInt64Intf map[int64]interface{}
FptrMapInt64Intf *map[int64]interface{}
FMapInt64String map[int64]string
FptrMapInt64String *map[int64]string
FMapInt64Uint map[int64]uint
FptrMapInt64Uint *map[int64]uint
FMapInt64Uint8 map[int64]uint8
FptrMapInt64Uint8 *map[int64]uint8
FMapInt64Uint16 map[int64]uint16
FptrMapInt64Uint16 *map[int64]uint16
FMapInt64Uint32 map[int64]uint32
FptrMapInt64Uint32 *map[int64]uint32
FMapInt64Uint64 map[int64]uint64
FptrMapInt64Uint64 *map[int64]uint64
FMapInt64Uintptr map[int64]uintptr
FptrMapInt64Uintptr *map[int64]uintptr
FMapInt64Int map[int64]int
FptrMapInt64Int *map[int64]int
FMapInt64Int8 map[int64]int8
FptrMapInt64Int8 *map[int64]int8
FMapInt64Int16 map[int64]int16
FptrMapInt64Int16 *map[int64]int16
FMapInt64Int32 map[int64]int32
FptrMapInt64Int32 *map[int64]int32
FMapInt64Int64 map[int64]int64
FptrMapInt64Int64 *map[int64]int64
FMapInt64Float32 map[int64]float32
FptrMapInt64Float32 *map[int64]float32
FMapInt64Float64 map[int64]float64
FptrMapInt64Float64 *map[int64]float64
FMapInt64Bool map[int64]bool
FptrMapInt64Bool *map[int64]bool
FMapBoolIntf map[bool]interface{}
FptrMapBoolIntf *map[bool]interface{}
FMapBoolString map[bool]string
FptrMapBoolString *map[bool]string
FMapBoolUint map[bool]uint
FptrMapBoolUint *map[bool]uint
FMapBoolUint8 map[bool]uint8
FptrMapBoolUint8 *map[bool]uint8
FMapBoolUint16 map[bool]uint16
FptrMapBoolUint16 *map[bool]uint16
FMapBoolUint32 map[bool]uint32
FptrMapBoolUint32 *map[bool]uint32
FMapBoolUint64 map[bool]uint64
FptrMapBoolUint64 *map[bool]uint64
FMapBoolUintptr map[bool]uintptr
FptrMapBoolUintptr *map[bool]uintptr
FMapBoolInt map[bool]int
FptrMapBoolInt *map[bool]int
FMapBoolInt8 map[bool]int8
FptrMapBoolInt8 *map[bool]int8
FMapBoolInt16 map[bool]int16
FptrMapBoolInt16 *map[bool]int16
FMapBoolInt32 map[bool]int32
FptrMapBoolInt32 *map[bool]int32
FMapBoolInt64 map[bool]int64
FptrMapBoolInt64 *map[bool]int64
FMapBoolFloat32 map[bool]float32
FptrMapBoolFloat32 *map[bool]float32
FMapBoolFloat64 map[bool]float64
FptrMapBoolFloat64 *map[bool]float64
FMapBoolBool map[bool]bool
FptrMapBoolBool *map[bool]bool
}
// -----------
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigen.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigen.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
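// Illustrative expectations for the three custom marshalers above (assumption:
// bigen is the package-level big-endian binary.ByteOrder used elsewhere in codec):
//
//	testMammoth2Binary(7).MarshalBinary() // 8 big-endian bytes: 0x00 ... 0x07
//	testMammoth2Text(5).MarshalText()     // []byte("101"), via %b
//	testMammoth2Json(5).MarshalJSON()     // []byte("5"), via %v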
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth2
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth2
L []TestMammoth2
A [4]int64
}

File diff suppressed because it is too large


@ -104,7 +104,8 @@ var (
type msgpackEncDriver struct {
noBuiltInTypes
encNoSeparator
encDriverNoopContainerWriter
// encNoSeparator
e *Encoder
w encWriter
h *MsgpackHandle
@ -116,10 +117,26 @@ func (e *msgpackEncDriver) EncodeNil() {
}
func (e *msgpackEncDriver) EncodeInt(i int64) {
if i >= 0 {
e.EncodeUint(uint64(i))
// if i >= 0 {
// e.EncodeUint(uint64(i))
// } else if false &&
if i > math.MaxInt8 {
if i <= math.MaxInt16 {
e.w.writen1(mpInt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i <= math.MaxInt32 {
e.w.writen1(mpInt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpInt64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
} else if i >= -32 {
e.w.writen1(byte(i))
if e.h.NoFixedNum {
e.w.writen2(mpInt8, byte(i))
} else {
e.w.writen1(byte(i))
}
} else if i >= math.MinInt8 {
e.w.writen2(mpInt8, byte(i))
} else if i >= math.MinInt16 {
@ -136,7 +153,11 @@ func (e *msgpackEncDriver) EncodeInt(i int64) {
func (e *msgpackEncDriver) EncodeUint(i uint64) {
if i <= math.MaxInt8 {
e.w.writen1(byte(i))
if e.h.NoFixedNum {
e.w.writen2(mpUint8, byte(i))
} else {
e.w.writen1(byte(i))
}
} else if i <= math.MaxUint8 {
e.w.writen2(mpUint8, byte(i))
} else if i <= math.MaxUint16 {
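// Worked example for the NoFixedNum change above (assumption: the standard
// msgpack tags mpInt8=0xd0 and mpUint8=0xcc, with fixnums encoded as single bytes):
//
//	EncodeInt(5):   NoFixedNum=false -> 0x05      (positive fixint)
//	                NoFixedNum=true  -> 0xd0 0x05 (int8)
//	EncodeInt(-3):  NoFixedNum=false -> 0xfd      (negative fixint)
//	                NoFixedNum=true  -> 0xd0 0xfd (int8)
//	EncodeUint(5):  NoFixedNum=false -> 0x05      (positive fixint)
//	                NoFixedNum=true  -> 0xcc 0x05 (uint8)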
@ -213,21 +234,22 @@ func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
}
}
func (e *msgpackEncDriver) EncodeArrayStart(length int) {
func (e *msgpackEncDriver) WriteArrayStart(length int) {
e.writeContainerLen(msgpackContainerList, length)
}
func (e *msgpackEncDriver) EncodeMapStart(length int) {
func (e *msgpackEncDriver) WriteMapStart(length int) {
e.writeContainerLen(msgpackContainerMap, length)
}
func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
slen := len(s)
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(s))
e.writeContainerLen(msgpackContainerBin, slen)
} else {
e.writeContainerLen(msgpackContainerStr, len(s))
e.writeContainerLen(msgpackContainerStr, slen)
}
if len(s) > 0 {
if slen > 0 {
e.w.writestr(s)
}
}
@ -237,12 +259,13 @@ func (e *msgpackEncDriver) EncodeSymbol(v string) {
}
func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
slen := len(bs)
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(bs))
e.writeContainerLen(msgpackContainerBin, slen)
} else {
e.writeContainerLen(msgpackContainerStr, len(bs))
e.writeContainerLen(msgpackContainerStr, slen)
}
if len(bs) > 0 {
if slen > 0 {
e.w.writeb(bs)
}
}
@ -272,8 +295,9 @@ type msgpackDecDriver struct {
bdRead bool
br bool // bytes reader
noBuiltInTypes
noStreamingCodec
decNoSeparator
// noStreamingCodec
// decNoSeparator
decDriverNoopContainerReader
}
// Note: This returns either a primitive (int, bool, etc) for non-containers,
@ -286,7 +310,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
d.readNextBd()
}
bd := d.bd
n := &d.d.n
n := d.d.n
var decodeFurther bool
switch bd {
@ -529,12 +553,42 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte)
if !d.bdRead {
d.readNextBd()
}
// DecodeBytes could be from: bin str fixstr fixarray array ...
var clen int
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
vt := d.ContainerType()
switch vt {
case valueTypeBytes:
// valueTypeBytes may be a mpBin or an mpStr container
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
clen = d.readContainerLen(msgpackContainerStr)
}
case valueTypeString:
clen = d.readContainerLen(msgpackContainerStr)
case valueTypeArray:
clen = d.readContainerLen(msgpackContainerList)
// ensure everything after is one byte each
for i := 0; i < clen; i++ {
d.readNextBd()
if d.bd == mpNil {
bs = append(bs, 0)
} else if d.bd == mpUint8 {
bs = append(bs, d.r.readn1())
} else {
d.d.errorf("cannot read non-byte into a byte array")
return
}
}
d.bdRead = false
return bs
default:
d.d.errorf("invalid container type: expecting bin|str|array")
return
}
// these are (bin|str)(8|16|32)
// println("DecodeBytes: clen: ", clen)
d.bdRead = false
// bytes may be nil, so handle it. if nil, clen=-1.
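// Illustrative note (not part of the diff): with the array branch above, a
// []byte value can now also be decoded from a msgpack list whose elements are
// nil or uint8-tagged bytes, e.g. 0x93 0xcc 0x01 0xcc 0x02 0xcc 0x03 decodes
// to []byte{1, 2, 3}; any other element type raises
// "cannot read non-byte into a byte array".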
@ -716,6 +770,9 @@ type MsgpackHandle struct {
// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool
// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
NoFixedNum bool
// WriteExt flag supports encoding configured extensions with extension tags.
// It also controls whether other elements of the new spec are encoded (ie Str8).
//
@ -728,6 +785,7 @@ type MsgpackHandle struct {
// a []byte or string based on the setting of RawToString.
WriteExt bool
binaryEncodingType
noElemSeparators
}
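// Usage sketch (hypothetical values; NewEncoderBytes is the existing
// bytes-oriented constructor in this package):
//
//	var mh MsgpackHandle
//	mh.NoFixedNum = true // small ints encoded as explicit int8/uint8, never fixnum
//	mh.WriteExt = true   // emit the newer spec's bin/str8 forms and extensions
//	var buf []byte
//	_ = NewEncoderBytes(&buf, &mh).Encode(map[string]int{"a": 1})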
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
@ -801,7 +859,6 @@ func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
}
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.isClosed() {
return io.EOF
}
@ -815,11 +872,19 @@ func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint
// err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
// return
// }
var b byte
b, err = c.br.ReadByte()
if err != nil {
return
var ba [1]byte
var n int
for {
n, err = c.r.Read(ba[:])
if err != nil {
return
}
if n == 1 {
break
}
}
var b = ba[0]
if b != fia {
err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
return
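The header parsing above now reads the array-descriptor byte with a manual loop over a 1-byte buffer, since the codec no longer owns a bufio.Reader. A generic stdlib sketch of the same guarantee using io.ReadFull; readOneByte is a made-up helper for illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readOneByte reads exactly one byte from r, retrying short reads, without
// assuming r implements io.ByteReader.
func readOneByte(r io.Reader) (byte, error) {
	var one [1]byte
	if _, err := io.ReadFull(r, one[:]); err != nil {
		return 0, err
	}
	return one[0], nil
}

func main() {
	b, err := readOneByte(bytes.NewReader([]byte{0x91}))
	fmt.Println(b, err) // 145 <nil>
}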

View File

@ -1,6 +1,8 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build ignore
package codec
import (

View File

@ -4,71 +4,80 @@
package codec
import (
"bufio"
"errors"
"io"
"net/rpc"
"sync"
)
// rpcEncodeTerminator allows a handler to specify a []byte terminator to send after each Encode.
//
// Some codecs like json need to put a space after each encoded value, to serve as a
// delimiter for things like numbers (else json codec will continue reading till EOF).
type rpcEncodeTerminator interface {
rpcEncodeTerminate() []byte
}
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
BufferedReader() *bufio.Reader
BufferedWriter() *bufio.Writer
}
// // RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// // used by the rpc connection. It accommodates use-cases where the connection
// // should be used by rpc and non-rpc functions, e.g. streaming a file after
// // sending an rpc response.
// type RpcCodecBuffered interface {
// BufferedReader() *bufio.Reader
// BufferedWriter() *bufio.Writer
// }
// -------------------------------------
type rpcFlusher interface {
Flush() error
}
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
rwc io.ReadWriteCloser
c io.Closer
r io.Reader
w io.Writer
f rpcFlusher
dec *Decoder
enc *Encoder
bw *bufio.Writer
br *bufio.Reader
mu sync.Mutex
h Handle
// bw *bufio.Writer
// br *bufio.Reader
mu sync.Mutex
h Handle
cls bool
clsmu sync.RWMutex
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
bw := bufio.NewWriter(conn)
br := bufio.NewReader(conn)
// return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h)
return newRPCCodec2(conn, conn, conn, h)
}
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
// defensive: ensure that jsonH has TermWhitespace turned on.
if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace {
panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true"))
}
f, _ := w.(rpcFlusher)
return rpcCodec{
rwc: conn,
bw: bw,
br: br,
enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
c: c,
w: w,
r: r,
f: f,
h: h,
enc: NewEncoder(w, h),
dec: NewDecoder(r, h),
}
}
func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
// func (c *rpcCodec) BufferedReader() *bufio.Reader {
// return c.br
// }
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
// func (c *rpcCodec) BufferedWriter() *bufio.Writer {
// return c.bw
// }
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
if c.isClosed() {
@ -77,20 +86,13 @@ func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err e
if err = c.enc.Encode(obj1); err != nil {
return
}
t, tOk := c.h.(rpcEncodeTerminator)
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
if writeObj2 {
if err = c.enc.Encode(obj2); err != nil {
return
}
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
}
if doFlush {
return c.bw.Flush()
if doFlush && c.f != nil {
return c.f.Flush()
}
return
}
@ -108,6 +110,9 @@ func (c *rpcCodec) read(obj interface{}) (err error) {
}
func (c *rpcCodec) isClosed() bool {
if c.c == nil {
return false
}
c.clsmu.RLock()
x := c.cls
c.clsmu.RUnlock()
@ -115,13 +120,17 @@ func (c *rpcCodec) isClosed() bool {
}
func (c *rpcCodec) Close() error {
if c.c == nil {
return nil
}
if c.isClosed() {
return io.EOF
}
c.clsmu.Lock()
c.cls = true
err := c.c.Close()
c.clsmu.Unlock()
return c.rwc.Close()
return err
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
@ -177,4 +186,10 @@ func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
// Use this method to allow you to create wrapped versions of the reader and writer if desired.
// For example, to create a buffered implementation.
func (x goRpc) Codec(r io.Reader, w io.Writer, c io.Closer, h Handle) *goRpcCodec {
return &goRpcCodec{newRPCCodec2(r, w, c, h)}
}
// var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
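With the buffered accessors gone, callers that want buffering can hand their own reader/writer to the new Codec constructor; the plain ServerCodec/ClientCodec entry points still work unchanged. A minimal sketch of that usual wiring over msgpack, assuming codec.GoRpc is the package-level connector; net.Pipe stands in for a real connection and Arith is an invented service:

package main

import (
	"fmt"
	"net"
	"net/rpc"

	"github.com/ugorji/go/codec"
)

type Arith int

// Mul follows the net/rpc method shape: two args, second a pointer, error result.
func (Arith) Mul(args *[2]int, reply *int) error { *reply = args[0] * args[1]; return nil }

func main() {
	h := &codec.MsgpackHandle{}
	srvConn, cliConn := net.Pipe()

	s := rpc.NewServer()
	if err := s.Register(new(Arith)); err != nil {
		panic(err)
	}
	go s.ServeCodec(codec.GoRpc.ServerCodec(srvConn, h))

	client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(cliConn, h))
	defer client.Close()

	var reply int
	if err := client.Call("Arith.Mul", &[2]int{6, 7}, &reply); err != nil {
		panic(err)
	}
	fmt.Println(reply) // 42
}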

View File

@ -43,7 +43,10 @@ package codec
import (
"bytes"
"flag"
"fmt"
"io"
"sync"
"testing"
)
// DO NOT REMOVE - replacement line for go-codec-bench import declaration tag //
@ -54,8 +57,24 @@ type testHED struct {
D *Decoder
}
type ioReaderWrapper struct {
r io.Reader
}
func (x ioReaderWrapper) Read(p []byte) (n int, err error) {
return x.r.Read(p)
}
type ioWriterWrapper struct {
w io.Writer
}
func (x ioWriterWrapper) Write(p []byte) (n int, err error) {
return x.w.Write(p)
}
var (
testNoopH = NoopHandle(8)
// testNoopH = NoopHandle(8)
testMsgpackH = &MsgpackHandle{}
testBincH = &BincHandle{}
testSimpleH = &SimpleHandle{}
@ -73,22 +92,30 @@ var (
// flag variables used by tests (and bench)
var (
testVerbose bool
testInitDebug bool
testUseIoEncDec bool
testStructToArray bool
testCanonical bool
testUseReset bool
testWriteNoSymbols bool
testSkipIntf bool
testInternStr bool
testUseMust bool
testCheckCircRef bool
testJsonIndent int
testMaxInitLen int
testDepth int
testJsonHTMLCharsAsIs bool
testJsonPreferFloat bool
testVerbose bool
testInitDebug bool
testStructToArray bool
testCanonical bool
testUseReset bool
testSkipIntf bool
testInternStr bool
testUseMust bool
testCheckCircRef bool
testUseIoEncDec int
testUseIoWrapper bool
testMaxInitLen int
testNumRepeatString int
)
// variables that are not flags, but which can configure the handles
var (
testEncodeOptions EncodeOptions
testDecodeOptions DecodeOptions
)
// flag variables used by bench
@ -106,7 +133,8 @@ var (
func init() {
testHEDs = make([]testHED, 0, 32)
testHandles = append(testHandles,
testNoopH, testMsgpackH, testBincH, testSimpleH,
// testNoopH,
testMsgpackH, testBincH, testSimpleH,
testCborH, testJsonH)
testInitFlags()
benchInitFlags()
@ -114,21 +142,20 @@ func init() {
func testInitFlags() {
// delete(testDecOpts.ExtFuncs, timeTyp)
flag.IntVar(&testDepth, "tsd", 0, "Test Struc Depth")
flag.BoolVar(&testVerbose, "tv", false, "Test Verbose")
flag.BoolVar(&testInitDebug, "tg", false, "Test Init Debug")
flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal")
flag.IntVar(&testUseIoEncDec, "ti", -1, "Use IO Reader/Writer for Marshal/Unmarshal ie >= 0")
flag.BoolVar(&testUseIoWrapper, "tiw", false, "Wrap the IO Reader/Writer with a base pass-through reader/writer")
flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option")
flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option")
flag.BoolVar(&testCanonical, "tc", false, "Set Canonical option")
flag.BoolVar(&testInternStr, "te", false, "Set InternStr option")
flag.BoolVar(&testSkipIntf, "tf", false, "Skip Interfaces")
flag.BoolVar(&testUseReset, "tr", false, "Use Reset")
flag.IntVar(&testJsonIndent, "td", 0, "Use JSON Indent")
flag.IntVar(&testNumRepeatString, "trs", 8, "Create string variables by repeating a string N times")
flag.IntVar(&testMaxInitLen, "tx", 0, "Max Init Len")
flag.BoolVar(&testUseMust, "tm", true, "Use Must(En|De)code")
flag.BoolVar(&testCheckCircRef, "tl", false, "Use Check Circular Ref")
flag.BoolVar(&testJsonHTMLCharsAsIs, "tas", false, "Set JSON HTMLCharsAsIs")
flag.BoolVar(&testJsonPreferFloat, "tjf", false, "Prefer Float in json")
}
func benchInitFlags() {
@ -169,8 +196,8 @@ func testInitAll() {
}
}
func testCodecEncode(ts interface{}, bsIn []byte,
fn func([]byte) *bytes.Buffer, h Handle) (bs []byte, err error) {
func sTestCodecEncode(ts interface{}, bsIn []byte, fn func([]byte) *bytes.Buffer,
h Handle, bh *BasicHandle) (bs []byte, err error) {
// bs = make([]byte, 0, approxSize)
var e *Encoder
var buf *bytes.Buffer
@ -179,9 +206,17 @@ func testCodecEncode(ts interface{}, bsIn []byte,
} else {
e = NewEncoder(nil, h)
}
if testUseIoEncDec {
var oldWriteBufferSize int
if testUseIoEncDec >= 0 {
buf = fn(bsIn)
e.Reset(buf)
// set the encode options for using a buffer
oldWriteBufferSize = bh.WriterBufferSize
bh.WriterBufferSize = testUseIoEncDec
if testUseIoWrapper {
e.Reset(ioWriterWrapper{buf})
} else {
e.Reset(buf)
}
} else {
bs = bsIn
e.ResetBytes(&bs)
@ -191,23 +226,31 @@ func testCodecEncode(ts interface{}, bsIn []byte,
} else {
err = e.Encode(ts)
}
if testUseIoEncDec {
if testUseIoEncDec >= 0 {
bs = buf.Bytes()
bh.WriterBufferSize = oldWriteBufferSize
}
return
}
func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
func sTestCodecDecode(bs []byte, ts interface{}, h Handle, bh *BasicHandle) (err error) {
var d *Decoder
var buf *bytes.Reader
// var buf *bytes.Reader
if testUseReset {
d = testHEDGet(h).D
} else {
d = NewDecoder(nil, h)
}
if testUseIoEncDec {
buf = bytes.NewReader(bs)
d.Reset(buf)
var oldReadBufferSize int
if testUseIoEncDec >= 0 {
buf := bytes.NewReader(bs)
oldReadBufferSize = bh.ReaderBufferSize
bh.ReaderBufferSize = testUseIoEncDec
if testUseIoWrapper {
d.Reset(ioReaderWrapper{buf})
} else {
d.Reset(buf)
}
} else {
d.ResetBytes(bs)
}
@ -216,10 +259,28 @@ func testCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
} else {
err = d.Decode(ts)
}
if testUseIoEncDec >= 0 {
bh.ReaderBufferSize = oldReadBufferSize
}
return
}
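The harness above flips between the []byte and io.Reader/io.Writer paths that the codec exposes. A small sketch of the same switch using the public Reset/ResetBytes APIs on one Encoder and one Decoder:

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	h := &codec.MsgpackHandle{}
	v := map[string]int{"a": 1}

	// bytes path: encode straight into a []byte
	var bs []byte
	e := codec.NewEncoderBytes(&bs, h)
	if err := e.Encode(v); err != nil {
		panic(err)
	}

	// io path: the same Encoder is pointed at an io.Writer via Reset
	var buf bytes.Buffer
	e.Reset(&buf)
	if err := e.Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(bs, buf.Bytes())) // both paths produce the same bytes

	// decoding mirrors this: ResetBytes for []byte input, Reset for an io.Reader
	var out map[string]int
	d := codec.NewDecoderBytes(bs, h)
	if err := d.Decode(&out); err != nil {
		panic(err)
	}
	out = nil
	d.Reset(&buf)
	if err := d.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[a:1]
}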
// ----- functions below are used only by benchmarks alone
// --- functions below are used by both benchmarks and tests
func logT(x interface{}, format string, args ...interface{}) {
if t, ok := x.(*testing.T); ok && t != nil {
t.Logf(format, args...)
} else if b, ok := x.(*testing.B); ok && b != nil {
b.Logf(format, args...)
} else if testVerbose {
if len(format) == 0 || format[len(format)-1] != '\n' {
format = format + "\n"
}
fmt.Printf(format, args...)
}
}
// --- functions below are used only by benchmarks alone
func fnBenchmarkByteBuf(bsIn []byte) (buf *bytes.Buffer) {
// var buf bytes.Buffer
@ -229,10 +290,10 @@ func fnBenchmarkByteBuf(bsIn []byte) (buf *bytes.Buffer) {
return
}
func benchFnCodecEncode(ts interface{}, bsIn []byte, h Handle) (bs []byte, err error) {
return testCodecEncode(ts, bsIn, fnBenchmarkByteBuf, h)
}
// func benchFnCodecEncode(ts interface{}, bsIn []byte, h Handle) (bs []byte, err error) {
// return testCodecEncode(ts, bsIn, fnBenchmarkByteBuf, h)
// }
func benchFnCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
return testCodecDecode(bs, ts, h)
}
// func benchFnCodecDecode(bs []byte, ts interface{}, h Handle) (err error) {
// return testCodecDecode(bs, ts, h)
// }

View File

@ -30,7 +30,8 @@ const (
type simpleEncDriver struct {
noBuiltInTypes
encNoSeparator
encDriverNoopContainerWriter
// encNoSeparator
e *Encoder
h *SimpleHandle
w encWriter
@ -124,11 +125,11 @@ func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
e.w.writen1(xtag)
}
func (e *simpleEncDriver) EncodeArrayStart(length int) {
func (e *simpleEncDriver) WriteArrayStart(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) EncodeMapStart(length int) {
func (e *simpleEncDriver) WriteMapStart(length int) {
e.encLen(simpleVdMap, length)
}
@ -155,10 +156,10 @@ type simpleDecDriver struct {
bdRead bool
bd byte
br bool // bytes reader
b [scratchByteArrayLen]byte
noBuiltInTypes
noStreamingCodec
decNoSeparator
b [scratchByteArrayLen]byte
// noStreamingCodec
decDriverNoopContainerReader
}
func (d *simpleDecDriver) readNextBd() {
@ -433,7 +434,7 @@ func (d *simpleDecDriver) DecodeNaked() {
d.readNextBd()
}
n := &d.d.n
n := d.d.n
var decodeFurther bool
switch d.bd {
@ -512,6 +513,7 @@ func (d *simpleDecDriver) DecodeNaked() {
type SimpleHandle struct {
BasicHandle
binaryEncodingType
noElemSeparators
}
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {

View File

@ -197,24 +197,24 @@ func decodeTime(bs []byte) (tt time.Time, err error) {
return
}
func timeLocUTCName(tzint int16) string {
if tzint == 0 {
return "UTC"
}
var tzname = []byte("UTC+00:00")
//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
var tzhr, tzmin int16
if tzint < 0 {
tzname[3] = '-' // (TODO: verify. this works here)
tzhr, tzmin = -tzint/60, (-tzint)%60
} else {
tzhr, tzmin = tzint/60, tzint%60
}
tzname[4] = timeDigits[tzhr/10]
tzname[5] = timeDigits[tzhr%10]
tzname[7] = timeDigits[tzmin/10]
tzname[8] = timeDigits[tzmin%10]
return string(tzname)
//return time.FixedZone(string(tzname), int(tzint)*60)
}
// func timeLocUTCName(tzint int16) string {
// if tzint == 0 {
// return "UTC"
// }
// var tzname = []byte("UTC+00:00")
// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
// var tzhr, tzmin int16
// if tzint < 0 {
// tzname[3] = '-' // (TODO: verify. this works here)
// tzhr, tzmin = -tzint/60, (-tzint)%60
// } else {
// tzhr, tzmin = tzint/60, tzint%60
// }
// tzname[4] = timeDigits[tzhr/10]
// tzname[5] = timeDigits[tzhr%10]
// tzname[7] = timeDigits[tzmin/10]
// tzname[8] = timeDigits[tzmin%10]
// return string(tzname)
// //return time.FixedZone(string(tzname), int(tzint)*60)
// }

71
vendor/github.com/ugorji/go/codec/values_flex_test.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
/* // +build testing */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// This file contains values used by tests and benchmarks.
type TestStrucFlex struct {
_struct struct{} `codec:",omitempty"` //set omitempty for every field
testStrucCommon
Mis map[int]string
Mbu64 map[bool]struct{}
Miwu64s map[int]wrapUint64Slice
Mfwss map[float64]wrapStringSlice
Mf32wss map[float32]wrapStringSlice
Mui2wss map[uint64]wrapStringSlice
Msu2wss map[stringUint64T]wrapStringSlice
//M map[interface{}]interface{} `json:"-",bson:"-"`
Mtsptr map[string]*TestStrucFlex
Mts map[string]TestStrucFlex
Its []*TestStrucFlex
Nteststruc *TestStrucFlex
}
func newTestStrucFlex(depth, n int, bench, useInterface, useStringKeyOnly bool) (ts *TestStrucFlex) {
ts = &TestStrucFlex{
Miwu64s: map[int]wrapUint64Slice{
5: []wrapUint64{1, 2, 3, 4, 5},
3: []wrapUint64{1, 2, 3},
},
Mf32wss: map[float32]wrapStringSlice{
5.0: []wrapString{"1.0", "2.0", "3.0", "4.0", "5.0"},
3.0: []wrapString{"1.0", "2.0", "3.0"},
},
Mui2wss: map[uint64]wrapStringSlice{
5: []wrapString{"1.0", "2.0", "3.0", "4.0", "5.0"},
3: []wrapString{"1.0", "2.0", "3.0"},
},
Mfwss: map[float64]wrapStringSlice{
5.0: []wrapString{"1.0", "2.0", "3.0", "4.0", "5.0"},
3.0: []wrapString{"1.0", "2.0", "3.0"},
},
Mis: map[int]string{
1: "one",
22: "twenty two",
-44: "minus forty four",
},
Mbu64: map[bool]struct{}{false: struct{}{}, true: struct{}{}},
}
populateTestStrucCommon(&ts.testStrucCommon, n, bench, useInterface, useStringKeyOnly)
if depth > 0 {
depth--
if ts.Mtsptr == nil {
ts.Mtsptr = make(map[string]*TestStrucFlex)
}
if ts.Mts == nil {
ts.Mts = make(map[string]TestStrucFlex)
}
ts.Mtsptr["0"] = newTestStrucFlex(depth, n, bench, useInterface, useStringKeyOnly)
ts.Mts["0"] = *(ts.Mtsptr["0"])
ts.Its = append(ts.Its, ts.Mtsptr["0"])
}
return
}

View File

@ -11,22 +11,43 @@ package codec
import (
"math"
"strings"
"time"
)
var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC()
type wrapSliceUint64 []uint64
type wrapSliceString []string
type wrapUint64 uint64
type wrapString string
type wrapUint64Slice []wrapUint64
type wrapStringSlice []wrapString
type stringUint64T struct {
S string
U uint64
}
type AnonInTestStruc struct {
AS string
AI64 int64
AI16 int16
AUi64 uint64
ASslice []string
AI64slice []int64
AF64slice []float64
AS string
AI64 int64
AI16 int16
AUi64 uint64
ASslice []string
AI64slice []int64
AUi64slice []uint64
AF64slice []float64
AF32slice []float32
// AMI32U32 map[int32]uint32
// AMU32F64 map[uint32]float64 // json/bson do not like it
AMSU16 map[string]uint16
// use these to test 0-len or nil slices/maps/arrays
AI64arr0 [0]int64
A164slice0 []int64
AUi64sliceN []uint64
AMSU16N map[string]uint16
AMSU16E map[string]uint16
}
type AnonInTestStrucIntf struct {
@ -36,16 +57,29 @@ type AnonInTestStrucIntf struct {
T time.Time
}
type TestStruc struct {
_struct struct{} `codec:",omitempty"` //set omitempty for every field
type testSimpleFields struct {
S string
I64 int64
I32 int32
I16 int16
I8 int8
I64n int64
I32n int32
I16n int16
I8n int8
S string
I64 int64
I16 int16
Ui64 uint64
Ui32 uint32
Ui16 uint16
Ui8 uint8
B bool
By uint8 // byte: msgp doesn't like byte
F64 float64
F32 float32
B bool
By uint8 // byte: msgp doesn't like byte
Sslice []string
I64slice []int64
@ -59,22 +93,80 @@ type TestStruc struct {
// TODO: test these separately, specifically for reflection and codecgen.
// Unfortunately, ffjson doesn't support these. Its compilation even fails.
// Ui64array [4]uint64
// Ui64slicearray [][4]uint64
Ui64array [4]uint64
Ui64slicearray []*[4]uint64
WrapSliceInt64 wrapSliceUint64
WrapSliceString wrapSliceString
Msi64 map[string]int64
}
type testStrucCommon struct {
S string
I64 int64
I32 int32
I16 int16
I8 int8
I64n int64
I32n int32
I16n int16
I8n int8
Ui64 uint64
Ui32 uint32
Ui16 uint16
Ui8 uint8
F64 float64
F32 float32
B bool
By uint8 // byte: msgp doesn't like byte
Sslice []string
I64slice []int64
I16slice []int16
Ui64slice []uint64
Ui8slice []uint8
Bslice []bool
Byslice []byte
Iptrslice []*int64
// TODO: test these separately, specifically for reflection and codecgen.
// Unfortunately, ffjson doesn't support these. Its compilation even fails.
Ui64array [4]uint64
Ui64slicearray []*[4]uint64
WrapSliceInt64 wrapSliceUint64
WrapSliceString wrapSliceString
Msi64 map[string]int64
Simplef testSimpleFields
AnonInTestStruc
//M map[interface{}]interface{} `json:"-",bson:"-"`
Msi64 map[string]int64
NotAnon AnonInTestStruc
// make this a ptr, so that it could be set or not.
// for comparison (e.g. with msgp), give it a struct tag (so it is not inlined),
// make this one omitempty (so it is included if nil).
// make this one omitempty (so it is excluded if nil).
*AnonInTestStrucIntf `codec:",omitempty"`
Nmap map[string]bool //don't set this, so we can test for nil
Nslice []byte //don't set this, so we can test for nil
Nint64 *int64 //don't set this, so we can test for nil
Nmap map[string]bool //don't set this, so we can test for nil
Nslice []byte //don't set this, so we can test for nil
Nint64 *int64 //don't set this, so we can test for nil
}
type TestStruc struct {
_struct struct{} `codec:",omitempty"` //set omitempty for every field
testStrucCommon
Mtsptr map[string]*TestStruc
Mts map[string]TestStruc
Its []*TestStruc
@ -89,97 +181,6 @@ type tLowerFirstLetter struct {
b []byte
}
func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) {
var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
ts = &TestStruc{
S: "some string",
I64: math.MaxInt64 * 2 / 3, // 64,
I16: 1616,
Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it
Ui8: 160,
B: true,
By: 5,
Sslice: []string{"one", "two", "three"},
I64slice: []int64{1111, 2222, 3333},
I16slice: []int16{44, 55, 66},
Ui64slice: []uint64{12121212, 34343434, 56565656},
Ui8slice: []uint8{210, 211, 212},
Bslice: []bool{true, false, true, false},
Byslice: []byte{13, 14, 15},
Msi64: map[string]int64{
"one": 1,
"two": 2,
},
AnonInTestStruc: AnonInTestStruc{
// There's more leeway in altering this.
AS: "A-String",
AI64: -64646464,
AI16: 1616,
AUi64: 64646464,
// (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E".
// single reverse solidus character may be represented in json as "\u005C".
// include these in ASslice below.
ASslice: []string{"Aone", "Atwo", "Athree",
"Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"},
AI64slice: []int64{1, -22, 333, -4444, 55555, -666666},
AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4},
AF64slice: []float64{
11.11e-11, -11.11e+11,
2.222E+12, -2.222E-12,
-555.55E-5, 555.55E+5,
666.66E-6, -666.66E+6,
7777.7777E-7, -7777.7777E-7,
-8888.8888E+8, 8888.8888E+8,
-99999.9999E+9, 99999.9999E+9,
// these below are hairy enough to need strconv.ParseFloat
33.33E-33, -33.33E+33,
44.44e+44, -44.44e-44,
},
},
}
if useInterface {
ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{
Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)},
Ms: map[string]interface{}{
"true": "true",
"int64(9)": false,
},
T: testStrucTime,
}
}
//For benchmarks, some things will not work.
if !bench {
//json and bson require string keys in maps
//ts.M = map[interface{}]interface{}{
// true: "true",
// int8(9): false,
//}
//gob cannot encode nil in element in array (encodeArray: nil element)
ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
// ts.Iptrslice = nil
}
if !useStringKeyOnly {
// ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf
}
if depth > 0 {
depth--
if ts.Mtsptr == nil {
ts.Mtsptr = make(map[string]*TestStruc)
}
if ts.Mts == nil {
ts.Mts = make(map[string]TestStruc)
}
ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly)
ts.Mts["0"] = *(ts.Mtsptr["0"])
ts.Its = append(ts.Its, ts.Mtsptr["0"])
}
return
}
// Some other types
type Sstring string
@ -212,3 +213,240 @@ type SstructbigMapBySlice struct {
type Sinterface interface {
Noop()
}
var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC()
func populateTestStrucCommon(ts *testStrucCommon, n int, bench, useInterface, useStringKeyOnly bool) {
var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
var a = AnonInTestStruc{
// There's more leeway in altering this.
AS: strRpt(n, "A-String"),
AI64: -64646464,
AI16: 1616,
AUi64: 64646464,
// (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E".
// single reverse solidus character may be represented in json as "\u005C".
// include these in ASslice below.
ASslice: []string{
strRpt(n, "Aone"),
strRpt(n, "Atwo"),
strRpt(n, "Athree"),
strRpt(n, "Afour.reverse_solidus.\u005c"),
strRpt(n, "Afive.Gclef.\U0001d11E\"ugorji\"done.")},
AI64slice: []int64{
0, 1, -1, -22, 333, -4444, 55555, -666666,
// msgpack ones
-48, -32, -24, -8, 32, 127, 192, 255,
// standard ones
0, -1, 1,
math.MaxInt8, math.MaxInt8 + 4, math.MaxInt8 - 4,
math.MaxInt16, math.MaxInt16 + 4, math.MaxInt16 - 4,
math.MaxInt32, math.MaxInt32 + 4, math.MaxInt32 - 4,
math.MaxInt64, math.MaxInt64 - 4,
math.MinInt8, math.MinInt8 + 4, math.MinInt8 - 4,
math.MinInt16, math.MinInt16 + 4, math.MinInt16 - 4,
math.MinInt32, math.MinInt32 + 4, math.MinInt32 - 4,
math.MinInt64, math.MinInt64 + 4,
},
AUi64slice: []uint64{
0, 1, 22, 333, 4444, 55555, 666666,
// standard ones
math.MaxUint8, math.MaxUint8 + 4, math.MaxUint8 - 4,
math.MaxUint16, math.MaxUint16 + 4, math.MaxUint16 - 4,
math.MaxUint32, math.MaxUint32 + 4, math.MaxUint32 - 4,
math.MaxUint64, math.MaxUint64 - 4,
},
AMSU16: map[string]uint16{strRpt(n, "1"): 1, strRpt(n, "22"): 2, strRpt(n, "333"): 3, strRpt(n, "4444"): 4},
// Note: +/- inf, NaN, and other non-representable numbers should not be explicitly tested here
AF64slice: []float64{
11.11e-11, -11.11e+11,
2.222E+12, -2.222E-12,
-555.55E-5, 555.55E+5,
666.66E-6, -666.66E+6,
7777.7777E-7, -7777.7777E-7,
-8888.8888E+8, 8888.8888E+8,
-99999.9999E+9, 99999.9999E+9,
// these below are hairy enough to need strconv.ParseFloat
33.33E-33, -33.33E+33,
44.44e+44, -44.44e-44,
// standard ones
0, -1, 1,
// math.Inf(1), math.Inf(-1),
math.Pi, math.Phi, math.E,
math.MaxFloat64, math.SmallestNonzeroFloat64,
},
AF32slice: []float32{
11.11e-11, -11.11e+11,
2.222E+12, -2.222E-12,
-555.55E-5, 555.55E+5,
666.66E-6, -666.66E+6,
7777.7777E-7, -7777.7777E-7,
-8888.8888E+8, 8888.8888E+8,
-99999.9999E+9, 99999.9999E+9,
// these below are hairy enough to need strconv.ParseFloat
33.33E-33, -33.33E+33,
// standard ones
0, -1, 1,
// math.Float32frombits(0x7FF00000), math.Float32frombits(0xFFF00000), //+inf and -inf
math.MaxFloat32, math.SmallestNonzeroFloat32,
},
A164slice0: []int64{},
AUi64sliceN: nil,
AMSU16N: nil,
AMSU16E: map[string]uint16{},
}
*ts = testStrucCommon{
S: strRpt(n, `some really really cool names that are nigerian and american like "ugorji melody nwoke" - get it? `),
// set the numbers close to the limits
I8: math.MaxInt8 * 2 / 3, // 8,
I8n: math.MinInt8 * 2 / 3, // 8,
I16: math.MaxInt16 * 2 / 3, // 16,
I16n: math.MinInt16 * 2 / 3, // 16,
I32: math.MaxInt32 * 2 / 3, // 32,
I32n: math.MinInt32 * 2 / 3, // 32,
I64: math.MaxInt64 * 2 / 3, // 64,
I64n: math.MinInt64 * 2 / 3, // 64,
Ui64: math.MaxUint64 * 2 / 3, // 64
Ui32: math.MaxUint32 * 2 / 3, // 32
Ui16: math.MaxUint16 * 2 / 3, // 16
Ui8: math.MaxUint8 * 2 / 3, // 8
F32: 3.402823e+38, // max representable float32 without losing precision
F64: 3.40281991833838838338e+53,
B: true,
By: 5,
Sslice: []string{strRpt(n, "one"), strRpt(n, "two"), strRpt(n, "three")},
I64slice: []int64{1111, 2222, 3333},
I16slice: []int16{44, 55, 66},
Ui64slice: []uint64{12121212, 34343434, 56565656},
Ui8slice: []uint8{210, 211, 212},
Bslice: []bool{true, false, true, false},
Byslice: []byte{13, 14, 15},
Msi64: map[string]int64{
strRpt(n, "one"): 1,
strRpt(n, "two"): 2,
strRpt(n, "\"three\""): 3,
},
Ui64array: [4]uint64{4, 16, 64, 256},
WrapSliceInt64: []uint64{4, 16, 64, 256},
WrapSliceString: []string{strRpt(n, "4"), strRpt(n, "16"), strRpt(n, "64"), strRpt(n, "256")},
// DecodeNaked bombs here, because the stringUint64T is decoded as a map,
// and a map cannot be the key type of a map.
// Thus, don't initialize this here.
// Msu2wss: map[stringUint64T]wrapStringSlice{
// {"5", 5}: []wrapString{"1", "2", "3", "4", "5"},
// {"3", 3}: []wrapString{"1", "2", "3"},
// },
// make Simplef same as top-level
Simplef: testSimpleFields{
S: strRpt(n, `some really really cool names that are nigerian and american like "ugorji melody nwoke" - get it? `),
// set the numbers close to the limits
I8: math.MaxInt8 * 2 / 3, // 8,
I8n: math.MinInt8 * 2 / 3, // 8,
I16: math.MaxInt16 * 2 / 3, // 16,
I16n: math.MinInt16 * 2 / 3, // 16,
I32: math.MaxInt32 * 2 / 3, // 32,
I32n: math.MinInt32 * 2 / 3, // 32,
I64: math.MaxInt64 * 2 / 3, // 64,
I64n: math.MinInt64 * 2 / 3, // 64,
Ui64: math.MaxUint64 * 2 / 3, // 64
Ui32: math.MaxUint32 * 2 / 3, // 32
Ui16: math.MaxUint16 * 2 / 3, // 16
Ui8: math.MaxUint8 * 2 / 3, // 8
F32: 3.402823e+38, // max representable float32 without losing precision
F64: 3.40281991833838838338e+53,
B: true,
By: 5,
Sslice: []string{strRpt(n, "one"), strRpt(n, "two"), strRpt(n, "three")},
I64slice: []int64{1111, 2222, 3333},
I16slice: []int16{44, 55, 66},
Ui64slice: []uint64{12121212, 34343434, 56565656},
Ui8slice: []uint8{210, 211, 212},
Bslice: []bool{true, false, true, false},
Byslice: []byte{13, 14, 15},
Msi64: map[string]int64{
strRpt(n, "one"): 1,
strRpt(n, "two"): 2,
strRpt(n, "\"three\""): 3,
},
Ui64array: [4]uint64{4, 16, 64, 256},
WrapSliceInt64: []uint64{4, 16, 64, 256},
WrapSliceString: []string{strRpt(n, "4"), strRpt(n, "16"), strRpt(n, "64"), strRpt(n, "256")},
},
AnonInTestStruc: a,
NotAnon: a,
}
ts.Ui64slicearray = []*[4]uint64{&ts.Ui64array, &ts.Ui64array}
if useInterface {
ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{
Islice: []interface{}{strRpt(n, "true"), true, strRpt(n, "no"), false, uint64(288), float64(0.4)},
Ms: map[string]interface{}{
strRpt(n, "true"): strRpt(n, "true"),
strRpt(n, "int64(9)"): false,
},
T: testStrucTime,
}
}
//For benchmarks, some things will not work.
if !bench {
//json and bson require string keys in maps
//ts.M = map[interface{}]interface{}{
// true: "true",
// int8(9): false,
//}
//gob cannot encode nil in element in array (encodeArray: nil element)
ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
// ts.Iptrslice = nil
}
if !useStringKeyOnly {
// ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf
}
}
func newTestStruc(depth, n int, bench, useInterface, useStringKeyOnly bool) (ts *TestStruc) {
ts = &TestStruc{}
populateTestStrucCommon(&ts.testStrucCommon, n, bench, useInterface, useStringKeyOnly)
if depth > 0 {
depth--
if ts.Mtsptr == nil {
ts.Mtsptr = make(map[string]*TestStruc)
}
if ts.Mts == nil {
ts.Mts = make(map[string]TestStruc)
}
ts.Mtsptr[strRpt(n, "0")] = newTestStruc(depth, n, bench, useInterface, useStringKeyOnly)
ts.Mts[strRpt(n, "0")] = *(ts.Mtsptr[strRpt(n, "0")])
ts.Its = append(ts.Its, ts.Mtsptr[strRpt(n, "0")])
}
return
}
func strRpt(n int, s string) string {
return strings.Repeat(s, n)
}

123
vendor/github.com/ugorji/go/codec/x_bench_gen_test.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// +build x
// +build generated
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"bytes"
"errors"
"fmt"
"testing"
"github.com/mailru/easyjson"
"github.com/pquerna/ffjson/ffjson"
"github.com/tinylib/msgp/msgp"
)
/*
To update all these, use:
go get -u github.com/tinylib/msgp/msgp github.com/tinylib/msgp \
github.com/pquerna/ffjson/ffjson github.com/pquerna/ffjson \
github.com/mailru/easyjson/...
Known Issues with external libraries:
- msgp io.R/W support doesn't work. It throws an error
*/
func init() {
testPreInitFns = append(testPreInitFns, benchXGenPreInit)
}
func benchXGenPreInit() {
benchCheckers = append(benchCheckers,
benchChecker{"msgp", fnMsgpEncodeFn, fnMsgpDecodeFn},
benchChecker{"easyjson", fnEasyjsonEncodeFn, fnEasyjsonDecodeFn},
benchChecker{"ffjson", fnFfjsonEncodeFn, fnFfjsonDecodeFn},
)
}
func fnEasyjsonEncodeFn(ts interface{}, bsIn []byte) ([]byte, error) {
if _, ok := ts.(easyjson.Marshaler); !ok {
return nil, errors.New("easyjson: input is not a easyjson.Marshaler")
}
if testUseIoEncDec >= 0 {
buf := new(bytes.Buffer)
_, err := easyjson.MarshalToWriter(ts.(easyjson.Marshaler), buf)
return buf.Bytes(), err
}
return easyjson.Marshal(ts.(easyjson.Marshaler))
// return ts.(json.Marshaler).MarshalJSON()
}
func fnEasyjsonDecodeFn(buf []byte, ts interface{}) error {
if _, ok := ts.(easyjson.Unmarshaler); !ok {
return errors.New("easyjson: input is not a easyjson.Unmarshaler")
}
if testUseIoEncDec >= 0 {
return easyjson.UnmarshalFromReader(bytes.NewReader(buf), ts.(easyjson.Unmarshaler))
}
return easyjson.Unmarshal(buf, ts.(easyjson.Unmarshaler))
// return ts.(json.Unmarshaler).UnmarshalJSON(buf)
}
func fnFfjsonEncodeFn(ts interface{}, bsIn []byte) ([]byte, error) {
return ffjson.Marshal(ts)
// return ts.(json.Marshaler).MarshalJSON()
}
func fnFfjsonDecodeFn(buf []byte, ts interface{}) error {
return ffjson.Unmarshal(buf, ts)
// return ts.(json.Unmarshaler).UnmarshalJSON(buf)
}
func fnMsgpEncodeFn(ts interface{}, bsIn []byte) ([]byte, error) {
if _, ok := ts.(msgp.Encodable); !ok {
return nil, fmt.Errorf("msgp: input of type %T is not a msgp.Encodable", ts)
}
if testUseIoEncDec >= 0 {
buf := fnBenchmarkByteBuf(bsIn)
err := ts.(msgp.Encodable).EncodeMsg(msgp.NewWriter(buf))
return buf.Bytes(), err
}
return ts.(msgp.Marshaler).MarshalMsg(bsIn[:0]) // msgp appends to slice.
}
func fnMsgpDecodeFn(buf []byte, ts interface{}) (err error) {
if _, ok := ts.(msgp.Decodable); !ok {
return fmt.Errorf("msgp: input of type %T is not a msgp.Decodable", ts)
}
if testUseIoEncDec >= 0 {
err = ts.(msgp.Decodable).DecodeMsg(msgp.NewReader(bytes.NewReader(buf)))
return
}
_, err = ts.(msgp.Unmarshaler).UnmarshalMsg(buf)
return
}
func Benchmark__Msgp_______Encode(b *testing.B) {
fnBenchmarkEncode(b, "msgp", benchTs, fnMsgpEncodeFn)
}
func Benchmark__Msgp_______Decode(b *testing.B) {
fnBenchmarkDecode(b, "msgp", benchTs, fnMsgpEncodeFn, fnMsgpDecodeFn, fnBenchNewTs)
}
func Benchmark__Easyjson___Encode(b *testing.B) {
fnBenchmarkEncode(b, "easyjson", benchTs, fnEasyjsonEncodeFn)
}
func Benchmark__Easyjson___Decode(b *testing.B) {
fnBenchmarkDecode(b, "easyjson", benchTs, fnEasyjsonEncodeFn, fnEasyjsonDecodeFn, fnBenchNewTs)
}
func Benchmark__Ffjson_____Encode(b *testing.B) {
fnBenchmarkEncode(b, "ffjson", benchTs, fnFfjsonEncodeFn)
}
func Benchmark__Ffjson_____Decode(b *testing.B) {
fnBenchmarkDecode(b, "ffjson", benchTs, fnFfjsonEncodeFn, fnFfjsonDecodeFn, fnBenchNewTs)
}

View File

@ -29,49 +29,84 @@ import "testing"
// os.Exit(exitcode)
// }
func testGroupResetFlags() {
testUseMust = false
testCanonical = false
testUseMust = false
testInternStr = false
testUseIoEncDec = -1
testStructToArray = false
testCheckCircRef = false
testUseReset = false
testMaxInitLen = 0
testUseIoWrapper = false
testNumRepeatString = 8
}
func testSuite(t *testing.T, f func(t *testing.T)) {
// find . -name "*_test.go" | xargs grep -e 'flag.' | cut -d '&' -f 2 | cut -d ',' -f 1 | grep -e '^test'
// Disregard the following: testVerbose, testInitDebug, testSkipIntf, testJsonIndent (Need a test for it)
testReinit() // so flag.Parse() is called first, and never called again
testUseMust = false
testCanonical = false
testUseMust = false
testInternStr = false
testUseIoEncDec = false
testStructToArray = false
testWriteNoSymbols = false
testCheckCircRef = false
testJsonHTMLCharsAsIs = false
testUseReset = false
testMaxInitLen = 0
testJsonIndent = 0
testDecodeOptions = DecodeOptions{}
testEncodeOptions = EncodeOptions{}
testGroupResetFlags()
testReinit()
t.Run("optionsFalse", f)
testMaxInitLen = 10
testJsonIndent = 8
testReinit()
t.Run("initLen10-jsonSpaces", f)
testReinit()
testMaxInitLen = 10
testJsonIndent = -1
testReinit()
t.Run("initLen10-jsonTabs", f)
testCanonical = true
testUseMust = true
testInternStr = true
testUseIoEncDec = true
testUseIoEncDec = 0
testStructToArray = true
testWriteNoSymbols = true
testCheckCircRef = true
testJsonHTMLCharsAsIs = true
testUseReset = true
testDecodeOptions.MapValueReset = true
testReinit()
t.Run("optionsTrue", f)
testEncodeOptions.AsSymbols = AsSymbolAll
testUseIoWrapper = true
testReinit()
t.Run("optionsTrue-ioWrapper", f)
testUseIoEncDec = -1
testDepth = 6
testReinit()
t.Run("optionsTrue-deepstruct", f)
// make buffer small enough so that we have to re-fill multiple times.
testSkipRPCTests = true
testUseIoEncDec = 128
// testDecodeOptions.ReaderBufferSize = 128
// testEncodeOptions.WriterBufferSize = 128
testReinit()
t.Run("optionsTrue-bufio", f)
// testDecodeOptions.ReaderBufferSize = 0
// testEncodeOptions.WriterBufferSize = 0
testUseIoEncDec = -1
testSkipRPCTests = false
testNumRepeatString = 32
testReinit()
t.Run("optionsTrue-largestrings", f)
// The following here MUST be tested individually, as they create
// side effects i.e. the decoded value is different.
// testDecodeOptions.MapValueReset = true // ok - no side effects
// testDecodeOptions.InterfaceReset = true // error??? because we do deepEquals to verify
// testDecodeOptions.ErrorIfNoField = true // error, as expected, as fields not there
// testDecodeOptions.ErrorIfNoArrayExpand = true // no error, but no error case either
// testDecodeOptions.PreferArrayOverSlice = true // error??? because slice != array.
// .... however, update deepEqual to take this option
// testReinit()
// t.Run("optionsTrue-resetOptions", f)
testGroupResetFlags()
}
/*
@ -84,6 +119,7 @@ find . -name "$z" | xargs grep -e '^func Test' | \
func testCodecGroup(t *testing.T) {
// println("running testcodecsuite")
// <setup code>
t.Run("TestBincCodecsTable", TestBincCodecsTable)
t.Run("TestBincCodecsMisc", TestBincCodecsMisc)
t.Run("TestBincCodecsEmbeddedPointer", TestBincCodecsEmbeddedPointer)
@ -129,7 +165,219 @@ func testCodecGroup(t *testing.T) {
t.Run("TestJsonLargeInteger", TestJsonLargeInteger)
t.Run("TestJsonDecodeNonStringScalarInStringContext", TestJsonDecodeNonStringScalarInStringContext)
t.Run("TestJsonEncodeIndent", TestJsonEncodeIndent)
t.Run("TestJsonSwallowAndZero", TestJsonSwallowAndZero)
t.Run("TestCborSwallowAndZero", TestCborSwallowAndZero)
t.Run("TestMsgpackSwallowAndZero", TestMsgpackSwallowAndZero)
t.Run("TestBincSwallowAndZero", TestBincSwallowAndZero)
t.Run("TestSimpleSwallowAndZero", TestSimpleSwallowAndZero)
t.Run("TestJsonRawExt", TestJsonRawExt)
t.Run("TestCborRawExt", TestCborRawExt)
t.Run("TestMsgpackRawExt", TestMsgpackRawExt)
t.Run("TestBincRawExt", TestBincRawExt)
t.Run("TestSimpleRawExt", TestSimpleRawExt)
t.Run("TestJsonMapStructKey", TestJsonMapStructKey)
t.Run("TestCborMapStructKey", TestCborMapStructKey)
t.Run("TestMsgpackMapStructKey", TestMsgpackMapStructKey)
t.Run("TestBincMapStructKey", TestBincMapStructKey)
t.Run("TestSimpleMapStructKey", TestSimpleMapStructKey)
t.Run("TestJsonDecodeNilMapValue", TestJsonDecodeNilMapValue)
t.Run("TestCborDecodeNilMapValue", TestCborDecodeNilMapValue)
t.Run("TestMsgpackDecodeNilMapValue", TestMsgpackDecodeNilMapValue)
t.Run("TestBincDecodeNilMapValue", TestBincDecodeNilMapValue)
t.Run("TestSimpleDecodeNilMapValue", TestSimpleDecodeNilMapValue)
t.Run("TestJsonEmbeddedFieldPrecedence", TestJsonEmbeddedFieldPrecedence)
t.Run("TestCborEmbeddedFieldPrecedence", TestCborEmbeddedFieldPrecedence)
t.Run("TestMsgpackEmbeddedFieldPrecedence", TestMsgpackEmbeddedFieldPrecedence)
t.Run("TestBincEmbeddedFieldPrecedence", TestBincEmbeddedFieldPrecedence)
t.Run("TestSimpleEmbeddedFieldPrecedence", TestSimpleEmbeddedFieldPrecedence)
t.Run("TestJsonLargeContainerLen", TestJsonLargeContainerLen)
t.Run("TestCborLargeContainerLen", TestCborLargeContainerLen)
t.Run("TestMsgpackLargeContainerLen", TestMsgpackLargeContainerLen)
t.Run("TestBincLargeContainerLen", TestBincLargeContainerLen)
t.Run("TestSimpleLargeContainerLen", TestSimpleLargeContainerLen)
t.Run("TestJsonMammothMapsAndSlices", TestJsonMammothMapsAndSlices)
t.Run("TestCborMammothMapsAndSlices", TestCborMammothMapsAndSlices)
t.Run("TestMsgpackMammothMapsAndSlices", TestMsgpackMammothMapsAndSlices)
t.Run("TestBincMammothMapsAndSlices", TestBincMammothMapsAndSlices)
t.Run("TestSimpleMammothMapsAndSlices", TestSimpleMammothMapsAndSlices)
t.Run("TestJsonTime", TestJsonTime)
t.Run("TestCborTime", TestCborTime)
t.Run("TestMsgpackTime", TestMsgpackTime)
t.Run("TestBincTime", TestBincTime)
t.Run("TestSimpleTime", TestSimpleTime)
t.Run("TestJsonUintToInt", TestJsonUintToInt)
t.Run("TestCborUintToInt", TestCborUintToInt)
t.Run("TestMsgpackUintToInt", TestMsgpackUintToInt)
t.Run("TestBincUintToInt", TestBincUintToInt)
t.Run("TestSimpleUintToInt", TestSimpleUintToInt)
t.Run("TestJsonInvalidUnicode", TestJsonInvalidUnicode)
t.Run("TestCborHalfFloat", TestCborHalfFloat)
// <tear-down code>
}
func TestCodecSuite(t *testing.T) { testSuite(t, testCodecGroup) }
func testJsonGroup(t *testing.T) {
t.Run("TestJsonCodecsTable", TestJsonCodecsTable)
t.Run("TestJsonCodecsMisc", TestJsonCodecsMisc)
t.Run("TestJsonCodecsEmbeddedPointer", TestJsonCodecsEmbeddedPointer)
t.Run("TestJsonCodecChan", TestJsonCodecChan)
t.Run("TestJsonStdEncIntf", TestJsonStdEncIntf)
t.Run("TestJsonMammoth", TestJsonMammoth)
t.Run("TestJsonRaw", TestJsonRaw)
t.Run("TestJsonRpcGo", TestJsonRpcGo)
t.Run("TestJsonLargeInteger", TestJsonLargeInteger)
t.Run("TestJsonDecodeNonStringScalarInStringContext", TestJsonDecodeNonStringScalarInStringContext)
t.Run("TestJsonEncodeIndent", TestJsonEncodeIndent)
t.Run("TestJsonSwallowAndZero", TestJsonSwallowAndZero)
t.Run("TestJsonRawExt", TestJsonRawExt)
t.Run("TestJsonMapStructKey", TestJsonMapStructKey)
t.Run("TestJsonDecodeNilMapValue", TestJsonDecodeNilMapValue)
t.Run("TestJsonEmbeddedFieldPrecedence", TestJsonEmbeddedFieldPrecedence)
t.Run("TestJsonLargeContainerLen", TestJsonLargeContainerLen)
t.Run("TestJsonMammothMapsAndSlices", TestJsonMammothMapsAndSlices)
t.Run("TestJsonInvalidUnicode", TestJsonInvalidUnicode)
t.Run("TestJsonTime", TestJsonTime)
t.Run("TestJsonUintToInt", TestJsonUintToInt)
}
func testBincGroup(t *testing.T) {
t.Run("TestBincCodecsTable", TestBincCodecsTable)
t.Run("TestBincCodecsMisc", TestBincCodecsMisc)
t.Run("TestBincCodecsEmbeddedPointer", TestBincCodecsEmbeddedPointer)
t.Run("TestBincStdEncIntf", TestBincStdEncIntf)
t.Run("TestBincMammoth", TestBincMammoth)
t.Run("TestBincRaw", TestBincRaw)
t.Run("TestSimpleRpcGo", TestSimpleRpcGo)
t.Run("TestBincUnderlyingType", TestBincUnderlyingType)
t.Run("TestBincSwallowAndZero", TestBincSwallowAndZero)
t.Run("TestBincRawExt", TestBincRawExt)
t.Run("TestBincMapStructKey", TestBincMapStructKey)
t.Run("TestBincDecodeNilMapValue", TestBincDecodeNilMapValue)
t.Run("TestBincEmbeddedFieldPrecedence", TestBincEmbeddedFieldPrecedence)
t.Run("TestBincLargeContainerLen", TestBincLargeContainerLen)
t.Run("TestBincMammothMapsAndSlices", TestBincMammothMapsAndSlices)
t.Run("TestBincTime", TestBincTime)
t.Run("TestBincUintToInt", TestBincUintToInt)
}
func testCborGroup(t *testing.T) {
t.Run("TestCborCodecsTable", TestCborCodecsTable)
t.Run("TestCborCodecsMisc", TestCborCodecsMisc)
t.Run("TestCborCodecsEmbeddedPointer", TestCborCodecsEmbeddedPointer)
t.Run("TestCborMapEncodeForCanonical", TestCborMapEncodeForCanonical)
t.Run("TestCborCodecChan", TestCborCodecChan)
t.Run("TestCborStdEncIntf", TestCborStdEncIntf)
t.Run("TestCborMammoth", TestCborMammoth)
t.Run("TestCborRaw", TestCborRaw)
t.Run("TestCborRpcGo", TestCborRpcGo)
t.Run("TestCborSwallowAndZero", TestCborSwallowAndZero)
t.Run("TestCborRawExt", TestCborRawExt)
t.Run("TestCborMapStructKey", TestCborMapStructKey)
t.Run("TestCborDecodeNilMapValue", TestCborDecodeNilMapValue)
t.Run("TestCborEmbeddedFieldPrecedence", TestCborEmbeddedFieldPrecedence)
t.Run("TestCborLargeContainerLen", TestCborLargeContainerLen)
t.Run("TestCborMammothMapsAndSlices", TestCborMammothMapsAndSlices)
t.Run("TestCborTime", TestCborTime)
t.Run("TestCborUintToInt", TestCborUintToInt)
t.Run("TestCborHalfFloat", TestCborHalfFloat)
}
func testMsgpackGroup(t *testing.T) {
t.Run("TestMsgpackCodecsTable", TestMsgpackCodecsTable)
t.Run("TestMsgpackCodecsMisc", TestMsgpackCodecsMisc)
t.Run("TestMsgpackCodecsEmbeddedPointer", TestMsgpackCodecsEmbeddedPointer)
t.Run("TestMsgpackStdEncIntf", TestMsgpackStdEncIntf)
t.Run("TestMsgpackMammoth", TestMsgpackMammoth)
t.Run("TestMsgpackRaw", TestMsgpackRaw)
t.Run("TestMsgpackRpcGo", TestMsgpackRpcGo)
t.Run("TestMsgpackRpcSpec", TestMsgpackRpcSpec)
t.Run("TestMsgpackSwallowAndZero", TestMsgpackSwallowAndZero)
t.Run("TestMsgpackRawExt", TestMsgpackRawExt)
t.Run("TestMsgpackMapStructKey", TestMsgpackMapStructKey)
t.Run("TestMsgpackDecodeNilMapValue", TestMsgpackDecodeNilMapValue)
t.Run("TestMsgpackEmbeddedFieldPrecedence", TestMsgpackEmbeddedFieldPrecedence)
t.Run("TestMsgpackLargeContainerLen", TestMsgpackLargeContainerLen)
t.Run("TestMsgpackMammothMapsAndSlices", TestMsgpackMammothMapsAndSlices)
t.Run("TestMsgpackTime", TestMsgpackTime)
t.Run("TestMsgpackUintToInt", TestMsgpackUintToInt)
}
func TestCodecSuite(t *testing.T) {
testSuite(t, testCodecGroup)
testGroupResetFlags()
oldIndent, oldCharsAsis, oldPreferFloat, oldMapKeyAsString :=
testJsonH.Indent, testJsonH.HTMLCharsAsIs, testJsonH.PreferFloat, testJsonH.MapKeyAsString
testMaxInitLen = 10
testJsonH.Indent = 8
testJsonH.HTMLCharsAsIs = true
testJsonH.MapKeyAsString = true
// testJsonH.PreferFloat = true
testReinit()
t.Run("json-spaces-htmlcharsasis-initLen10", testJsonGroup)
testMaxInitLen = 10
testJsonH.Indent = -1
testJsonH.HTMLCharsAsIs = false
testJsonH.MapKeyAsString = true
// testJsonH.PreferFloat = false
testReinit()
t.Run("json-tabs-initLen10", testJsonGroup)
testJsonH.Indent, testJsonH.HTMLCharsAsIs, testJsonH.PreferFloat, testJsonH.MapKeyAsString =
oldIndent, oldCharsAsis, oldPreferFloat, oldMapKeyAsString
oldIndefLen := testCborH.IndefiniteLength
testCborH.IndefiniteLength = true
testReinit()
t.Run("cbor-indefinitelength", testCborGroup)
testCborH.IndefiniteLength = oldIndefLen
oldSymbols := testBincH.getBasicHandle().AsSymbols
testBincH.getBasicHandle().AsSymbols = AsSymbolNone
testReinit()
t.Run("binc-no-symbols", testBincGroup)
testBincH.getBasicHandle().AsSymbols = AsSymbolAll
testReinit()
t.Run("binc-all-symbols", testBincGroup)
testBincH.getBasicHandle().AsSymbols = oldSymbols
oldWriteExt := testMsgpackH.WriteExt
oldNoFixedNum := testMsgpackH.NoFixedNum
testMsgpackH.WriteExt = !testMsgpackH.WriteExt
testReinit()
t.Run("msgpack-inverse-writeext", testMsgpackGroup)
testMsgpackH.WriteExt = oldWriteExt
testMsgpackH.NoFixedNum = !testMsgpackH.NoFixedNum
testReinit()
t.Run("msgpack-fixednum", testMsgpackGroup)
testMsgpackH.NoFixedNum = oldNoFixedNum
testGroupResetFlags()
}
// func TestCodecSuite(t *testing.T) {
// testReinit() // so flag.Parse() is called first, and never called again
// testDecodeOptions, testEncodeOptions = DecodeOptions{}, EncodeOptions{}
// testGroupResetFlags()
// testReinit()
// t.Run("optionsFalse", func(t *testing.T) {
// t.Run("TestJsonMammothMapsAndSlices", TestJsonMammothMapsAndSlices)
// })
// }

View File

@ -0,0 +1,49 @@
// +build alltests
// +build x
// +build go1.7
// +build generated
package codec
// see notes in z_all_bench_test.go
import "testing"
func benchmarkCodecXGenGroup(t *testing.B) {
logT(nil, "\n-------------------------------\n")
t.Run("Benchmark__Msgpack____Encode", Benchmark__Msgpack____Encode)
t.Run("Benchmark__Binc_______Encode", Benchmark__Binc_______Encode)
t.Run("Benchmark__Simple_____Encode", Benchmark__Simple_____Encode)
t.Run("Benchmark__Cbor_______Encode", Benchmark__Cbor_______Encode)
t.Run("Benchmark__Json_______Encode", Benchmark__Json_______Encode)
t.Run("Benchmark__Std_Json___Encode", Benchmark__Std_Json___Encode)
t.Run("Benchmark__Gob________Encode", Benchmark__Gob________Encode)
t.Run("Benchmark__JsonIter___Encode", Benchmark__JsonIter___Encode)
t.Run("Benchmark__Bson_______Encode", Benchmark__Bson_______Encode)
t.Run("Benchmark__VMsgpack___Encode", Benchmark__VMsgpack___Encode)
t.Run("Benchmark__Msgp_______Encode", Benchmark__Msgp_______Encode)
t.Run("Benchmark__Easyjson___Encode", Benchmark__Easyjson___Encode)
t.Run("Benchmark__Ffjson_____Encode", Benchmark__Ffjson_____Encode)
t.Run("Benchmark__Gcbor______Encode", Benchmark__Gcbor______Encode)
t.Run("Benchmark__Xdr________Encode", Benchmark__Xdr________Encode)
t.Run("Benchmark__Sereal_____Encode", Benchmark__Sereal_____Encode)
t.Run("Benchmark__Msgpack____Decode", Benchmark__Msgpack____Decode)
t.Run("Benchmark__Binc_______Decode", Benchmark__Binc_______Decode)
t.Run("Benchmark__Simple_____Decode", Benchmark__Simple_____Decode)
t.Run("Benchmark__Cbor_______Decode", Benchmark__Cbor_______Decode)
t.Run("Benchmark__Json_______Decode", Benchmark__Json_______Decode)
t.Run("Benchmark__Std_Json___Decode", Benchmark__Std_Json___Decode)
t.Run("Benchmark__Gob________Decode", Benchmark__Gob________Decode)
t.Run("Benchmark__JsonIter___Decode", Benchmark__JsonIter___Decode)
t.Run("Benchmark__Bson_______Decode", Benchmark__Bson_______Decode)
t.Run("Benchmark__VMsgpack___Decode", Benchmark__VMsgpack___Decode)
t.Run("Benchmark__Msgp_______Decode", Benchmark__Msgp_______Decode)
t.Run("Benchmark__Easyjson___Decode", Benchmark__Easyjson___Decode)
t.Run("Benchmark__Ffjson_____Decode", Benchmark__Ffjson_____Decode)
t.Run("Benchmark__Gcbor______Decode", Benchmark__Gcbor______Decode)
t.Run("Benchmark__Xdr________Decode", Benchmark__Xdr________Decode)
t.Run("Benchmark__Sereal_____Decode", Benchmark__Sereal_____Decode)
}
func BenchmarkCodecXGenSuite(t *testing.B) { benchmarkSuite(t, benchmarkCodecXGenGroup) }

View File

@ -142,7 +142,7 @@ func (c *Client) Discover(ctx context.Context) (Directory, error) {
//
// In the case where CA server does not provide the issued certificate in the response,
// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips.
// In such scenario the caller can cancel the polling with ctx.
// In such a scenario, the caller can cancel the polling with ctx.
//
// CreateCert returns an error if the CA's response or chain was unreasonably large.
// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features.
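A hedged sketch of the cancelable polling described above. The registered client and the DER-encoded CSR are assumed to come from elsewhere, and the CreateCert signature shown is taken to match this package revision:

package main

import (
	"context"
	"crypto/x509"
	"time"

	"golang.org/x/crypto/acme"
)

// issueCert bounds CreateCert's polling with a deadline so the caller can
// abandon the exchange if the CA never publishes the certificate.
func issueCert(client *acme.Client, csr []byte) (*x509.Certificate, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	der, _, err := client.CreateCert(ctx, csr, 0, true) // exp=0: CA default; bundle=true: include chain
	if err != nil {
		return nil, err // ctx.Err() surfaces here if the deadline expired mid-poll
	}
	return x509.ParseCertificate(der[0])
}

func main() {}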
@ -257,7 +257,7 @@ func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte,
func AcceptTOS(tosURL string) bool { return true }
// Register creates a new account registration by following the "new-reg" flow.
// It returns registered account. The account is not modified.
// It returns the registered account. The account is not modified.
//
// The registration may require the caller to agree to the CA's Terms of Service (TOS).
// If so, and the account has not indicated the acceptance of the terms (see Account for details),
@ -995,6 +995,7 @@ func keyAuth(pub crypto.PublicKey, token string) (string, error) {
// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges
// with the given SANs and auto-generated public/private key pair.
// The Subject Common Name is set to the first SAN to aid debugging.
// To create a cert with a custom key pair, specify WithKey option.
func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
var (
@ -1033,6 +1034,9 @@ func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
}
}
tmpl.DNSNames = san
if len(san) > 0 {
tmpl.Subject.CommonName = san[0]
}
der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
if err != nil {

View File

@ -1186,6 +1186,9 @@ func TestTLSSNI01ChallengeCert(t *testing.T) {
if cert.DNSNames[0] != name {
t.Errorf("cert.DNSNames[0] != name: %q vs %q", cert.DNSNames[0], name)
}
if cn := cert.Subject.CommonName; cn != san {
t.Errorf("cert.Subject.CommonName = %q; want %q", cn, san)
}
}
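A sketch of the call path the tests above exercise; it also shows the WithKey option mentioned earlier for supplying a caller-provided key instead of the auto-generated pair. The returned certificate carries the SAN in DNSNames and, with this change, in Subject.CommonName as well:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"

	"golang.org/x/crypto/acme"
)

// challengeCert builds a TLS-SNI-01 challenge certificate with a key we control.
func challengeCert(client *acme.Client, token string) (tls.Certificate, string, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Certificate{}, "", err
	}
	return client.TLSSNI01ChallengeCert(token, acme.WithKey(key))
}

func main() {}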
func TestTLSSNI02ChallengeCert(t *testing.T) {
@ -1219,6 +1222,9 @@ func TestTLSSNI02ChallengeCert(t *testing.T) {
if i >= len(cert.DNSNames) || cert.DNSNames[i] != name {
t.Errorf("%v doesn't have %q", cert.DNSNames, name)
}
if cn := cert.Subject.CommonName; cn != sanA {
t.Errorf("CommonName = %q; want %q", cn, sanA)
}
}
func TestTLSChallengeCertOpt(t *testing.T) {

View File

@ -371,7 +371,7 @@ func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certifica
// We are the first; state is locked.
// Unblock the readers when domain ownership is verified
// and the we got the cert or the process failed.
// and we got the cert or the process failed.
defer state.Unlock()
state.locked = false
@ -439,7 +439,7 @@ func (m *Manager) certState(domain string) (*certState, error) {
return state, nil
}
// authorizedCert starts domain ownership verification process and requests a new cert upon success.
// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
// The key argument is the certificate private key.
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
if err := m.verify(ctx, domain); err != nil {

View File

@ -6,7 +6,7 @@
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
// RFC 8032.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519

View File

@ -295,17 +295,17 @@ const (
// The enumerated reasons for revoking a certificate. See RFC 5280.
const (
Unspecified = iota
KeyCompromise = iota
CACompromise = iota
AffiliationChanged = iota
Superseded = iota
CessationOfOperation = iota
CertificateHold = iota
_ = iota
RemoveFromCRL = iota
PrivilegeWithdrawn = iota
AACompromise = iota
Unspecified = 0
KeyCompromise = 1
CACompromise = 2
AffiliationChanged = 3
Superseded = 4
CessationOfOperation = 5
CertificateHold = 6
RemoveFromCRL = 8
PrivilegeWithdrawn = 9
AACompromise = 10
)
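The reason codes keep the numeric values they already had under iota (including the gap at 7); writing them out simply mirrors RFC 5280. A small sketch of matching a reason value, e.g. Response.RevocationReason, against the exported constants:

package main

import (
	"fmt"

	"golang.org/x/crypto/ocsp"
)

// describeReason maps an RFC 5280 CRLReason code to a human-readable label.
func describeReason(reason int) string {
	switch reason {
	case ocsp.KeyCompromise:
		return "key compromise"
	case ocsp.CACompromise:
		return "CA compromise"
	case ocsp.CessationOfOperation:
		return "cessation of operation"
	default:
		return fmt.Sprintf("reason code %d", reason)
	}
}

func main() {
	fmt.Println(ocsp.KeyCompromise, ocsp.RemoveFromCRL) // 1 8 (note the unused value 7)
	fmt.Println(describeReason(ocsp.KeyCompromise))
}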
// Request represents an OCSP request. See RFC 6960.
@ -659,7 +659,7 @@ func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte
//
// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
//
// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt,
// The template is used to populate the SerialNumber, Status, RevokedAt,
// RevocationReason, ThisUpdate, and NextUpdate fields.
//
// If template.IssuerHash is not set, SHA1 will be used.

View File

@ -5,6 +5,8 @@
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must

View File

@ -789,6 +789,12 @@ func (m *Message) Unpack(msg []byte) error {
// Pack packs a full Message.
func (m *Message) Pack() ([]byte, error) {
return m.AppendPack(make([]byte, 0, packStartingCap))
}
// AppendPack is like Pack but appends the full Message to b and returns the
// extended buffer.
func (m *Message) AppendPack(b []byte) ([]byte, error) {
// Validate the lengths. It is very unlikely that anyone will try to
// pack more than 65535 of any particular type, but it is possible and
// we should fail gracefully.
@ -813,9 +819,7 @@ func (m *Message) Pack() ([]byte, error) {
h.authorities = uint16(len(m.Authorities))
h.additionals = uint16(len(m.Additionals))
msg := make([]byte, 0, packStartingCap)
msg = h.pack(msg)
msg := h.pack(b)
// RFC 1035 allows (but does not require) compression for packing. RFC
// 1035 requires unpacking implementations to support compression, so

View File

@ -852,6 +852,31 @@ func smallTestMsg() Message {
}
}
func BenchmarkPack(b *testing.B) {
msg := largeTestMsg()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
if _, err := msg.Pack(); err != nil {
b.Fatal(err)
}
}
}
func BenchmarkAppendPack(b *testing.B) {
msg := largeTestMsg()
buf := make([]byte, 0, packStartingCap)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
if _, err := msg.AppendPack(buf[:0]); err != nil {
b.Fatal(err)
}
}
}
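Beyond the benchmark above, AppendPack is the way to amortize allocations by packing into one reusable buffer. A minimal sketch; the message contents are illustrative:

package main

import (
	"fmt"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	msg := dnsmessage.Message{
		Header: dnsmessage.Header{ID: 1, RecursionDesired: true},
		Questions: []dnsmessage.Question{{
			Name:  dnsmessage.MustNewName("example.com."),
			Type:  dnsmessage.TypeA,
			Class: dnsmessage.ClassINET,
		}},
	}

	buf := make([]byte, 0, 512)
	for i := 0; i < 3; i++ {
		out, err := msg.AppendPack(buf[:0]) // reuses buf's backing array each time
		if err != nil {
			panic(err)
		}
		fmt.Println(len(out))
	}
}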
func largeTestMsg() Message {
name := mustNewName("foo.bar.example.com.")
return Message{

View File

@ -853,8 +853,13 @@ func (sc *serverConn) serve() {
}
}
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
return
// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
// with no error code (graceful shutdown), don't start the timer until
// all open streams have been completed.
sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
sc.shutDownIn(goAwayTimeout)
}
}
}
@ -1218,30 +1223,31 @@ func (sc *serverConn) startGracefulShutdown() {
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
// After sending GOAWAY, the connection will close after goAwayTimeout.
// If we close the connection immediately after sending GOAWAY, there may
// be unsent data in our kernel receive buffer, which will cause the kernel
// to send a TCP RST on close() instead of a FIN. This RST will abort the
// connection immediately, whether or not the client had received the GOAWAY.
//
// Ideally we should delay for at least 1 RTT + epsilon so the client has
// a chance to read the GOAWAY and stop sending messages. Measuring RTT
// is hard, so we approximate with 1 second. See golang.org/issue/18701.
//
// This is a var so it can be shorter in tests, where all requests use the
// loopback interface making the expected RTT very small.
//
// TODO: configurable?
var goAwayTimeout = 1 * time.Second
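For orientation, the graceful path guarded by goAwayTimeout is normally reached by shutting the HTTP server down gracefully. The sketch below is hedged: whether http.Server.Shutdown propagates to an http2.Server wired in via ConfigureServer depends on the Go and x/net versions in play, and the certificate paths are placeholders:

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}

	go func() {
		// placeholder cert/key paths — replace with real ones
		if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	time.Sleep(time.Second) // stand-in for "run until a signal arrives"

	// Graceful shutdown: stop accepting new streams, send GOAWAY, and let
	// open streams finish before the connection is closed.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}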
func (sc *serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(ErrCodeNo, 0)
sc.goAway(ErrCodeNo)
}
func (sc *serverConn) goAway(code ErrCode) {
sc.serveG.check()
var forceCloseIn time.Duration
if code != ErrCodeNo {
forceCloseIn = 250 * time.Millisecond
} else {
// TODO: configurable
forceCloseIn = 1 * time.Second
}
sc.goAwayIn(code, forceCloseIn)
}
func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
sc.serveG.check()
if sc.inGoAway {
return
}
if forceCloseIn != 0 {
sc.shutDownIn(forceCloseIn)
}
sc.inGoAway = true
sc.needToSendGoAway = true
sc.goAwayCode = code

View File

@ -68,6 +68,7 @@ type serverTester struct {
func init() {
testHookOnPanicMu = new(sync.Mutex)
goAwayTimeout = 25 * time.Millisecond
}
func resetHooks() {

View File

@ -274,6 +274,13 @@ func (cs *clientStream) checkResetOrDone() error {
}
}
func (cs *clientStream) getStartedWrite() bool {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
return cs.startedWrite
}
func (cs *clientStream) abortRequestBodyWrite(err error) {
if err == nil {
panic("nil error")
@ -349,14 +356,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
return nil, err
}
traceGotConn(req, cc)
res, err := cc.RoundTrip(req)
res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
if err != nil && retry <= 6 {
afterBodyWrite := false
if e, ok := err.(afterReqBodyWriteError); ok {
err = e
afterBodyWrite = true
}
if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil {
if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
continue
@ -394,16 +396,6 @@ var (
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip.
// It is used to signal that err happened after part of Request.Body was sent to the server.
type afterReqBodyWriteError struct {
err error
}
func (e afterReqBodyWriteError) Error() string {
return e.err.Error() + "; some request body already written"
}
// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
@ -752,8 +744,13 @@ func actualContentLength(req *http.Request) int64 {
}
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
resp, _, err := cc.roundTrip(req)
return resp, err
}
func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
if err := checkConnHeaders(req); err != nil {
return nil, err
return nil, false, err
}
if cc.idleTimer != nil {
cc.idleTimer.Stop()
@ -761,14 +758,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
trailers, err := commaSeparatedTrailers(req)
if err != nil {
return nil, err
return nil, false, err
}
hasTrailers := trailers != ""
cc.mu.Lock()
if err := cc.awaitOpenSlotForRequest(req); err != nil {
cc.mu.Unlock()
return nil, err
return nil, false, err
}
body := req.Body
@ -802,7 +799,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.mu.Unlock()
return nil, err
return nil, false, err
}
cs := cc.newStream()
@ -828,7 +825,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// Don't bother sending a RST_STREAM (our write already failed;
// no need to keep writing)
traceWroteRequest(cs.trace, werr)
return nil, werr
return nil, false, werr
}
var respHeaderTimer <-chan time.Time
@ -847,7 +844,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
bodyWritten := false
ctx := reqContext(req)
handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc abort any
@ -863,18 +860,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
cc.mu.Lock()
afterBodyWrite := cs.startedWrite
cc.mu.Unlock()
cc.forgetStreamID(cs.ID)
if afterBodyWrite {
return nil, afterReqBodyWriteError{re.err}
}
return nil, re.err
return nil, cs.getStartedWrite(), re.err
}
res.Request = req
res.TLS = cc.tlsState
return res, nil
return res, false, nil
}
for {
@ -889,7 +880,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, errTimeout
return nil, cs.getStartedWrite(), errTimeout
case <-ctx.Done():
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
@ -898,7 +889,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, ctx.Err()
return nil, cs.getStartedWrite(), ctx.Err()
case <-req.Cancel:
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
@ -907,12 +898,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
cc.forgetStreamID(cs.ID)
return nil, errRequestCanceled
return nil, cs.getStartedWrite(), errRequestCanceled
case <-cs.peerReset:
// processResetStream already removed the
// stream from the streams map; no need for
// forgetStreamID.
return nil, cs.resetErr
return nil, cs.getStartedWrite(), cs.resetErr
case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
@ -921,7 +912,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
default:
}
if err != nil {
return nil, err
return nil, cs.getStartedWrite(), err
}
bodyWritten = true
if d := cc.responseHeaderTimeout(); d != 0 {
@ -1536,7 +1527,17 @@ func (rl *clientConnReadLoop) run() error {
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
if f.StreamEnded() {
// Issue 20521: If the stream has ended, streamByID() causes
// clientStream.done to be closed, which causes the request's bodyWriter
// to be closed with an errStreamClosed, which may be received by
// clientConn.RoundTrip before the result of processing these headers.
// Deferring stream closure allows the header processing to occur first.
// clientConn.RoundTrip may still receive the bodyWriter error first, but
// the fix for issue 16102 prioritises any response.
defer cc.streamByID(f.StreamID, true)
}
cs := cc.streamByID(f.StreamID, false)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
@ -1841,6 +1842,14 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
return nil
}
if f.Length > 0 {
if cs.req.Method == "HEAD" && len(data) > 0 {
cc.logf("protocol error: received DATA on a HEAD request")
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
Code: ErrCodeProtocol,
})
return nil
}
// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {
@ -1902,11 +1911,11 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
err = io.EOF
code = cs.copyTrailers
}
cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID)
if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}
cs.bufPipe.closeWithErrorAndCode(err, code)
delete(rl.activeRes, cs.ID)
select {
case cs.resc <- resAndError{err: err}:
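The public RoundTrip signature is unchanged by this refactor; the three-value roundTrip and getStartedWrite stay internal and only control whether RoundTripOpt retries a request whose body was partially written. A short client-side sketch of direct Transport use, similar to the tests below (localhost address and InsecureSkipVerify are for local experiments only):

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
	defer tr.CloseIdleConnections()

	req, err := http.NewRequest("GET", "https://localhost:8443/", nil)
	if err != nil {
		log.Fatal(err)
	}
	// RoundTrip still returns (*http.Response, error); the extra
	// "body already written" signal never reaches the caller.
	res, err := tr.RoundTrip(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(res.Status, len(body))
}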

View File

@ -2290,6 +2290,60 @@ func TestTransportReadHeadResponse(t *testing.T) {
ct.run()
}
func TestTransportReadHeadResponseWithBody(t *testing.T) {
response := "redirecting to /elsewhere"
ct := newClientTester(t)
clientDone := make(chan struct{})
ct.client = func() error {
defer close(clientDone)
req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
res, err := ct.tr.RoundTrip(req)
if err != nil {
return err
}
if res.ContentLength != int64(len(response)) {
return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response))
}
slurp, err := ioutil.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("ReadAll: %v", err)
}
if len(slurp) > 0 {
return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
}
return nil
}
ct.server = func() error {
ct.greet()
for {
f, err := ct.fr.ReadFrame()
if err != nil {
t.Logf("ReadFrame: %v", err)
return nil
}
hf, ok := f.(*HeadersFrame)
if !ok {
continue
}
var buf bytes.Buffer
enc := hpack.NewEncoder(&buf)
enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))})
ct.fr.WriteHeaders(HeadersFrameParam{
StreamID: hf.StreamID,
EndHeaders: true,
EndStream: false,
BlockFragment: buf.Bytes(),
})
ct.fr.WriteData(hf.StreamID, true, []byte(response))
<-clientDone
return nil
}
}
ct.run()
}
type neverEnding byte
func (b neverEnding) Read(p []byte) (int, error) {
@ -3043,6 +3097,34 @@ func TestTransportCancelDataResponseRace(t *testing.T) {
}
}
// Issue 21316: It should be safe to reuse an http.Request after the
// request has completed.
func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
io.WriteString(w, "body")
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
req, _ := http.NewRequest("GET", st.ts.URL, nil)
resp, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil {
t.Fatalf("error reading response body: %v", err)
}
if err := resp.Body.Close(); err != nil {
t.Fatalf("error closing response body: %v", err)
}
// This access of req.Header should not race with code in the transport.
req.Header = http.Header{}
}
func TestTransportRetryAfterGOAWAY(t *testing.T) {
var dialer struct {
sync.Mutex
@ -3678,6 +3760,34 @@ func benchSimpleRoundTrip(b *testing.B, nHeaders int) {
}
}
type infiniteReader struct{}
func (r infiniteReader) Read(b []byte) (int, error) {
return len(b), nil
}
// Issue 20521: it is not an error to receive a response and end stream
// from the server without the body being consumed.
func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}, optOnlyServer)
defer st.Close()
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
// The request body needs to be big enough to trigger flow control.
req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{})
res, err := tr.RoundTrip(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != http.StatusOK {
t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK)
}
}
func BenchmarkClientRequestHeaders(b *testing.B) {
b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) })
b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) })

View File

@ -10,7 +10,6 @@ import (
"log"
"net/http"
"net/url"
"time"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/lex/httplex"
@ -90,11 +89,7 @@ type writeGoAway struct {
func (p *writeGoAway) writeFrame(ctx writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
if p.code != 0 {
ctx.Flush() // ignore error: we're hanging up on them anyway
time.Sleep(50 * time.Millisecond)
ctx.CloseConn()
}
ctx.Flush() // ignore error: we're hanging up on them anyway
return err
}

124
vendor/golang.org/x/net/idna/idna.go generated vendored
View File

@ -21,6 +21,7 @@ import (
"unicode/utf8"
"golang.org/x/text/secure/bidirule"
"golang.org/x/text/unicode/bidi"
"golang.org/x/text/unicode/norm"
)
@ -68,7 +69,7 @@ func VerifyDNSLength(verify bool) Option {
}
// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002, are removed as well.
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
//
// This is the behavior suggested by the UTS #46 and is adopted by some
// browsers.
@ -92,7 +93,7 @@ func ValidateLabels(enable bool) Option {
}
}
// StrictDomainName limits the set of permissable ASCII characters to those
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
@ -142,7 +143,6 @@ func MapForLookup() Option {
o.mapping = validateAndMap
StrictDomainName(true)(o)
ValidateLabels(true)(o)
RemoveLeadingDots(true)(o)
}
}
@ -160,7 +160,7 @@ type options struct {
// mapping implements a validation and mapping step as defined in RFC 5895
// or UTS 46, tailored to, for example, domain registration or lookup.
mapping func(p *Profile, s string) (string, error)
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
// bidirule, if specified, checks whether s conforms to the Bidi Rule
// defined in RFC 5893.
@ -251,23 +251,21 @@ var (
punycode = &Profile{}
lookup = &Profile{options{
transitional: true,
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
transitional: true,
useSTD3Rules: true,
validateLabels: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
display = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
useSTD3Rules: true,
validateLabels: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
registration = &Profile{options{
useSTD3Rules: true,
@ -302,14 +300,16 @@ func (e runeError) Error() string {
// see http://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
var isBidi bool
if p.mapping != nil {
s, err = p.mapping(p, s)
s, isBidi, err = p.mapping(p, s)
}
// Remove leading empty labels.
if p.removeLeadingDots {
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
}
}
// TODO: allow for a quick check of the tables data.
// It seems like we should only create this error on ToASCII, but the
// UTS 46 conformance tests suggests we should always check this.
if err == nil && p.verifyDNSLength && s == "" {
@ -335,6 +335,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
// Spec says keep the old label.
continue
}
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
labels.set(u)
if err == nil && p.validateLabels {
err = p.fromPuny(p, u)
@ -349,6 +350,14 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
err = p.validateLabel(label)
}
}
if isBidi && p.bidirule != nil && err == nil {
for labels.reset(); !labels.done(); labels.next() {
if !p.bidirule(labels.label()) {
err = &labelError{s, "B"}
break
}
}
}
if toASCII {
for labels.reset(); !labels.done(); labels.next() {
label := labels.label()
@ -380,16 +389,26 @@ func (p *Profile) process(s string, toASCII bool) (string, error) {
return s, err
}
func normalize(p *Profile, s string) (string, error) {
return norm.NFC.String(s), nil
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
// TODO: consider first doing a quick check to see if any of these checks
// need to be done. This will make it slower in the general case, but
// faster in the common case.
mapped = norm.NFC.String(s)
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
return mapped, isBidi, nil
}
func validateRegistration(p *Profile, s string) (string, error) {
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
// TODO: filter need for normalization in loop below.
if !norm.NFC.IsNormalString(s) {
return s, &labelError{s, "V1"}
return s, false, &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if sz == 0 {
return s, bidi, runeError(utf8.RuneError)
}
bidi = bidi || info(v).isBidi(s[i:])
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
// TODO: handle the NV8 defined in the Unicode idna data set to allow
@ -397,21 +416,50 @@ func validateRegistration(p *Profile, s string) (string, error) {
case valid, deviation:
case disallowed, mapped, unknown, ignored:
r, _ := utf8.DecodeRuneInString(s[i:])
return s, runeError(r)
return s, bidi, runeError(r)
}
i += sz
}
return s, nil
return s, bidi, nil
}
func validateAndMap(p *Profile, s string) (string, error) {
func (c info) isBidi(s string) bool {
if !c.isMapped() {
return c&attributesMask == rtl
}
// TODO: also store bidi info for mapped data. This is possible, but a bit
// cumbersome and not for the common case.
p, _ := bidi.LookupString(s)
switch p.Class() {
case bidi.R, bidi.AL, bidi.AN:
return true
}
return false
}
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
var (
err error
b []byte
k int
b []byte
k int
)
// combinedInfoBits contains the or-ed bits of all runes. We use this
// to derive the mayNeedNorm bit later. This may trigger normalization
// overeagerly, but it will not do so in the common case. The end result
// is another 10% saving on BenchmarkProfile for the common case.
var combinedInfoBits info
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if sz == 0 {
b = append(b, s[k:i]...)
b = append(b, "\ufffd"...)
k = len(s)
if err == nil {
err = runeError(utf8.RuneError)
}
break
}
combinedInfoBits |= info(v)
bidi = bidi || info(v).isBidi(s[i:])
start := i
i += sz
// Copy bytes not copied so far.
@ -438,7 +486,9 @@ func validateAndMap(p *Profile, s string) (string, error) {
}
if k == 0 {
// No changes so far.
s = norm.NFC.String(s)
if combinedInfoBits&mayNeedNorm != 0 {
s = norm.NFC.String(s)
}
} else {
b = append(b, s[k:]...)
if norm.NFC.QuickSpan(b) != len(b) {
@ -447,7 +497,7 @@ func validateAndMap(p *Profile, s string) (string, error) {
// TODO: the punycode converters require strings as input.
s = string(b)
}
return s, err
return s, bidi, err
}
// A labelIter allows iterating over domain name labels.
@ -542,8 +592,13 @@ func validateFromPunycode(p *Profile, s string) error {
if !norm.NFC.IsNormalString(s) {
return &labelError{s, "V1"}
}
// TODO: detect whether string may have to be normalized in the following
// loop.
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if sz == 0 {
return runeError(utf8.RuneError)
}
if c := p.simplify(info(v).category()); c != valid && c != deviation {
return &labelError{s, "V6"}
}
@ -616,16 +671,13 @@ var joinStates = [][numJoinTypes]joinState{
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) error {
func (p *Profile) validateLabel(s string) (err error) {
if s == "" {
if p.verifyDNSLength {
return &labelError{s, "A4"}
}
return nil
}
if p.bidirule != nil && !p.bidirule(s) {
return &labelError{s, "B"}
}
if !p.validateLabels {
return nil
}
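These idna changes move the Bidi Rule check out of validateLabel and run it only when the input actually contains right-to-left runes. The exported entry points are unchanged; a small usage sketch against the package's public Lookup profile (the domain is illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup applies the MapForLookup mapping before punycoding.
	ascii, err := idna.Lookup.ToASCII("bücher.example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ascii) // xn--bcher-kva.example

	uni, err := idna.Lookup.ToUnicode("xn--bcher-kva.example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uni) // bücher.example
}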

4396
vendor/golang.org/x/net/idna/tables.go generated vendored

File diff suppressed because it is too large

View File

@ -26,9 +26,9 @@ package idna
// 15..3 index into xor or mapping table
// }
// } else {
// 15..13 unused
// 12 modifier (including virama)
// 11 virama modifier
// 15..14 unused
// 13 mayNeedNorm
// 12..11 attributes
// 10..8 joining type
// 7..3 category type
// }
@ -49,15 +49,20 @@ const (
joinShift = 8
joinMask = 0x07
viramaModifier = 0x0800
// Attributes
attributesMask = 0x1800
viramaModifier = 0x1800
modifier = 0x1000
rtl = 0x0800
mayNeedNorm = 0x2000
)
// A category corresponds to a category defined in the IDNA mapping table.
type category uint16
const (
unknown category = 0 // not defined currently in unicode.
unknown category = 0 // not currently defined in unicode.
mapped category = 1
disallowedSTD3Mapped category = 2
deviation category = 3
@ -110,5 +115,5 @@ func (c info) isModifier() bool {
}
func (c info) isViramaModifier() bool {
return c&(viramaModifier|catSmallMask) == viramaModifier
return c&(attributesMask|catSmallMask) == viramaModifier
}

View File

@ -10,6 +10,10 @@ package socket
import "unsafe"
func (v *iovec) set(b []byte) {
l := len(b)
if l == 0 {
return
}
v.Base = (*byte)(unsafe.Pointer(&b[0]))
v.Len = uint32(len(b))
v.Len = uint32(l)
}
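The early return matters because taking the address of the first element of a zero-length slice panics at run time; bailing out leaves Base and Len zeroed instead. A tiny stand-alone illustration of the failure mode the guard avoids (not library code):

package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	var b []byte
	_ = &b[0] // panics: index out of range [0] with length 0
}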

View File

@ -10,6 +10,10 @@ package socket
import "unsafe"
func (v *iovec) set(b []byte) {
l := len(b)
if l == 0 {
return
}
v.Base = (*byte)(unsafe.Pointer(&b[0]))
v.Len = uint64(len(b))
v.Len = uint64(l)
}

View File

@ -10,6 +10,10 @@ package socket
import "unsafe"
func (v *iovec) set(b []byte) {
l := len(b)
if l == 0 {
return
}
v.Base = (*int8)(unsafe.Pointer(&b[0]))
v.Len = uint64(len(b))
v.Len = uint64(l)
}

View File

@ -7,6 +7,10 @@
package socket
func (h *msghdr) setIov(vs []iovec) {
l := len(vs)
if l == 0 {
return
}
h.Iov = &vs[0]
h.Iovlen = int32(len(vs))
h.Iovlen = int32(l)
}

View File

@ -10,8 +10,12 @@ package socket
import "unsafe"
func (h *msghdr) setIov(vs []iovec) {
l := len(vs)
if l == 0 {
return
}
h.Iov = &vs[0]
h.Iovlen = uint32(len(vs))
h.Iovlen = uint32(l)
}
func (h *msghdr) setControl(b []byte) {

View File

@ -10,8 +10,12 @@ package socket
import "unsafe"
func (h *msghdr) setIov(vs []iovec) {
l := len(vs)
if l == 0 {
return
}
h.Iov = &vs[0]
h.Iovlen = uint64(len(vs))
h.Iovlen = uint64(l)
}
func (h *msghdr) setControl(b []byte) {

View File

@ -5,6 +5,10 @@
package socket
func (h *msghdr) setIov(vs []iovec) {
l := len(vs)
if l == 0 {
return
}
h.Iov = &vs[0]
h.Iovlen = uint32(len(vs))
h.Iovlen = uint32(l)
}

View File

@ -13,8 +13,10 @@ func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {
for i := range vs {
vs[i].set(bs[i])
}
h.Iov = &vs[0]
h.Iovlen = int32(len(vs))
if len(vs) > 0 {
h.Iov = &vs[0]
h.Iovlen = int32(len(vs))
}
if len(oob) > 0 {
h.Accrights = (*int8)(unsafe.Pointer(&oob[0]))
h.Accrightslen = int32(len(oob))

View File

@ -119,81 +119,84 @@ func TestUDP(t *testing.T) {
t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err)
}
defer c.Close()
cc, err := socket.NewConn(c.(net.Conn))
if err != nil {
t.Fatal(err)
}
t.Run("Message", func(t *testing.T) {
testUDPMessage(t, c.(net.Conn))
data := []byte("HELLO-R-U-THERE")
wm := socket.Message{
Buffers: bytes.SplitAfter(data, []byte("-")),
Addr: c.LocalAddr(),
}
if err := cc.SendMsg(&wm, 0); err != nil {
t.Fatal(err)
}
b := make([]byte, 32)
rm := socket.Message{
Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]},
}
if err := cc.RecvMsg(&rm, 0); err != nil {
t.Fatal(err)
}
if !bytes.Equal(b[:rm.N], data) {
t.Fatalf("got %#v; want %#v", b[:rm.N], data)
}
})
switch runtime.GOOS {
case "linux":
case "android", "linux":
t.Run("Messages", func(t *testing.T) {
testUDPMessages(t, c.(net.Conn))
data := []byte("HELLO-R-U-THERE")
wmbs := bytes.SplitAfter(data, []byte("-"))
wms := []socket.Message{
{Buffers: wmbs[:1], Addr: c.LocalAddr()},
{Buffers: wmbs[1:], Addr: c.LocalAddr()},
}
n, err := cc.SendMsgs(wms, 0)
if err != nil {
t.Fatal(err)
}
if n != len(wms) {
t.Fatalf("got %d; want %d", n, len(wms))
}
b := make([]byte, 32)
rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}}
rms := []socket.Message{
{Buffers: rmbs[0]},
{Buffers: rmbs[1]},
}
n, err = cc.RecvMsgs(rms, 0)
if err != nil {
t.Fatal(err)
}
if n != len(rms) {
t.Fatalf("got %d; want %d", n, len(rms))
}
nn := 0
for i := 0; i < n; i++ {
nn += rms[i].N
}
if !bytes.Equal(b[:nn], data) {
t.Fatalf("got %#v; want %#v", b[:nn], data)
}
})
}
}
func testUDPMessage(t *testing.T, c net.Conn) {
cc, err := socket.NewConn(c)
if err != nil {
t.Fatal(err)
}
data := []byte("HELLO-R-U-THERE")
// The behavior when transmitting a zero-byte payload depends on
// each platform's implementation. Some may transmit only the
// protocol header and options, others may transmit nothing.
// We test only that SendMsg and SendMsgs will not crash with
// empty buffers.
wm := socket.Message{
Buffers: bytes.SplitAfter(data, []byte("-")),
Buffers: [][]byte{{}},
Addr: c.LocalAddr(),
}
if err := cc.SendMsg(&wm, 0); err != nil {
t.Fatal(err)
}
b := make([]byte, 32)
rm := socket.Message{
Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]},
}
if err := cc.RecvMsg(&rm, 0); err != nil {
t.Fatal(err)
}
if !bytes.Equal(b[:rm.N], data) {
t.Fatalf("got %#v; want %#v", b[:rm.N], data)
}
}
func testUDPMessages(t *testing.T, c net.Conn) {
cc, err := socket.NewConn(c)
if err != nil {
t.Fatal(err)
}
data := []byte("HELLO-R-U-THERE")
wmbs := bytes.SplitAfter(data, []byte("-"))
cc.SendMsg(&wm, 0)
wms := []socket.Message{
{Buffers: wmbs[:1], Addr: c.LocalAddr()},
{Buffers: wmbs[1:], Addr: c.LocalAddr()},
}
n, err := cc.SendMsgs(wms, 0)
if err != nil {
t.Fatal(err)
}
if n != len(wms) {
t.Fatalf("got %d; want %d", n, len(wms))
}
b := make([]byte, 32)
rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}}
rms := []socket.Message{
{Buffers: rmbs[0]},
{Buffers: rmbs[1]},
}
n, err = cc.RecvMsgs(rms, 0)
if err != nil {
t.Fatal(err)
}
if n != len(rms) {
t.Fatalf("got %d; want %d", n, len(rms))
}
nn := 0
for i := 0; i < n; i++ {
nn += rms[i].N
}
if !bytes.Equal(b[:nn], data) {
t.Fatalf("got %#v; want %#v", b[:nn], data)
{Buffers: [][]byte{{}}, Addr: c.LocalAddr()},
}
cc.SendMsgs(wms, 0)
}
func BenchmarkUDP(b *testing.B) {
@ -230,7 +233,7 @@ func BenchmarkUDP(b *testing.B) {
}
})
switch runtime.GOOS {
case "linux":
case "android", "linux":
wms := make([]socket.Message, M)
for i := range wms {
wms[i].Buffers = [][]byte{data}

View File

@ -34,7 +34,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte {
if ip4 := ip.To4(); ip4 != nil {
b := make([]byte, sizeofSockaddrInet)
switch runtime.GOOS {
case "linux", "solaris", "windows":
case "android", "linux", "solaris", "windows":
NativeEndian.PutUint16(b[:2], uint16(sysAF_INET))
default:
b[0] = sizeofSockaddrInet
@ -47,7 +47,7 @@ func marshalSockaddr(ip net.IP, port int, zone string) []byte {
if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil {
b := make([]byte, sizeofSockaddrInet6)
switch runtime.GOOS {
case "linux", "solaris", "windows":
case "android", "linux", "solaris", "windows":
NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6))
default:
b[0] = sizeofSockaddrInet6
@ -69,7 +69,7 @@ func parseInetAddr(b []byte, network string) (net.Addr, error) {
}
var af int
switch runtime.GOOS {
case "linux", "solaris", "windows":
case "android", "linux", "solaris", "windows":
af = int(NativeEndian.Uint16(b[:2]))
default:
af = int(b[1])

View File

@ -61,7 +61,7 @@ func (p *PerHost) dialerForRequest(host string) Dialer {
return p.bypass
}
if host == zone[1:] {
// For a zone "example.com", we match "example.com"
// For a zone ".example.com", we match "example.com"
// too.
return p.bypass
}

View File

@ -12,7 +12,7 @@ import (
)
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
// with an optional username and password. See RFC 1928 and 1929.
// with an optional username and password. See RFC 1928 and RFC 1929.
func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) {
s := &socks5{
network: network,
@ -60,7 +60,7 @@ var socks5Errors = []string{
"address type not supported",
}
// Dial connects to the address addr on the network net via the SOCKS5 proxy.
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *socks5) Dial(network, addr string) (net.Conn, error) {
switch network {
case "tcp", "tcp6", "tcp4":

View File

@ -6,7 +6,10 @@
package route
import "syscall"
import (
"runtime"
"syscall"
)
func (m *RouteMessage) marshal() ([]byte, error) {
w, ok := wireFormats[m.Type]
@ -14,6 +17,11 @@ func (m *RouteMessage) marshal() ([]byte, error) {
return nil, errUnsupportedMessage
}
l := w.bodyOff + addrsSpace(m.Addrs)
if runtime.GOOS == "darwin" {
// Fix stray pointer writes on macOS.
// See golang.org/issue/22456.
l += 1024
}
b := make([]byte, l)
nativeEndian.PutUint16(b[:2], uint16(l))
if m.Version == 0 {

View File

@ -1,4 +1,4 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -1,4 +1,4 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

View File

@ -5,10 +5,10 @@
// +build plan9
// Package plan9 contains an interface to the low-level operating system
// primitives. OS details vary depending on the underlying system, and
// primitives. OS details vary depending on the underlying system, and
// by default, godoc will display the OS-specific documentation for the current
// system. If you want godoc to display documentation for another
// system, set $GOOS and $GOARCH to the desired system. For example, if
// system. If you want godoc to display documentation for another
// system, set $GOOS and $GOARCH to the desired system. For example, if
// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
// to freebsd and $GOARCH to arm.
// The primary use of this package is inside other packages that provide a more

View File

@ -1,4 +1,4 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

Some files were not shown because too many files have changed in this diff