
Updated packages, new fixed Bolt version

Former-commit-id: da2423082a10392910bbafcd7683c1c93d7aa556 [formerly 708ea8f31cde627a1b64a5a49c80b5faf571740e] [formerly f127dbccd73fbba2526cb7763c1fafccc926596c [formerly 4558c48cdf]]
Former-commit-id: c9d4a6c430c6c5f731e755f270bdf66f89eaa646 [formerly 291db3593d93e63d7234d85f49773ec34c867f1e]
Former-commit-id: d1a21b94fe3760e037c7c02a56ca933123a8cae7
Zack Scholl 2017-02-02 14:32:55 -05:00
parent 25e76fb242
commit 9ad5907b48
44 changed files with 637 additions and 1120 deletions

Godeps/Godeps.json

@ -1,7 +1,7 @@
{
"ImportPath": "github.com/schollz/cowyo",
"GoVersion": "go1.7",
"GodepVersion": "v78",
"GodepVersion": "v79",
"Deps": [
{
"ImportPath": "github.com/boj/redistore",
@ -10,8 +10,8 @@
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.2.0",
"Rev": "c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631"
"Comment": "v1.3.0-58-ge9cf4fa",
"Rev": "e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd"
},
{
"ImportPath": "github.com/garyburd/redigo/internal",
@ -29,18 +29,18 @@
},
{
"ImportPath": "github.com/gin-gonic/gin",
"Comment": "v1.1.4",
"Rev": "e2212d40c62a98b388a5eb48ecbdcf88534688ba"
"Comment": "v1.1-65-g049da60",
"Rev": "049da60f5114a479f04a668f3d8a6f3b44fe6658"
},
{
"ImportPath": "github.com/gin-gonic/gin/binding",
"Comment": "v1.1.4",
"Rev": "e2212d40c62a98b388a5eb48ecbdcf88534688ba"
"Comment": "v1.1-65-g049da60",
"Rev": "049da60f5114a479f04a668f3d8a6f3b44fe6658"
},
{
"ImportPath": "github.com/gin-gonic/gin/render",
"Comment": "v1.1.4",
"Rev": "e2212d40c62a98b388a5eb48ecbdcf88534688ba"
"Comment": "v1.1-65-g049da60",
"Rev": "049da60f5114a479f04a668f3d8a6f3b44fe6658"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
@ -63,17 +63,13 @@
},
{
"ImportPath": "github.com/gorilla/websocket",
"Comment": "v1.1.0-19-gc36f2fe",
"Rev": "c36f2fe5c330f0ac404b616b96c438b8616b1aaf"
"Comment": "v1.1.0-6-gadf16b3",
"Rev": "adf16b31781325cbd41085c5be901d95b4d1f33d"
},
{
"ImportPath": "github.com/jcelliott/lumber",
"Rev": "dd349441af25132d146d7095c6693a15431fc9b1"
},
{
"ImportPath": "github.com/manucorporat/sse",
"Rev": "ee05b128a739a0fb76c7ebd3ae4810c1de808d6d"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "30a891c33c7cde7b02a981314b4228ec99380cca"
@ -97,56 +93,56 @@
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/armor",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/elgamal",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/errors",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/packet",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/crypto/openpgp/s2k",
"Rev": "2f8be38b9a7533b8763d48273737ff6e90428a96"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
"Rev": "2e74c773682f59dc50a56475f7918dd8fa6dcaf8"
},
{
"ImportPath": "golang.org/x/net/html",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
"Rev": "007e530097ad7f954752df63046b4036f98ba6a6"
},
{
"ImportPath": "golang.org/x/net/html/atom",
"Rev": "f2499483f923065a842d38eb4c7f1927e6fc6e6d"
"Rev": "007e530097ad7f954752df63046b4036f98ba6a6"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "d75a52659825e75fff6158388dddc6a5b04f9ba5"
},
{
"ImportPath": "gopkg.in/gin-contrib/sse.v0",
"Rev": "22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae"
},
{
"ImportPath": "gopkg.in/go-playground/validator.v8",
"Comment": "v8.18.1",


@ -1,4 +1,4 @@
Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
@ -15,11 +15,11 @@ and setting values. That's it.
## Project Status
Bolt is stable and the API is fixed. Full unit test coverage and randomized
black box testing are used to ensure database consistency and thread safety.
Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day.
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
## Table of Contents
@ -209,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions.
You can use the `Tx.Begin()` function directly but **please** be sure to close
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.
```go
@ -313,7 +313,7 @@ func (s *Store) CreateUser(u *User) error {
// Generate ID for the user.
// This returns an error only if the Tx is closed or not writeable.
// That can't happen in an Update() call so I ignore the error check.
id, _ = b.NextSequence()
id, _ := b.NextSequence()
u.ID = int(id)
// Marshal user data into bytes.
@ -395,7 +395,7 @@ db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234")
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
@ -427,6 +427,8 @@ db.View(func(tx *bolt.Tx) error {
})
```
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
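As a quick illustration of that point, here is a minimal sketch using only Go's standard `time` package: `RFC3339Nano` trims trailing fractional zeros, so keys vary in length and no longer sort chronologically, while a fixed-width layout (an illustrative choice, not something Bolt mandates) keeps byte order and time order in agreement.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Date(2017, 2, 2, 14, 32, 55, 0, time.UTC)         // whole second
	t2 := time.Date(2017, 2, 2, 14, 32, 55, 500000000, time.UTC) // half a second later

	// RFC3339Nano drops trailing fractional zeros, so t1 renders without a
	// fraction at all and compares *after* t2 as bytes ('Z' > '.').
	fmt.Println(t1.Format(time.RFC3339Nano)) // 2017-02-02T14:32:55Z
	fmt.Println(t2.Format(time.RFC3339Nano)) // 2017-02-02T14:32:55.5Z

	// A fixed-width fractional layout keeps every key the same length,
	// so byte order matches chronological order.
	const sortable = "2006-01-02T15:04:05.000000000Z07:00"
	fmt.Println(t1.Format(sortable)) // 2017-02-02T14:32:55.000000000Z
	fmt.Println(t2.Format(sortable)) // 2017-02-02T14:32:55.500000000Z
}
```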
#### ForEach()
@ -437,7 +439,7 @@ all the keys in a bucket:
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
b.ForEach(func(k, v []byte) error {
fmt.Printf("key=%s, value=%s\n", k, v)
return nil
@ -446,6 +448,10 @@ db.View(func(tx *bolt.Tx) error {
})
```
Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.
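A minimal sketch of that copy pattern, assuming `db` is an open `*bolt.DB` and a bucket named `MyBucket` already exists:

```go
// collectKeys copies every key out of MyBucket so the copies remain
// valid after the read transaction has closed.
func collectKeys(db *bolt.DB) ([][]byte, error) {
	var keys [][]byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("MyBucket"))
		return b.ForEach(func(k, v []byte) error {
			kc := make([]byte, len(k))
			copy(kc, k) // k points into the mmap; copy before keeping it
			keys = append(keys, kc)
			return nil
		})
	})
	return keys, err
}
```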
### Nested buckets
@ -458,6 +464,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups
@ -555,7 +610,7 @@ if err != nil {
Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with a initializing
contstructor that takes in a filepath where the database file will be stored.
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS require extra permissions or cleanup from using this method.
```go
@ -617,7 +672,7 @@ Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
{
NSURL* URL= [NSURL fileURLWithPath: filePathString];
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
NSError *error = nil;
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
forKey: NSURLIsExcludedFromBackupKey error: &error];
@ -713,6 +768,9 @@ Here are a few things to note when evaluating and using Bolt:
can be reused by a new page or can be unmapped from virtual memory and you'll
see an `unexpected fault address` panic when accessing it.
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor
page utilization.
@ -805,6 +863,7 @@ them via pull request.
Below is a list of public, open source projects that use Bolt:
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
@ -823,7 +882,6 @@ Below is a list of public, open source projects that use Bolt:
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
@ -840,5 +898,18 @@ Below is a list of public, open source projects that use Bolt:
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
If you are using Bolt in a project please send a pull request to add it to the list.


@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -1,7 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}


@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false


@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
os.Remove(db.path + lockExt)
return err
}


@ -130,9 +130,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable {
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
@ -167,9 +175,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
@ -329,6 +336,28 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
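// Illustrative usage sketch (an assumption for this note, not part of
// bucket.go): Sequence and SetSequence are meant to be called inside a
// writable transaction, for example:
//
//	db.Update(func(tx *bolt.Tx) error {
//		b := tx.Bucket([]byte("MyBucket")) // assumes this bucket exists
//		if err := b.SetSequence(1000); err != nil {
//			return err
//		}
//		fmt.Println(b.Sequence()) // 1000
//		return nil
//	})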
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {

vendor/github.com/boltdb/bolt/db.go

@ -36,6 +36,9 @@ const (
DefaultAllocSize = 16 * 1024 * 1024
)
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()
// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@ -94,7 +97,7 @@ type DB struct {
path string
file *os.File
lockfile *os.File // windows only
dataref []byte // mmap'ed readonly, write throws SEGV
dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
datasz int
filesz int // current on disk file size
@ -107,6 +110,8 @@ type DB struct {
freelist *freelist
stats Stats
pagePool sync.Pool
batchMu sync.Mutex
batch *batch
@ -200,12 +205,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
if _, err := db.file.ReadAt(buf[:], 0); err == nil {
m := db.pageInBuffer(buf[:], 0).meta()
if err := m.validate(); err != nil {
return nil, err
// If we can't read the page size, we can assume it's the same
// as the OS -- since that's how the page size was chosen in the
// first place.
//
// If the first page is invalid and this OS uses a different
// page size than what the database was created with then we
// are out of luck and cannot access the database.
db.pageSize = os.Getpagesize()
} else {
db.pageSize = int(m.pageSize)
}
db.pageSize = int(m.pageSize)
}
}
// Initialize page pool.
db.pagePool = sync.Pool{
New: func() interface{} {
return make([]byte, db.pageSize)
},
}
// Memory map the data file.
if err := db.mmap(options.InitialMmapSize); err != nil {
_ = db.close()
@ -262,12 +282,13 @@ func (db *DB) mmap(minsz int) error {
db.meta0 = db.page(0).meta()
db.meta1 = db.page(1).meta()
// Validate the meta pages.
if err := db.meta0.validate(); err != nil {
return err
}
if err := db.meta1.validate(); err != nil {
return err
// Validate the meta pages. We only return an error if both meta pages fail
// validation, since meta0 failing validation means that it wasn't saved
// properly -- but we can recover using meta1. And vice-versa.
err0 := db.meta0.validate()
err1 := db.meta1.validate()
if err0 != nil && err1 != nil {
return err0
}
return nil
@ -339,6 +360,7 @@ func (db *DB) init() error {
m.root = bucket{root: 3}
m.pgid = 4
m.txid = txid(i)
m.checksum = m.sum64()
}
// Write an empty freelist at page 3.
@ -383,11 +405,10 @@ func (db *DB) close() error {
if !db.opened {
return nil
}
db.opened = false
db.freelist = nil
db.path = ""
// Clear ops.
db.ops.writeAt = nil
@ -414,6 +435,7 @@ func (db *DB) close() error {
db.file = nil
}
db.path = ""
return nil
}
@ -530,7 +552,10 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction.
for i, t := range db.txs {
if t == tx {
db.txs = append(db.txs[:i], db.txs[i+1:]...)
last := len(db.txs) - 1
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
break
}
}
@ -778,16 +803,37 @@ func (db *DB) pageInBuffer(b []byte, id pgid) *page {
// meta retrieves the current meta page reference.
func (db *DB) meta() *meta {
if db.meta0.txid > db.meta1.txid {
return db.meta0
// We have to return the meta with the highest txid which doesn't fail
// validation. Otherwise, we can cause errors when in fact the database is
// in a consistent state. metaA is the one with the higher txid.
metaA := db.meta0
metaB := db.meta1
if db.meta1.txid > db.meta0.txid {
metaA = db.meta1
metaB = db.meta0
}
return db.meta1
// Use higher meta page if valid. Otherwise fallback to previous, if valid.
if err := metaA.validate(); err == nil {
return metaA
} else if err := metaB.validate(); err == nil {
return metaB
}
// This should never be reached, because both meta1 and meta0 were validated
// on mmap() and we do fsync() on every write.
panic("bolt.DB.meta(): invalid meta pages")
}
// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
// Allocate a temporary buffer for the page.
buf := make([]byte, count*db.pageSize)
var buf []byte
if count == 1 {
buf = db.pagePool.Get().([]byte)
} else {
buf = make([]byte, count*db.pageSize)
}
p := (*page)(unsafe.Pointer(&buf[0]))
p.overflow = uint32(count - 1)
@ -909,7 +955,7 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse
diff.TxN = other.TxN - s.TxN
diff.TxN = s.TxN - other.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff
}
@ -937,12 +983,12 @@ type meta struct {
// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *meta) validate() error {
if m.checksum != 0 && m.checksum != m.sum64() {
return ErrChecksum
} else if m.magic != magic {
if m.magic != magic {
return ErrInvalid
} else if m.version != version {
return ErrVersionMismatch
} else if m.checksum != 0 && m.checksum != m.sum64() {
return ErrChecksum
}
return nil
}


@ -12,7 +12,8 @@ var (
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when a data file is not a Bolt-formatted database.
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a


@ -24,7 +24,12 @@ func newFreelist() *freelist {
// size returns the size of the page after serialization.
func (f *freelist) size() int {
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
@ -46,16 +51,15 @@ func (f *freelist) pending_count() int {
return count
}
// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
m := make(pgids, 0)
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
return pgids(f.ids).merge(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
@ -166,12 +170,16 @@ func (f *freelist) read(p *page) {
}
// Copy the list of page ids from the freelist.
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
@ -182,20 +190,22 @@ func (f *freelist) read(p *page) {
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
ids := f.all()
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
if len(ids) < 0xFFFF {
p.count = uint16(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
@ -230,7 +240,7 @@ func (f *freelist) reload(p *page) {
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool)
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}


@ -201,6 +201,11 @@ func (n *node) write(p *page) {
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {


@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -72,6 +75,9 @@ func (p *page) branchPageElement(index uint16) *branchPageElement {
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -111,13 +117,13 @@ type leafPageElement struct {
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}
// PageInfo represents human readable information about a page.
@ -139,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
} else if len(b) == 0 {
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// Create a list to hold all elements from both lists.
merged := make(pgids, 0, len(a)+len(b))
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
@ -166,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
}
// Append what's left in follow.
merged = append(merged, follow...)
return merged
_ = append(merged, follow...)
}

vendor/github.com/boltdb/bolt/tx.go

@ -381,7 +381,9 @@ func (tx *Tx) Check() <-chan error {
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
for _, id := range tx.db.freelist.all() {
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
@ -473,6 +475,8 @@ func (tx *Tx) write() error {
for _, p := range tx.pages {
pages = append(pages, p)
}
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages)
// Write pages to disk in order.
@ -517,8 +521,22 @@ func (tx *Tx) write() error {
}
}
// Clear out page cache.
tx.pages = make(map[pgid]*page)
// Put small pages back to page pool.
for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil
}


@ -1,12 +1,17 @@
language: go
sudo: false
go:
- 1.4
- 1.5.4
- 1.6.4
- 1.7.4
- tip
git:
depth: 3
install:
- go get -v github.com/kardianos/govendor
- govendor sync
script:
- go test -v -covermode=count -coverprofile=coverage.out


@ -102,12 +102,22 @@ BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648
import "net/http"
```
4. (Optional) Use latest changes (note: they may be broken and/or unstable):
   ```sh
$ GIN_PATH=$GOPATH/src/gopkg.in/gin-gonic/gin.v1
$ git -C $GIN_PATH checkout develop
$ git -C $GIN_PATH pull origin develop
   ```
## API Examples
#### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
```go
func main() {
// Disable Console Color
// gin.DisableConsoleColor()
// Creates a gin router with default middleware:
// logger and recovery (crash-free) middleware
router := gin.Default()
@ -127,7 +137,7 @@ func main() {
}
```
#### Parameters in path
### Parameters in path
```go
func main() {
@ -152,7 +162,7 @@ func main() {
}
```
#### Querystring parameters
### Querystring parameters
```go
func main() {
router := gin.Default()
@ -219,34 +229,66 @@ func main() {
id: 1234; page: 1; name: manu; message: this_is_great
```
### Another example: upload file
### Upload files
References issue [#548](https://github.com/gin-gonic/gin/issues/548).
#### Single file
References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](examples/upload-file/single).
```go
func main() {
router := gin.Default()
router.POST("/upload", func(c *gin.Context) {
// single file
file, _ := c.FormFile("file")
log.Println(file.Filename)
file, header , err := c.Request.FormFile("upload")
filename := header.Filename
fmt.Println(header.Filename)
out, err := os.Create("./tmp/"+filename+".png")
if err != nil {
log.Fatal(err)
}
defer out.Close()
_, err = io.Copy(out, file)
if err != nil {
log.Fatal(err)
}
c.String(http.StatusOK, fmt.Printf("'%s' uploaded!", file.Filename))
})
router.Run(":8080")
}
```
#### Grouping routes
How to `curl`:
```bash
curl -X POST http://localhost:8080/upload \
-F "file=@/Users/appleboy/test.zip" \
-H "Content-Type: multipart/form-data"
```
#### Multiple files
See the detail [example code](examples/upload-file/multiple).
```go
func main() {
router := gin.Default()
router.POST("/upload", func(c *gin.Context) {
// Multipart form
form, _ := c.MultipartForm()
files := form.File["upload[]"]
for _, file := range files {
log.Println(file.Filename)
}
c.String(http.StatusOK, fmt.Printf("%d files uploaded!", len(files)))
})
router.Run(":8080")
}
```
How to `curl`:
```bash
curl -X POST http://localhost:8080/upload \
-F "upload[]=@/Users/appleboy/test1.zip" \
-F "upload[]=@/Users/appleboy/test2.zip" \
-H "Content-Type: multipart/form-data"
```
### Grouping routes
```go
func main() {
router := gin.Default()
@ -272,7 +314,7 @@ func main() {
```
#### Blank Gin without middleware by default
### Blank Gin without middleware by default
Use
@ -286,7 +328,7 @@ r := gin.Default()
```
#### Using middleware
### Using middleware
```go
func main() {
// Creates a router without any middleware by default
@ -321,7 +363,7 @@ func main() {
}
```
#### Model binding and validation
### Model binding and validation
To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz).
@ -371,8 +413,44 @@ func main() {
}
```
### Bind Query String
See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292).
```go
package main
import "log"
import "github.com/gin-gonic/gin"
type Person struct {
Name string `form:"name"`
Address string `form:"address"`
}
func main() {
route := gin.Default()
route.GET("/testing", startPage)
route.Run(":8085")
}
func startPage(c *gin.Context) {
var person Person
// If `GET`, only `Form` binding engine (`query`) used.
// If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`).
// See more at https://github.com/gin-gonic/gin/blob/develop/binding/binding.go#L45
if c.Bind(&person) == nil {
log.Println(person.Name)
log.Println(person.Address)
}
c.String(200, "Success")
}
```
### Multipart/Urlencoded binding
###Multipart/Urlencoded binding
```go
package main
@ -411,7 +489,7 @@ $ curl -v --form user=user --form password=password http://localhost:8080/login
```
#### XML, JSON and YAML rendering
### XML, JSON and YAML rendering
```go
func main() {
@ -450,7 +528,7 @@ func main() {
}
```
####Serving static files
### Serving static files
```go
func main() {
@ -464,9 +542,9 @@ func main() {
}
```
####HTML rendering
### HTML rendering
Using LoadHTMLTemplates()
Using LoadHTMLGlob() or LoadHTMLFiles()
```go
func main() {
@ -543,8 +621,11 @@ func main() {
}
```
### Multitemplate
#### Redirects
Gin allow by default use only one html.Template. Check [a multitemplate render](https://github.com/gin-contrib/multitemplate) for using features like go 1.6 `block template`.
### Redirects
Issuing a HTTP redirect is easy:
@ -556,7 +637,7 @@ r.GET("/test", func(c *gin.Context) {
Both internal and external locations are supported.
#### Custom Middleware
### Custom Middleware
```go
func Logger() gin.HandlerFunc {
@ -596,7 +677,7 @@ func main() {
}
```
#### Using BasicAuth() middleware
### Using BasicAuth() middleware
```go
// simulate some private data
var secrets = gin.H{
@ -635,7 +716,7 @@ func main() {
```
#### Goroutines inside a middleware
### Goroutines inside a middleware
When starting inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy.
```go
@ -667,7 +748,7 @@ func main() {
}
```
#### Custom HTTP configuration
### Custom HTTP configuration
Use `http.ListenAndServe()` directly, like this:
@ -694,7 +775,7 @@ func main() {
}
```
#### Graceful restart or stop
### Graceful restart or stop
Do you want to graceful restart or stop your web server?
There are some ways this can be done.
@ -725,7 +806,7 @@ An alternative to endless:
- You should add/modify tests to cover your proposed code changes.
- If your pull request contains a new feature, please document it on the README.
## Example
## Users
Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framework.


@ -8,6 +8,7 @@ import (
"errors"
"io"
"math"
"mime/multipart"
"net"
"net/http"
"net/url"
@ -16,8 +17,7 @@ import (
"github.com/gin-gonic/gin/binding"
"github.com/gin-gonic/gin/render"
"github.com/manucorporat/sse"
"golang.org/x/net/context"
"gopkg.in/gin-contrib/sse.v0"
)
// Content-Type MIME of the most common data formats
@ -31,7 +31,10 @@ const (
MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm
)
const abortIndex int8 = math.MaxInt8 / 2
const (
defaultMemory = 32 << 20 // 32 MB
abortIndex int8 = math.MaxInt8 / 2
)
// Context is the most important part of gin. It allows us to pass variables between middleware,
// manage the flow, validate the JSON of a request and render a JSON response for example.
@ -50,8 +53,6 @@ type Context struct {
Accepted []string
}
var _ context.Context = &Context{}
/************************************/
/********** CONTEXT CREATION ********/
/************************************/
@ -67,7 +68,7 @@ func (c *Context) reset() {
}
// Copy returns a copy of the current context that can be safely used outside the request's scope.
// This have to be used then the context has to be passed to a goroutine.
// This has to be used when the context has to be passed to a goroutine.
func (c *Context) Copy() *Context {
var cp = *c
cp.writermem.ResponseWriter = nil
@ -166,9 +167,7 @@ func (c *Context) Set(key string, value interface{}) {
// Get returns the value for the given key, ie: (value, true).
// If the value does not exists it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
if c.Keys != nil {
value, exists = c.Keys[key]
}
value, exists = c.Keys[key]
return
}
@ -296,7 +295,7 @@ func (c *Context) PostFormArray(key string) []string {
func (c *Context) GetPostFormArray(key string) ([]string, bool) {
req := c.Request
req.ParseForm()
req.ParseMultipartForm(32 << 20) // 32 MB
req.ParseMultipartForm(defaultMemory)
if values := req.PostForm[key]; len(values) > 0 {
return values, true
}
@ -308,6 +307,18 @@ func (c *Context) GetPostFormArray(key string) ([]string, bool) {
return []string{}, false
}
// FormFile returns the first file for the provided form key.
func (c *Context) FormFile(name string) (*multipart.FileHeader, error) {
_, fh, err := c.Request.FormFile(name)
return fh, err
}
// MultipartForm is the parsed multipart form, including file uploads.
func (c *Context) MultipartForm() (*multipart.Form, error) {
err := c.Request.ParseMultipartForm(defaultMemory)
return c.Request.MultipartForm, err
}
// Bind checks the Content-Type to select a binding engine automatically,
// Depending the "Content-Type" header different bindings are used:
// "application/json" --> JSON binding
@ -338,13 +349,10 @@ func (c *Context) BindWith(obj interface{}, b binding.Binding) error {
// ClientIP implements a best effort algorithm to return the real client IP, it parses
// X-Real-IP and X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy.
// Use X-Forwarded-For before X-Real-Ip as nginx uses X-Real-Ip with the proxy's IP.
func (c *Context) ClientIP() string {
if c.engine.ForwardedByClientIP {
clientIP := strings.TrimSpace(c.requestHeader("X-Real-Ip"))
if len(clientIP) > 0 {
return clientIP
}
clientIP = c.requestHeader("X-Forwarded-For")
clientIP := c.requestHeader("X-Forwarded-For")
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
}
@ -352,10 +360,22 @@ func (c *Context) ClientIP() string {
if len(clientIP) > 0 {
return clientIP
}
clientIP = strings.TrimSpace(c.requestHeader("X-Real-Ip"))
if len(clientIP) > 0 {
return clientIP
}
}
if c.engine.AppEngine {
if addr := c.Request.Header.Get("X-Appengine-Remote-Addr"); addr != "" {
return addr
}
}
if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil {
return ip
}
return ""
}
@ -364,6 +384,16 @@ func (c *Context) ContentType() string {
return filterFlags(c.requestHeader("Content-Type"))
}
// IsWebsocket returns true if the request headers indicate that a websocket
// handshake is being initiated by the client.
func (c *Context) IsWebsocket() bool {
if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") &&
strings.ToLower(c.requestHeader("Upgrade")) == "websocket" {
return true
}
return false
}
func (c *Context) requestHeader(key string) string {
if values, _ := c.Request.Header[key]; len(values) > 0 {
return values[0]
@ -375,6 +405,19 @@ func (c *Context) requestHeader(key string) string {
/******** RESPONSE RENDERING ********/
/************************************/
// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
return false
case status == 204:
return false
case status == 304:
return false
}
return true
}
func (c *Context) Status(code int) {
c.writermem.WriteHeader(code)
}
@ -424,6 +467,13 @@ func (c *Context) Cookie(name string) (string, error) {
func (c *Context) Render(code int, r render.Render) {
c.Status(code)
if !bodyAllowedForStatus(code) {
r.WriteContentType(c.Writer)
c.Writer.WriteHeaderNow()
return
}
if err := r.Render(c.Writer); err != nil {
panic(err)
}
@ -448,10 +498,7 @@ func (c *Context) IndentedJSON(code int, obj interface{}) {
// JSON serializes the given struct as JSON into the response body.
// It also sets the Content-Type as "application/json".
func (c *Context) JSON(code int, obj interface{}) {
c.Status(code)
if err := render.WriteJSON(c.Writer, obj); err != nil {
panic(err)
}
c.Render(code, render.JSON{Data: obj})
}
// XML serializes the given struct as XML into the response body.
@ -467,8 +514,7 @@ func (c *Context) YAML(code int, obj interface{}) {
// String writes the given string into the response body.
func (c *Context) String(code int, format string, values ...interface{}) {
c.Status(code)
render.WriteString(c.Writer, format, values)
c.Render(code, render.String{Format: format, Data: values})
}
// Redirect returns a HTTP redirect to the specific location.


@ -72,7 +72,7 @@ func (msg *Error) MarshalJSON() ([]byte, error) {
}
// Implements the error interface
func (msg *Error) Error() string {
func (msg Error) Error() string {
return msg.Err.Error()
}


@ -15,10 +15,11 @@ import (
)
// Version is Framework's version
const Version = "v1.0rc2"
const Version = "v1.1.4"
var default404Body = []byte("404 page not found")
var default405Body = []byte("405 method not allowed")
var defaultAppEngine bool
type HandlerFunc func(*Context)
type HandlersChain []HandlerFunc
@ -78,6 +79,10 @@ type (
// handler.
HandleMethodNotAllowed bool
ForwardedByClientIP bool
// #726 #755 If enabled, it will thrust some headers starting with
// 'X-AppEngine...' for better integration with that PaaS.
AppEngine bool
}
)
@ -101,6 +106,7 @@ func New() *Engine {
RedirectFixedPath: false,
HandleMethodNotAllowed: false,
ForwardedByClientIP: true,
AppEngine: defaultAppEngine,
trees: make(methodTrees, 0, 9),
}
engine.RouterGroup.engine = engine


@ -14,16 +14,21 @@ import (
)
var (
green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
reset = string([]byte{27, 91, 48, 109})
green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
reset = string([]byte{27, 91, 48, 109})
disableColor = false
)
func DisableConsoleColor() {
disableColor = true
}
func ErrorLogger() HandlerFunc {
return ErrorLoggerT(ErrorTypeAny)
}
@ -49,7 +54,7 @@ func Logger() HandlerFunc {
func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
isTerm := true
if w, ok := out.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) {
if w, ok := out.(*os.File); !ok || !isatty.IsTerminal(w.Fd()) || disableColor {
isTerm = false
}
@ -87,12 +92,12 @@ func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
}
comment := c.Errors.ByType(ErrorTypePrivate).String()
fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %s |%s %s %-7s %s\n%s",
fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %15s |%s %s %-7s %s\n%s",
end.Format("2006/01/02 - 15:04:05"),
statusColor, statusCode, reset,
latency,
clientIP,
methodColor, reset, method,
methodColor, method, reset,
path,
comment,
)


@ -11,10 +11,13 @@ type Data struct {
Data []byte
}
func (r Data) Render(w http.ResponseWriter) error {
if len(r.ContentType) > 0 {
w.Header()["Content-Type"] = []string{r.ContentType}
}
w.Write(r.Data)
return nil
// Render (Data) writes data with custom ContentType
func (r Data) Render(w http.ResponseWriter) (err error) {
r.WriteContentType(w)
_, err = w.Write(r.Data)
return
}
func (r Data) WriteContentType(w http.ResponseWriter) {
writeContentType(w, []string{r.ContentType})
}


@ -58,9 +58,14 @@ func (r HTMLDebug) loadTemplate() *template.Template {
}
func (r HTML) Render(w http.ResponseWriter) error {
writeContentType(w, htmlContentType)
r.WriteContentType(w)
if len(r.Name) == 0 {
return r.Template.Execute(w, r.Data)
}
return r.Template.ExecuteTemplate(w, r.Name, r.Data)
}
func (r HTML) WriteContentType(w http.ResponseWriter) {
writeContentType(w, htmlContentType)
}


@ -21,12 +21,29 @@ type (
var jsonContentType = []string{"application/json; charset=utf-8"}
func (r JSON) Render(w http.ResponseWriter) error {
return WriteJSON(w, r.Data)
func (r JSON) Render(w http.ResponseWriter) (err error) {
if err = WriteJSON(w, r.Data); err != nil {
panic(err)
}
return
}
func (r JSON) WriteContentType(w http.ResponseWriter) {
writeContentType(w, jsonContentType)
}
func WriteJSON(w http.ResponseWriter, obj interface{}) error {
writeContentType(w, jsonContentType)
jsonBytes, err := json.Marshal(obj)
if err != nil {
return err
}
w.Write(jsonBytes)
return nil
}
func (r IndentedJSON) Render(w http.ResponseWriter) error {
writeContentType(w, jsonContentType)
r.WriteContentType(w)
jsonBytes, err := json.MarshalIndent(r.Data, "", " ")
if err != nil {
return err
@ -35,7 +52,6 @@ func (r IndentedJSON) Render(w http.ResponseWriter) error {
return nil
}
func WriteJSON(w http.ResponseWriter, obj interface{}) error {
func (r IndentedJSON) WriteContentType(w http.ResponseWriter) {
writeContentType(w, jsonContentType)
return json.NewEncoder(w).Encode(obj)
}


@ -22,3 +22,5 @@ func (r Redirect) Render(w http.ResponseWriter) error {
http.Redirect(w, r.Request, r.Location, r.Code)
return nil
}
func (r Redirect) WriteContentType(http.ResponseWriter) {}


@ -8,6 +8,7 @@ import "net/http"
type Render interface {
Render(http.ResponseWriter) error
WriteContentType(w http.ResponseWriter)
}
var (


@ -22,9 +22,12 @@ func (r String) Render(w http.ResponseWriter) error {
return nil
}
func (r String) WriteContentType(w http.ResponseWriter) {
writeContentType(w, plainContentType)
}
func WriteString(w http.ResponseWriter, format string, data []interface{}) {
writeContentType(w, plainContentType)
if len(data) > 0 {
fmt.Fprintf(w, format, data...)
} else {


@ -16,6 +16,10 @@ type XML struct {
var xmlContentType = []string{"application/xml; charset=utf-8"}
func (r XML) Render(w http.ResponseWriter) error {
writeContentType(w, xmlContentType)
r.WriteContentType(w)
return xml.NewEncoder(w).Encode(r.Data)
}
func (r XML) WriteContentType(w http.ResponseWriter) {
writeContentType(w, xmlContentType)
}


@ -17,7 +17,7 @@ type YAML struct {
var yamlContentType = []string{"application/x-yaml; charset=utf-8"}
func (r YAML) Render(w http.ResponseWriter) error {
writeContentType(w, yamlContentType)
r.WriteContentType(w)
bytes, err := yaml.Marshal(r.Data)
if err != nil {
@ -27,3 +27,7 @@ func (r YAML) Render(w http.ResponseWriter) error {
w.Write(bytes)
return nil
}
func (r YAML) WriteContentType(w http.ResponseWriter) {
writeContentType(w, yamlContentType)
}


@ -12,15 +12,12 @@ import (
"sync"
)
const (
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
maxCompressionLevel = flate.BestCompression
defaultCompressionLevel = 1
)
var (
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
flateReaderPool = sync.Pool{New: func() interface{} {
flateWriterPool = sync.Pool{New: func() interface{} {
fw, _ := flate.NewWriter(nil, 3)
return fw
}}
flateReaderPool = sync.Pool{New: func() interface{} {
return flate.NewReader(nil)
}}
)
@ -37,20 +34,11 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
return &flateReadWrapper{fr}
}
func isValidCompressionLevel(level int) bool {
return minCompressionLevel <= level && level <= maxCompressionLevel
}
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
p := &flateWriterPools[level-minCompressionLevel]
func compressNoContextTakeover(w io.WriteCloser) io.WriteCloser {
tw := &truncWriter{w: w}
fw, _ := p.Get().(*flate.Writer)
if fw == nil {
fw, _ = flate.NewWriter(tw, level)
} else {
fw.Reset(tw)
}
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
fw, _ := flateWriterPool.Get().(*flate.Writer)
fw.Reset(tw)
return &flateWriteWrapper{fw: fw, tw: tw}
}
// truncWriter is an io.Writer that writes all but the last four bytes of the
@ -92,7 +80,6 @@ func (w *truncWriter) Write(p []byte) (int, error) {
type flateWriteWrapper struct {
fw *flate.Writer
tw *truncWriter
p *sync.Pool
}
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
@ -107,7 +94,7 @@ func (w *flateWriteWrapper) Close() error {
return errWriteClosed
}
err1 := w.fw.Flush()
w.p.Put(w.fw)
flateWriterPool.Put(w.fw)
w.fw = nil
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")


@ -241,8 +241,7 @@ type Conn struct {
writeErr error
enableWriteCompression bool
compressionLevel int
newCompressionWriter func(io.WriteCloser, int) io.WriteCloser
newCompressionWriter func(io.WriteCloser) io.WriteCloser
// Read fields
reader io.ReadCloser // the current reader returned to the application
@ -286,7 +285,6 @@ func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int)
readFinal: true,
writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize),
enableWriteCompression: true,
compressionLevel: defaultCompressionLevel,
}
c.SetCloseHandler(nil)
c.SetPingHandler(nil)
@ -452,7 +450,7 @@ func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
}
c.writer = mw
if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
w := c.newCompressionWriter(c.writer, c.compressionLevel)
w := c.newCompressionWriter(c.writer)
mw.compress = true
c.writer = w
}
@ -983,15 +981,6 @@ func (c *Conn) CloseHandler() func(code int, text string) error {
// The code argument to h is the received close code or CloseNoStatusReceived
// if the close message is empty. The default close handler sends a close frame
// back to the peer.
//
// The application must read the connection to process close messages as
// described in the section on Control Frames above.
//
// The connection read methods return a CloseError when a close frame is
// received. Most applications should handle close messages as part of their
// normal error handling. Applications should only set a close handler when the
// application must perform some action before sending a close frame back to
// the peer.
func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
if h == nil {
h = func(code int, text string) error {
@ -1014,9 +1003,6 @@ func (c *Conn) PingHandler() func(appData string) error {
// SetPingHandler sets the handler for ping messages received from the peer.
// The appData argument to h is the PING frame application data. The default
// ping handler sends a pong to the peer.
//
// The application must read the connection to process ping messages as
// described in the section on Control Frames above.
func (c *Conn) SetPingHandler(h func(appData string) error) {
if h == nil {
h = func(message string) error {
@ -1040,9 +1026,6 @@ func (c *Conn) PongHandler() func(appData string) error {
// SetPongHandler sets the handler for pong messages received from the peer.
// The appData argument to h is the PONG frame application data. The default
// pong handler does nothing.
//
// The application must read the connection to process ping messages as
// described in the section on Control Frames above.
func (c *Conn) SetPongHandler(h func(appData string) error) {
if h == nil {
h = func(string) error { return nil }
@ -1063,18 +1046,6 @@ func (c *Conn) EnableWriteCompression(enable bool) {
c.enableWriteCompression = enable
}
// SetCompressionLevel sets the flate compression level for subsequent text and
// binary messages. This function is a noop if compression was not negotiated
// with the peer. See the compress/flate package for a description of
// compression levels.
func (c *Conn) SetCompressionLevel(level int) error {
if !isValidCompressionLevel(level) {
return errors.New("websocket: invalid compression level")
}
c.compressionLevel = level
return nil
}
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
func FormatCloseMessage(closeCode int, text string) []byte {
buf := make([]byte, 2+len(text))


@ -118,10 +118,9 @@
//
// Applications are responsible for ensuring that no more than one goroutine
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
// that no more than one goroutine calls the read methods (NextReader,
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
// concurrently.
// WriteJSON) concurrently and that no more than one goroutine calls the read
// methods (NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler,
// SetPingHandler) concurrently.
//
// The Close and WriteControl methods can be called concurrently with all other
// methods.
@ -151,25 +150,19 @@
// application's responsibility to check the Origin header before calling
// Upgrade.
//
// Compression EXPERIMENTAL
// Compression
//
// Per message compression extensions (RFC 7692) are experimentally supported
// by this package in a limited capacity. Setting the EnableCompression option
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
// support.
//
// var upgrader = websocket.Upgrader{
// EnableCompression: true,
// }
//
// If compression was successfully negotiated with the connection's peer, any
// message received in compressed form will be automatically decompressed.
// All Read methods will return uncompressed bytes.
// support. If compression was successfully negotiated with the connection's
// peer, any message received in compressed form will be automatically
// decompressed. All Read methods will return uncompressed bytes.
//
// Per message compression of messages written to a connection can be enabled
// or disabled by calling the corresponding Conn method:
//
// conn.EnableWriteCompression(false)
// conn.EnableWriteCompression(true)
//
// Currently this package does not support compression with "context takeover".
// This means that messages must be compressed and decompressed in isolation,


@ -104,23 +104,23 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
if r.Method != "GET" {
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: method not GET")
}
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
}
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific Sec-Websocket-Extensions headers are unsupported")
}
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13")
}
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'")
}
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'")
}
checkOrigin := u.CheckOrigin
@ -128,12 +128,12 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
checkOrigin = checkSameOrigin
}
if !checkOrigin(r) {
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed")
}
challengeKey := r.Header.Get("Sec-Websocket-Key")
if challengeKey == "" {
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank")
}
subprotocol := u.selectSubprotocol(r, responseHeader)
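
A small sketch of calling Upgrade under the checks shown in this hunk; the permissive CheckOrigin policy and handler name are illustrative assumptions:

```go
package wsutil

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var up = websocket.Upgrader{
	// Overrides the same-origin default; returning false makes Upgrade
	// reply 403 with the "origin not allowed" error above.
	CheckOrigin: func(r *http.Request) bool { return true },
}

// wsHandler relies on Upgrade to enforce the handshake checks above
// (GET method, Connection/Upgrade tokens, version 13, non-blank key)
// and to write the error response itself when one of them fails.
func wsHandler(w http.ResponseWriter, r *http.Request) {
	conn, err := up.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
}
```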

View File

@ -1,6 +0,0 @@
language: go
sudo: false
go:
- 1.3
- 1.4
- tip

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Manuel Martínez-Almeida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,54 +0,0 @@
# Server-Sent Events [![GoDoc](https://godoc.org/github.com/manucorporat/sse?status.svg)](https://godoc.org/github.com/manucorporat/sse) [![Build Status](https://travis-ci.org/manucorporat/sse.svg)](https://travis-ci.org/manucorporat/sse)
Server-sent events (SSE) is a technology where a browser receives automatic updates from a server over an HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5 by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/).
- [Real-world demonstration using Gin](http://sse.getgin.io/)
- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/)
- [Browser support](http://caniuse.com/#feat=eventsource)
## Sample code
```go
import "github.com/manucorporat/sse"
func httpHandler(w http.ResponseWriter, req *http.Request) {
// data can be a primitive like a string, an integer or a float
sse.Encode(w, sse.Event{
Event: "message",
Data: "some data\nmore data",
})
// also a complex type, like a map, a struct or a slice
sse.Encode(w, sse.Event{
Id: "124",
Event: "message",
Data: map[string]interface{}{
"user": "manu",
"date": time.Now().Unix(),
"content": "hi!",
},
})
}
```
```
event: message
data: some data\\nmore data
id: 124
event: message
data: {"content":"hi!","date":1431540810,"user":"manu"}
```
## Content-Type
```go
fmt.Println(sse.ContentType)
```
```
text/event-stream
```
## Decoding support
There is a client-side implementation of SSE coming soon.

View File

@ -1,116 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package sse
import (
"bytes"
"io"
"io/ioutil"
)
type decoder struct {
events []Event
}
func Decode(r io.Reader) ([]Event, error) {
var dec decoder
return dec.decode(r)
}
func (d *decoder) dispatchEvent(event Event, data string) {
dataLength := len(data)
if dataLength > 0 {
// If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
data = data[:dataLength-1]
dataLength--
}
if dataLength == 0 && event.Event == "" {
return
}
if event.Event == "" {
event.Event = "message"
}
event.Data = data
d.events = append(d.events, event)
}
func (d *decoder) decode(r io.Reader) ([]Event, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var currentEvent Event
dataBuffer := new(bytes.Buffer)
// TODO (and unit tests)
// Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
// a single U+000A LINE FEED (LF) character,
// or a single U+000D CARRIAGE RETURN (CR) character.
lines := bytes.Split(buf, []byte{'\n'})
for _, line := range lines {
if len(line) == 0 {
// If the line is empty (a blank line). Dispatch the event.
d.dispatchEvent(currentEvent, dataBuffer.String())
// reset current event and data buffer
currentEvent = Event{}
dataBuffer.Reset()
continue
}
if line[0] == byte(':') {
// If the line starts with a U+003A COLON character (:), ignore the line.
continue
}
var field, value []byte
colonIndex := bytes.IndexRune(line, ':')
if colonIndex != -1 {
// If the line contains a U+003A COLON character (:)
// Collect the characters on the line before the first U+003A COLON character (:),
// and let field be that string.
field = line[:colonIndex]
// Collect the characters on the line after the first U+003A COLON character (:),
// and let value be that string.
value = line[colonIndex+1:]
// If value starts with a single U+0020 SPACE character, remove it from value.
if len(value) > 0 && value[0] == ' ' {
value = value[1:]
}
} else {
// Otherwise, the string is not empty but does not contain a U+003A COLON character (:)
// Use the whole line as the field name, and the empty string as the field value.
field = line
value = []byte{}
}
// The steps to process the field given a field name and a field value depend on the field name,
// as given in the following list. Field names must be compared literally,
// with no case folding performed.
switch string(field) {
case "event":
// Set the event name buffer to field value.
currentEvent.Event = string(value)
case "id":
// Set the event stream's last event ID to the field value.
currentEvent.Id = string(value)
case "retry":
// If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
// then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
// Otherwise, ignore the field.
currentEvent.Id = string(value)
case "data":
// Append the field value to the data buffer,
dataBuffer.Write(value)
// then append a single U+000A LINE FEED (LF) character to the data buffer.
dataBuffer.WriteString("\n")
default:
// Otherwise, the field is ignored.
continue
}
}
// Once the end of the file is reached, the user agent must dispatch the event one final time.
d.dispatchEvent(currentEvent, dataBuffer.String())
return d.events, nil
}
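
A hedged usage sketch for the decoder above; the sample stream is invented purely for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/manucorporat/sse"
)

func main() {
	// A blank line terminates each event, which is what triggers
	// dispatchEvent in the decode loop above.
	stream := "event: message\nid: 1\ndata: hello\ndata: world\n\n"
	events, err := sse.Decode(strings.NewReader(stream))
	if err != nil {
		panic(err)
	}
	for _, ev := range events {
		fmt.Printf("%s %s %q\n", ev.Id, ev.Event, ev.Data)
		// prints: 1 message "hello\nworld"
	}
}
```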

View File

@ -1,106 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package sse
import (
"encoding/json"
"fmt"
"io"
"net/http"
"reflect"
"strconv"
"strings"
)
// Server-Sent Events
// W3C Working Draft 29 October 2009
// http://www.w3.org/TR/2009/WD-eventsource-20091029/
const ContentType = "text/event-stream"
var contentType = []string{ContentType}
var noCache = []string{"no-cache"}
var fieldReplacer = strings.NewReplacer(
"\n", "\\n",
"\r", "\\r")
var dataReplacer = strings.NewReplacer(
"\n", "\ndata:",
"\r", "\\r")
type Event struct {
Event string
Id string
Retry uint
Data interface{}
}
func Encode(writer io.Writer, event Event) error {
w := checkWriter(writer)
writeId(w, event.Id)
writeEvent(w, event.Event)
writeRetry(w, event.Retry)
return writeData(w, event.Data)
}
func writeId(w stringWriter, id string) {
if len(id) > 0 {
w.WriteString("id:")
fieldReplacer.WriteString(w, id)
w.WriteString("\n")
}
}
func writeEvent(w stringWriter, event string) {
if len(event) > 0 {
w.WriteString("event:")
fieldReplacer.WriteString(w, event)
w.WriteString("\n")
}
}
func writeRetry(w stringWriter, retry uint) {
if retry > 0 {
w.WriteString("retry:")
w.WriteString(strconv.FormatUint(uint64(retry), 10))
w.WriteString("\n")
}
}
func writeData(w stringWriter, data interface{}) error {
w.WriteString("data:")
switch kindOfData(data) {
case reflect.Struct, reflect.Slice, reflect.Map:
err := json.NewEncoder(w).Encode(data)
if err != nil {
return err
}
w.WriteString("\n")
default:
dataReplacer.WriteString(w, fmt.Sprint(data))
w.WriteString("\n\n")
}
return nil
}
func (r Event) Render(w http.ResponseWriter) error {
header := w.Header()
header["Content-Type"] = contentType
if _, exist := header["Cache-Control"]; !exist {
header["Cache-Control"] = noCache
}
return Encode(w, r)
}
func kindOfData(data interface{}) reflect.Kind {
value := reflect.ValueOf(data)
valueType := value.Kind()
if valueType == reflect.Ptr {
valueType = value.Elem().Kind()
}
return valueType
}
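
A short sketch of the writing side via Render and Encode above, using an httptest recorder only as an illustrative stand-in for a real response writer:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/manucorporat/sse"
)

func main() {
	w := httptest.NewRecorder()
	ev := sse.Event{Id: "42", Event: "message", Data: "ping"}
	// Render sets Content-Type (and Cache-Control if unset), then
	// delegates to Encode as defined above.
	if err := ev.Render(w); err != nil {
		panic(err)
	}
	fmt.Println(w.Header().Get("Content-Type")) // text/event-stream
	fmt.Print(w.Body.String())
	// id:42
	// event:message
	// data:ping
}
```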

View File

@ -1,24 +0,0 @@
package sse
import "io"
type stringWriter interface {
io.Writer
WriteString(string) (int, error)
}
type stringWrapper struct {
io.Writer
}
func (w stringWrapper) WriteString(str string) (int, error) {
return w.Writer.Write([]byte(str))
}
func checkWriter(writer io.Writer) stringWriter {
if w, ok := writer.(stringWriter); ok {
return w
} else {
return stringWrapper{writer}
}
}

View File

@ -1,156 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key return the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
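
A brief sketch of the calling conventions the package documentation lays out (ctx as the first parameter, never stored in a struct); the function names and durations are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// doSomething takes ctx as its first parameter and stops early when the
// context is canceled or its deadline passes.
func doSomething(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(doSomething(ctx)) // context deadline exceeded
}
```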

View File

@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}

View File

@ -1,300 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}
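
Finally, a small sketch of the cancellation propagation implemented by propagateCancel and cancelCtx above; the variable names are illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	child, cancelChild := context.WithCancel(parent)
	defer cancelChild()

	// Cancelling the parent closes the child's Done channel as well,
	// because the child registered itself with the parent on creation.
	cancelParent()
	<-child.Done()
	fmt.Println(child.Err()) // context canceled
}
```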