diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
deleted file mode 100644
index 888cde70e..000000000
--- a/Godeps/Godeps.json
+++ /dev/null
@@ -1,135 +0,0 @@
-{
-	"ImportPath": "github.com/tendermint/tendermint",
-	"GoVersion": "go1.4.2",
-	"Packages": [
-		"./..."
-	],
-	"Deps": [
-		{
-			"ImportPath": "code.google.com/p/go.crypto/ripemd160",
-			"Comment": "null-236",
-			"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
-		},
-		{
-			"ImportPath": "github.com/agl/ed25519/edwards25519",
-			"Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c"
-		},
-		{
-			"ImportPath": "github.com/codegangsta/cli",
-			"Comment": "1.2.0-106-ga889873",
-			"Rev": "a889873af50a499d060097216dcdbcc26ed09e7c"
-		},
-		{
-			"ImportPath": "github.com/gorilla/websocket",
-			"Rev": "1f87405cd9755fc388e111c4003caca4a2f52fa6"
-		},
-		{
-			"ImportPath": "github.com/inconshreveable/log15/stack",
-			"Comment": "v2.3-38-g352fceb",
-			"Rev": "352fceb48e895bd1dd0b9f5d3ae8f8516c49af0f"
-		},
-		{
-			"ImportPath": "github.com/inconshreveable/log15/term",
-			"Comment": "v2.3-38-g352fceb",
-			"Rev": "352fceb48e895bd1dd0b9f5d3ae8f8516c49af0f"
-		},
-		{
-			"ImportPath": "github.com/mattn/go-colorable",
-			"Rev": "043ae16291351db8465272edf465c9f388161627"
-		},
-		{
-			"ImportPath": "github.com/naoina/go-stringutil",
-			"Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761"
-		},
-		{
-			"ImportPath": "github.com/naoina/toml",
-			"Rev": "7b2dffbeaee47506726f29e36d19cf4ee90d361b"
-		},
-		{
-			"ImportPath": "github.com/sfreiberg/gotwilio",
-			"Rev": "b7230c284bd0c1614c94d00b9998c49f9a2737d8"
-		},
-		{
-			"ImportPath": "github.com/spf13/pflag",
-			"Rev": "5644820622454e71517561946e3d94b9f9db6842"
-		},
-		{
-			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "63c9e642efad852f49e20a6f90194cae112fd2ac"
-		},
-		{
-			"ImportPath": "github.com/syndtr/gosnappy/snappy",
-			"Rev": "ce8acff4829e0c2458a67ead32390ac0a381c862"
-		},
-		{
-			"ImportPath": "github.com/tendermint/ed25519",
-			"Rev": "533fb6548e2071076888eda3c38749d707ba49bc"
-		},
-		{
-			"ImportPath": "github.com/tendermint/flowcontrol",
-			"Rev": "84d9671090430e8ec80e35b339907e0579b999eb"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-common",
-			"Rev": "6b54e7b8d945347f8fd1bdf83a2e9843561abbc5"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-config",
-			"Rev": "3b895c7ce4999ee6fff7b7ca6253f0b41d9bf85c"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-db",
-			"Rev": "28d39f8726c76b163e881c3d05dad227c93200ae"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-logger",
-			"Rev": "b072ed317354e6b507d6abde4c0cfbb516f31ab5"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-merkle",
-			"Rev": "8eab4cc2d55f17ff1529151ba2905015ad00e17d"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-p2p",
-			"Rev": "318a1b353532af2dcc430b9a7db976617fc729b8"
-		},
-		{
-			"ImportPath": "github.com/tendermint/go-wire",
-			"Rev": "4331183eb80dbaaedc9e84ab22e219c2d7d0bbe7"
-		},
-		{
-			"ImportPath": "github.com/tendermint/log15",
-			"Comment": "v2.3-36-g6e46075",
-			"Rev": "6e460758f10ef42a4724b8e4a82fee59aaa0e41d"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/curve25519",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/nacl/box",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/nacl/secretbox",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/poly1305",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/ripemd160",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "golang.org/x/crypto/salsa20/salsa",
-			"Rev": "02a186af8b62cb007f392270669b91be5527d39c"
-		},
-		{
-			"ImportPath": "gopkg.in/fatih/set.v0",
-			"Comment": "v0.1.0-3-g27c4092",
-			"Rev": "27c40922c40b43fe04554d8223a402af3ea333f3"
-		}
-	]
-}
diff --git a/Godeps/Readme b/Godeps/Readme
deleted file mode 100644
index 4cdaa53d5..000000000
--- a/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
deleted file mode 100644
index f037d684e..000000000
--- a/Godeps/_workspace/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pkg
-/bin
diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160.go
deleted file mode 100644
index da690f0b9..000000000
--- a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ripemd160 implements the RIPEMD-160 hash algorithm.
-package ripemd160
-
-// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart
-// Preneel with specifications available at:
-// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
-
-import (
-	"crypto"
-	"hash"
-)
-
-func init() {
-	crypto.RegisterHash(crypto.RIPEMD160, New)
-}
-
-// The size of the checksum in bytes.
-const Size = 20
-
-// The block size of the hash algorithm in bytes.
-const BlockSize = 64
-
-const (
-	_s0 = 0x67452301
-	_s1 = 0xefcdab89
-	_s2 = 0x98badcfe
-	_s3 = 0x10325476
-	_s4 = 0xc3d2e1f0
-)
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
-	s  [5]uint32       // running context
-	x  [BlockSize]byte // temporary buffer
-	nx int             // index into x
-	tc uint64          // total count of bytes processed
-}
-
-func (d *digest) Reset() {
-	d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
-	d.nx = 0
-	d.tc = 0
-}
-
-// New returns a new hash.Hash computing the checksum.
-func New() hash.Hash {
-	result := new(digest)
-	result.Reset()
-	return result
-}
-
-func (d *digest) Size() int { return Size }
-
-func (d *digest) BlockSize() int { return BlockSize }
-
-func (d *digest) Write(p []byte) (nn int, err error) {
-	nn = len(p)
-	d.tc += uint64(nn)
-	if d.nx > 0 {
-		n := len(p)
-		if n > BlockSize-d.nx {
-			n = BlockSize - d.nx
-		}
-		for i := 0; i < n; i++ {
-			d.x[d.nx+i] = p[i]
-		}
-		d.nx += n
-		if d.nx == BlockSize {
-			_Block(d, d.x[0:])
-			d.nx = 0
-		}
-		p = p[n:]
-	}
-	n := _Block(d, p)
-	p = p[n:]
-	if len(p) > 0 {
-		d.nx = copy(d.x[:], p)
-	}
-	return
-}
-
-func (d0 *digest) Sum(in []byte) []byte {
-	// Make a copy of d0 so that caller can keep writing and summing.
-	d := *d0
-
-	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
-	tc := d.tc
-	var tmp [64]byte
-	tmp[0] = 0x80
-	if tc%64 < 56 {
-		d.Write(tmp[0 : 56-tc%64])
-	} else {
-		d.Write(tmp[0 : 64+56-tc%64])
-	}
-
-	// Length in bits.
-	tc <<= 3
-	for i := uint(0); i < 8; i++ {
-		tmp[i] = byte(tc >> (8 * i))
-	}
-	d.Write(tmp[0:8])
-
-	if d.nx != 0 {
-		panic("d.nx != 0")
-	}
-
-	var digest [Size]byte
-	for i, s := range d.s {
-		digest[i*4] = byte(s)
-		digest[i*4+1] = byte(s >> 8)
-		digest[i*4+2] = byte(s >> 16)
-		digest[i*4+3] = byte(s >> 24)
-	}
-
-	return append(in, digest[:]...)
-}
diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160_test.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160_test.go
deleted file mode 100644
index 5df1b2593..000000000
--- a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ripemd160
-
-// Test vectors are from:
-// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
-
-import (
-	"fmt"
-	"io"
-	"testing"
-)
-
-type mdTest struct {
-	out string
-	in  string
-}
-
-var vectors = [...]mdTest{
-	{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
-	{"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"},
-	{"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"},
-	{"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"},
-	{"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"},
-	{"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
-	{"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
-	{"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
-}
-
-func TestVectors(t *testing.T) {
-	for i := 0; i < len(vectors); i++ {
-		tv := vectors[i]
-		md := New()
-		for j := 0; j < 3; j++ {
-			if j < 2 {
-				io.WriteString(md, tv.in)
-			} else {
-				io.WriteString(md, tv.in[0:len(tv.in)/2])
-				md.Sum(nil)
-				io.WriteString(md, tv.in[len(tv.in)/2:])
-			}
-			s := fmt.Sprintf("%x", md.Sum(nil))
-			if s != tv.out {
-				t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out)
-			}
-			md.Reset()
-		}
-	}
-}
-
-func TestMillionA(t *testing.T) {
-	md := New()
-	for i := 0; i < 100000; i++ {
-		io.WriteString(md, "aaaaaaaaaa")
-	}
-	out := "52783243c1697bdbe16d37f97f68f08325dc1528"
-	s := fmt.Sprintf("%x", md.Sum(nil))
-	if s != out {
-		t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out)
-	}
-	md.Reset()
-}
diff --git a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160block.go b/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160block.go
deleted file mode 100644
index 7bc8e6c48..000000000
--- a/Godeps/_workspace/src/code.google.com/p/go.crypto/ripemd160/ripemd160block.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// RIPEMD-160 block step.
-// In its own file so that a faster assembly or C version
-// can be substituted easily.
-
-package ripemd160
-
-// work buffer indices and roll amounts for one line
-var _n = [80]uint{
-	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-	7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
-	3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
-	1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
-	4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
-}
-
-var _r = [80]uint{
-	11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
-	7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
-	11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
-	11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
-	9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
-}
-
-// same for the other parallel one
-var n_ = [80]uint{
-	5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
-	6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
-	15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
-	8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
-	12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
-}
-
-var r_ = [80]uint{
-	8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
-	9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
-	9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
-	15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
-	8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
-}
-
-func _Block(md *digest, p []byte) int {
-	n := 0
-	var x [16]uint32
-	var alpha, beta uint32
-	for len(p) >= BlockSize {
-		a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
-		aa, bb, cc, dd, ee := a, b, c, d, e
-		j := 0
-		for i := 0; i < 16; i++ {
-			x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
-			j += 4
-		}
-
-		// round 1
-		i := 0
-		for i < 16 {
-			alpha = a + (b ^ c ^ d) + x[_n[i]]
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
-			a, b, c, d, e = e, alpha, b, beta, d
-
-			// parallel line
-			alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
-			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
-
-			i++
-		}
-
-		// round 2
-		for i < 32 {
-			alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
-			a, b, c, d, e = e, alpha, b, beta, d
-
-			// parallel line
-			alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
-			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
-
-			i++
-		}
-
-		// round 3
-		for i < 48 {
-			alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
-			a, b, c, d, e = e, alpha, b, beta, d
-
-			// parallel line
-			alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
-			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
-
-			i++
-		}
-
-		// round 4
-		for i < 64 {
-			alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
-			a, b, c, d, e = e, alpha, b, beta, d
-
-			// parallel line
-			alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
-			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
-
-			i++
-		}
-
-		// round 5
-		for i < 80 {
-			alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
-			s := _r[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + e
-			beta = c<<10 | c>>22
-			a, b, c, d, e = e, alpha, b, beta, d
-
-			// parallel line
-			alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
-			s = r_[i]
-			alpha = (alpha<<s | alpha>>(32-s)) + ee
-			beta = cc<<10 | cc>>22
-			aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
-
-			i++
-		}
-
-		// combine results
-		dd += c + md.s[1]
-		md.s[1] = md.s[2] + d + ee
-		md.s[2] = md.s[3] + e + aa
-		md.s[3] = md.s[4] + a + bb
-		md.s[4] = md.s[0] + b + cc
-		md.s[0] = dd
-
-		p = p[BlockSize:]
-		n += BlockSize
-	}
-	return n
-}
diff --git a/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/const.go b/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/const.go
deleted file mode 100644
index ea5b77a71..000000000
--- a/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/const.go
+++ /dev/null
@@ -1,1411 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-var d = FieldElement{
-	-10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
-}
-
-var d2 = FieldElement{
-	-21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
-}
-
-var SqrtM1 = FieldElement{
-	-32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
-}
-
-var A = FieldElement{
-	486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-}
-
-var bi = [8]PreComputedGroupElement{
-	{
-		FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
-		FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
-		FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
-	},
-	{
-		FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
-		FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
-		FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
-	},
-	{
-		FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
-		FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
-		FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
-	},
-	{
-		FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
-		FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
-		FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
-	},
-	{
-		FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
-		FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
-		FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
-	},
-	{
-		FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
-		FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
-		FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
-	},
-	{
-		FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455,
-18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - 
FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 
5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 
8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, 
-2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 
3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, 
-15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 
10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, 
-14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, 
-8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, 
-7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 
15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 
9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - 
FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, 
-29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, 
-22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 
32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, 
-1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, 
-10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 
12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, 
-14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - 
FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 
18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, 
-4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - 
FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, 
-4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 
8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/edwards25519.go b/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/edwards25519.go deleted file mode 100644 index 184b4a859..000000000 --- a/Godeps/_workspace/src/github.com/agl/ed25519/edwards25519/edwards25519.go +++ /dev/null @@ -1,2127 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements operations in GF(2**255-19) and on an -// Edwards curve that is isomorphic to curve25519. See -// http://ed25519.cr.yp.to/. -package edwards25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -func FeZero(fe *FieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func FeSub(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func FeCopy(dst, src *FieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - var x FieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int64 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - for i := range h { - h[i] = -f[i] - } -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. -func FeMul(h, f, g *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 /* 1.4*2^29 */ - g2_19 := 19 * g2 /* 1.4*2^30; still ok */ - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * 
int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
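The |f| and |h| bounds quoted for FeMul and FeSquare all describe the same unsaturated representation: ten signed limbs of alternating 26 and 25 bits, so a field element h stands for h[0] + 2^26*h[1] + 2^51*h[2] + ... + 2^230*h[9] modulo 2^255-19, exactly the packing FeToBytes serializes above. The following is a hedged reference sketch (not part of the original file) that maps a limb vector to a math/big integer, which is handy for cross-checking FeMul or FeSquare against plain big-integer arithmetic; the limb offsets are read off the FeToBytes shifts, everything else is illustrative.

```go
package main

import (
	"fmt"
	"math/big"
)

// feToBig interprets ten signed limbs in the alternating 26/25-bit radix used
// above (bit offsets 0,26,51,77,102,128,153,179,204,230) and reduces the sum
// modulo p = 2^255 - 19. FieldElement in the original file has the same shape
// ([10]int32); a plain array is used here to keep the sketch self-contained.
func feToBig(h *[10]int32) *big.Int {
	offsets := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	acc := new(big.Int)
	for i, off := range offsets {
		// term = h[i] * 2^off; limbs may be negative, which big.Int handles.
		term := new(big.Int).Mul(big.NewInt(int64(h[i])), new(big.Int).Lsh(big.NewInt(1), off))
		acc.Add(acc, term)
	}
	return acc.Mod(acc, p) // Mod yields a canonical value in [0, p)
}

func main() {
	two := [10]int32{2}          // the field element 2 is the limb vector (2, 0, ..., 0)
	fmt.Println(feToBig(&two))   // prints 2
}
```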
-func FeSquare(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - 
h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.959375*2^30 - f6_19 := 19 * f6 // 1.959375*2^30 - f7_38 := 38 * f7 // 1.959375*2^30 - f8_19 := 19 * f8 // 1.959375*2^30 - f9_38 := 38 * f9 // 1.959375*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + 
f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } 
- FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. -// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, 
&vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) == (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. 
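The comment pins down the exact combination r = a*A + b*B, with both scalars given in little-endian byte form. As a usage illustration only (a wrapper written for this note, not part of the original package), a caller inside the package could go from byte encodings to the encoded result like this:

```go
// doubleScalarMultBytes computes a*A + b*B for a point A given in its 32-byte
// encoding and returns the 32-byte encoding of the result. It uses only
// functions defined in this file; the wrapper itself is illustrative.
func doubleScalarMultBytes(a, APoint, b *[32]byte) (out [32]byte, ok bool) {
	var A ExtendedGroupElement
	if !A.FromBytes(APoint) {
		return out, false // APoint does not decode to a curve point
	}
	var r ProjectiveGroupElement
	GeDoubleScalarMultVartime(&r, a, &A, b)
	r.ToBytes(&out)
	return out, true
}
```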
-func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. -func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
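Since the contract is simply s = (a*b + c) mod l over little-endian 256-bit scalars, it can be sanity-checked against ordinary big-integer arithmetic. Below is a hedged reference sketch of the same computation using math/big (deliberately slow and variable-time, unlike the limb code that follows); the constant comes straight from the comment above.

```go
package main

import (
	"fmt"
	"math/big"
)

// groupOrder returns l = 2^252 + 27742317777372353535851937790883648493.
func groupOrder() *big.Int {
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	tail, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	return l.Add(l, tail)
}

// leToInt decodes a 32-byte little-endian scalar into a big.Int.
func leToInt(b *[32]byte) *big.Int {
	var be [32]byte
	for i := range b {
		be[31-i] = b[i] // big.Int.SetBytes expects big-endian bytes
	}
	return new(big.Int).SetBytes(be[:])
}

// scMulAddRef computes (a*b + c) mod l the obvious way, for cross-checking.
func scMulAddRef(a, b, c *[32]byte) *big.Int {
	r := new(big.Int).Mul(leToInt(a), leToInt(b))
	r.Add(r, leToInt(c))
	return r.Mod(r, groupOrder())
}

func main() {
	var a, b, c [32]byte
	a[0], b[0], c[0] = 3, 5, 7
	fmt.Println(scMulAddRef(&a, &b, &c)) // 3*5 + 7 = 22
}
```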
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml b/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml deleted file mode 100644 index baf46abc6..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -go: 1.1 - -script: -- go vet ./... -- go test -v ./... diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE deleted file mode 100644 index 5515ccfb7..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2013 Jeremy Saenz -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md b/Godeps/_workspace/src/github.com/codegangsta/cli/README.md deleted file mode 100644 index a7bec8418..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/README.md +++ /dev/null @@ -1,298 +0,0 @@ -[![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) - -# cli.go -cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. - -You can view the API docs here: -http://godoc.org/github.com/codegangsta/cli - -## Overview -Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. - -**This is where cli.go comes into play.** cli.go makes command line programming fun, organized, and expressive! - -## Installation -Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). 
- -To install `cli.go`, simply run: -``` -$ go get github.com/codegangsta/cli -``` - -Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands can be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` - -## Getting Started -One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`. - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - cli.NewApp().Run(os.Args) -} -``` - -This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) { - println("boom! I say!") - } - - app.Run(os.Args) -} -``` - -Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below. - -## Example - -Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! - -Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: - -``` go -package main - -import ( - "os" - "github.com/codegangsta/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "greet" - app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) { - println("Hello friend!") - } - - app.Run(os.Args) -} -``` - -Install our command to the `$GOPATH/bin` directory: - -``` -$ go install -``` - -Finally run our new command: - -``` -$ greet -Hello friend! -``` - -cli.go also generates some bitchass help text: -``` -$ greet help -NAME: - greet - fight the loneliness! - -USAGE: - greet [global options] command [command options] [arguments...] - -VERSION: - 0.0.0 - -COMMANDS: - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS - --version Shows version information -``` - -### Arguments -You can lookup arguments by calling the `Args` function on `cli.Context`. - -``` go -... -app.Action = func(c *cli.Context) { - println("Hello", c.Args()[0]) -} -... -``` - -### Flags -Setting and querying flags is simple. -``` go -... -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, -} -app.Action = func(c *cli.Context) { - name := "someone" - if len(c.Args()) > 0 { - name = c.Args()[0] - } - if c.String("lang") == "spanish" { - println("Hola", name) - } else { - println("Hello", name) - } -} -... -``` - -#### Alternate Names - -You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - }, -} -``` - -That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. - -#### Values from the Environment - -You can also have the default value set from the environment via `EnvVar`. e.g. 
- -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "APP_LANG", - }, -} -``` - -The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, -} -``` - -### Subcommands - -Subcommands can be defined for a more git-like command line app. -```go -... -app.Commands = []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - { - Name: "template", - Aliases: []string{"r"}, - Usage: "options for task templates", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "add a new template", - Action: func(c *cli.Context) { - println("new task template: ", c.Args().First()) - }, - }, - { - Name: "remove", - Usage: "remove an existing template", - Action: func(c *cli.Context) { - println("removed task template: ", c.Args().First()) - }, - }, - }, - }, -} -... -``` - -### Bash Completion - -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to -show an app's subcommands, but you can write your own completion methods for -the App or its subcommands. -```go -... -var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"} -app := cli.NewApp() -app.EnableBashCompletion = true -app.Commands = []cli.Command{ - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - BashComplete: func(c *cli.Context) { - // This will complete if no args are passed - if len(c.Args()) > 0 { - return - } - for _, t := range tasks { - fmt.Println(t) - } - }, - } -} -... -``` - -#### To Enable - -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: - -`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` - - -## Contribution Guidelines -Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together. - -If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out. 
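app.go below is the heart of the package: it wires the README's flags, commands, help text and the Before/After hooks into App.Run. As one last worked illustration before the source (a hypothetical program, using only fields that appear in this vendored version), a small app combining a global flag, a subcommand and the Before hook might look like:

```go
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "todo"
	app.Usage = "manage a tiny task list"
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "file, f", Value: "todo.txt", Usage: "task file", EnvVar: "TODO_FILE"},
	}
	// Before runs after global flags are parsed but before any command Action.
	app.Before = func(c *cli.Context) error {
		fmt.Println("using task file:", c.String("file"))
		return nil
	}
	app.Commands = []cli.Command{
		{
			Name:    "add",
			Aliases: []string{"a"},
			Usage:   "add a task to the list",
			Action: func(c *cli.Context) {
				fmt.Println("added task:", c.Args().First())
			},
		},
	}
	app.Run(os.Args)
}
```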
diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go b/Godeps/_workspace/src/github.com/codegangsta/cli/app.go deleted file mode 100644 index 891416d2a..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/app.go +++ /dev/null @@ -1,298 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "time" -) - -// App is the main structure of a cli application. It is recomended that -// and app be created with the cli.NewApp() function -type App struct { - // The name of the program. Defaults to os.Args[0] - Name string - // Description of the program. - Usage string - // Version of the program - Version string - // List of commands to execute - Commands []Command - // List of flags to parse - Flags []Flag - // Boolean to enable bash completion commands - EnableBashCompletion bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide built-in version flag - HideVersion bool - // An action to execute when the bash-completion flag is set - BashComplete func(context *Context) - // An action to execute before any subcommands are run, but after the context is ready - // If a non-nil error is returned, no subcommands are run - Before func(context *Context) error - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After func(context *Context) error - // The action to execute when no subcommands are specified - Action func(context *Context) - // Execute this function if the proper command cannot be found - CommandNotFound func(context *Context, command string) - // Compilation date - Compiled time.Time - // List of all authors who contributed - Authors []Author - // Name of Author (Note: Use App.Authors, this is deprecated) - Author string - // Email of Author (Note: Use App.Authors, this is deprecated) - Email string - // Writer writer to write output to - Writer io.Writer -} - -// Tries to find out when this binary was compiled. -// Returns the current time if it fails to find it. -func compileTime() time.Time { - info, err := os.Stat(os.Args[0]) - if err != nil { - return time.Now() - } - return info.ModTime() -} - -// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. -func NewApp() *App { - return &App{ - Name: os.Args[0], - Usage: "A new cli application", - Version: "0.0.0", - BashComplete: DefaultAppComplete, - Action: helpCommand.Action, - Compiled: compileTime(), - Writer: os.Stdout, - } -} - -// Entry point to the cli app. 
Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { - if a.Author != "" || a.Email != "" { - a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) - } - - // append help to commands - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - - //append version/help flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - if !a.HideVersion { - a.appendFlag(VersionFlag) - } - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err = set.Parse(arguments[1:]) - nerr := normalizeFlags(a.Flags, set) - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - context := NewContext(a, set, set) - ShowAppHelp(context) - fmt.Fprintln(a.Writer) - return nerr - } - context := NewContext(a, set, set) - - if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") - ShowAppHelp(context) - fmt.Fprintln(a.Writer) - return err - } - - if checkCompletions(context) { - return nil - } - - if checkHelp(context) { - return nil - } - - if checkVersion(context) { - return nil - } - - if a.After != nil { - defer func() { - // err is always nil here. - // There is a check to see if it is non-nil - // just few lines before. - err = a.After(context) - }() - } - - if a.Before != nil { - err := a.Before(context) - if err != nil { - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - a.Action(context) - return nil -} - -// Another entry point to the cli app, takes care of passing arguments and error handling -func (a *App) RunAndExitOnError() { - if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) (err error) { - // append help to commands - if len(a.Commands) > 0 { - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - } - - // append flags - if a.EnableBashCompletion { - a.appendFlag(BashCompletionFlag) - } - - // parse flags - set := flagSet(a.Name, a.Flags) - set.SetOutput(ioutil.Discard) - err = set.Parse(ctx.Args().Tail()) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx.globalSet) - - if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - if len(a.Commands) > 0 { - ShowSubcommandHelp(context) - } else { - ShowCommandHelp(ctx, context.Args().First()) - } - fmt.Fprintln(a.Writer) - return nerr - } - - if err != nil { - fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n") - ShowSubcommandHelp(context) - return err - } - - if checkCompletions(context) { - return nil - } - - if len(a.Commands) > 0 { - if checkSubcommandHelp(context) { - return nil - } - } else { - if checkCommandHelp(ctx, context.Args().First()) { - return nil - } - } - - if a.After != nil { - defer func() { - // err is always nil here. - // There is a check to see if it is non-nil - // just few lines before. 
- err = a.After(context) - }() - } - - if a.Before != nil { - err := a.Before(context) - if err != nil { - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - a.Action(context) - - return nil -} - -// Returns the named command on App. Returns nil if the command does not exist -func (a *App) Command(name string) *Command { - for _, c := range a.Commands { - if c.HasName(name) { - return &c - } - } - - return nil -} - -func (a *App) hasFlag(flag Flag) bool { - for _, f := range a.Flags { - if flag == f { - return true - } - } - - return false -} - -func (a *App) appendFlag(flag Flag) { - if !a.hasFlag(flag) { - a.Flags = append(a.Flags, flag) - } -} - -// Author represents someone who has contributed to a cli project. -type Author struct { - Name string // The Authors name - Email string // The Authors email -} - -// String makes Author comply to the Stringer interface, to allow an easy print in the templating process -func (a Author) String() string { - e := "" - if a.Email != "" { - e = "<" + a.Email + "> " - } - - return fmt.Sprintf("%v %v", a.Name, e) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go deleted file mode 100644 index ae8bb0f9d..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go +++ /dev/null @@ -1,679 +0,0 @@ -package cli_test - -import ( - "bytes" - "flag" - "fmt" - "io" - "os" - "strings" - "testing" - - "github.com/codegangsta/cli" -) - -func ExampleApp() { - // set args for examples sake - os.Args = []string{"greet", "--name", "Jeremy"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Action = func(c *cli.Context) { - fmt.Printf("Hello %v\n", c.String("name")) - } - app.Author = "Harrison" - app.Email = "harrison@lolwut.com" - app.Authors = []cli.Author{cli.Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}} - app.Run(os.Args) - // Output: - // Hello Jeremy -} - -func ExampleAppSubcommand() { - // set args for examples sake - os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - Aliases: []string{"hi"}, - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - Aliases: []string{"en"}, - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - fmt.Println("Hello,", c.String("name")) - }, - }, - }, - }, - } - - app.Run(os.Args) - // Output: - // Hello, Jeremy -} - -func ExampleAppHelp() { - // set args for examples sake - os.Args = []string{"greet", "h", "describeit"} - - app := cli.NewApp() - app.Name = "greet" - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, - } - app.Commands = []cli.Command{ - { - Name: "describeit", - Aliases: []string{"d"}, - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, - } - app.Run(os.Args) - // Output: - // NAME: - // 
describeit - use it to see a description - // - // USAGE: - // command describeit [arguments...] - // - // DESCRIPTION: - // This is how we describe describeit the function -} - -func ExampleAppBashComplete() { - // set args for examples sake - os.Args = []string{"greet", "--generate-bash-completion"} - - app := cli.NewApp() - app.Name = "greet" - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "describeit", - Aliases: []string{"d"}, - Usage: "use it to see a description", - Description: "This is how we describe describeit the function", - Action: func(c *cli.Context) { - fmt.Printf("i like to describe things") - }, - }, { - Name: "next", - Usage: "next example", - Description: "more stuff to see when generating bash completion", - Action: func(c *cli.Context) { - fmt.Printf("the next example") - }, - }, - } - - app.Run(os.Args) - // Output: - // describeit - // d - // next - // help - // h -} - -func TestApp_Run(t *testing.T) { - s := "" - - app := cli.NewApp() - app.Action = func(c *cli.Context) { - s = s + c.Args().First() - } - - err := app.Run([]string{"command", "foo"}) - expect(t, err, nil) - err = app.Run([]string{"command", "bar"}) - expect(t, err, nil) - expect(t, s, "foobar") -} - -var commandAppTests = []struct { - name string - expected bool -}{ - {"foobar", true}, - {"batbaz", true}, - {"b", true}, - {"f", true}, - {"bat", false}, - {"nothing", false}, -} - -func TestApp_Command(t *testing.T) { - app := cli.NewApp() - fooCommand := cli.Command{Name: "foobar", Aliases: []string{"f"}} - batCommand := cli.Command{Name: "batbaz", Aliases: []string{"b"}} - app.Commands = []cli.Command{ - fooCommand, - batCommand, - } - - for _, test := range commandAppTests { - expect(t, app.Command(test.name) != nil, test.expected) - } -} - -func TestApp_CommandWithArgBeforeFlags(t *testing.T) { - var parsedOption, firstArg string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *cli.Context) { - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) - - expect(t, parsedOption, "my-option") - expect(t, firstArg, "my-arg") -} - -func TestApp_RunAsSubcommandParseFlags(t *testing.T) { - var context *cli.Context - - a := cli.NewApp() - a.Commands = []cli.Command{ - { - Name: "foo", - Action: func(c *cli.Context) { - context = c - }, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - }, - Before: func(_ *cli.Context) error { return nil }, - }, - } - a.Run([]string{"", "foo", "--lang", "spanish", "abcd"}) - - expect(t, context.Args().Get(0), "abcd") - expect(t, context.String("lang"), "spanish") -} - -func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { - var parsedOption string - var args []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *cli.Context) { - parsedOption = c.String("option") - args = c.Args() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"}) - - expect(t, parsedOption, "my-option") - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "--notARealFlag") -} - -func 
TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { - var args []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Action: func(c *cli.Context) { - args = c.Args() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"}) - - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "notAFlagAtAll") -} - -func TestApp_Float64Flag(t *testing.T) { - var meters float64 - - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, - } - app.Action = func(c *cli.Context) { - meters = c.Float64("height") - } - - app.Run([]string{"", "--height", "1.93"}) - expect(t, meters, 1.93) -} - -func TestApp_ParseSliceFlags(t *testing.T) { - var parsedOption, firstArg string - var parsedIntSlice []int - var parsedStringSlice []string - - app := cli.NewApp() - command := cli.Command{ - Name: "cmd", - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, - cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, - }, - Action: func(c *cli.Context) { - parsedIntSlice = c.IntSlice("p") - parsedStringSlice = c.StringSlice("ip") - parsedOption = c.String("option") - firstArg = c.Args().First() - }, - } - app.Commands = []cli.Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) - - IntsEquals := func(a, b []int) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - - StrsEquals := func(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true - } - var expectedIntSlice = []int{22, 80} - var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"} - - if !IntsEquals(parsedIntSlice, expectedIntSlice) { - t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice) - } - - if !StrsEquals(parsedStringSlice, expectedStringSlice) { - t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice) - } -} - -func TestApp_DefaultStdout(t *testing.T) { - app := cli.NewApp() - - if app.Writer != os.Stdout { - t.Error("Default output writer not set.") - } -} - -type mockWriter struct { - written []byte -} - -func (fw *mockWriter) Write(p []byte) (n int, err error) { - if fw.written == nil { - fw.written = p - } else { - fw.written = append(fw.written, p...) 
- } - - return len(p), nil -} - -func (fw *mockWriter) GetWritten() (b []byte) { - return fw.written -} - -func TestApp_SetStdout(t *testing.T) { - w := &mockWriter{} - - app := cli.NewApp() - app.Name = "test" - app.Writer = w - - err := app.Run([]string{"help"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if len(w.written) == 0 { - t.Error("App did not write output to desired writer.") - } -} - -func TestApp_BeforeFunc(t *testing.T) { - beforeRun, subcommandRun := false, false - beforeError := fmt.Errorf("fail") - var err error - - app := cli.NewApp() - - app.Before = func(c *cli.Context) error { - beforeRun = true - s := c.String("opt") - if s == "fail" { - return beforeError - } - - return nil - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "sub", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, - } - - // run with the Before() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - beforeRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != beforeError { - t.Errorf("Run error expected, but not received") - } - - if beforeRun == false { - t.Errorf("Before() not executed when expected") - } - - if subcommandRun == true { - t.Errorf("Subcommand executed when NOT expected") - } - -} - -func TestApp_AfterFunc(t *testing.T) { - afterRun, subcommandRun := false, false - afterError := fmt.Errorf("fail") - var err error - - app := cli.NewApp() - - app.After = func(c *cli.Context) error { - afterRun = true - s := c.String("opt") - if s == "fail" { - return afterError - } - - return nil - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "sub", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []cli.Flag{ - cli.StringFlag{Name: "opt"}, - } - - // run with the After() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - afterRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != afterError { - t.Errorf("Run error expected, but not received") - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } -} - -func TestAppNoHelpFlag(t *testing.T) { - oldFlag := cli.HelpFlag - defer func() { - cli.HelpFlag = oldFlag - }() - - cli.HelpFlag = cli.BoolFlag{} - - app := cli.NewApp() - err := app.Run([]string{"test", "-h"}) - - if err != flag.ErrHelp { - t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err) - } -} - -func TestAppHelpPrinter(t *testing.T) { - oldPrinter := cli.HelpPrinter - defer func() { - cli.HelpPrinter = oldPrinter - }() - - var wasCalled = false - cli.HelpPrinter = func(w 
io.Writer, template string, data interface{}) { - wasCalled = true - } - - app := cli.NewApp() - app.Run([]string{"-h"}) - - if wasCalled == false { - t.Errorf("Help printer expected to be called, but was not") - } -} - -func TestAppVersionPrinter(t *testing.T) { - oldPrinter := cli.VersionPrinter - defer func() { - cli.VersionPrinter = oldPrinter - }() - - var wasCalled = false - cli.VersionPrinter = func(c *cli.Context) { - wasCalled = true - } - - app := cli.NewApp() - ctx := cli.NewContext(app, nil, nil) - cli.ShowVersion(ctx) - - if wasCalled == false { - t.Errorf("Version printer expected to be called, but was not") - } -} - -func TestAppCommandNotFound(t *testing.T) { - beforeRun, subcommandRun := false, false - app := cli.NewApp() - - app.CommandNotFound = func(c *cli.Context, command string) { - beforeRun = true - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "bar", - Action: func(c *cli.Context) { - subcommandRun = true - }, - }, - } - - app.Run([]string{"command", "foo"}) - - expect(t, beforeRun, true) - expect(t, subcommandRun, false) -} - -func TestGlobalFlagsInSubcommands(t *testing.T) { - subcommandRun := false - app := cli.NewApp() - - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, - } - - app.Commands = []cli.Command{ - cli.Command{ - Name: "foo", - Subcommands: []cli.Command{ - { - Name: "bar", - Action: func(c *cli.Context) { - if c.GlobalBool("debug") { - subcommandRun = true - } - }, - }, - }, - }, - } - - app.Run([]string{"command", "-d", "foo", "bar"}) - - expect(t, subcommandRun, true) -} - -func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) { - var subcommandHelpTopics = [][]string{ - {"command", "foo", "--help"}, - {"command", "foo", "-h"}, - {"command", "foo", "help"}, - } - - for _, flagSet := range subcommandHelpTopics { - t.Logf("==> checking with flags %v", flagSet) - - app := cli.NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - - subCmdBar := cli.Command{ - Name: "bar", - Usage: "does bar things", - } - subCmdBaz := cli.Command{ - Name: "baz", - Usage: "does baz things", - } - cmd := cli.Command{ - Name: "foo", - Description: "descriptive wall of text about how it does foo things", - Subcommands: []cli.Command{subCmdBar, subCmdBaz}, - } - - app.Commands = []cli.Command{cmd} - err := app.Run(flagSet) - - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if strings.Contains(output, "No help topic for") { - t.Errorf("expect a help topic, got none: \n%q", output) - } - - for _, shouldContain := range []string{ - cmd.Name, cmd.Description, - subCmdBar.Name, subCmdBar.Usage, - subCmdBaz.Name, subCmdBaz.Usage, - } { - if !strings.Contains(output, shouldContain) { - t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output) - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete deleted file mode 100644 index 9b55dd990..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete +++ /dev/null @@ -1,13 +0,0 @@ -#! 
/bin/bash - -_cli_bash_autocomplete() { - local cur prev opts base - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - prev="${COMP_WORDS[COMP_CWORD-1]}" - opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion ) - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - } - - complete -F _cli_bash_autocomplete $PROG \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete deleted file mode 100644 index 5430a18f9..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete +++ /dev/null @@ -1,5 +0,0 @@ -autoload -U compinit && compinit -autoload -U bashcompinit && bashcompinit - -script_dir=$(dirname $0) -source ${script_dir}/bash_autocomplete diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go b/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go deleted file mode 100644 index b74254581..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/cli.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package cli provides a minimal framework for creating and organizing command line -// Go applications. cli is designed to be easy to understand and write, the most simple -// cli application can be written as follows: -// func main() { -// cli.NewApp().Run(os.Args) -// } -// -// Of course this application does not do much, so let's make this an actual application: -// func main() { -// app := cli.NewApp() -// app.Name = "greet" -// app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) { -// println("Greetings") -// } -// -// app.Run(os.Args) -// } -package cli diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go deleted file mode 100644 index 8a8df9736..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cli_test - -import ( - "os" - - "github.com/codegangsta/cli" -) - -func Example() { - app := cli.NewApp() - app.Name = "todo" - app.Usage = "task list on the command line" - app.Commands = []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) { - println("added task: ", c.Args().First()) - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) { - println("completed task: ", c.Args().First()) - }, - }, - } - - app.Run(os.Args) -} - -func ExampleSubcommand() { - app := cli.NewApp() - app.Name = "say" - app.Commands = []cli.Command{ - { - Name: "hello", - Aliases: []string{"hi"}, - Usage: "use it to see a description", - Description: "This is how we describe hello the function", - Subcommands: []cli.Command{ - { - Name: "english", - Aliases: []string{"en"}, - Usage: "sends a greeting in english", - Description: "greets someone in english", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name", - Value: "Bob", - Usage: "Name of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hello, ", c.String("name")) - }, - }, { - Name: "spanish", - Aliases: []string{"sp"}, - Usage: "sends a greeting in spanish", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "surname", - Value: "Jones", - Usage: "Surname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Hola, ", c.String("surname")) - }, - }, { - Name: "french", - Aliases: []string{"fr"}, - Usage: 
"sends a greeting in french", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "nickname", - Value: "Stevie", - Usage: "Nickname of the person to greet", - }, - }, - Action: func(c *cli.Context) { - println("Bonjour, ", c.String("nickname")) - }, - }, - }, - }, { - Name: "bye", - Usage: "says goodbye", - Action: func(c *cli.Context) { - println("bye") - }, - }, - } - - app.Run(os.Args) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go b/Godeps/_workspace/src/github.com/codegangsta/cli/command.go deleted file mode 100644 index d0bbd0c6e..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/command.go +++ /dev/null @@ -1,184 +0,0 @@ -package cli - -import ( - "fmt" - "io/ioutil" - "strings" -) - -// Command is a subcommand for a cli.App. -type Command struct { - // The name of the command - Name string - // short name of the command. Typically one character (deprecated, use `Aliases`) - ShortName string - // A list of aliases for the command - Aliases []string - // A short description of the usage of this command - Usage string - // A longer explanation of how the command works - Description string - // The function to call when checking for bash command completions - BashComplete func(context *Context) - // An action to execute before any sub-subcommands are run, but after the context is ready - // If a non-nil error is returned, no sub-subcommands are run - Before func(context *Context) error - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After func(context *Context) error - // The function to call when this command is invoked - Action func(context *Context) - // List of child commands - Subcommands []Command - // List of flags to parse - Flags []Flag - // Treat all flags as normal arguments if true - SkipFlagParsing bool - // Boolean to hide built-in help command - HideHelp bool -} - -// Invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) error { - - if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil { - return c.startApp(ctx) - } - - if !c.HideHelp && (HelpFlag != BoolFlag{}) { - // append help to flags - c.Flags = append( - c.Flags, - HelpFlag, - ) - } - - if ctx.App.EnableBashCompletion { - c.Flags = append(c.Flags, BashCompletionFlag) - } - - set := flagSet(c.Name, c.Flags) - set.SetOutput(ioutil.Discard) - - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - - var err error - if firstFlagIndex > -1 && !c.SkipFlagParsing { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) 
- } else { - flagArgs = args[firstFlagIndex:] - } - - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - - if err != nil { - fmt.Fprint(ctx.App.Writer, "Incorrect Usage.\n\n") - ShowCommandHelp(ctx, c.Name) - fmt.Fprintln(ctx.App.Writer) - return err - } - - nerr := normalizeFlags(c.Flags, set) - if nerr != nil { - fmt.Fprintln(ctx.App.Writer, nerr) - fmt.Fprintln(ctx.App.Writer) - ShowCommandHelp(ctx, c.Name) - fmt.Fprintln(ctx.App.Writer) - return nerr - } - context := NewContext(ctx.App, set, ctx.globalSet) - - if checkCommandCompletions(context, c.Name) { - return nil - } - - if checkCommandHelp(context, c.Name) { - return nil - } - context.Command = c - c.Action(context) - return nil -} - -func (c Command) Names() []string { - names := []string{c.Name} - - if c.ShortName != "" { - names = append(names, c.ShortName) - } - - return append(names, c.Aliases...) -} - -// Returns true if Command.Name or Command.ShortName matches given name -func (c Command) HasName(name string) bool { - for _, n := range c.Names() { - if n == name { - return true - } - } - return false -} - -func (c Command) startApp(ctx *Context) error { - app := NewApp() - - // set the name and usage - app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - if c.Description != "" { - app.Usage = c.Description - } else { - app.Usage = c.Usage - } - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - - // set the flags and commands - app.Commands = c.Subcommands - app.Flags = c.Flags - app.HideHelp = c.HideHelp - - app.Version = ctx.App.Version - app.HideVersion = ctx.App.HideVersion - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if c.BashComplete != nil { - app.BashComplete = c.BashComplete - } - - // set the actions - app.Before = c.Before - app.After = c.After - if c.Action != nil { - app.Action = c.Action - } else { - app.Action = helpSubcommand.Action - } - - return app.RunAsSubcommand(ctx) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go deleted file mode 100644 index 4125b0c1b..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package cli_test - -import ( - "flag" - "testing" - - "github.com/codegangsta/cli" -) - -func TestCommandDoNotIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah", "-break"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, - } - err := command.Run(c) - - expect(t, err.Error(), "flag provided but not defined: -break") -} - -func TestCommandIgnoreFlags(t *testing.T) { - app := cli.NewApp() - set := flag.NewFlagSet("test", 0) - test := []string{"blah", "blah"} - set.Parse(test) - - c := cli.NewContext(app, set, set) - - command := cli.Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(_ *cli.Context) {}, - SkipFlagParsing: true, - } - err := command.Run(c) - - expect(t, err, nil) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go 
b/Godeps/_workspace/src/github.com/codegangsta/cli/context.go deleted file mode 100644 index 37221bdc2..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/context.go +++ /dev/null @@ -1,344 +0,0 @@ -package cli - -import ( - "errors" - "flag" - "strconv" - "strings" - "time" -) - -// Context is a type that is passed through to -// each Handler action in a cli application. Context -// can be used to retrieve context-specific Args and -// parsed command-line options. -type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - globalSet *flag.FlagSet - setFlags map[string]bool - globalSetFlags map[string]bool -} - -// Creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { - return &Context{App: app, flagSet: set, globalSet: globalSet} -} - -// Looks up the value of a local int flag, returns 0 if no int flag exists -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// Looks up the value of a local bool flag, returns false if no bool flag exists -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// Looks up the value of a local boolT flag, returns false if no bool flag exists -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// Looks up the value of a local string flag, returns "" if no string flag exists -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// Looks up the value of a local string slice flag, returns nil if no string slice flag exists -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// Looks up the value of a local int slice flag, returns nil if no int slice flag exists -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// Looks up the value of a local generic flag, returns nil if no generic flag exists -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// Looks up the value of a global int flag, returns 0 if no int flag exists -func (c *Context) GlobalInt(name string) int { - return lookupInt(name, c.globalSet) -} - -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) GlobalDuration(name string) time.Duration { - return lookupDuration(name, c.globalSet) -} - -// Looks up the value of a global bool flag, returns false if no bool flag exists -func (c *Context) GlobalBool(name string) bool { - return lookupBool(name, c.globalSet) -} - -// Looks up the value of a global string flag, returns "" if no string flag exists -func (c *Context) GlobalString(name string) string { - return lookupString(name, c.globalSet) -} - -// Looks up the value of a global string slice flag, returns nil if no string slice flag exists -func (c *Context) GlobalStringSlice(name string) []string { - return lookupStringSlice(name, c.globalSet) -} - -// Looks up the value of a global int slice flag, returns nil if no int slice flag exists -func (c 
*Context) GlobalIntSlice(name string) []int { - return lookupIntSlice(name, c.globalSet) -} - -// Looks up the value of a global generic flag, returns nil if no generic flag exists -func (c *Context) GlobalGeneric(name string) interface{} { - return lookupGeneric(name, c.globalSet) -} - -// Returns the number of flags set -func (c *Context) NumFlags() int { - return c.flagSet.NFlag() -} - -// Determines if the flag was actually set -func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - c.flagSet.Visit(func(f *flag.Flag) { - c.setFlags[f.Name] = true - }) - } - return c.setFlags[name] == true -} - -// Determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - if c.globalSetFlags == nil { - c.globalSetFlags = make(map[string]bool) - c.globalSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) - } - return c.globalSetFlags[name] == true -} - -// Returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// Returns a slice of global flag names used by the app. -func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -type Args []string - -// Returns the command line arguments associated with the context. -func (c *Context) Args() Args { - args := Args(c.flagSet.Args()) - return args -} - -// Returns the nth argument, or else a blank string -func (a Args) Get(n int) string { - if len(a) > n { - return a[n] - } - return "" -} - -// Returns the first argument, or else a blank string -func (a Args) First() string { - return a.Get(0) -} - -// Return the rest of the arguments (not the first one) -// or else an empty string slice -func (a Args) Tail() []string { - if len(a) >= 2 { - return []string(a)[1:] - } - return []string{} -} - -// Checks if there are any arguments present -func (a Args) Present() bool { - return len(a) != 0 -} - -// Swaps arguments at the given indexes -func (a Args) Swap(from, to int) error { - if from >= len(a) || to >= len(a) { - return errors.New("index out of range") - } - a[from], a[to] = a[to], a[from] - return nil -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - val, err := strconv.Atoi(f.Value.String()) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - val, err := time.ParseDuration(f.Value.String()) - if err == nil { - return val - } - } - - return 0 -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return val - } - - return 0 -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - return f.Value.String() - } - - return "" -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - return (f.Value.(*StringSlice)).Value() - - } - - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - 
if f != nil { - return (f.Value.(*IntSlice)).Value() - - } - - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - return f.Value - } - return nil -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return val - } - - return false -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - val, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return true - } - return val - } - - return false -} - -func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { - switch ff.Value.(type) { - case *StringSlice: - default: - set.Set(name, ff.Value.String()) - } -} - -func normalizeFlags(flags []Flag, set *flag.FlagSet) error { - visited := make(map[string]bool) - set.Visit(func(f *flag.Flag) { - visited[f.Name] = true - }) - for _, f := range flags { - parts := strings.Split(f.getName(), ",") - if len(parts) == 1 { - continue - } - var ff *flag.Flag - for _, name := range parts { - name = strings.Trim(name, " ") - if visited[name] { - if ff != nil { - return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) - } - ff = set.Lookup(name) - } - } - if ff == nil { - continue - } - for _, name := range parts { - name = strings.Trim(name, " ") - if !visited[name] { - copyFlag(name, ff, set) - } - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go deleted file mode 100644 index d4a1877f0..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package cli_test - -import ( - "flag" - "testing" - "time" - - "github.com/codegangsta/cli" -) - -func TestNewContext(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Int("myflag", 42, "doc") - command := cli.Command{Name: "mycommand"} - c := cli.NewContext(nil, set, globalSet) - c.Command = command - expect(t, c.Int("myflag"), 12) - expect(t, c.GlobalInt("myflag"), 42) - expect(t, c.Command.Name, "mycommand") -} - -func TestContext_Int(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Int("myflag", 12, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Int("myflag"), 12) -} - -func TestContext_Duration(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) -} - -func TestContext_String(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.String("myflag", "hello world", "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.String("myflag"), "hello world") -} - -func TestContext_Bool(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.Bool("myflag"), false) -} - -func TestContext_BoolT(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", true, "doc") - c := cli.NewContext(nil, set, set) - expect(t, c.BoolT("myflag"), true) -} - -func TestContext_Args(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - c := cli.NewContext(nil, set, set) - set.Parse([]string{"--myflag", "bat", "baz"}) - expect(t, 
len(c.Args()), 2) - expect(t, c.Bool("myflag"), true) -} - -func TestContext_IsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.IsSet("myflag"), true) - expect(t, c.IsSet("otherflag"), false) - expect(t, c.IsSet("bogusflag"), false) - expect(t, c.IsSet("myflagGlobal"), false) -} - -func TestContext_GlobalIsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalSet.Bool("myflagGlobalUnset", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.GlobalIsSet("myflag"), false) - expect(t, c.GlobalIsSet("otherflag"), false) - expect(t, c.GlobalIsSet("bogusflag"), false) - expect(t, c.GlobalIsSet("myflagGlobal"), true) - expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) - expect(t, c.GlobalIsSet("bogusGlobal"), false) -} - -func TestContext_NumFlags(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - c := cli.NewContext(nil, set, globalSet) - set.Parse([]string{"--myflag", "--otherflag=foo"}) - globalSet.Parse([]string{"--myflagGlobal"}) - expect(t, c.NumFlags(), 2) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go b/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go deleted file mode 100644 index 251158667..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/flag.go +++ /dev/null @@ -1,454 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "os" - "strconv" - "strings" - "time" -) - -// This flag enables bash-completion for all commands and subcommands -var BashCompletionFlag = BoolFlag{ - Name: "generate-bash-completion", -} - -// This flag prints the version for the application -var VersionFlag = BoolFlag{ - Name: "version, v", - Usage: "print the version", -} - -// This flag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) -var HelpFlag = BoolFlag{ - Name: "help, h", - Usage: "show help", -} - -// Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recomended that -// this interface be implemented. 
-type Flag interface { - fmt.Stringer - // Apply Flag settings to the given flag set - Apply(*flag.FlagSet) - getName() string -} - -func flagSet(name string, flags []Flag) *flag.FlagSet { - set := flag.NewFlagSet(name, flag.ContinueOnError) - - for _, f := range flags { - f.Apply(set) - } - return set -} - -func eachName(longName string, fn func(string)) { - parts := strings.Split(longName, ",") - for _, name := range parts { - name = strings.Trim(name, " ") - fn(name) - } -} - -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// GenericFlag is the flag type for types implementing Generic -type GenericFlag struct { - Name string - Value Generic - Usage string - EnvVar string -} - -// String returns the string representation of the generic flag to display the -// help text to the user (uses the String() method of the generic flag to show -// the value) -func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage)) -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) Apply(set *flag.FlagSet) { - val := f.Value - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f GenericFlag) getName() string { - return f.Name -} - -type StringSlice []string - -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *StringSlice) String() string { - return fmt.Sprintf("%s", *f) -} - -func (f *StringSlice) Value() []string { - return *f -} - -type StringSliceFlag struct { - Name string - Value *StringSlice - Usage string - EnvVar string -} - -func (f StringSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - newVal.Set(s) - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f StringSliceFlag) getName() string { - return f.Name -} - -type IntSlice []int - -func (f *IntSlice) Set(value string) error { - - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } else { - *f = append(*f, tmp) - } - return nil -} - -func (f *IntSlice) String() string { - return fmt.Sprintf("%d", *f) -} - -func (f *IntSlice) Value() []int { - return *f -} - -type IntSliceFlag struct { - Name string - Value *IntSlice - Usage string - EnvVar string -} - -func (f IntSliceFlag) String() string { - firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") - pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) -} - -func (f IntSliceFlag) 
Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - } - } - f.Value = newVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) -} - -func (f IntSliceFlag) getName() string { - return f.Name -} - -type BoolFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolFlag) Apply(set *flag.FlagSet) { - val := false - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - } - break - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolFlag) getName() string { - return f.Name -} - -type BoolTFlag struct { - Name string - Usage string - EnvVar string -} - -func (f BoolTFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) -} - -func (f BoolTFlag) Apply(set *flag.FlagSet) { - val := true - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Bool(name, val, f.Usage) - }) -} - -func (f BoolTFlag) getName() string { - return f.Name -} - -type StringFlag struct { - Name string - Value string - Usage string - EnvVar string -} - -func (f StringFlag) String() string { - var fmtString string - fmtString = "%s %v\t%v" - - if len(f.Value) > 0 { - fmtString = "%s \"%v\"\t%v" - } else { - fmtString = "%s %v\t%v" - } - - return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f StringFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - f.Value = envVal - break - } - } - } - - eachName(f.Name, func(name string) { - set.String(name, f.Value, f.Usage) - }) -} - -func (f StringFlag) getName() string { - return f.Name -} - -type IntFlag struct { - Name string - Value int - Usage string - EnvVar string -} - -func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f IntFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Int(name, f.Value, f.Usage) - }) -} - -func (f IntFlag) getName() string { - return f.Name -} - -type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string -} - -func (f DurationFlag) String() string { - return 
withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f DurationFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = envValDuration - break - } - } - } - } - - eachName(f.Name, func(name string) { - set.Duration(name, f.Value, f.Usage) - }) -} - -func (f DurationFlag) getName() string { - return f.Name -} - -type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string -} - -func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -func (f Float64Flag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) - } - } - } - } - - eachName(f.Name, func(name string) { - set.Float64(name, f.Value, f.Usage) - }) -} - -func (f Float64Flag) getName() string { - return f.Name -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} - -func prefixedNames(fullName string) (prefixed string) { - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += prefixFor(name) + name - if i < len(parts)-1 { - prefixed += ", " - } - } - return -} - -func withEnvHint(envVar, str string) string { - envText := "" - if envVar != "" { - envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $")) - } - return str + envText -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go deleted file mode 100644 index f0f096a2d..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go +++ /dev/null @@ -1,742 +0,0 @@ -package cli_test - -import ( - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/codegangsta/cli" -) - -var boolFlagTests = []struct { - name string - expected string -}{ - {"help", "--help\t"}, - {"h", "-h\t"}, -} - -func TestBoolFlagHelpOutput(t *testing.T) { - - for _, test := range boolFlagTests { - flag := cli.BoolFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -var stringFlagTests = []struct { - name string - value string - expected string -}{ - {"help", "", "--help \t"}, - {"h", "", "-h \t"}, - {"h", "", "-h \t"}, - {"test", "Something", "--test \"Something\"\t"}, -} - -func TestStringFlagHelpOutput(t *testing.T) { - - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "derp") - for _, test := range stringFlagTests { - flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_FOO]") { - t.Errorf("%s does not end with [$APP_FOO]", output) - } - } -} - -var 
stringSliceFlagTests = []struct { - name string - value *cli.StringSlice - expected string -}{ - {"help", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "--help [--help option --help option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"h", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("") - return s - }(), "-h [-h option -h option]\t"}, - {"test", func() *cli.StringSlice { - s := &cli.StringSlice{} - s.Set("Something") - return s - }(), "--test [--test option --test option]\t"}, -} - -func TestStringSliceFlagHelpOutput(t *testing.T) { - - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_QWWX", "11,4") - for _, test := range stringSliceFlagTests { - flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_QWWX]") { - t.Errorf("%q does not end with [$APP_QWWX]", output) - } - } -} - -var intFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestIntFlagHelpOutput(t *testing.T) { - - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2") - for _, test := range intFlagTests { - flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var durationFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestDurationFlagHelpOutput(t *testing.T) { - - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2h3m6s") - for _, test := range durationFlagTests { - flag := cli.DurationFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var intSliceFlagTests = []struct { - name string - value *cli.IntSlice - expected string -}{ - {"help", &cli.IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &cli.IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() *cli.IntSlice { - i := &cli.IntSlice{} - i.Set("9") - return i - }(), "--test [--test option --test option]\t"}, -} - -func TestIntSliceFlagHelpOutput(t *testing.T) { - - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() 
- os.Setenv("APP_SMURF", "42,3") - for _, test := range intSliceFlagTests { - flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_SMURF]") { - t.Errorf("%q does not end with [$APP_SMURF]", output) - } - } -} - -var float64FlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestFloat64FlagHelpOutput(t *testing.T) { - - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAZ", "99.4") - for _, test := range float64FlagTests { - flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAZ]") { - t.Errorf("%s does not end with [$APP_BAZ]", output) - } - } -} - -var genericFlagTests = []struct { - name string - value cli.Generic - expected string -}{ - {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, - {"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"}, -} - -func TestGenericFlagHelpOutput(t *testing.T) { - - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} - output := flag.String() - - if output != test.expected { - t.Errorf("%q does not match %q", output, test.expected) - } - } -} - -func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_ZAP", "3") - for _, test := range genericFlagTests { - flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_ZAP]") { - t.Errorf("%s does not end with [$APP_ZAP]", output) - } - } -} - -func TestParseMultiString(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("serve") != "10" { - t.Errorf("main name not set") - } - if ctx.String("s") != "10" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiStringFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&cli.App{ - Flags: []cli.Flag{ - cli.StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, - }, - Action: func(ctx *cli.Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", 
"20"}) -} - -func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiInt(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("serve") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("s") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10"}) -} - -func TestParseMultiIntFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := cli.App{ - Flags: []cli.Flag{ - cli.IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntSlice(t *testing.T) { - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run", "-s", "10", "-s", "20"}) -} - -func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&cli.App{ - Flags: []cli.Flag{ - cli.IntSliceFlag{Name: "intervals, i", Value: 
&cli.IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiFloat64(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("serve") != 10.2 { - t.Errorf("main name not set") - } - if ctx.Float64("s") != 10.2 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10.2"}) -} - -func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiFloat64FromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := cli.App{ - Flags: []cli.Flag{ - cli.Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBool(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("serve") != true { - t.Errorf("main name not set") - } - if ctx.Bool("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolT(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "serve, s"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("serve") != true { - t.Errorf("main name not set") - } - if ctx.BoolT("s") != true { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "--serve"}) -} - -func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - 
a.Run([]string{"run"}) -} - -func TestParseMultiBoolTFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := cli.App{ - Flags: []cli.Flag{ - cli.BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *cli.Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -type Parser [2]string - -func (p *Parser) Set(value string) error { - parts := strings.Split(value, ",") - if len(parts) != 2 { - return fmt.Errorf("invalid format") - } - - (*p)[0] = parts[0] - (*p)[1] = parts[1] - - return nil -} - -func (p *Parser) String() string { - return fmt.Sprintf("%s,%s", p[0], p[1]) -} - -func TestParseGeneric(t *testing.T) { - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { - t.Errorf("main name not set") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run", "-s", "10,20"}) -} - -func TestParseGenericFromEnv(t *testing.T) { - os.Clearenv() - os.Setenv("APP_SERVE", "20,30") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseGenericFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "99,2000") - a := cli.App{ - Flags: []cli.Flag{ - cli.GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, - }, - Action: func(ctx *cli.Context) { - if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { - t.Errorf("value not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go b/Godeps/_workspace/src/github.com/codegangsta/cli/help.go deleted file mode 100644 index 1117945f0..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/help.go +++ /dev/null @@ -1,235 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "strings" - "text/tabwriter" - "text/template" -) - -// The text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...] - -VERSION: - {{.Version}}{{if len .Authors}} - -AUTHOR(S): - {{range .Authors}}{{ . }}{{end}}{{end}} - -COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -GLOBAL OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -// The text template for the command help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. 
-var CommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} - -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{ end }} -` - -// The text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} - -USAGE: - {{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...] - -COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} -OPTIONS: - {{range .Flags}}{{.}} - {{end}}{{end}} -` - -var helpCommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowAppHelp(c) - } - }, -} - -var helpSubcommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - Action: func(c *Context) { - args := c.Args() - if args.Present() { - ShowCommandHelp(c, args.First()) - } else { - ShowSubcommandHelp(c) - } - }, -} - -// Prints help for the App or Command -type helpPrinter func(w io.Writer, templ string, data interface{}) - -var HelpPrinter helpPrinter = printHelp - -// Prints version for the App -var VersionPrinter = printVersion - -func ShowAppHelp(c *Context) { - HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) -} - -// Prints the list of subcommands as the default app completion method -func DefaultAppComplete(c *Context) { - for _, command := range c.App.Commands { - for _, name := range command.Names() { - fmt.Fprintln(c.App.Writer, name) - } - } -} - -// Prints help for the given command -func ShowCommandHelp(ctx *Context, command string) { - // show the subcommand help for a command with subcommands - if command == "" { - HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return - } - - for _, c := range ctx.App.Commands { - if c.HasName(command) { - HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) - return - } - } - - if ctx.App.CommandNotFound != nil { - ctx.App.CommandNotFound(ctx, command) - } else { - fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) - } -} - -// Prints help for the given subcommand -func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) -} - -// Prints the version number of the App -func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) -} - -// Prints the lists of commands within a given context -func ShowCompletions(c *Context) { - a := c.App - if a != nil && a.BashComplete != nil { - a.BashComplete(c) - } -} - -// Prints the custom completions for a given command -func ShowCommandCompletions(ctx *Context, command string) { - c := ctx.App.Command(command) - if c != nil && c.BashComplete != nil { - c.BashComplete(ctx) - } -} - -func printHelp(out io.Writer, templ string, data interface{}) { - funcMap := template.FuncMap{ - "join": strings.Join, - } - - w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) - t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) - err := t.Execute(w, data) - if err != nil { - panic(err) - } - w.Flush() -} - -func checkVersion(c *Context) bool { - if c.GlobalBool("version") { - ShowVersion(c) - 
return true - } - - return false -} - -func checkHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowAppHelp(c) - return true - } - - return false -} - -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(c *Context) bool { - if c.GlobalBool("h") || c.GlobalBool("help") { - ShowSubcommandHelp(c) - return true - } - - return false -} - -func checkCompletions(c *Context) bool { - if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { - ShowCompletions(c) - return true - } - - return false -} - -func checkCommandCompletions(c *Context, name string) bool { - if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { - ShowCommandCompletions(c, name) - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go deleted file mode 100644 index b3c1fda69..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/help_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cli_test - -import ( - "bytes" - "testing" - - "github.com/codegangsta/cli" -) - -func Test_ShowAppHelp_NoAuthor(t *testing.T) { - output := new(bytes.Buffer) - app := cli.NewApp() - app.Writer = output - - c := cli.NewContext(app, nil, nil) - - cli.ShowAppHelp(c) - - if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { - t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") - } -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go b/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go deleted file mode 100644 index cdc4feb2f..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package cli_test - -import ( - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore b/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml deleted file mode 100644 index 8687342e9..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS b/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index b003eca0c..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# 
This is the official list of Gorilla WebSocket authors for copyright -# purposes. -# -# Please keep the list sorted. - -Gary Burd -Joachim Bauch - diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE b/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index 9171c9722..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/README.md b/Godeps/_workspace/src/github.com/gorilla/websocket/README.md deleted file mode 100644 index 9ad75a0f5..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# Gorilla WebSocket - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -### Documentation - -* [API Reference](http://godoc.org/github.com/gorilla/websocket) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - -### Gorilla WebSocket compared with other packages - - - - - - - - - - - - - - - - - - -
-| | github.com/gorilla | golang.org/x/net |
-| --- | --- | --- |
-| **RFC 6455 Features** | | |
-| Passes Autobahn Test Suite | Yes | No |
-| Receive fragmented message | Yes | No, see note 1 |
-| Send close message | Yes | No |
-| Send pings and receive pongs | Yes | No |
-| Get the type of a received data message | Yes | Yes, see note 2 |
-| **Other Features** | | |
-| Limit size of received message | Yes | No |
-| Read message using io.Reader | Yes | No, see note 3 |
-| Write message using io.WriteCloser | Yes | No, see note 3 |
- -Notes: - -1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). -2. The application can get the type of a received data message by implementing - a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) - function. -3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. - Read returns when the input buffer is full or a frame boundary is - encountered. Each call to Write sends a single frame message. The Gorilla - io.Reader and io.WriteCloser operate on a single WebSocket message. - diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go deleted file mode 100644 index f66fc36bc..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/bench_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "testing" -) - -func BenchmarkMaskBytes(b *testing.B) { - var key [4]byte - data := make([]byte, 1024) - pos := 0 - for i := 0; i < b.N; i++ { - pos = maskBytes(key, pos, data) - } - b.SetBytes(int64(len(data))) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client.go deleted file mode 100644 index c25d24f80..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/tls" - "errors" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - acceptKey := computeAcceptKey(challengeKey) - - c = newConn(netConn, false, readBufSize, writeBufSize) - p := c.writeBuf[:0] - p = append(p, "GET "...) - p = append(p, u.RequestURI()...) - p = append(p, " HTTP/1.1\r\nHost: "...) - p = append(p, u.Host...) - // "Upgrade" is capitalized for servers that do not use case insensitive - // comparisons on header tokens. - p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) - p = append(p, challengeKey...) - p = append(p, "\r\n"...) - for k, vs := range requestHeader { - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - p = append(p, v...) 
- p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - if _, err := netConn.Write(p); err != nil { - return nil, nil, err - } - - resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != acceptKey { - return nil, resp, ErrBadHandshake - } - c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - return c, resp, nil -} - -// A Dialer contains options for connecting to WebSocket server. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // Input and output buffer sizes. If the buffer size is zero, then a - // default value of 4096 is used. - ReadBufferSize, WriteBufferSize int - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -// parseURL parses the URL. The url.Parse function is not used here because -// url.Parse mangles the path. -func parseURL(s string) (*url.URL, error) { - // From the RFC: - // - // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] - // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - // - // We don't use the net/url parser here because the dialer interface does - // not provide a way for applications to work around percent deocding in - // the net/url parser. - - var u url.URL - switch { - case strings.HasPrefix(s, "ws://"): - u.Scheme = "ws" - s = s[len("ws://"):] - case strings.HasPrefix(s, "wss://"): - u.Scheme = "wss" - s = s[len("wss://"):] - default: - return nil, errMalformedURL - } - - u.Host = s - u.Opaque = "/" - if i := strings.Index(s, "/"); i >= 0 { - u.Host = s[:i] - u.Opaque = s[i:] - } - - return &u, nil -} - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - if u.Scheme == "wss" { - hostPort += ":443" - } else { - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default zero values. -var DefaultDialer *Dialer - -// Dial creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). -// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. 
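For reference, a minimal usage sketch of the Dialer documented above. This sketch is not part of the removed file; the URL, Origin header, and timeout are placeholder values, and it assumes the package is imported as github.com/gorilla/websocket:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := &websocket.Dialer{HandshakeTimeout: 5 * time.Second}

	// Dial returns the connection plus the handshake *http.Response.
	conn, resp, err := d.Dial("ws://example.com/echo", http.Header{"Origin": {"http://example.com"}})
	if err != nil {
		// On a failed handshake, resp may still carry the server's status code.
		if err == websocket.ErrBadHandshake && resp != nil {
			log.Fatalf("handshake rejected with status %d", resp.StatusCode)
		}
		log.Fatal(err)
	}
	defer conn.Close()

	// WriteMessage and ReadMessage are the helpers defined in conn.go below.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal(err)
	}
	_, reply, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received: %s", reply)
}
```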
-func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - u, err := parseURL(urlStr) - if err != nil { - return nil, nil, err - } - - hostPort, hostNoPort := hostPortNoPort(u) - - if d == nil { - d = &Dialer{} - } - - var deadline time.Time - if d.HandshakeTimeout != 0 { - deadline = time.Now().Add(d.HandshakeTimeout) - } - - netDial := d.NetDial - if netDial == nil { - netDialer := &net.Dialer{Deadline: deadline} - netDial = netDialer.Dial - } - - netConn, err := netDial("tcp", hostPort) - if err != nil { - return nil, nil, err - } - - defer func() { - if netConn != nil { - netConn.Close() - } - }() - - if err := netConn.SetDeadline(deadline); err != nil { - return nil, nil, err - } - - if u.Scheme == "wss" { - cfg := d.TLSClientConfig - if cfg == nil { - cfg = &tls.Config{ServerName: hostNoPort} - } else if cfg.ServerName == "" { - shallowCopy := *cfg - cfg = &shallowCopy - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - if err := tlsConn.Handshake(); err != nil { - return nil, nil, err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return nil, nil, err - } - } - } - - if len(d.Subprotocols) > 0 { - h := http.Header{} - for k, v := range requestHeader { - h[k] = v - } - h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) - requestHeader = h - } - - conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) - if err != nil { - return nil, resp, err - } - - netConn.SetDeadline(time.Time{}) - netConn = nil // to avoid close in defer. - return conn, resp, nil -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go deleted file mode 100644 index 8c608f68c..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/client_server_test.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "crypto/tls" - "crypto/x509" - "io" - "net" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - "time" -) - -var cstUpgrader = Upgrader{ - Subprotocols: []string{"p0", "p1"}, - ReadBufferSize: 1024, - WriteBufferSize: 1024, - Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) { - http.Error(w, reason.Error(), status) - }, -} - -var cstDialer = Dialer{ - Subprotocols: []string{"p1", "p2"}, - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -type cstHandler struct{ *testing.T } - -type Server struct { - *httptest.Server - URL string -} - -func newServer(t *testing.T) *Server { - var s Server - s.Server = httptest.NewServer(cstHandler{t}) - s.URL = "ws" + s.Server.URL[len("http"):] - return &s -} - -func newTLSServer(t *testing.T) *Server { - var s Server - s.Server = httptest.NewTLSServer(cstHandler{t}) - s.URL = "ws" + s.Server.URL[len("http"):] - return &s -} - -func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - t.Logf("method %s not allowed", r.Method) - http.Error(w, "method not allowed", 405) - return - } - subprotos := Subprotocols(r) - if !reflect.DeepEqual(subprotos, cstDialer.Subprotocols) { - t.Logf("subprotols=%v, want %v", subprotos, cstDialer.Subprotocols) - http.Error(w, "bad protocol", 400) - return - } - ws, err := cstUpgrader.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}}) - if err != nil { - t.Logf("Upgrade: %v", err) - return - } - defer ws.Close() - - if ws.Subprotocol() != "p1" { - t.Logf("Subprotocol() = %s, want p1", ws.Subprotocol()) - ws.Close() - return - } - op, rd, err := ws.NextReader() - if err != nil { - t.Logf("NextReader: %v", err) - return - } - wr, err := ws.NextWriter(op) - if err != nil { - t.Logf("NextWriter: %v", err) - return - } - if _, err = io.Copy(wr, rd); err != nil { - t.Logf("NextWriter: %v", err) - return - } - if err := wr.Close(); err != nil { - t.Logf("Close: %v", err) - return - } -} - -func sendRecv(t *testing.T, ws *Conn) { - const message = "Hello World!" 
- if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil { - t.Fatalf("SetWriteDeadline: %v", err) - } - if err := ws.WriteMessage(TextMessage, []byte(message)); err != nil { - t.Fatalf("WriteMessage: %v", err) - } - if err := ws.SetReadDeadline(time.Now().Add(time.Second)); err != nil { - t.Fatalf("SetReadDeadline: %v", err) - } - _, p, err := ws.ReadMessage() - if err != nil { - t.Fatalf("ReadMessage: %v", err) - } - if string(p) != message { - t.Fatalf("message=%s, want %s", p, message) - } -} - -func TestDial(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func TestDialTLS(t *testing.T) { - s := newTLSServer(t) - defer s.Close() - - certs := x509.NewCertPool() - for _, c := range s.TLS.Certificates { - roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) - if err != nil { - t.Fatalf("error parsing server's root cert: %v", err) - } - for _, root := range roots { - certs.AddCert(root) - } - } - - u, _ := url.Parse(s.URL) - d := cstDialer - d.NetDial = func(network, addr string) (net.Conn, error) { return net.Dial(network, u.Host) } - d.TLSClientConfig = &tls.Config{RootCAs: certs} - ws, _, err := d.Dial("wss://example.com/", nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func xTestDialTLSBadCert(t *testing.T) { - s := newTLSServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func xTestDialTLSNoVerify(t *testing.T) { - s := newTLSServer(t) - defer s.Close() - - d := cstDialer - d.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - ws, _, err := d.Dial(s.URL, nil) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - sendRecv(t, ws) -} - -func TestDialTimeout(t *testing.T) { - s := newServer(t) - defer s.Close() - - d := cstDialer - d.HandshakeTimeout = -1 - ws, _, err := d.Dial(s.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func TestDialBadScheme(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, _, err := cstDialer.Dial(s.Server.URL, nil) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } -} - -func TestDialBadOrigin(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}}) - if err == nil { - ws.Close() - t.Fatalf("Dial: nil") - } - if resp == nil { - t.Fatalf("resp=nil, err=%v", err) - } - if resp.StatusCode != http.StatusForbidden { - t.Fatalf("status=%d, want %d", resp.StatusCode, http.StatusForbidden) - } -} - -func TestHandshake(t *testing.T) { - s := newServer(t) - defer s.Close() - - ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {s.URL}}) - if err != nil { - t.Fatalf("Dial: %v", err) - } - defer ws.Close() - - var sessionID string - for _, c := range resp.Cookies() { - if c.Name == "sessionID" { - sessionID = c.Value - } - } - if sessionID != "1234" { - t.Error("Set-Cookie not received from the server.") - } - - if ws.Subprotocol() != "p1" { - t.Errorf("ws.Subprotocol() = %s, want p1", ws.Subprotocol()) - } - sendRecv(t, ws) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go deleted file mode 100644 index d2f2ebd79..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/client_test.go +++ /dev/null @@ 
-1,63 +0,0 @@ -// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "net/url" - "reflect" - "testing" -) - -var parseURLTests = []struct { - s string - u *url.URL -}{ - {"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, - {"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, - {"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}}, - {"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}}, - {"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}}, - {"ss://example.com/a/b", nil}, -} - -func TestParseURL(t *testing.T) { - for _, tt := range parseURLTests { - u, err := parseURL(tt.s) - if tt.u != nil && err != nil { - t.Errorf("parseURL(%q) returned error %v", tt.s, err) - continue - } - if tt.u == nil && err == nil { - t.Errorf("parseURL(%q) did not return error", tt.s) - continue - } - if !reflect.DeepEqual(u, tt.u) { - t.Errorf("parseURL(%q) returned %v, want %v", tt.s, u, tt.u) - continue - } - } -} - -var hostPortNoPortTests = []struct { - u *url.URL - hostPort, hostNoPort string -}{ - {&url.URL{Scheme: "ws", Host: "example.com"}, "example.com:80", "example.com"}, - {&url.URL{Scheme: "wss", Host: "example.com"}, "example.com:443", "example.com"}, - {&url.URL{Scheme: "ws", Host: "example.com:7777"}, "example.com:7777", "example.com"}, - {&url.URL{Scheme: "wss", Host: "example.com:7777"}, "example.com:7777", "example.com"}, -} - -func TestHostPortNoPort(t *testing.T) { - for _, tt := range hostPortNoPortTests { - hostPort, hostNoPort := hostPortNoPort(tt.u) - if hostPort != tt.hostPort { - t.Errorf("hostPortNoPort(%v) returned hostPort %q, want %q", tt.u, hostPort, tt.hostPort) - } - if hostNoPort != tt.hostNoPort { - t.Errorf("hostPortNoPort(%v) returned hostNoPort %q, want %q", tt.u, hostNoPort, tt.hostNoPort) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go b/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 86c35e5fc..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "strconv" - "time" -) - -const ( - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - finalBit = 1 << 7 - maskBit = 1 << 7 - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. 
The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// closeError represents close frame. -type closeError struct { - code int - text string -} - -func (e *closeError) Error() string { - return "websocket: close " + strconv.Itoa(e.code) + " " + e.text -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true} - errUnexpectedEOF = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -// Conn represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan bool // used as mutex to protect write to conn and closeSent - closeSent bool // true if close message was sent - - // Message writer fields. - writeErr error - writeBuf []byte // frame is constructed in this buffer. - writePos int // end of data in writeBuf. - writeFrameType int // type of the current frame. - writeSeq int // incremented to invalidate message writers. - writeDeadline time.Time - - // Read fields - readErr error - br *bufio.Reader - readRemaining int64 // bytes remaining in current frame. - readFinal bool // true the current message has more frames. - readSeq int // incremented to invalidate message readers. - readLength int64 // Message size. - readLimit int64 // Maximum message size. 
- readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { - mu := make(chan bool, 1) - mu <- true - - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize - } - - c := &Conn{ - isServer: isServer, - br: bufio.NewReaderSize(conn, readBufferSize), - conn: conn, - mu: mu, - readFinal: true, - writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), - writeFrameType: noFrame, - writePos: maxFrameHeaderSize, - } - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting for a close frame. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { - <-c.mu - defer func() { c.mu <- true }() - - if c.closeSent { - return ErrCloseSent - } else if frameType == CloseMessage { - c.closeSent = true - } - - c.conn.SetWriteDeadline(deadline) - for _, buf := range bufs { - if len(buf) > 0 { - n, err := c.conn.Write(buf) - if n != len(buf) { - // Close on partial write. - c.conn.Close() - } - if err != nil { - return err - } - } - } - return nil -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := time.Hour * 1000 - if !deadline.IsZero() { - d = deadline.Sub(time.Now()) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- true }() - - if c.closeSent { - return ErrCloseSent - } else if messageType == CloseMessage { - c.closeSent = true - } - - c.conn.SetWriteDeadline(deadline) - n, err := c.conn.Write(buf) - if n != 0 && n != len(buf) { - c.conn.Close() - } - return err -} - -// NextWriter returns a writer for the next message to send. The writer's -// Close method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// The NextWriter method and the writers returned from the method cannot be -// accessed by more than one goroutine at a time. 
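A short illustrative sketch of the streaming pattern this comment describes (not part of the removed file; it assumes the standard github.com/gorilla/websocket import path and a caller that already holds a *websocket.Conn):

```go
package example

import (
	"io"
	"io/ioutil"

	"github.com/gorilla/websocket"
)

// stream writes one binary message from payload and then drains one reply
// message, whatever its frame layout on the wire.
func stream(conn *websocket.Conn, payload io.Reader) error {
	w, err := conn.NextWriter(websocket.BinaryMessage)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, payload); err != nil {
		return err
	}
	// Close flushes the complete message to the network.
	if err := w.Close(); err != nil {
		return err
	}

	_, r, err := conn.NextReader()
	if err != nil {
		return err
	}
	_, err = io.Copy(ioutil.Discard, r)
	return err
}
```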
-func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if c.writeErr != nil { - return nil, c.writeErr - } - - if c.writeFrameType != noFrame { - if err := c.flushFrame(true, nil); err != nil { - return nil, err - } - } - - if !isControl(messageType) && !isData(messageType) { - return nil, errBadWriteOpCode - } - - c.writeFrameType = messageType - return messageWriter{c, c.writeSeq}, nil -} - -func (c *Conn) flushFrame(final bool, extra []byte) error { - length := c.writePos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(c.writeFrameType) && - (!final || length > maxControlFramePayloadSize) { - c.writeSeq++ - c.writeFrameType = noFrame - c.writePos = maxFrameHeaderSize - return errInvalidControlFrame - } - - b0 := byte(c.writeFrameType) - if final { - b0 |= finalBit - } - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. - framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) - if len(extra) > 0 { - c.writeErr = errors.New("websocket: internal error, extra used in client mode") - return c.writeErr - } - } - - // Write the buffers to the connection. - c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) - - // Setup for next frame. - c.writePos = maxFrameHeaderSize - c.writeFrameType = continuationFrame - if final { - c.writeSeq++ - c.writeFrameType = noFrame - } - return c.writeErr -} - -type messageWriter struct { - c *Conn - seq int -} - -func (w messageWriter) err() error { - c := w.c - if c.writeSeq != w.seq { - return errWriteClosed - } - if c.writeErr != nil { - return c.writeErr - } - return nil -} - -func (w messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.c.writePos - if n <= 0 { - if err := w.c.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.c.writePos - } - if n > max { - n = max - } - return n, nil -} - -func (w messageWriter) write(final bool, p []byte) (int, error) { - if err := w.err(); err != nil { - return 0, err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. 
- err := w.c.flushFrame(final, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n - p = p[n:] - } - return nn, nil -} - -func (w messageWriter) Write(p []byte) (int, error) { - return w.write(false, p) -} - -func (w messageWriter) WriteString(p string) (int, error) { - if err := w.err(); err != nil { - return 0, err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n - p = p[n:] - } - return nn, nil -} - -func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if err := w.err(); err != nil { - return 0, err - } - for { - if w.c.writePos == len(w.c.writeBuf) { - err = w.c.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.c.writePos:]) - w.c.writePos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w messageWriter) Close() error { - if err := w.err(); err != nil { - return err - } - return w.c.flushFrame(true, nil) -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - wr, err := c.NextWriter(messageType) - if err != nil { - return err - } - w := wr.(messageWriter) - if _, err := w.write(true, data); err != nil { - return err - } - if c.writeSeq == w.seq { - if err := c.flushFrame(true, nil); err != nil { - return err - } - } - return nil -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -// readFull is like io.ReadFull except that io.EOF is never returned. -func (c *Conn) readFull(p []byte) (err error) { - var n int - for n < len(p) && err == nil { - var nn int - nn, err = c.br.Read(p[n:]) - n += nn - } - if n == len(p) { - err = nil - } else if err == io.EOF { - err = errUnexpectedEOF - } - return -} - -func (c *Conn) advanceFrame() (int, error) { - - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. 
- - var b [8]byte - if err := c.readFull(b[:2]); err != nil { - return noFrame, err - } - - final := b[0]&finalBit != 0 - frameType := int(b[0] & 0xf) - reserved := int((b[0] >> 4) & 0x7) - mask := b[1]&maskBit != 0 - c.readRemaining = int64(b[1] & 0x7f) - - if reserved != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - return noFrame, c.handleProtocolError("control frame length > 125") - } - if !final { - return noFrame, c.handleProtocolError("control frame not final") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - return noFrame, c.handleProtocolError("message start before final message frame") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - return noFrame, c.handleProtocolError("continuation after final message frame") - } - c.readFinal = final - default: - return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) - } - - // 3. Read and parse frame length. - - switch c.readRemaining { - case 126: - if err := c.readFull(b[:2]); err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) - case 127: - if err := c.readFull(b[:8]); err != nil { - return noFrame, err - } - c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) - } - - // 4. Handle frame masking. - - if mask != c.isServer { - return noFrame, c.handleProtocolError("incorrect mask flag") - } - - if mask { - c.readMaskPos = 0 - if err := c.readFull(c.readMaskKey[:]); err != nil { - return noFrame, err - } - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload = make([]byte, c.readRemaining) - c.readRemaining = 0 - if err := c.readFull(payload); err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. - - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - closeText = string(payload[2:]) - } - switch closeCode { - case CloseNormalClosure, CloseGoingAway: - return noFrame, io.EOF - default: - return noFrame, &closeError{code: closeCode, text: closeText} - } - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. 
NextReader discards -// the previous message if the application has not already consumed it. -// -// The NextReader method and the readers returned from the method cannot be -// accessed by more than one goroutine at a time. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - - c.readSeq++ - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - if frameType == TextMessage || frameType == BinaryMessage { - return frameType, messageReader{c, c.readSeq}, nil - } - } - return noFrame, nil, c.readErr -} - -type messageReader struct { - c *Conn - seq int -} - -func (r messageReader) Read(b []byte) (int, error) { - - if r.seq != r.c.readSeq { - return 0, io.EOF - } - - for r.c.readErr == nil { - - if r.c.readRemaining > 0 { - if int64(len(b)) > r.c.readRemaining { - b = b[:r.c.readRemaining] - } - n, err := r.c.br.Read(b) - r.c.readErr = hideTempErr(err) - if r.c.isServer { - r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n]) - } - r.c.readRemaining -= int64(n) - return n, r.c.readErr - } - - if r.c.readFinal { - r.c.readSeq++ - return 0, io.EOF - } - - frameType, err := r.c.advanceFrame() - switch { - case err != nil: - r.c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := r.c.readErr - if err == io.EOF && r.seq == r.c.readSeq { - err = errUnexpectedEOF - } - return 0, err -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = ioutil.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size for a message read from the peer. If a -// message exceeds the limit, the connection sends a close frame to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The default ping handler sends a pong to the peer. -func (c *Conn) SetPingHandler(h func(string) error) { - if h == nil { - h = func(message string) error { - c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - return nil - } - } - c.handlePing = h -} - -// SetPongHandler sets then handler for pong messages received from the peer. -// The default pong handler does nothing. -func (c *Conn) SetPongHandler(h func(string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. 
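The close-frame helpers above are easiest to read with a small sketch alongside them. This is illustrative only, not part of the removed file; it mirrors the way the tests below combine WriteControl with FormatCloseMessage:

```go
package example

import (
	"time"

	"github.com/gorilla/websocket"
)

// closeNicely tells the peer we are going away and then drops the connection.
func closeNicely(conn *websocket.Conn) error {
	deadline := time.Now().Add(time.Second)
	msg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "bye")
	if err := conn.WriteControl(websocket.CloseMessage, msg, deadline); err != nil {
		return err
	}
	return conn.Close()
}
```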
-func FormatCloseMessage(closeCode int, text string) []byte { - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/conn_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/conn_test.go deleted file mode 100644 index 1f1197e71..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/conn_test.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "testing" - "testing/iotest" - "time" -) - -var _ net.Error = errWriteTimeout - -type fakeNetConn struct { - io.Reader - io.Writer -} - -func (c fakeNetConn) Close() error { return nil } -func (c fakeNetConn) LocalAddr() net.Addr { return nil } -func (c fakeNetConn) RemoteAddr() net.Addr { return nil } -func (c fakeNetConn) SetDeadline(t time.Time) error { return nil } -func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil } -func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil } - -func TestFraming(t *testing.T) { - frameSizes := []int{0, 1, 2, 124, 125, 126, 127, 128, 129, 65534, 65535, 65536, 65537} - var readChunkers = []struct { - name string - f func(io.Reader) io.Reader - }{ - {"half", iotest.HalfReader}, - {"one", iotest.OneByteReader}, - {"asis", func(r io.Reader) io.Reader { return r }}, - } - - writeBuf := make([]byte, 65537) - for i := range writeBuf { - writeBuf[i] = byte(i) - } - - for _, isServer := range []bool{true, false} { - for _, chunker := range readChunkers { - - var connBuf bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) - rc := newConn(fakeNetConn{Reader: chunker.f(&connBuf), Writer: nil}, !isServer, 1024, 1024) - - for _, n := range frameSizes { - for _, iocopy := range []bool{true, false} { - name := fmt.Sprintf("s:%v, r:%s, n:%d c:%v", isServer, chunker.name, n, iocopy) - - w, err := wc.NextWriter(TextMessage) - if err != nil { - t.Errorf("%s: wc.NextWriter() returned %v", name, err) - continue - } - var nn int - if iocopy { - var n64 int64 - n64, err = io.Copy(w, bytes.NewReader(writeBuf[:n])) - nn = int(n64) - } else { - nn, err = w.Write(writeBuf[:n]) - } - if err != nil || nn != n { - t.Errorf("%s: w.Write(writeBuf[:n]) returned %d, %v", name, nn, err) - continue - } - err = w.Close() - if err != nil { - t.Errorf("%s: w.Close() returned %v", name, err) - continue - } - - opCode, r, err := rc.NextReader() - if err != nil || opCode != TextMessage { - t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err) - continue - } - rbuf, err := ioutil.ReadAll(r) - if err != nil { - t.Errorf("%s: ReadFull() returned rbuf, %v", name, err) - continue - } - - if len(rbuf) != n { - t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n) - continue - } - - for i, b := range rbuf { - if byte(i) != b { - t.Errorf("%s: bad byte at offset %d", name, i) - break - } - } - } - } - } - } -} - -func TestControl(t *testing.T) { - const message = "this is a ping/pong messsage" - for _, isServer := range []bool{true, false} { - for _, isWriteControl := range []bool{true, false} { - name := fmt.Sprintf("s:%v, wc:%v", isServer, isWriteControl) - var connBuf bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) - rc := 
newConn(fakeNetConn{Reader: &connBuf, Writer: nil}, !isServer, 1024, 1024) - if isWriteControl { - wc.WriteControl(PongMessage, []byte(message), time.Now().Add(time.Second)) - } else { - w, err := wc.NextWriter(PongMessage) - if err != nil { - t.Errorf("%s: wc.NextWriter() returned %v", name, err) - continue - } - if _, err := w.Write([]byte(message)); err != nil { - t.Errorf("%s: w.Write() returned %v", name, err) - continue - } - if err := w.Close(); err != nil { - t.Errorf("%s: w.Close() returned %v", name, err) - continue - } - var actualMessage string - rc.SetPongHandler(func(s string) error { actualMessage = s; return nil }) - rc.NextReader() - if actualMessage != message { - t.Errorf("%s: pong=%q, want %q", name, actualMessage, message) - continue - } - } - } - } -} - -func TestCloseBeforeFinalFrame(t *testing.T) { - const bufSize = 512 - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(make([]byte, bufSize+bufSize/2)) - wc.WriteControl(CloseMessage, FormatCloseMessage(CloseNormalClosure, ""), time.Now().Add(10*time.Second)) - w.Close() - - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != errUnexpectedEOF { - t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF) - } - _, _, err = rc.NextReader() - if err != io.EOF { - t.Fatalf("NextReader() returned %v, want %v", err, io.EOF) - } -} - -func TestEOFBeforeFinalFrame(t *testing.T) { - const bufSize = 512 - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - - w, _ := wc.NextWriter(BinaryMessage) - w.Write(make([]byte, bufSize+bufSize/2)) - - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != errUnexpectedEOF { - t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF) - } - _, _, err = rc.NextReader() - if err != errUnexpectedEOF { - t.Fatalf("NextReader() returned %v, want %v", err, errUnexpectedEOF) - } -} - -func TestReadLimit(t *testing.T) { - - const readLimit = 512 - message := make([]byte, readLimit+1) - - var b1, b2 bytes.Buffer - wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, readLimit-2) - rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) - rc.SetReadLimit(readLimit) - - // Send message at the limit with interleaved pong. - w, _ := wc.NextWriter(BinaryMessage) - w.Write(message[:readLimit-1]) - wc.WriteControl(PongMessage, []byte("this is a pong"), time.Now().Add(10*time.Second)) - w.Write(message[:1]) - w.Close() - - // Send message larger than the limit. 
- wc.WriteMessage(BinaryMessage, message[:readLimit+1]) - - op, _, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("1: NextReader() returned %d, %v", op, err) - } - op, r, err := rc.NextReader() - if op != BinaryMessage || err != nil { - t.Fatalf("2: NextReader() returned %d, %v", op, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != ErrReadLimit { - t.Fatalf("io.Copy() returned %v", err) - } -} - -func TestUnderlyingConn(t *testing.T) { - var b1, b2 bytes.Buffer - fc := fakeNetConn{Reader: &b1, Writer: &b2} - c := newConn(fc, true, 1024, 1024) - ul := c.UnderlyingConn() - if ul != fc { - t.Fatalf("Underlying conn is not what it should be.") - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go b/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go deleted file mode 100644 index 0d2bd912b..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection WriteMessage and ReadMessages methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// return -// } -// if err = conn.WriteMessage(messageType, p); err != nil { -// return err -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// snippet shows how to echo messages using the NextWriter and NextReader -// methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. 
The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received ping and pong messages by invoking a callback -// function set with SetPingHandler and SetPongHandler methods. These callback -// functions can be invoked from the ReadMessage method, the NextReader method -// or from a call to the data message reader returned from NextReader. -// -// Connections handle received close messages by returning an error from the -// ReadMessage method, the NextReader method or from a call to the data message -// reader returned from NextReader. -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods -// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read -// methods methods (NextReader, SetReadDeadline, ReadMessage). Connections do -// support a concurrent reader and writer. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Read is Required -// -// The application must read the connection to process ping and close messages -// sent from the peer. If the application is not otherwise interested in -// messages from the peer, then the application should start a goroutine to read -// and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and not equal to the -// Host request header. -// -// An application can allow connections from any origin by specifying a -// function that always returns true: -// -// var upgrader = websocket.Upgrader{ -// CheckOrigin: func(r *http.Request) bool { return true }, -// } -// -// The deprecated Upgrade function does not enforce an origin policy. It's the -// application's responsibility to check the Origin header before calling -// Upgrade. -package websocket diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/README.md b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/README.md deleted file mode 100644 index 075ac1530..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Test Server - -This package contains a server for the [Autobahn WebSockets Test Suite](http://autobahn.ws/testsuite). 
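The server in this package echoes client messages using the patterns described in the package documentation above (an Upgrader, a CheckOrigin function, and a read loop). As a point of reference, a minimal standalone echo handler built from those same pieces might look like the sketch below; it is illustrative only, assumes nothing beyond net/http and this package, and the handler name, route, and port are made up rather than taken from server.go.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// upgrader sets explicit buffer sizes and a same-host origin check, as the
// package documentation above recommends.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	// Accept only same-host origins; returning true unconditionally would
	// allow connections from any origin.
	CheckOrigin: func(r *http.Request) bool {
		return r.Header.Get("Origin") == "http://"+r.Host
	},
}

// echo upgrades the connection and writes every data message back to the
// peer. Reading in a loop also services ping and close frames.
func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		mt, p, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, p); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```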
- -To test the server, run - - go run server.go - -and start the client test driver - - wstest -m fuzzingclient -s fuzzingclient.json - -When the client completes, it writes a report to reports/clients/index.html. diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json deleted file mode 100644 index 27d5a5b14..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json +++ /dev/null @@ -1,14 +0,0 @@ - -{ - "options": {"failByDrop": false}, - "outdir": "./reports/clients", - "servers": [ - {"agent": "ReadAllWriteMessage", "url": "ws://localhost:9000/m", "options": {"version": 18}}, - {"agent": "ReadAllWrite", "url": "ws://localhost:9000/r", "options": {"version": 18}}, - {"agent": "CopyFull", "url": "ws://localhost:9000/f", "options": {"version": 18}}, - {"agent": "CopyWriterOnly", "url": "ws://localhost:9000/c", "options": {"version": 18}} - ], - "cases": ["*"], - "exclude-cases": [], - "exclude-agent-cases": {} -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go deleted file mode 100644 index d96ac84db..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/autobahn/server.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command server is a test server for the Autobahn WebSockets Test Suite. -package main - -import ( - "errors" - "flag" - "github.com/gorilla/websocket" - "io" - "log" - "net/http" - "time" - "unicode/utf8" -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 4096, - WriteBufferSize: 4096, - CheckOrigin: func(r *http.Request) bool { - return true - }, -} - -// echoCopy echoes messages from the client using io.Copy. -func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, r, err := conn.NextReader() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if mt == websocket.TextMessage { - r = &validator{r: r} - } - if writerOnly { - _, err = io.Copy(struct{ io.Writer }{w}, r) - } else { - _, err = io.Copy(w, r) - } - if err != nil { - if err == errInvalidUTF8 { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - } - log.Println("Copy:", err) - return - } - err = w.Close() - if err != nil { - log.Println("Close:", err) - return - } - } -} - -func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, true) -} - -func echoCopyFull(w http.ResponseWriter, r *http.Request) { - echoCopy(w, r, false) -} - -// echoReadAll echoes messages from the client by reading the entire message -// with ioutil.ReadAll. 
-func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage bool) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println("Upgrade:", err) - return - } - defer conn.Close() - for { - mt, b, err := conn.ReadMessage() - if err != nil { - if err != io.EOF { - log.Println("NextReader:", err) - } - return - } - if mt == websocket.TextMessage { - if !utf8.Valid(b) { - conn.WriteControl(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), - time.Time{}) - log.Println("ReadAll: invalid utf8") - } - } - if writeMessage { - err = conn.WriteMessage(mt, b) - if err != nil { - log.Println("WriteMessage:", err) - } - } else { - w, err := conn.NextWriter(mt) - if err != nil { - log.Println("NextWriter:", err) - return - } - if _, err := w.Write(b); err != nil { - log.Println("Writer:", err) - return - } - if err := w.Close(); err != nil { - log.Println("Close:", err) - return - } - } - } -} - -func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, false) -} - -func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { - echoReadAll(w, r, true) -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found.", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - io.WriteString(w, "Echo Server") -} - -var addr = flag.String("addr", ":9000", "http service address") - -func main() { - flag.Parse() - http.HandleFunc("/", serveHome) - http.HandleFunc("/c", echoCopyWriterOnly) - http.HandleFunc("/f", echoCopyFull) - http.HandleFunc("/r", echoReadAllWriter) - http.HandleFunc("/m", echoReadAllWriteMessage) - err := http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} - -type validator struct { - state int - x rune - r io.Reader -} - -var errInvalidUTF8 = errors.New("invalid utf8") - -func (r *validator) Read(p []byte) (int, error) { - n, err := r.r.Read(p) - state := r.state - x := r.x - for _, b := range p[:n] { - state, x = decode(state, x, b) - if state == utf8Reject { - break - } - } - r.state = state - r.x = x - if state == utf8Reject || (err == io.EOF && state != utf8Accept) { - return n, errInvalidUTF8 - } - return n, err -} - -// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ -// -// Copyright (c) 2008-2009 Bjoern Hoehrmann -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to -// deal in the Software without restriction, including without limitation the -// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -// sell copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -// IN THE SOFTWARE. -var utf8d = [...]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf - 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df - 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef - 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff - 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 - 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 - 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 - 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 -} - -const ( - utf8Accept = 0 - utf8Reject = 1 -) - -func decode(state int, x rune, b byte) (int, rune) { - t := utf8d[b] - if state != utf8Accept { - x = rune(b&0x3f) | (x << 6) - } else { - x = rune((0xff >> t) & b) - } - state = int(utf8d[256+state*16+int(t)]) - return state, x -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/README.md b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/README.md deleted file mode 100644 index 08fc3e65c..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Chat Example - -This application shows how to use use the -[websocket](https://github.com/gorilla/websocket) package and -[jQuery](http://jquery.com) to implement a simple web chat application. - -## Running the example - -The example requires a working Go development environment. The [Getting -Started](http://golang.org/doc/install) page describes how to install the -development environment. - -Once you have Go up and running, you can download, build and run the example -using the following commands. - - $ go get github.com/gorilla/websocket - $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/chat` - $ go run *.go - diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go deleted file mode 100644 index 7cc0496c3..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/conn.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "github.com/gorilla/websocket" - "log" - "net/http" - "time" -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. - maxMessageSize = 512 -) - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -// connection is an middleman between the websocket connection and the hub. -type connection struct { - // The websocket connection. - ws *websocket.Conn - - // Buffered channel of outbound messages. - send chan []byte -} - -// readPump pumps messages from the websocket connection to the hub. -func (c *connection) readPump() { - defer func() { - h.unregister <- c - c.ws.Close() - }() - c.ws.SetReadLimit(maxMessageSize) - c.ws.SetReadDeadline(time.Now().Add(pongWait)) - c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, message, err := c.ws.ReadMessage() - if err != nil { - break - } - h.broadcast <- message - } -} - -// write writes a message with the given message type and payload. -func (c *connection) write(mt int, payload []byte) error { - c.ws.SetWriteDeadline(time.Now().Add(writeWait)) - return c.ws.WriteMessage(mt, payload) -} - -// writePump pumps messages from the hub to the websocket connection. -func (c *connection) writePump() { - ticker := time.NewTicker(pingPeriod) - defer func() { - ticker.Stop() - c.ws.Close() - }() - for { - select { - case message, ok := <-c.send: - if !ok { - c.write(websocket.CloseMessage, []byte{}) - return - } - if err := c.write(websocket.TextMessage, message); err != nil { - return - } - case <-ticker.C: - if err := c.write(websocket.PingMessage, []byte{}); err != nil { - return - } - } - } -} - -// serverWs handles websocket requests from the peer. -func serveWs(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - log.Println(err) - return - } - c := &connection{send: make(chan []byte, 256), ws: ws} - h.register <- c - go c.writePump() - c.readPump() -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html deleted file mode 100644 index 29599225c..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/home.html +++ /dev/null @@ -1,92 +0,0 @@ - - - -Chat Example - - - - - -
- - diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go deleted file mode 100644 index 449ba753d..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/hub.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// hub maintains the set of active connections and broadcasts messages to the -// connections. -type hub struct { - // Registered connections. - connections map[*connection]bool - - // Inbound messages from the connections. - broadcast chan []byte - - // Register requests from the connections. - register chan *connection - - // Unregister requests from connections. - unregister chan *connection -} - -var h = hub{ - broadcast: make(chan []byte), - register: make(chan *connection), - unregister: make(chan *connection), - connections: make(map[*connection]bool), -} - -func (h *hub) run() { - for { - select { - case c := <-h.register: - h.connections[c] = true - case c := <-h.unregister: - if _, ok := h.connections[c]; ok { - delete(h.connections, c) - close(c.send) - } - case m := <-h.broadcast: - for c := range h.connections { - select { - case c.send <- m: - default: - close(c.send) - delete(h.connections, c) - } - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go deleted file mode 100644 index 3c4448d72..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/chat/main.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "log" - "net/http" - "text/template" -) - -var addr = flag.String("addr", ":8080", "http service address") -var homeTempl = template.Must(template.ParseFiles("home.html")) - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - homeTempl.Execute(w, r.Host) -} - -func main() { - flag.Parse() - go h.run() - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - err := http.ListenAndServe(*addr, nil) - if err != nil { - log.Fatal("ListenAndServe: ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/README.md b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/README.md deleted file mode 100644 index ca4931f3b..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# File Watch example. - -This example sends a file to the browser client for display whenever the file is modified. - - $ go get github.com/gorilla/websocket - $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/filewatch` - $ go run main.go - # Open http://localhost:8080/ . - # Modify the file to see it update in the browser. 
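The browser page receives the file contents over a WebSocket; a Go client could subscribe to the same updates. The sketch below is illustrative only: it assumes the client half of the package (the Dialer type from client.go, which is not shown in this excerpt) is present in this vendored revision, and that the server from this example is running on localhost:8080 with the /ws endpoint registered in main.go below.

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Connect to the filewatch server started with `go run main.go <file>`.
	var d websocket.Dialer
	conn, _, err := d.Dial("ws://localhost:8080/ws", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	// Each text message carries the current contents of the watched file.
	for {
		_, p, err := conn.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		log.Printf("file changed (%d bytes)", len(p))
	}
}
```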
diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go b/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go deleted file mode 100644 index a2c7b85fa..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/examples/filewatch/main.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "flag" - "io/ioutil" - "log" - "net/http" - "os" - "strconv" - "text/template" - "time" - - "github.com/gorilla/websocket" -) - -const ( - // Time allowed to write the file to the client. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the client. - pongWait = 60 * time.Second - - // Send pings to client with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Poll file for changes with this period. - filePeriod = 10 * time.Second -) - -var ( - addr = flag.String("addr", ":8080", "http service address") - homeTempl = template.Must(template.New("").Parse(homeHTML)) - filename string - upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - } -) - -func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) { - fi, err := os.Stat(filename) - if err != nil { - return nil, lastMod, err - } - if !fi.ModTime().After(lastMod) { - return nil, lastMod, nil - } - p, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fi.ModTime(), err - } - return p, fi.ModTime(), nil -} - -func reader(ws *websocket.Conn) { - defer ws.Close() - ws.SetReadLimit(512) - ws.SetReadDeadline(time.Now().Add(pongWait)) - ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) - for { - _, _, err := ws.ReadMessage() - if err != nil { - break - } - } -} - -func writer(ws *websocket.Conn, lastMod time.Time) { - lastError := "" - pingTicker := time.NewTicker(pingPeriod) - fileTicker := time.NewTicker(filePeriod) - defer func() { - pingTicker.Stop() - fileTicker.Stop() - ws.Close() - }() - for { - select { - case <-fileTicker.C: - var p []byte - var err error - - p, lastMod, err = readFileIfModified(lastMod) - - if err != nil { - if s := err.Error(); s != lastError { - lastError = s - p = []byte(lastError) - } - } else { - lastError = "" - } - - if p != nil { - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.TextMessage, p); err != nil { - return - } - } - case <-pingTicker.C: - ws.SetWriteDeadline(time.Now().Add(writeWait)) - if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - return - } - } - } -} - -func serveWs(w http.ResponseWriter, r *http.Request) { - ws, err := upgrader.Upgrade(w, r, nil) - if err != nil { - if _, ok := err.(websocket.HandshakeError); !ok { - log.Println(err) - } - return - } - - var lastMod time.Time - if n, err := strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err != nil { - lastMod = time.Unix(0, n) - } - - go writer(ws, lastMod) - reader(ws) -} - -func serveHome(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/" { - http.Error(w, "Not found", 404) - return - } - if r.Method != "GET" { - http.Error(w, "Method not allowed", 405) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - p, lastMod, err := readFileIfModified(time.Time{}) - if err != nil { - p = []byte(err.Error()) - lastMod = time.Unix(0, 0) - } - var 
v = struct { - Host string - Data string - LastMod string - }{ - r.Host, - string(p), - strconv.FormatInt(lastMod.UnixNano(), 16), - } - homeTempl.Execute(w, &v) -} - -func main() { - flag.Parse() - if flag.NArg() != 1 { - log.Fatal("filename not specified") - } - filename = flag.Args()[0] - http.HandleFunc("/", serveHome) - http.HandleFunc("/ws", serveWs) - if err := http.ListenAndServe(*addr, nil); err != nil { - log.Fatal(err) - } -} - -const homeHTML = ` - - - WebSocket Example - - -
{{.Data}}
- - - -` diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json.go deleted file mode 100644 index e0668f25e..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" -) - -// WriteJSON is deprecated, use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v to the connection. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON is deprecated, use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - return json.NewDecoder(r).Decode(v) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go deleted file mode 100644 index 2edb28d2f..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/json_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "reflect" - "testing" -) - -func TestJSON(t *testing.T) { - var buf bytes.Buffer - c := fakeNetConn{&buf, &buf} - wc := newConn(c, true, 1024, 1024) - rc := newConn(c, false, 1024, 1024) - - var actual, expect struct { - A int - B string - } - expect.A = 1 - expect.B = "hello" - - if err := wc.WriteJSON(&expect); err != nil { - t.Fatal("write", err) - } - - if err := rc.ReadJSON(&actual); err != nil { - t.Fatal("read", err) - } - - if !reflect.DeepEqual(&actual, &expect) { - t.Fatal("equal", actual, expect) - } -} - -func TestDeprecatedJSON(t *testing.T) { - var buf bytes.Buffer - c := fakeNetConn{&buf, &buf} - wc := newConn(c, true, 1024, 1024) - rc := newConn(c, false, 1024, 1024) - - var actual, expect struct { - A int - B string - } - expect.A = 1 - expect.B = "hello" - - if err := WriteJSON(wc, &expect); err != nil { - t.Fatal("write", err) - } - - if err := ReadJSON(rc, &actual); err != nil { - t.Fatal("read", err) - } - - if !reflect.DeepEqual(&actual, &expect) { - t.Fatal("equal", actual, expect) - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/server.go b/Godeps/_workspace/src/github.com/gorilla/websocket/server.go deleted file mode 100644 index 349e5b997..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "net" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then a default value of 4096 is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is set, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, the host in the Origin header must not be set or - // must match the host of the request. - CheckOrigin func(r *http.Request) bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. -func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return u.Host == r.Host -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// application negotiated subprotocol (Sec-Websocket-Protocol). 
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") - } - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: connection header != upgrade") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: upgrade != websocket") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - var ( - netConn net.Conn - br *bufio.Reader - err error - ) - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var rw *bufio.ReadWriter - netConn, rw, err = h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - br = rw.Reader - - if br.Buffered() > 0 { - netConn.Close() - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) - c.subprotocol = subprotocol - - p := c.writeBuf[:0] - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) - if c.subprotocol != "" { - p = append(p, "Sec-Websocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) - - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) - } - if _, err = netConn.Write(p); err != nil { - netConn.Close() - return nil, err - } - if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// This function is deprecated, use websocket.Upgrader instead. -// -// The application is responsible for checking the request origin before -// calling Upgrade. An example implementation of the same origin policy is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", 403) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. 
-// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. -func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go deleted file mode 100644 index ead0776af..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/server_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "net/http" - "reflect" - "testing" -) - -var subprotocolTests = []struct { - h string - protocols []string -}{ - {"", nil}, - {"foo", []string{"foo"}}, - {"foo,bar", []string{"foo", "bar"}}, - {"foo, bar", []string{"foo", "bar"}}, - {" foo, bar", []string{"foo", "bar"}}, - {" foo, bar ", []string{"foo", "bar"}}, -} - -func TestSubprotocols(t *testing.T) { - for _, st := range subprotocolTests { - r := http.Request{Header: http.Header{"Sec-Websocket-Protocol": {st.h}}} - protocols := Subprotocols(&r) - if !reflect.DeepEqual(st.protocols, protocols) { - t.Errorf("SubProtocols(%q) returned %#v, want %#v", st.h, protocols, st.protocols) - } - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/util.go b/Godeps/_workspace/src/github.com/gorilla/websocket/util.go deleted file mode 100644 index ffdc265ed..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "crypto/rand" - "crypto/sha1" - "encoding/base64" - "io" - "net/http" - "strings" -) - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains token. 
-func tokenListContainsValue(header http.Header, name string, value string) bool { - for _, v := range header[name] { - for _, s := range strings.Split(v, ",") { - if strings.EqualFold(value, strings.TrimSpace(s)) { - return true - } - } - } - return false -} - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} diff --git a/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go b/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go deleted file mode 100644 index 91f70ceb0..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/websocket/util_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "net/http" - "testing" -) - -var tokenListContainsValueTests = []struct { - value string - ok bool -}{ - {"WebSocket", true}, - {"WEBSOCKET", true}, - {"websocket", true}, - {"websockets", false}, - {"x websocket", false}, - {"websocket x", false}, - {"other,websocket,more", true}, - {"other, websocket, more", true}, -} - -func TestTokenListContainsValue(t *testing.T) { - for _, tt := range tokenListContainsValueTests { - h := http.Header{"Upgrade": {tt.value}} - ok := tokenListContainsValue(h, "Upgrade", "websocket") - if ok != tt.ok { - t.Errorf("tokenListContainsValue(h, n, %q) = %v, want %v", tt.value, ok, tt.ok) - } - } -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack.go deleted file mode 100644 index ae3021cce..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack.go +++ /dev/null @@ -1,225 +0,0 @@ -// Package stack implements utilities to capture, manipulate, and format call -// stacks. -package stack - -import ( - "fmt" - "path/filepath" - "runtime" - "strings" -) - -// Call records a single function invocation from a goroutine stack. It is a -// wrapper for the program counter values returned by runtime.Caller and -// runtime.Callers and consumed by runtime.FuncForPC. -type Call uintptr - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. -// -// %+s path of source file relative to the compile time GOPATH -// %#s full path of source file -// %+n import path qualified function name -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (pc Call) Format(s fmt.State, c rune) { - // BUG(ChrisHines): Subtracting one from pc is a work around for - // https://code.google.com/p/go/issues/detail?id=7690. The idea for this - // work around comes from rsc's initial patch at - // https://codereview.appspot.com/84100043/#ps20001, but as noted in the - // issue discussion, it is not a complete fix since it doesn't handle some - // cases involving signals. Just the same, it handles all of the other - // cases I have tested. 
- pcFix := uintptr(pc) - 1 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - fmt.Fprintf(s, "%%!%c(NOFUNC)", c) - return - } - - switch c { - case 's', 'v': - file, line := fn.FileLine(pcFix) - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - // Here we want to get the source file path relative to the - // compile time GOPATH. As of Go 1.3.x there is no direct way to - // know the compiled GOPATH at runtime, but we can infer the - // number of path segments in the GOPATH. We note that fn.Name() - // returns the function name qualified by the import path, which - // does not include the GOPATH. Thus we can trim segments from the - // beginning of the file path until the number of path separators - // remaining is one more than the number of path separators in the - // function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path - // separator than our desired output. - const sep = "/" - impCnt := strings.Count(fn.Name(), sep) + 1 - pathCnt := strings.Count(file, sep) - for pathCnt > impCnt { - i := strings.Index(file, sep) - if i == -1 { - break - } - file = file[i+len(sep):] - pathCnt-- - } - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - fmt.Fprint(s, file) - if c == 'v' { - fmt.Fprint(s, ":", line) - } - - case 'd': - _, line := fn.FileLine(pcFix) - fmt.Fprint(s, line) - - case 'n': - name := fn.Name() - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - fmt.Fprint(s, name) - } -} - -// Callers returns a Trace for the current goroutine with element 0 -// identifying the calling function. -func Callers() Trace { - pcs := poolBuf() - pcs = pcs[:cap(pcs)] - n := runtime.Callers(2, pcs) - cs := make([]Call, n) - for i, pc := range pcs[:n] { - cs[i] = Call(pc) - } - putPoolBuf(pcs) - return cs -} - -// name returns the import path qualified name of the function containing the -// call. -func (pc Call) name() string { - pcFix := uintptr(pc) - 1 // work around for go issue #7690 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - return "???" - } - return fn.Name() -} - -func (pc Call) file() string { - pcFix := uintptr(pc) - 1 // work around for go issue #7690 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - return "???" - } - file, _ := fn.FileLine(pcFix) - return file -} - -// Trace records a sequence of function invocations from a goroutine stack. -type Trace []Call - -// Format implements fmt.Formatter by printing the Trace as square brackes ([, -// ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (pcs Trace) Format(s fmt.State, c rune) { - s.Write([]byte("[")) - for i, pc := range pcs { - if i > 0 { - s.Write([]byte(" ")) - } - pc.Format(s, c) - } - s.Write([]byte("]")) -} - -// TrimBelow returns a slice of the Trace with all entries below pc removed. -func (pcs Trace) TrimBelow(pc Call) Trace { - for len(pcs) > 0 && pcs[0] != pc { - pcs = pcs[1:] - } - return pcs -} - -// TrimAbove returns a slice of the Trace with all entries above pc removed. 
-func (pcs Trace) TrimAbove(pc Call) Trace { - for len(pcs) > 0 && pcs[len(pcs)-1] != pc { - pcs = pcs[:len(pcs)-1] - } - return pcs -} - -// TrimBelowName returns a slice of the Trace with all entries below the -// lowest with function name name removed. -func (pcs Trace) TrimBelowName(name string) Trace { - for len(pcs) > 0 && pcs[0].name() != name { - pcs = pcs[1:] - } - return pcs -} - -// TrimAboveName returns a slice of the Trace with all entries above the -// highest with function name name removed. -func (pcs Trace) TrimAboveName(name string) Trace { - for len(pcs) > 0 && pcs[len(pcs)-1].name() != name { - pcs = pcs[:len(pcs)-1] - } - return pcs -} - -var goroot string - -func init() { - goroot = filepath.ToSlash(runtime.GOROOT()) - if runtime.GOOS == "windows" { - goroot = strings.ToLower(goroot) - } -} - -func inGoroot(path string) bool { - if runtime.GOOS == "windows" { - path = strings.ToLower(path) - } - return strings.HasPrefix(path, goroot) -} - -// TrimRuntime returns a slice of the Trace with the topmost entries from the -// go runtime removed. It considers any calls originating from files under -// GOROOT as part of the runtime. -func (pcs Trace) TrimRuntime() Trace { - for len(pcs) > 0 && inGoroot(pcs[len(pcs)-1].file()) { - pcs = pcs[:len(pcs)-1] - } - return pcs -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool.go deleted file mode 100644 index 34f2ca970..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.3 - -package stack - -import ( - "sync" -) - -var pcStackPool = sync.Pool{ - New: func() interface{} { return make([]uintptr, 1000) }, -} - -func poolBuf() []uintptr { - return pcStackPool.Get().([]uintptr) -} - -func putPoolBuf(p []uintptr) { - pcStackPool.Put(p) -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool_chan.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool_chan.go deleted file mode 100644 index a9d6c154d..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_pool_chan.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !go1.3 appengine - -package stack - -const ( - stackPoolSize = 64 -) - -var ( - pcStackPool = make(chan []uintptr, stackPoolSize) -) - -func poolBuf() []uintptr { - select { - case p := <-pcStackPool: - return p - default: - return make([]uintptr, 1000) - } -} - -func putPoolBuf(p []uintptr) { - select { - case pcStackPool <- p: - default: - } -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_test.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_test.go deleted file mode 100644 index 52371b1e4..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/stack/stack_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package stack_test - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "testing" - - "github.com/inconshreveable/log15/stack" -) - -type testType struct{} - -func (tt testType) testMethod() (pc uintptr, file string, line int, ok bool) { - return runtime.Caller(0) -} - -func TestCallFormat(t *testing.T) { - t.Parallel() - - pc, file, line, ok := runtime.Caller(0) - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - - gopathSrc := filepath.Join(os.Getenv("GOPATH"), "src") - relFile, err := filepath.Rel(gopathSrc, file) - if err != nil { - t.Fatalf("failed to 
determine path relative to GOPATH: %v", err) - } - relFile = filepath.ToSlash(relFile) - - pc2, file2, line2, ok2 := testType{}.testMethod() - if !ok2 { - t.Fatal("runtime.Caller(0) failed") - } - relFile2, err := filepath.Rel(gopathSrc, file) - if err != nil { - t.Fatalf("failed to determine path relative to GOPATH: %v", err) - } - relFile2 = filepath.ToSlash(relFile2) - - data := []struct { - pc uintptr - desc string - fmt string - out string - }{ - {0, "error", "%s", "%!s(NOFUNC)"}, - - {pc, "func", "%s", path.Base(file)}, - {pc, "func", "%+s", relFile}, - {pc, "func", "%#s", file}, - {pc, "func", "%d", fmt.Sprint(line)}, - {pc, "func", "%n", "TestCallFormat"}, - {pc, "func", "%+n", runtime.FuncForPC(pc).Name()}, - {pc, "func", "%v", fmt.Sprint(path.Base(file), ":", line)}, - {pc, "func", "%+v", fmt.Sprint(relFile, ":", line)}, - {pc, "func", "%#v", fmt.Sprint(file, ":", line)}, - {pc, "func", "%v|%[1]n()", fmt.Sprint(path.Base(file), ":", line, "|", "TestCallFormat()")}, - - {pc2, "meth", "%s", path.Base(file2)}, - {pc2, "meth", "%+s", relFile2}, - {pc2, "meth", "%#s", file2}, - {pc2, "meth", "%d", fmt.Sprint(line2)}, - {pc2, "meth", "%n", "testType.testMethod"}, - {pc2, "meth", "%+n", runtime.FuncForPC(pc2).Name()}, - {pc2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)}, - {pc2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)}, - {pc2, "meth", "%#v", fmt.Sprint(file2, ":", line2)}, - {pc2, "meth", "%v|%[1]n()", fmt.Sprint(path.Base(file2), ":", line2, "|", "testType.testMethod()")}, - } - - for _, d := range data { - got := fmt.Sprintf(d.fmt, stack.Call(d.pc)) - if got != d.out { - t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, d.desc, got, d.out) - } - } -} - -func BenchmarkCallVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprint(ioutil.Discard, stack.Call(pc)) - } -} - -func BenchmarkCallPlusVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+v", stack.Call(pc)) - } -} - -func BenchmarkCallSharpVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#v", stack.Call(pc)) - } -} - -func BenchmarkCallSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%s", stack.Call(pc)) - } -} - -func BenchmarkCallPlusSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+s", stack.Call(pc)) - } -} - -func BenchmarkCallSharpSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#s", stack.Call(pc)) - } -} - -func BenchmarkCallDFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%d", stack.Call(pc)) - } -} - -func BenchmarkCallNFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 
0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%n", stack.Call(pc)) - } -} - -func BenchmarkCallPlusNFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+n", stack.Call(pc)) - } -} - -func BenchmarkCallers(b *testing.B) { - for i := 0; i < b.N; i++ { - stack.Callers() - } -} - -func deepStack(depth int, b *testing.B) stack.Trace { - if depth > 0 { - return deepStack(depth-1, b) - } - b.StartTimer() - s := stack.Callers() - b.StopTimer() - return s -} - -func BenchmarkCallers10(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(10, b) - } -} - -func BenchmarkCallers50(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(50, b) - } -} - -func BenchmarkCallers100(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(100, b) - } -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go deleted file mode 100644 index c1b5d2a3b..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go +++ /dev/null @@ -1,13 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package term - -// IsTty always returns false on AppEngine. -func IsTty(fd uintptr) bool { - return false -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go deleted file mode 100644 index b05de4cb8..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go deleted file mode 100644 index cfaceab33..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go +++ /dev/null @@ -1,18 +0,0 @@ -package term - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go deleted file mode 100644 index 5290468d6..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go deleted file mode 100644 index c0b201a53..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go +++ /dev/null @@ -1,20 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd - -package term - -import ( - "syscall" - "unsafe" -) - -// IsTty returns true if the given file descriptor is a terminal. -func IsTty(fd uintptr) bool { - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go deleted file mode 100644 index df3c30c15..000000000 --- a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package term - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTty returns true if the given file descriptor is a terminal. 
-func IsTty(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/github.com/mattn/go-colorable/README.md b/Godeps/_workspace/src/github.com/mattn/go-colorable/README.md deleted file mode 100644 index c69da4a76..000000000 --- a/Godeps/_workspace/src/github.com/mattn/go-colorable/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# go-colorable - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. - -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_others.go b/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_others.go deleted file mode 100644 index 219f02f62..000000000 --- a/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_others.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package colorable - -import ( - "io" - "os" -) - -func NewColorableStdout() io.Writer { - return os.Stdout -} - -func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_windows.go b/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_windows.go deleted file mode 100644 index 6a2787808..000000000 --- a/Godeps/_workspace/src/github.com/mattn/go-colorable/colorable_windows.go +++ /dev/null @@ -1,594 +0,0 @@ -package colorable - -import ( - "bytes" - "fmt" - "io" - "os" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/mattn/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") -) - -type Writer struct { - out io.Writer - handle syscall.Handle - lastbuf bytes.Buffer - oldattr word -} - -func NewColorableStdout() io.Writer { - var csbi consoleScreenBufferInfo - out := os.Stdout - if !isatty.IsTerminal(out.Fd()) { - return out - } - handle := syscall.Handle(out.Fd()) - 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: out, handle: handle, oldattr: csbi.attributes} -} - -func NewColorableStderr() io.Writer { - var csbi consoleScreenBufferInfo - out := os.Stderr - if !isatty.IsTerminal(out.Fd()) { - return out - } - handle := syscall.Handle(out.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: out, handle: handle, oldattr: csbi.attributes} -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 183: 0xd7afff, - 
184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 201: 0xff00ff, - 202: 0xff5f00, - 203: 0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -func (w *Writer) Write(data []byte) (n int, err error) { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - er := bytes.NewBuffer(data) -loop: - for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - var m rune - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - break - } - buf.Write([]byte(string(c))) - } - - switch m { - case 'm': - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i, ns := range token { - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 100: - attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 22 == n || n == 25 || n == 25: - attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 30 <= n && n <= 37: - attr = (attr & backgroundMask) - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr |= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. - if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. 
- attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr = (attr & foregroundMask) - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. - if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) - } - } - } - } - return len(data) - w.lastbuf.Len(), nil -} - -type consoleColor struct { - red bool - green bool - blue bool - intensity bool -} - -func minmax3(a, b, c int) (min, max int) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -func toConsoleColor(rgb int) (c consoleColor) { - r, g, b := (rgb&0xFF0000)>>16, (rgb&0x00FF00)>>8, rgb&0x0000FF - min, max := minmax3(r, g, b) - a := (min + max) / 2 - if r < 128 && g < 128 && b < 128 { - if r >= a { - c.red = true - } - if g >= a { - c.green = true - } - if b >= a { - c.blue = true - } - // non-intensed white is lighter than intensed black, so swap those. - if c.red && c.green && c.blue { - c.red, c.green, c.blue = false, false, false - c.intensity = true - } - } else { - if min < 128 { - min = 128 - a = (min + max) / 2 - } - if r >= a { - c.red = true - } - if g >= a { - c.green = true - } - if b >= a { - c.blue = true - } - c.intensity = true - // intensed black is darker than non-intensed white, so swap those. - if !c.red && !c.green && !c.blue { - c.red, c.green, c.blue = true, true, true - c.intensity = false - } - } - return c -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - for i, rgb := range color256 { - c := toConsoleColor(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/.travis.yml b/Godeps/_workspace/src/github.com/naoina/go-stringutil/.travis.yml deleted file mode 100644 index 926eb992a..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - tip -install: - - go get -v github.com/naoina/go-stringutil -script: - - go test -v ./... -bench . 
-benchmem diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/LICENSE b/Godeps/_workspace/src/github.com/naoina/go-stringutil/LICENSE deleted file mode 100644 index 0fff1c58b..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/README.md b/Godeps/_workspace/src/github.com/naoina/go-stringutil/README.md deleted file mode 100644 index 87772e88d..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# stringutil [![Build Status](https://travis-ci.org/naoina/go-stringutil.png?branch=master)](https://travis-ci.org/naoina/go-stringutil) - -## Installation - - go get -u github.com/naoina/go-stringutil - -## Documentation - -See https://godoc.org/github.com/naoina/go-stringutil - -## License - -MIT diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings.go b/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings.go deleted file mode 100644 index c0fdd4cc1..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings.go +++ /dev/null @@ -1,120 +0,0 @@ -package stringutil - -import ( - "bytes" - "unicode" -) - -// ToUpperCamelCase returns a copy of the string s with all Unicode letters mapped to their camel case. -// It will convert to upper case previous letter of '_' and first letter, and remove letter of '_'. -func ToUpperCamelCase(s string) string { - if s == "" { - return "" - } - upper := true - var result bytes.Buffer - for _, c := range s { - if c == '_' { - upper = true - continue - } - if upper { - result.WriteRune(unicode.ToUpper(c)) - upper = false - continue - } - result.WriteRune(c) - } - return result.String() -} - -// ToUpperCamelCaseASCII is similar to ToUpperCamelCase, but optimized for -// only the ASCII characters. -// ToUpperCamelCaseASCII is faster than ToUpperCamelCase, but doesn't work if -// contains non-ASCII characters. -func ToUpperCamelCaseASCII(s string) string { - if s == "" { - return "" - } - upper := true - result := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if c == '_' { - upper = true - continue - } - if upper { - result = append(result, toUpperASCII(c)) - upper = false - continue - } - result = append(result, c) - } - return string(result) -} - -// ToSnakeCase returns a copy of the string s with all Unicode letters mapped to their snake case. 
-// It will insert letter of '_' at position of previous letter of uppercase and all -// letters convert to lower case. -func ToSnakeCase(s string) string { - if s == "" { - return "" - } - var result bytes.Buffer - for _, c := range s { - if unicode.IsUpper(c) { - result.WriteByte('_') - } - result.WriteRune(unicode.ToLower(c)) - } - s = result.String() - if s[0] == '_' { - return s[1:] - } - return s -} - -// ToSnakeCaseASCII is similar to ToSnakeCase, but optimized for only the ASCII -// characters. -// ToSnakeCaseASCII is faster than ToSnakeCase, but doesn't work correctly if -// contains non-ASCII characters. -func ToSnakeCaseASCII(s string) string { - if s == "" { - return "" - } - result := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if isUpperASCII(c) { - result = append(result, '_') - } - result = append(result, toLowerASCII(c)) - } - if result[0] == '_' { - return string(result[1:]) - } - return string(result) -} - -func isUpperASCII(c byte) bool { - return 'A' <= c && c <= 'Z' -} - -func isLowerASCII(c byte) bool { - return 'a' <= c && c <= 'z' -} - -func toUpperASCII(c byte) byte { - if isLowerASCII(c) { - return c - ('a' - 'A') - } - return c -} - -func toLowerASCII(c byte) byte { - if isUpperASCII(c) { - return c + 'a' - 'A' - } - return c -} diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_bench_test.go b/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_bench_test.go deleted file mode 100644 index 90c280bda..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_bench_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package stringutil_test - -import ( - "testing" - - "github.com/naoina/go-stringutil" -) - -var benchcaseForCamelCase = "the_quick_brown_fox_jumps_over_the_lazy_dog" - -func BenchmarkToUpperCamelCase(b *testing.B) { - for i := 0; i < b.N; i++ { - stringutil.ToUpperCamelCase(benchcaseForCamelCase) - } -} - -func BenchmarkToUpperCamelCaseASCII(b *testing.B) { - for i := 0; i < b.N; i++ { - stringutil.ToUpperCamelCaseASCII(benchcaseForCamelCase) - } -} - -var benchcaseForSnakeCase = "TheQuickBrownFoxJumpsOverTheLazyDog" - -func BenchmarkToSnakeCase(b *testing.B) { - for i := 0; i < b.N; i++ { - stringutil.ToSnakeCase(benchcaseForSnakeCase) - } -} - -func BenchmarkToSnakeCaseASCII(b *testing.B) { - for i := 0; i < b.N; i++ { - stringutil.ToSnakeCaseASCII(benchcaseForSnakeCase) - } -} diff --git a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_test.go b/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_test.go deleted file mode 100644 index 69c831e12..000000000 --- a/Godeps/_workspace/src/github.com/naoina/go-stringutil/strings_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package stringutil_test - -import ( - "reflect" - "testing" - - "github.com/naoina/go-stringutil" -) - -func TestToUpperCamelCase(t *testing.T) { - for _, v := range []struct { - input, expect string - }{ - {"", ""}, - {"thequickbrownfoxoverthelazydog", "Thequickbrownfoxoverthelazydog"}, - {"thequickbrownfoxoverthelazydoG", "ThequickbrownfoxoverthelazydoG"}, - {"thequickbrownfoxoverthelazydo_g", "ThequickbrownfoxoverthelazydoG"}, - {"TheQuickBrownFoxJumpsOverTheLazyDog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - {"the_quick_brown_fox_jumps_over_the_lazy_dog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - {"the_Quick_Brown_Fox_Jumps_Over_The_Lazy_Dog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - {"the_quick_brown_fox_over_the_lazy_dog", "TheQuickBrownFoxOverTheLazyDog"}, - } { - actual := 
stringutil.ToUpperCamelCase(v.input) - expect := v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`stringutil.ToUpperCamelCase(%#v) => %#v; want %#v`, v.input, actual, expect) - } - } -} - -func TestToUpperCamelCaseASCII(t *testing.T) { - for _, v := range []struct { - input, expect string - }{ - {"", ""}, - {"thequickbrownfoxoverthelazydog", "Thequickbrownfoxoverthelazydog"}, - {"thequickbrownfoxoverthelazydoG", "ThequickbrownfoxoverthelazydoG"}, - {"thequickbrownfoxoverthelazydo_g", "ThequickbrownfoxoverthelazydoG"}, - {"TheQuickBrownFoxJumpsOverTheLazyDog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - {"the_quick_brown_fox_jumps_over_the_lazy_dog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - {"the_Quick_Brown_Fox_Jumps_Over_The_Lazy_Dog", "TheQuickBrownFoxJumpsOverTheLazyDog"}, - } { - actual := stringutil.ToUpperCamelCaseASCII(v.input) - expect := v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`stringutil.ToUpperCamelCaseASCII(%#v) => %#v; want %#v`, v.input, actual, expect) - } - } -} - -func TestToSnakeCase(t *testing.T) { - for _, v := range []struct { - input, expect string - }{ - {"", ""}, - {"thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"}, - {"Thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"}, - {"ThequickbrownfoxjumpsoverthelazydoG", "thequickbrownfoxjumpsoverthelazydo_g"}, - {"TheQuickBrownFoxJumpsOverTheLazyDog", "the_quick_brown_fox_jumps_over_the_lazy_dog"}, - {"the_quick_brown_fox_jumps_over_the_lazy_dog", "the_quick_brown_fox_jumps_over_the_lazy_dog"}, - {"TheQuickBrownFoxOverTheLazyDog", "the_quick_brown_fox_over_the_lazy_dog"}, - } { - actual := stringutil.ToSnakeCase(v.input) - expect := v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`stringutil.ToSnakeCase(%#v) => %#v; want %#v`, v.input, actual, expect) - } - } -} - -func TestToSnakeCaseASCII(t *testing.T) { - for _, v := range []struct { - input, expect string - }{ - {"", ""}, - {"thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"}, - {"Thequickbrownfoxjumpsoverthelazydog", "thequickbrownfoxjumpsoverthelazydog"}, - {"ThequickbrownfoxjumpsoverthelazydoG", "thequickbrownfoxjumpsoverthelazydo_g"}, - {"TheQuickBrownFoxJumpsOverTheLazyDog", "the_quick_brown_fox_jumps_over_the_lazy_dog"}, - {"the_quick_brown_fox_jumps_over_the_lazy_dog", "the_quick_brown_fox_jumps_over_the_lazy_dog"}, - } { - actual := stringutil.ToSnakeCaseASCII(v.input) - expect := v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`stringutil.ToSnakeCaseASCII(%#v) => %#v; want %#v`, v.input, actual, expect) - } - } -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/.travis.yml b/Godeps/_workspace/src/github.com/naoina/toml/.travis.yml deleted file mode 100644 index 6bd7b6df3..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.3 - - tip - -install: - - go get -v ./... - -script: - - go test ./... 
diff --git a/Godeps/_workspace/src/github.com/naoina/toml/LICENSE b/Godeps/_workspace/src/github.com/naoina/toml/LICENSE deleted file mode 100644 index e65039ad8..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/naoina/toml/Makefile b/Godeps/_workspace/src/github.com/naoina/toml/Makefile deleted file mode 100644 index 10dc5a553..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -GO = go -PEG = peg - -.SUFFIXES: .peg .peg.go - -.PHONY: all test clean -all: parse.peg.go - -.peg.peg.go: - $(PEG) -switch -inline $< - -test: all - $(GO) test ./... - -clean: - $(RM) *.peg.go diff --git a/Godeps/_workspace/src/github.com/naoina/toml/README.md b/Godeps/_workspace/src/github.com/naoina/toml/README.md deleted file mode 100644 index 7330ce4d0..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/README.md +++ /dev/null @@ -1,364 +0,0 @@ -# TOML parser and encoder library for Golang [![Build Status](https://travis-ci.org/naoina/toml.png?branch=master)](https://travis-ci.org/naoina/toml) - -[TOML](https://github.com/toml-lang/toml) parser and encoder library for [Golang](http://golang.org/). - -This library is compatible with TOML version [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md). - -## Installation - - go get -u github.com/naoina/toml - -## Usage - -The following TOML save as `example.toml`. - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Lance Uppercut" -dob = 1979-05-27T07:32:00-08:00 # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -Then above TOML will mapping to `tomlConfig` struct using `toml.Unmarshal`. 
- -```go -package main - -import ( - "io/ioutil" - "os" - "time" - - "github.com/naoina/toml" -) - -type tomlConfig struct { - Title string - Owner struct { - Name string - Dob time.Time - } - Database struct { - Server string - Ports []int - ConnectionMax uint - Enabled bool - } - Servers map[string]Server - Clients struct { - Data [][]interface{} - Hosts []string - } -} - -type Server struct { - IP string - DC string -} - -func main() { - f, err := os.Open("example.toml") - if err != nil { - panic(err) - } - defer f.Close() - buf, err := ioutil.ReadAll(f) - if err != nil { - panic(err) - } - var config tomlConfig - if err := toml.Unmarshal(buf, &config); err != nil { - panic(err) - } - // then to use the unmarshaled config... -} -``` - -## Mappings - -A key and value of TOML will map to the corresponding field. -The fields of struct for mapping must be exported. - -The rules of the mapping of key are following: - -#### Exact matching - -```toml -timeout_seconds = 256 -``` - -```go -type Config struct { - Timeout_seconds int -} -``` - -#### Camelcase matching - -```toml -server_name = "srv1" -``` - -```go -type Config struct { - ServerName string -} -``` - -#### Uppercase matching - -```toml -ip = "10.0.0.1" -``` - -```go -type Config struct { - IP string -} -``` - -See the following examples for the value mappings. - -### String - -```toml -val = "string" -``` - -```go -type Config struct { - Val string -} -``` - -### Integer - -```toml -val = 100 -``` - -```go -type Config struct { - Val int -} -``` - -All types that can be used are following: - -* int8 (from `-128` to `127`) -* int16 (from `-32768` to `32767`) -* int32 (from `-2147483648` to `2147483647`) -* int64 (from `-9223372036854775808` to `9223372036854775807`) -* int (same as `int32` on 32bit environment, or `int64` on 64bit environment) -* uint8 (from `0` to `255`) -* uint16 (from `0` to `65535`) -* uint32 (from `0` to `4294967295`) -* uint64 (from `0` to `18446744073709551615`) -* uint (same as `uint` on 32bit environment, or `uint64` on 64bit environment) - -### Float - -```toml -val = 3.1415 -``` - -```go -type Config struct { - Val float32 -} -``` - -All types that can be used are following: - -* float32 -* float64 - -### Boolean - -```toml -val = true -``` - -```go -type Config struct { - Val bool -} -``` - -### Datetime - -```toml -val = 2014-09-28T21:27:39Z -``` - -```go -type Config struct { - Val time.Time -} -``` - -### Array - -```toml -val = ["a", "b", "c"] -``` - -```go -type Config struct { - Val []string -} -``` - -Also following examples all can be mapped: - -```toml -val1 = [1, 2, 3] -val2 = [["a", "b"], ["c", "d"]] -val3 = [[1, 2, 3], ["a", "b", "c"]] -val4 = [[1, 2, 3], [["a", "b"], [true, false]]] -``` - -```go -type Config struct { - Val1 []int - Val2 [][]string - Val3 [][]interface{} - Val4 [][]interface{} -} -``` - -### Table - -```toml -[server] -type = "app" - - [server.development] - ip = "10.0.0.1" - - [server.production] - ip = "10.0.0.2" -``` - -```go -type Config struct { - Server map[string]Server -} - -type Server struct { - IP string -} -``` - -You can also use the following struct instead of map of struct. 
- -```go -type Config struct { - Server struct { - Development Server - Production Server - } -} - -type Server struct { - IP string -} -``` - -### Array of Tables - -```toml -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" -``` - -```go -type Config struct { - Fruit []struct { - Name string - Physical struct { - Color string - Shape string - } - Variety []struct { - Name string - } - } -} -``` - -### Using `toml.UnmarshalTOML` interface - -```toml -duration = "10s" -``` - -```go -import time - -type Config struct { - Duration Duration -} - -type Duration struct { - time.Duration -} - -func (d *Duration) UnmarshalTOML(data []byte) error { - d.Duration, err := time.ParseDuration(string(data)) - return err -} -``` - -## API documentation - -See [Godoc](http://godoc.org/github.com/naoina/toml). - -## License - -MIT diff --git a/Godeps/_workspace/src/github.com/naoina/toml/ast/ast.go b/Godeps/_workspace/src/github.com/naoina/toml/ast/ast.go deleted file mode 100644 index 68bb0ee15..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/ast/ast.go +++ /dev/null @@ -1,184 +0,0 @@ -package ast - -import ( - "strconv" - "time" -) - -type Position struct { - Begin int - End int -} - -type Value interface { - Pos() int - End() int - Source() string -} - -type String struct { - Position Position - Value string - Data []rune -} - -func (s *String) Pos() int { - return s.Position.Begin -} - -func (s *String) End() int { - return s.Position.End -} - -func (s *String) Source() string { - return string(s.Data) -} - -type Integer struct { - Position Position - Value string - Data []rune -} - -func (i *Integer) Pos() int { - return i.Position.Begin -} - -func (i *Integer) End() int { - return i.Position.End -} - -func (i *Integer) Source() string { - return string(i.Data) -} - -func (i *Integer) Int() (int64, error) { - return strconv.ParseInt(i.Value, 10, 64) -} - -type Float struct { - Position Position - Value string - Data []rune -} - -func (f *Float) Pos() int { - return f.Position.Begin -} - -func (f *Float) End() int { - return f.Position.End -} - -func (f *Float) Source() string { - return string(f.Data) -} - -func (f *Float) Float() (float64, error) { - return strconv.ParseFloat(f.Value, 64) -} - -type Boolean struct { - Position Position - Value string - Data []rune -} - -func (b *Boolean) Pos() int { - return b.Position.Begin -} - -func (b *Boolean) End() int { - return b.Position.End -} - -func (b *Boolean) Source() string { - return string(b.Data) -} - -func (b *Boolean) Boolean() (bool, error) { - return strconv.ParseBool(b.Value) -} - -type Datetime struct { - Position Position - Value string - Data []rune -} - -func (d *Datetime) Pos() int { - return d.Position.Begin -} - -func (d *Datetime) End() int { - return d.Position.End -} - -func (d *Datetime) Source() string { - return string(d.Data) -} - -func (d *Datetime) Time() (time.Time, error) { - return time.Parse(time.RFC3339Nano, d.Value) -} - -type Array struct { - Position Position - Value []Value - Data []rune -} - -func (a *Array) Pos() int { - return a.Position.Begin -} - -func (a *Array) End() int { - return a.Position.End -} - -func (a *Array) Source() string { - return string(a.Data) -} - -type TableType uint8 - -const ( - TableTypeNormal TableType = iota - TableTypeArray -) - -var tableTypes = [...]string{ - "normal", - "array", 
-} - -func (t TableType) String() string { - return tableTypes[t] -} - -type Table struct { - Position Position - Line int - Name string - Fields map[string]interface{} - Type TableType - Data []rune -} - -func (t *Table) Pos() int { - return t.Position.Begin -} - -func (t *Table) End() int { - return t.Position.End -} - -func (t *Table) Source() string { - return string(t.Data) -} - -type KeyValue struct { - Key string - Value Value - Line int -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/decode.go b/Godeps/_workspace/src/github.com/naoina/toml/decode.go deleted file mode 100644 index 3b2465c12..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/decode.go +++ /dev/null @@ -1,649 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/naoina/toml/ast" -) - -const ( - tableSeparator = '.' -) - -var ( - escapeReplacer = strings.NewReplacer( - "\b", "\\n", - "\f", "\\f", - "\n", "\\n", - "\r", "\\r", - "\t", "\\t", - ) - underscoreReplacer = strings.NewReplacer( - "_", "", - ) -) - -// Unmarshal parses the TOML data and stores the result in the value pointed to by v. -// -// Unmarshal will mapped to v that according to following rules: -// -// TOML strings to string -// TOML integers to any int type -// TOML floats to float32 or float64 -// TOML booleans to bool -// TOML datetimes to time.Time -// TOML arrays to any type of slice or []interface{} -// TOML tables to struct -// TOML array of tables to slice of struct -func Unmarshal(data []byte, v interface{}) error { - table, err := Parse(data) - if err != nil { - return err - } - if err := UnmarshalTable(table, v); err != nil { - return fmt.Errorf("toml: unmarshal: %v", err) - } - return nil -} - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -// The input can be assumed to be a valid encoding of a TOML value. -// UnmarshalJSON must copy the TOML data if it wishes to retain the data after -// returning. -type Unmarshaler interface { - UnmarshalTOML([]byte) error -} - -// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v. 
-// -// UnmarshalTable will mapped to v that according to following rules: -// -// TOML strings to string -// TOML integers to any int type -// TOML floats to float32 or float64 -// TOML booleans to bool -// TOML datetimes to time.Time -// TOML arrays to any type of slice or []interface{} -// TOML tables to struct -// TOML array of tables to slice of struct -func UnmarshalTable(t *ast.Table, v interface{}) (err error) { - if v == nil { - return fmt.Errorf("v must not be nil") - } - rv := reflect.ValueOf(v) - if kind := rv.Kind(); kind != reflect.Ptr && kind != reflect.Map { - return fmt.Errorf("v must be a pointer or map") - } - for rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - for key, val := range t.Fields { - switch av := val.(type) { - case *ast.KeyValue: - fv, fieldName, found := findField(rv, key) - if !found { - return fmt.Errorf("line %d: field corresponding to `%s' is not defined in `%T'", av.Line, key, v) - } - switch fv.Kind() { - case reflect.Map: - mv := reflect.New(fv.Type().Elem()).Elem() - if err := UnmarshalTable(t, mv.Addr().Interface()); err != nil { - return err - } - fv.SetMapIndex(reflect.ValueOf(fieldName), mv) - default: - if err := setValue(fv, av.Value); err != nil { - return fmt.Errorf("line %d: %v.%s: %v", av.Line, rv.Type(), fieldName, err) - } - if rv.Kind() == reflect.Map { - rv.SetMapIndex(reflect.ValueOf(fieldName), fv) - } - } - case *ast.Table: - fv, fieldName, found := findField(rv, key) - if !found { - return fmt.Errorf("line %d: field corresponding to `%s' is not defined in `%T'", av.Line, key, v) - } - if err, ok := setUnmarshaler(fv, string(av.Data)); ok { - if err != nil { - return err - } - continue - } - for fv.Kind() == reflect.Ptr { - fv.Set(reflect.New(fv.Type().Elem())) - fv = fv.Elem() - } - switch fv.Kind() { - case reflect.Struct: - vv := reflect.New(fv.Type()).Elem() - if err := UnmarshalTable(av, vv.Addr().Interface()); err != nil { - return err - } - fv.Set(vv) - if rv.Kind() == reflect.Map { - rv.SetMapIndex(reflect.ValueOf(fieldName), fv) - } - case reflect.Map: - mv := reflect.MakeMap(fv.Type()) - if err := UnmarshalTable(av, mv.Interface()); err != nil { - return err - } - fv.Set(mv) - default: - return fmt.Errorf("line %d: `%v.%s' must be struct or map, but %v given", av.Line, rv.Type(), fieldName, fv.Kind()) - } - case []*ast.Table: - fv, fieldName, found := findField(rv, key) - if !found { - return fmt.Errorf("line %d: field corresponding to `%s' is not defined in `%T'", av[0].Line, key, v) - } - data := make([]string, 0, len(av)) - for _, tbl := range av { - data = append(data, string(tbl.Data)) - } - if err, ok := setUnmarshaler(fv, strings.Join(data, "\n")); ok { - if err != nil { - return err - } - continue - } - t := fv.Type().Elem() - pc := 0 - for ; t.Kind() == reflect.Ptr; pc++ { - t = t.Elem() - } - if fv.Kind() != reflect.Slice { - return fmt.Errorf("line %d: `%v.%s' must be slice type, but %v given", av[0].Line, rv.Type(), fieldName, fv.Kind()) - } - for _, tbl := range av { - var vv reflect.Value - switch t.Kind() { - case reflect.Map: - vv = reflect.MakeMap(t) - if err := UnmarshalTable(tbl, vv.Interface()); err != nil { - return err - } - default: - vv = reflect.New(t).Elem() - if err := UnmarshalTable(tbl, vv.Addr().Interface()); err != nil { - return err - } - } - for i := 0; i < pc; i++ { - vv = vv.Addr() - pv := reflect.New(vv.Type()).Elem() - pv.Set(vv) - vv = pv - } - fv.Set(reflect.Append(fv, vv)) - } - if rv.Kind() == reflect.Map { - rv.SetMapIndex(reflect.ValueOf(fieldName), fv) - } - default: - return 
fmt.Errorf("BUG: unknown type `%T'", t) - } - } - return nil -} - -func setUnmarshaler(lhs reflect.Value, data string) (error, bool) { - for lhs.Kind() == reflect.Ptr { - lhs.Set(reflect.New(lhs.Type().Elem())) - lhs = lhs.Elem() - } - if lhs.CanAddr() { - if u, ok := lhs.Addr().Interface().(Unmarshaler); ok { - return u.UnmarshalTOML([]byte(data)), true - } - } - return nil, false -} - -func setValue(lhs reflect.Value, val ast.Value) error { - for lhs.Kind() == reflect.Ptr { - lhs.Set(reflect.New(lhs.Type().Elem())) - lhs = lhs.Elem() - } - if err, ok := setUnmarshaler(lhs, val.Source()); ok { - return err - } - switch v := val.(type) { - case *ast.Integer: - if err := setInt(lhs, v); err != nil { - return err - } - case *ast.Float: - if err := setFloat(lhs, v); err != nil { - return err - } - case *ast.String: - if err := setString(lhs, v); err != nil { - return err - } - case *ast.Boolean: - if err := setBoolean(lhs, v); err != nil { - return err - } - case *ast.Datetime: - if err := setDatetime(lhs, v); err != nil { - return err - } - case *ast.Array: - if err := setArray(lhs, v); err != nil { - return err - } - } - return nil -} - -func setInt(fv reflect.Value, v *ast.Integer) error { - i, err := v.Int() - if err != nil { - return err - } - switch fv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if fv.OverflowInt(i) { - return &errorOutOfRange{fv.Kind(), i} - } - fv.SetInt(i) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - fv.SetUint(uint64(i)) - case reflect.Interface: - fv.Set(reflect.ValueOf(i)) - default: - return fmt.Errorf("`%v' is not any types of int", fv.Type()) - } - return nil -} - -func setFloat(fv reflect.Value, v *ast.Float) error { - f, err := v.Float() - if err != nil { - return err - } - switch fv.Kind() { - case reflect.Float32, reflect.Float64: - if fv.OverflowFloat(f) { - return &errorOutOfRange{fv.Kind(), f} - } - fv.SetFloat(f) - case reflect.Interface: - fv.Set(reflect.ValueOf(f)) - default: - return fmt.Errorf("`%v' is not float32 or float64", fv.Type()) - } - return nil -} - -func setString(fv reflect.Value, v *ast.String) error { - return set(fv, v.Value) -} - -func setBoolean(fv reflect.Value, v *ast.Boolean) error { - b, err := v.Boolean() - if err != nil { - return err - } - return set(fv, b) -} - -func setDatetime(fv reflect.Value, v *ast.Datetime) error { - tm, err := v.Time() - if err != nil { - return err - } - return set(fv, tm) -} - -func setArray(fv reflect.Value, v *ast.Array) error { - if len(v.Value) == 0 { - return nil - } - typ := reflect.TypeOf(v.Value[0]) - for _, vv := range v.Value[1:] { - if typ != reflect.TypeOf(vv) { - return fmt.Errorf("array cannot contain multiple types") - } - } - sliceType := fv.Type() - if fv.Kind() == reflect.Interface { - sliceType = reflect.SliceOf(sliceType) - } - slice := reflect.MakeSlice(sliceType, 0, len(v.Value)) - t := sliceType.Elem() - for _, vv := range v.Value { - tmp := reflect.New(t).Elem() - if err := setValue(tmp, vv); err != nil { - return err - } - slice = reflect.Append(slice, tmp) - } - fv.Set(slice) - return nil -} - -func set(fv reflect.Value, v interface{}) error { - rhs := reflect.ValueOf(v) - if !rhs.Type().AssignableTo(fv.Type()) { - return fmt.Errorf("`%v' type is not assignable to `%v' type", rhs.Type(), fv.Type()) - } - fv.Set(rhs) - return nil -} - -type stack struct { - key string - table *ast.Table -} - -type toml struct { - table *ast.Table - line int - currentTable *ast.Table - s string - key string 
- val ast.Value - arr *array - tableMap map[string]*ast.Table - stack []*stack - skip bool -} - -func (p *toml) init() { - p.line = 1 - p.table = &ast.Table{ - Line: p.line, - Type: ast.TableTypeNormal, - } - p.tableMap = map[string]*ast.Table{ - "": p.table, - } - p.currentTable = p.table -} - -func (p *toml) Error(err error) { - panic(convertError{fmt.Errorf("toml: line %d: %v", p.line, err)}) -} - -func (p *tomlParser) SetTime(begin, end int) { - p.val = &ast.Datetime{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: string(p.buffer[begin:end]), - } -} - -func (p *tomlParser) SetFloat64(begin, end int) { - p.val = &ast.Float{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), - } -} - -func (p *tomlParser) SetInt64(begin, end int) { - p.val = &ast.Integer{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), - } -} - -func (p *tomlParser) SetString(begin, end int) { - p.val = &ast.String{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: p.s, - } - p.s = "" -} - -func (p *tomlParser) SetBool(begin, end int) { - p.val = &ast.Boolean{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: string(p.buffer[begin:end]), - } -} - -func (p *tomlParser) StartArray() { - if p.arr == nil { - p.arr = &array{line: p.line, current: &ast.Array{}} - return - } - p.arr.child = &array{parent: p.arr, line: p.line, current: &ast.Array{}} - p.arr = p.arr.child -} - -func (p *tomlParser) AddArrayVal() { - if p.arr.current == nil { - p.arr.current = &ast.Array{} - } - p.arr.current.Value = append(p.arr.current.Value, p.val) -} - -func (p *tomlParser) SetArray(begin, end int) { - p.arr.current.Position = ast.Position{Begin: begin, End: end} - p.arr.current.Data = p.buffer[begin:end] - p.val = p.arr.current - p.arr = p.arr.parent -} - -func (p *toml) SetTable(buf []rune, begin, end int) { - p.setTable(p.table, buf, begin, end) -} - -func (p *toml) setTable(t *ast.Table, buf []rune, begin, end int) { - name := string(buf[begin:end]) - names := splitTableKey(name) - if t, exists := p.tableMap[name]; exists { - if lt := p.tableMap[names[len(names)-1]]; t.Type == ast.TableTypeArray || lt != nil && lt.Type == ast.TableTypeNormal { - p.Error(fmt.Errorf("table `%s' is in conflict with %v table in line %d", name, t.Type, t.Line)) - } - } - t, err := p.lookupTable(t, names) - if err != nil { - p.Error(err) - } - p.currentTable = t - p.tableMap[name] = p.currentTable -} - -func (p *tomlParser) SetTableString(begin, end int) { - p.currentTable.Data = p.buffer[begin:end] - - p.currentTable.Position.Begin = begin - p.currentTable.Position.End = end -} - -func (p *toml) SetArrayTable(buf []rune, begin, end int) { - p.setArrayTable(p.table, buf, begin, end) -} - -func (p *toml) setArrayTable(t *ast.Table, buf []rune, begin, end int) { - name := string(buf[begin:end]) - if t, exists := p.tableMap[name]; exists && t.Type == ast.TableTypeNormal { - p.Error(fmt.Errorf("table `%s' is in conflict with %v table in line %d", name, t.Type, t.Line)) - } - names := splitTableKey(name) - t, err := p.lookupTable(t, names[:len(names)-1]) - if err != nil { - p.Error(err) - } - last := names[len(names)-1] - tbl := &ast.Table{ - Position: ast.Position{begin, end}, - Line: p.line, - Name: last, - Type: ast.TableTypeArray, - } - switch v := 
t.Fields[last].(type) { - case nil: - if t.Fields == nil { - t.Fields = make(map[string]interface{}) - } - t.Fields[last] = []*ast.Table{tbl} - case []*ast.Table: - t.Fields[last] = append(v, tbl) - case *ast.KeyValue: - p.Error(fmt.Errorf("key `%s' is in conflict with line %d", last, v.Line)) - default: - p.Error(fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", last, v)) - } - p.currentTable = tbl - p.tableMap[name] = p.currentTable -} - -func (p *toml) StartInlineTable() { - p.skip = false - p.stack = append(p.stack, &stack{p.key, p.currentTable}) - buf := []rune(p.key) - if p.arr == nil { - p.setTable(p.currentTable, buf, 0, len(buf)) - } else { - p.setArrayTable(p.currentTable, buf, 0, len(buf)) - } -} - -func (p *toml) EndInlineTable() { - st := p.stack[len(p.stack)-1] - p.key, p.currentTable = st.key, st.table - p.stack[len(p.stack)-1] = nil - p.stack = p.stack[:len(p.stack)-1] - p.skip = true -} - -func (p *toml) AddLineCount(i int) { - p.line += i -} - -func (p *toml) SetKey(buf []rune, begin, end int) { - p.key = string(buf[begin:end]) -} - -func (p *toml) AddKeyValue() { - if p.skip { - p.skip = false - return - } - if val, exists := p.currentTable.Fields[p.key]; exists { - switch v := val.(type) { - case *ast.Table: - p.Error(fmt.Errorf("key `%s' is in conflict with %v table in line %d", p.key, v.Type, v.Line)) - case *ast.KeyValue: - p.Error(fmt.Errorf("key `%s' is in conflict with line %d", p.key, v.Line)) - default: - p.Error(fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", p.key, v)) - } - } - if p.currentTable.Fields == nil { - p.currentTable.Fields = make(map[string]interface{}) - } - p.currentTable.Fields[p.key] = &ast.KeyValue{ - Key: p.key, - Value: p.val, - Line: p.line, - } -} - -func (p *toml) SetBasicString(buf []rune, begin, end int) { - p.s = p.unquote(string(buf[begin:end])) -} - -func (p *toml) SetMultilineString() { - p.s = p.unquote(`"` + escapeReplacer.Replace(strings.TrimLeft(p.s, "\r\n")) + `"`) -} - -func (p *toml) AddMultilineBasicBody(buf []rune, begin, end int) { - p.s += string(buf[begin:end]) -} - -func (p *toml) SetLiteralString(buf []rune, begin, end int) { - p.s = string(buf[begin:end]) -} - -func (p *toml) SetMultilineLiteralString(buf []rune, begin, end int) { - p.s = strings.TrimLeft(string(buf[begin:end]), "\r\n") -} - -func (p *toml) unquote(s string) string { - s, err := strconv.Unquote(s) - if err != nil { - p.Error(err) - } - return s -} - -func (p *toml) lookupTable(t *ast.Table, keys []string) (*ast.Table, error) { - for _, s := range keys { - val, exists := t.Fields[s] - if !exists { - tbl := &ast.Table{ - Line: p.line, - Name: s, - Type: ast.TableTypeNormal, - } - if t.Fields == nil { - t.Fields = make(map[string]interface{}) - } - t.Fields[s] = tbl - t = tbl - continue - } - switch v := val.(type) { - case *ast.Table: - t = v - case []*ast.Table: - t = v[len(v)-1] - case *ast.KeyValue: - return nil, fmt.Errorf("key `%s' is in conflict with line %d", s, v.Line) - default: - return nil, fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", s, v) - } - } - return t, nil -} - -func splitTableKey(tk string) []string { - key := make([]byte, 0, 1) - keys := make([]string, 0, 1) - inQuote := false - for i := 0; i < len(tk); i++ { - k := tk[i] - switch { - case k == tableSeparator && !inQuote: - keys = append(keys, string(key)) - key = key[:0] // reuse buffer. - case k == '"': - inQuote = !inQuote - case (k == ' ' || k == '\t') && !inQuote: - // skip. 
- default: - key = append(key, k) - } - } - keys = append(keys, string(key)) - return keys -} - -type convertError struct { - err error -} - -func (e convertError) Error() string { - return e.err.Error() -} - -type array struct { - parent *array - child *array - current *ast.Array - line int -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/decode_bench_test.go b/Godeps/_workspace/src/github.com/naoina/toml/decode_bench_test.go deleted file mode 100644 index b85c1c680..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/decode_bench_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package toml_test - -import ( - "testing" - "time" - - "github.com/naoina/toml" -) - -func BenchmarkUnmarshal(b *testing.B) { - var v struct { - Title string - Owner struct { - Name string - Organization string - Bio string - Dob time.Time - } - Database struct { - Server string - Ports []int - ConnectionMax int - Enabled bool - } - Servers struct { - Alpha struct { - IP string - DC string - } - Beta struct { - IP string - DC string - } - } - Clients struct { - Data []interface{} - Hosts []string - } - } - data, err := loadTestData() - if err != nil { - b.Fatal(err) - } - for i := 0; i < b.N; i++ { - if err := toml.Unmarshal(data, &v); err != nil { - b.Fatal(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/decode_test.go b/Godeps/_workspace/src/github.com/naoina/toml/decode_test.go deleted file mode 100644 index 1fcae9b7a..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/decode_test.go +++ /dev/null @@ -1,1083 +0,0 @@ -package toml_test - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/naoina/toml" -) - -const ( - dataDir = "testdata" -) - -func loadTestData() ([]byte, error) { - f := filepath.Join(dataDir, "test.toml") - data, err := ioutil.ReadFile(f) - if err != nil { - return nil, err - } - return data, nil -} - -func mustTime(tm time.Time, err error) time.Time { - if err != nil { - panic(err) - } - return tm -} - -type Name struct { - First string - Last string -} -type Point struct { - X int - Y int -} -type Inline struct { - Name Name - Point Point -} -type Subtable struct { - Key string -} -type Table struct { - Key string - Subtable Subtable - Inline Inline -} -type W struct { -} -type Z struct { - W W -} -type Y struct { - Z Z -} -type X struct { - Y Y -} -type Basic struct { - Basic string -} -type Continued struct { - Key1 string - Key2 string - Key3 string -} -type Multiline struct { - Key1 string - Key2 string - Key3 string - Continued Continued -} -type LiteralMultiline struct { - Regex2 string - Lines string -} -type Literal struct { - Winpath string - Winpath2 string - Quoted string - Regex string - Multiline LiteralMultiline -} -type String struct { - Basic Basic - Multiline Multiline - Literal Literal -} -type IntegerUnderscores struct { - Key1 int - Key2 int - Key3 int -} -type Integer struct { - Key1 int - Key2 int - Key3 int - Key4 int - Underscores IntegerUnderscores -} -type Fractional struct { - Key1 float64 - Key2 float64 - Key3 float64 -} -type Exponent struct { - Key1 float64 - Key2 float64 - Key3 float64 -} -type Both struct { - Key float64 -} -type FloatUnderscores struct { - Key1 float64 - Key2 float64 -} -type Float struct { - Fractional Fractional - Exponent Exponent - Both Both - Underscores FloatUnderscores -} -type Boolean struct { - True bool - False bool -} -type Datetime struct { - Key1 time.Time - Key2 time.Time - Key3 time.Time -} -type Array struct { - Key1 []int - Key2 
[]string - Key3 [][]int - Key4 [][]interface{} - Key5 []int - Key6 []int -} -type Product struct { - Name string - Sku int64 - Color string -} -type Physical struct { - Color string - Shape string -} -type Variety struct { - Name string -} -type Fruit struct { - Name string - Physical Physical - Variety []Variety -} -type testStruct struct { - Table Table - X X - String String - Integer Integer - Float Float - Boolean Boolean - Datetime Datetime - Array Array - Products []Product - Fruit []Fruit -} - -func TestUnmarshal(t *testing.T) { - data, err := loadTestData() - if err != nil { - t.Fatal(err) - } - var v testStruct - var actual interface{} = toml.Unmarshal(data, &v) - var expect interface{} = nil - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`toml.Unmarshal(data, &testStruct{}) => %#v; want %#v`, actual, expect) - } - - actual = v - expect = testStruct{ - Table: Table{ - Key: "value", - Subtable: Subtable{ - Key: "another value", - }, - Inline: Inline{ - Name: Name{ - First: "Tom", - Last: "Preston-Werner", - }, - Point: Point{ - X: 1, - Y: 2, - }, - }, - }, - X: X{}, - String: String{ - Basic: Basic{ - Basic: "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF.", - }, - Multiline: Multiline{ - Key1: "One\nTwo", - Key2: "One\nTwo", - Key3: "One\nTwo", - Continued: Continued{ - Key1: "The quick brown fox jumps over the lazy dog.", - Key2: "The quick brown fox jumps over the lazy dog.", - Key3: "The quick brown fox jumps over the lazy dog.", - }, - }, - Literal: Literal{ - Winpath: `C:\Users\nodejs\templates`, - Winpath2: `\\ServerX\admin$\system32\`, - Quoted: `Tom "Dubs" Preston-Werner`, - Regex: `<\i\c*\s*>`, - Multiline: LiteralMultiline{ - Regex2: `I [dw]on't need \d{2} apples`, - Lines: "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", - }, - }, - }, - Integer: Integer{ - Key1: 99, - Key2: 42, - Key3: 0, - Key4: -17, - Underscores: IntegerUnderscores{ - Key1: 1000, - Key2: 5349221, - Key3: 12345, - }, - }, - Float: Float{ - Fractional: Fractional{ - Key1: 1.0, - Key2: 3.1415, - Key3: -0.01, - }, - Exponent: Exponent{ - Key1: 5e22, - Key2: 1e6, - Key3: -2e-2, - }, - Both: Both{ - Key: 6.626e-34, - }, - Underscores: FloatUnderscores{ - Key1: 9224617.445991228313, - Key2: 1e100, - }, - }, - Boolean: Boolean{ - True: true, - False: false, - }, - Datetime: Datetime{ - Key1: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T07:32:00Z")), - Key2: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00-07:00")), - Key3: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00.999999-07:00")), - }, - Array: Array{ - Key1: []int{1, 2, 3}, - Key2: []string{"red", "yellow", "green"}, - Key3: [][]int{{1, 2}, {3, 4, 5}}, - Key4: [][]interface{}{{int64(1), int64(2)}, {"a", "b", "c"}}, - Key5: []int{1, 2, 3}, - Key6: []int{1, 2}, - }, - Products: []Product{ - {Name: "Hammer", Sku: 738594937}, - {}, - {Name: "Nail", Sku: 284758393, Color: "gray"}, - }, - Fruit: []Fruit{ - { - Name: "apple", - Physical: Physical{ - Color: "red", - Shape: "round", - }, - Variety: []Variety{ - {Name: "red delicious"}, - {Name: "granny smith"}, - }, - }, - { - Name: "banana", - Variety: []Variety{ - {Name: "plantain"}, - }, - }, - }, - } - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`toml.Unmarshal(data, v); v => %#v; want %#v`, actual, expect) - } -} - -type testcase struct { - data string - err error - actual interface{} - expect interface{} -} - -func testUnmarshal(t *testing.T, testcases []testcase) { - for _, v := range testcases { - var 
actual error = toml.Unmarshal([]byte(v.data), v.actual) - var expect error = v.err - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`toml.Unmarshal([]byte(%#v), %#v) => %#v; want %#v`, v.data, nil, actual, expect) - } - if !reflect.DeepEqual(v.actual, v.expect) { - t.Errorf(`toml.Unmarshal([]byte(%#v), v); v => %#v; want %#v`, v.data, v.actual, v.expect) - } - } -} - -func TestUnmarshal_WithString(t *testing.T) { - type testStruct struct { - Str string - Key1 string - Key2 string - Key3 string - Winpath string - Winpath2 string - Quoted string - Regex string - Regex2 string - Lines string - } - testUnmarshal(t, []testcase{ - {`str = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."`, nil, &testStruct{}, &testStruct{ - Str: "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF.", - }}, - {`key1 = "One\nTwo" -key2 = """One\nTwo""" -key3 = """ -One -Two""" -`, nil, &testStruct{}, &testStruct{ - Key1: "One\nTwo", - Key2: "One\nTwo", - Key3: "One\nTwo", - }}, - {`# The following strings are byte-for-byte equivalent: -key1 = "The quick brown fox jumps over the lazy dog." - -key2 = """ -The quick brown \ - - - fox jumps over \ - the lazy dog.""" - -key3 = """\ - The quick brown \ - fox jumps over \ - the lazy dog.\ - """`, nil, &testStruct{}, &testStruct{ - Key1: "The quick brown fox jumps over the lazy dog.", - Key2: "The quick brown fox jumps over the lazy dog.", - Key3: "The quick brown fox jumps over the lazy dog.", - }}, - {`# What you see is what you get. -winpath = 'C:\Users\nodejs\templates' -winpath2 = '\\ServerX\admin$\system32\' -quoted = 'Tom "Dubs" Preston-Werner' -regex = '<\i\c*\s*>'`, nil, &testStruct{}, &testStruct{ - Winpath: `C:\Users\nodejs\templates`, - Winpath2: `\\ServerX\admin$\system32\`, - Quoted: `Tom "Dubs" Preston-Werner`, - Regex: `<\i\c*\s*>`, - }}, - {`regex2 = '''I [dw]on't need \d{2} apples''' -lines = ''' -The first newline is -trimmed in raw strings. - All other whitespace - is preserved. 
-'''`, nil, &testStruct{}, &testStruct{ - Regex2: `I [dw]on't need \d{2} apples`, - Lines: "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", - }}, - }) -} - -func TestUnmarshal_WithInteger(t *testing.T) { - type testStruct struct { - Intval int64 - } - testUnmarshal(t, []testcase{ - {`intval = 0`, nil, &testStruct{}, &testStruct{0}}, - {`intval = +0`, nil, &testStruct{}, &testStruct{0}}, - {`intval = -0`, nil, &testStruct{}, &testStruct{-0}}, - {`intval = 1`, nil, &testStruct{}, &testStruct{1}}, - {`intval = +1`, nil, &testStruct{}, &testStruct{1}}, - {`intval = -1`, nil, &testStruct{}, &testStruct{-1}}, - {`intval = 10`, nil, &testStruct{}, &testStruct{10}}, - {`intval = 777`, nil, &testStruct{}, &testStruct{777}}, - {`intval = 2147483647`, nil, &testStruct{}, &testStruct{2147483647}}, - {`intval = 2147483648`, nil, &testStruct{}, &testStruct{2147483648}}, - {`intval = +2147483648`, nil, &testStruct{}, &testStruct{2147483648}}, - {`intval = -2147483648`, nil, &testStruct{}, &testStruct{-2147483648}}, - {`intval = -2147483649`, nil, &testStruct{}, &testStruct{-2147483649}}, - {`intval = 9223372036854775807`, nil, &testStruct{}, &testStruct{9223372036854775807}}, - {`intval = +9223372036854775807`, nil, &testStruct{}, &testStruct{9223372036854775807}}, - {`intval = 9223372036854775808`, fmt.Errorf(`toml: unmarshal: line 1: toml_test.testStruct.Intval: strconv.ParseInt: parsing "9223372036854775808": value out of range`), &testStruct{}, &testStruct{}}, - {`intval = +9223372036854775808`, fmt.Errorf(`toml: unmarshal: line 1: toml_test.testStruct.Intval: strconv.ParseInt: parsing "+9223372036854775808": value out of range`), &testStruct{}, &testStruct{}}, - {`intval = -9223372036854775808`, nil, &testStruct{}, &testStruct{-9223372036854775808}}, - {`intval = -9223372036854775809`, fmt.Errorf(`toml: unmarshal: line 1: toml_test.testStruct.Intval: strconv.ParseInt: parsing "-9223372036854775809": value out of range`), &testStruct{}, &testStruct{}}, - {`intval = 1_000`, nil, &testStruct{}, &testStruct{1000}}, - {`intval = 5_349_221`, nil, &testStruct{}, &testStruct{5349221}}, - {`intval = 1_2_3_4_5`, nil, &testStruct{}, &testStruct{12345}}, - {`intval = _1_000`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`intval = 1_000_`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - }) -} - -func TestUnmarshal_WithFloat(t *testing.T) { - type testStruct struct { - Floatval float64 - } - testUnmarshal(t, []testcase{ - {`floatval = 0.0`, nil, &testStruct{}, &testStruct{0.0}}, - {`floatval = +0.0`, nil, &testStruct{}, &testStruct{0.0}}, - {`floatval = -0.0`, nil, &testStruct{}, &testStruct{-0.0}}, - {`floatval = 0.1`, nil, &testStruct{}, &testStruct{0.1}}, - {`floatval = +0.1`, nil, &testStruct{}, &testStruct{0.1}}, - {`floatval = -0.1`, nil, &testStruct{}, &testStruct{-0.1}}, - {`floatval = 0.2`, nil, &testStruct{}, &testStruct{0.2}}, - {`floatval = +0.2`, nil, &testStruct{}, &testStruct{0.2}}, - {`floatval = -0.2`, nil, &testStruct{}, &testStruct{-0.2}}, - {`floatval = 1.0`, nil, &testStruct{}, &testStruct{1.0}}, - {`floatval = +1.0`, nil, &testStruct{}, &testStruct{1.0}}, - {`floatval = -1.0`, nil, &testStruct{}, &testStruct{-1.0}}, - {`floatval = 1.1`, nil, &testStruct{}, &testStruct{1.1}}, - {`floatval = +1.1`, nil, &testStruct{}, &testStruct{1.1}}, - {`floatval = -1.1`, nil, &testStruct{}, &testStruct{-1.1}}, - {`floatval = 3.1415`, nil, &testStruct{}, &testStruct{3.1415}}, - {`floatval = +3.1415`, nil, 
&testStruct{}, &testStruct{3.1415}}, - {`floatval = -3.1415`, nil, &testStruct{}, &testStruct{-3.1415}}, - {`floatval = 10.2e5`, nil, &testStruct{}, &testStruct{10.2e5}}, - {`floatval = +10.2e5`, nil, &testStruct{}, &testStruct{10.2e5}}, - {`floatval = -10.2e5`, nil, &testStruct{}, &testStruct{-10.2e5}}, - {`floatval = 10.2E5`, nil, &testStruct{}, &testStruct{10.2e5}}, - {`floatval = +10.2E5`, nil, &testStruct{}, &testStruct{10.2e5}}, - {`floatval = -10.2E5`, nil, &testStruct{}, &testStruct{-10.2e5}}, - {`floatval = 5e+22`, nil, &testStruct{}, &testStruct{5e+22}}, - {`floatval = 1e6`, nil, &testStruct{}, &testStruct{1e6}}, - {`floatval = -2E-2`, nil, &testStruct{}, &testStruct{-2E-2}}, - {`floatval = 6.626e-34`, nil, &testStruct{}, &testStruct{6.626e-34}}, - {`floatval = 9_224_617.445_991_228_313`, nil, &testStruct{}, &testStruct{9224617.445991228313}}, - {`floatval = 1e1_00`, nil, &testStruct{}, &testStruct{1e100}}, - {`floatval = 1e02`, nil, &testStruct{}, &testStruct{1e2}}, - {`floatval = _1e1_00`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`floatval = 1e1_00_`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - }) -} - -func TestUnmarshal_WithBoolean(t *testing.T) { - type testStruct struct { - Boolval bool - } - testUnmarshal(t, []testcase{ - {`boolval = true`, nil, &testStruct{}, &testStruct{true}}, - {`boolval = false`, nil, &testStruct{}, &testStruct{false}}, - }) -} - -func TestUnmarshal_WithDatetime(t *testing.T) { - type testStruct struct { - Datetimeval time.Time - } - testUnmarshal(t, []testcase{ - {`datetimeval = 1979-05-27T07:32:00Z`, nil, &testStruct{}, &testStruct{ - mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T07:32:00Z")), - }}, - {`datetimeval = 2014-09-13T12:37:39Z`, nil, &testStruct{}, &testStruct{ - mustTime(time.Parse(time.RFC3339Nano, "2014-09-13T12:37:39Z")), - }}, - {`datetimeval = 1979-05-27T00:32:00-07:00`, nil, &testStruct{}, &testStruct{ - mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00-07:00")), - }}, - {`datetimeval = 1979-05-27T00:32:00.999999-07:00`, nil, &testStruct{}, &testStruct{ - mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00.999999-07:00")), - }}, - }) -} - -func TestUnmarshal_WithArray(t *testing.T) { - testUnmarshal(t, []testcase{ - {`arrayval = []`, nil, &struct{ Arrayval []interface{} }{}, &struct{ Arrayval []interface{} }{}}, - {`arrayval = [ 1 ]`, nil, &struct{ Arrayval []int }{}, - &struct { - Arrayval []int - }{ - []int{1}, - }}, - {`arrayval = [ 1, 2, 3 ]`, nil, &struct{ Arrayval []int }{}, - &struct { - Arrayval []int - }{ - []int{1, 2, 3}, - }}, - {`arrayval = [ 1, 2, 3, ]`, nil, &struct{ Arrayval []int }{}, - &struct { - Arrayval []int - }{ - []int{1, 2, 3}, - }}, - {`arrayval = ["red", "yellow", "green"]`, nil, &struct{ Arrayval []string }{}, - &struct{ Arrayval []string }{ - []string{"red", "yellow", "green"}, - }}, - {`arrayval = [ "all", 'strings', """are the same""", '''type''']`, nil, &struct{ Arrayval []string }{}, - &struct{ Arrayval []string }{ - []string{"all", "strings", "are the same", "type"}, - }}, - {`arrayval = [[1,2],[3,4,5]]`, nil, &struct{ Arrayval [][]int }{}, - &struct{ Arrayval [][]int }{ - [][]int{ - []int{1, 2}, - []int{3, 4, 5}, - }, - }}, - {`arrayval = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok`, nil, &struct{ Arrayval [][]interface{} }{}, - &struct{ Arrayval [][]interface{} }{ - [][]interface{}{ - []interface{}{int64(1), int64(2)}, - []interface{}{"a", "b", "c"}, - }, - }}, - {`arrayval = [ [ 1, 2 ], [ [3, 4], [5, 6] ] ] # this 
is ok`, nil, &struct{ Arrayval [][]interface{} }{}, - &struct{ Arrayval [][]interface{} }{ - [][]interface{}{ - []interface{}{int64(1), int64(2)}, - []interface{}{ - []interface{}{int64(3), int64(4)}, - []interface{}{int64(5), int64(6)}, - }, - }, - }}, - {`arrayval = [ [ 1, 2 ], [ [3, 4], [5, 6], [7, 8] ] ] # this is ok`, nil, &struct{ Arrayval [][]interface{} }{}, - &struct{ Arrayval [][]interface{} }{ - [][]interface{}{ - []interface{}{int64(1), int64(2)}, - []interface{}{ - []interface{}{int64(3), int64(4)}, - []interface{}{int64(5), int64(6)}, - []interface{}{int64(7), int64(8)}, - }, - }, - }}, - {`arrayval = [ [[ 1, 2 ]], [3, 4], [5, 6] ] # this is ok`, nil, &struct{ Arrayval [][]interface{} }{}, - &struct{ Arrayval [][]interface{} }{ - [][]interface{}{ - []interface{}{ - []interface{}{int64(1), int64(2)}, - }, - []interface{}{int64(3), int64(4)}, - []interface{}{int64(5), int64(6)}, - }, - }}, - {`arrayval = [ 1, 2.0 ] # note: this is NOT ok`, fmt.Errorf("toml: unmarshal: line 1: struct { Arrayval []interface {} }.Arrayval: array cannot contain multiple types"), &struct{ Arrayval []interface{} }{}, &struct{ Arrayval []interface{} }{}}, - {`key = [ - 1, 2, 3 -]`, nil, &struct{ Key []int }{}, - &struct{ Key []int }{ - []int{1, 2, 3}, - }}, - {`key = [ - 1, - 2, # this is ok -]`, nil, &struct{ Key []int }{}, - &struct{ Key []int }{ - []int{1, 2}, - }}, - }) -} - -func TestUnmarshal_WithTable(t *testing.T) { - type W struct{} - type Z struct { - W W - } - type Y struct { - Z Z - } - type X struct { - Y Y - } - type testStruct struct { - Table struct { - Key string - } - Dog struct { - Tater struct{} - } - X X - A struct { - D int - B struct { - C int - } - } - } - type testQuotedKeyStruct struct { - Dog struct { - TaterMan struct { - Type string - } `toml:"tater.man"` - } - } - type testQuotedKeyWithWhitespaceStruct struct { - Dog struct { - TaterMan struct { - Type string - } `toml:"tater . man"` - } - } - type testStructWithMap struct { - Servers map[string]struct { - IP string - DC string - } - } - testUnmarshal(t, []testcase{ - {`[table]`, nil, &testStruct{}, &testStruct{}}, - {`[table] -key = "value"`, nil, &testStruct{}, - &testStruct{ - Table: struct { - Key string - }{ - Key: "value", - }, - }}, - {`[dog.tater]`, nil, &testStruct{}, - &testStruct{ - Dog: struct { - Tater struct{} - }{ - Tater: struct{}{}, - }, - }}, - {`[dog."tater.man"] -type = "pug"`, nil, &testQuotedKeyStruct{}, - &testQuotedKeyStruct{ - Dog: struct { - TaterMan struct { - Type string - } `toml:"tater.man"` - }{ - TaterMan: struct { - Type string - }{ - Type: "pug", - }, - }, - }}, - {`[dog."tater . man"] -type = "pug"`, nil, &testQuotedKeyWithWhitespaceStruct{}, - &testQuotedKeyWithWhitespaceStruct{ - Dog: struct { - TaterMan struct { - Type string - } `toml:"tater . man"` - }{ - TaterMan: struct { - Type string - }{ - Type: "pug", - }, - }, - }}, - {`[x.y.z.w] # for this to work`, nil, &testStruct{}, - &testStruct{ - X: X{}, - }}, - {`[ x . y . z . w ]`, nil, &testStruct{}, - &testStruct{ - X: X{}, - }}, - {`[ x . "y" . z . 
"w" ]`, nil, &testStruct{}, - &testStruct{ - X: X{}, - }}, - {`table = {}`, nil, &testStruct{}, &testStruct{}}, - {`table = { key = "value" }`, nil, &testStruct{}, &testStruct{ - Table: struct { - Key string - }{ - Key: "value", - }, - }}, - {`x = { y = { "z" = { w = {} } } }`, nil, &testStruct{}, &testStruct{X: X{}}}, - {`[a.b] -c = 1 - -[a] -d = 2`, nil, &testStruct{}, - &testStruct{ - A: struct { - D int - B struct { - C int - } - }{ - D: 2, - B: struct { - C int - }{ - C: 1, - }, - }, - }}, - {`# DO NOT DO THIS - -[a] -b = 1 - -[a] -c = 2`, fmt.Errorf("toml: line 6: table `a' is in conflict with normal table in line 3"), &testStruct{}, &testStruct{}}, - {`# DO NOT DO THIS EITHER - -[a] -b = 1 - -[a.b] -c = 2`, fmt.Errorf("toml: line 6: key `b' is in conflict with line 4"), &testStruct{}, &testStruct{}}, - {`# DO NOT DO THIS EITHER - -[a.b] -c = 2 - -[a] -b = 1`, fmt.Errorf("toml: line 7: key `b' is in conflict with normal table in line 3"), &testStruct{}, &testStruct{}}, - {`[]`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`[a.]`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`[a..b]`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`[.b]`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`[.]`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {` = "no key name" # not allowed`, fmt.Errorf("toml: line 1: parse error"), &testStruct{}, &testStruct{}}, - {`[servers] -[servers.alpha] -ip = "10.0.0.1" -dc = "eqdc10" -[servers.beta] -ip = "10.0.0.2" -dc = "eqdc10" -`, nil, &testStructWithMap{}, - &testStructWithMap{ - Servers: map[string]struct { - IP string - DC string - }{ - "alpha": { - IP: "10.0.0.1", - DC: "eqdc10", - }, - "beta": { - IP: "10.0.0.2", - DC: "eqdc10", - }, - }, - }}, - }) -} - -func TestUnmarshal_WithArrayTable(t *testing.T) { - type Product struct { - Name string - SKU int64 - Color string - } - type Physical struct { - Color string - Shape string - } - type Variety struct { - Name string - } - type Fruit struct { - Name string - Physical Physical - Variety []Variety - } - type testStruct struct { - Products []Product - Fruit []Fruit - } - type testStructWithMap struct { - Fruit []map[string][]struct { - Name string - } - } - testUnmarshal(t, []testcase{ - {`[[products]] - name = "Hammer" - sku = 738594937 - - [[products]] - - [[products]] - name = "Nail" - sku = 284758393 - color = "gray"`, nil, &testStruct{}, - &testStruct{ - Products: []Product{ - {Name: "Hammer", SKU: 738594937}, - {}, - {Name: "Nail", SKU: 284758393, Color: "gray"}, - }, - }}, - {`products = [{name = "Hammer", sku = 738594937}, {}, -{name = "Nail", sku = 284758393, color = "gray"}]`, nil, &testStruct{}, &testStruct{ - Products: []Product{ - {Name: "Hammer", SKU: 738594937}, - {}, - {Name: "Nail", SKU: 284758393, Color: "gray"}, - }, - }}, - {`[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - - [[fruit]] - name = "banana" - - [fruit.physical] - color = "yellow" - shape = "lune" - - [[fruit.variety]] - name = "plantain"`, nil, &testStruct{}, - &testStruct{ - Fruit: []Fruit{ - { - Name: "apple", - Physical: Physical{ - Color: "red", - Shape: "round", - }, - Variety: []Variety{ - {Name: "red delicious"}, - {Name: "granny smith"}, - }, - }, - { - Name: "banana", - Physical: Physical{ - Color: "yellow", - Shape: "lune", - }, - Variety: []Variety{ 
- {Name: "plantain"}, - }, - }, - }, - }}, - {`[[fruit]] - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - - [[fruit]] - - [[fruit.variety]] - name = "plantain" - - [[fruit.area]] - name = "phillippines"`, nil, &testStructWithMap{}, - &testStructWithMap{ - Fruit: []map[string][]struct { - Name string - }{ - { - "variety": { - {Name: "red delicious"}, - {Name: "granny smith"}, - }, - }, - { - "variety": { - {Name: "plantain"}, - }, - "area": { - {Name: "phillippines"}, - }, - }, - }, - }}, - {`# INVALID TOML DOC - [[fruit]] - name = "apple" - - [[fruit.variety]] - name = "red delicious" - - # This table conflicts with the previous table - [fruit.variety] - name = "granny smith"`, fmt.Errorf("toml: line 9: table `fruit.variety' is in conflict with array table in line 5"), &testStruct{}, &testStruct{}}, - {`# INVALID TOML DOC - [[fruit]] - name = "apple" - - [fruit.variety] - name = "granny smith" - - # This table conflicts with the previous table - [[fruit.variety]] - name = "red delicious"`, fmt.Errorf("toml: line 9: table `fruit.variety' is in conflict with normal table in line 5"), &testStruct{}, &testStruct{}}, - }) -} - -type UnmarshalString string - -func (u *UnmarshalString) UnmarshalTOML(data []byte) error { - *u = UnmarshalString("UnmarshalString: " + string(data)) - return nil -} - -func TestUnmarshal_WithUnmarshaler(t *testing.T) { - type testStruct struct { - Title UnmarshalString - MaxConn UnmarshalString - Ports UnmarshalString - Servers UnmarshalString - Table UnmarshalString - Arraytable UnmarshalString - } - data := `title = "testtitle" -max_conn = 777 -ports = [8080, 8081, 8082] -servers = [1, 2, 3] -[table] -name = "alice" -[[arraytable]] -name = "alice" -[[arraytable]] -name = "bob" -` - var v testStruct - if err := toml.Unmarshal([]byte(data), &v); err != nil { - t.Fatal(err) - } - actual := v - expect := testStruct{ - Title: `UnmarshalString: "testtitle"`, - MaxConn: `UnmarshalString: 777`, - Ports: `UnmarshalString: [8080, 8081, 8082]`, - Servers: `UnmarshalString: [1, 2, 3]`, - Table: "UnmarshalString: [table]\nname = \"alice\"", - Arraytable: "UnmarshalString: [[arraytable]]\nname = \"alice\"\n[[arraytable]]\nname = \"bob\"", - } - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`toml.Unmarshal(data, &v); v => %#v; want %#v`, actual, expect) - } -} - -func TestUnmarshal_WithMultibyteString(t *testing.T) { - type testStruct struct { - Name string - Numbers []string - } - v := testStruct{} - data := `name = "七一〇七" -numbers = ["壱", "弐", "参"] -` - if err := toml.Unmarshal([]byte(data), &v); err != nil { - t.Fatal(err) - } - actual := v - expect := testStruct{ - Name: "七一〇七", - Numbers: []string{"壱", "弐", "参"}, - } - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`toml.Unmarshal([]byte(data), &v); v => %#v; want %#v`, actual, expect) - } -} - -func TestUnmarshal_WithPointers(t *testing.T) { - type Inline struct { - Key1 string - Key2 *string - Key3 **string - } - type Table struct { - Key1 *string - Key2 **string - Key3 ***string - } - type testStruct struct { - Inline *Inline - Tables []*Table - } - type testStruct2 struct { - Inline **Inline - Tables []**Table - } - type testStruct3 struct { - Inline ***Inline - Tables []***Table - } - data := ` -inline = { key1 = "test", key2 = "a", key3 = "b" } -[[tables]] -key1 = "a" -key2 = "a" -key3 = "a" -[[tables]] -key1 = "b" -key2 = "b" -key3 = "b" -` - s1 := "a" - s2 := &s1 - s3 := &s2 - s4 := &s3 - s5 := "b" - s6 := &s5 - s7 := &s6 - s8 := &s7 - i1 := &Inline{"test", s2, s7} 
- i2 := &i1 - i3 := &i2 - t1 := &Table{s2, s3, s4} - t2 := &Table{s6, s7, s8} - t3 := &t1 - t4 := &t2 - sc := &testStruct{ - Inline: i1, Tables: []*Table{t1, t2}, - } - ac := &testStruct{} - testUnmarshal(t, []testcase{ - {data, nil, ac, sc}, - {data, nil, &testStruct2{}, &testStruct2{ - Inline: i2, - Tables: []**Table{&t1, &t2}, - }}, - {data, nil, &testStruct3{}, &testStruct3{ - Inline: i3, - Tables: []***Table{&t3, &t4}, - }}, - }) -} - -func TestUnmarshalMap(t *testing.T) { - testUnmarshal(t, []testcase{ - {` -name = "evan" -foo = 1 -`, nil, map[string]interface{}{}, map[string]interface{}{ - "name": "evan", - "foo": int64(1), - }}, - }) -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/encode.go b/Godeps/_workspace/src/github.com/naoina/toml/encode.go deleted file mode 100644 index e50d59414..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/encode.go +++ /dev/null @@ -1,211 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "strconv" - "time" - - "go/ast" - - "github.com/naoina/go-stringutil" -) - -const ( - tagOmitempty = "omitempty" - tagSkip = "-" -) - -// Marshal returns the TOML encoding of v. -// -// Struct values encode as TOML. Each exported struct field becomes a field of -// the TOML structure unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The "toml" key in the struct field's tag value is the key name, followed by -// an optional comma and options. Examples: -// -// // Field is ignored by this package. -// Field int `toml:"-"` -// -// // Field appears in TOML as key "myName". -// Field int `toml:"myName"` -// -// // Field appears in TOML as key "myName" and the field is omitted from the -// // result of encoding if its value is empty. -// Field int `toml:"myName,omitempty"` -// -// // Field appears in TOML as key "field", but the field is skipped if -// // empty. -// // Note the leading comma. -// Field int `toml:",omitempty"` -func Marshal(v interface{}) ([]byte, error) { - return marshal(nil, "", reflect.ValueOf(v), false, false) -} - -// Marshaler is the interface implemented by objects that can marshal themshelves into valid TOML. 
-type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -func marshal(buf []byte, prefix string, rv reflect.Value, inArray, arrayTable bool) ([]byte, error) { - rt := rv.Type() - for i := 0; i < rv.NumField(); i++ { - ft := rt.Field(i) - if !ast.IsExported(ft.Name) { - continue - } - colName, rest := extractTag(rt.Field(i).Tag.Get(fieldTagName)) - if colName == tagSkip { - continue - } - if colName == "" { - colName = stringutil.ToSnakeCase(ft.Name) - } - fv := rv.Field(i) - switch rest { - case tagOmitempty: - if fv.Interface() == reflect.Zero(ft.Type).Interface() { - continue - } - } - var err error - if buf, err = encodeValue(buf, prefix, colName, fv, inArray, arrayTable); err != nil { - return nil, err - } - } - return buf, nil -} - -func encodeValue(buf []byte, prefix, name string, fv reflect.Value, inArray, arrayTable bool) ([]byte, error) { - switch t := fv.Interface().(type) { - case Marshaler: - b, err := t.MarshalTOML() - if err != nil { - return nil, err - } - return appendNewline(append(appendKey(buf, name, inArray, arrayTable), b...), inArray, arrayTable), nil - case time.Time: - return appendNewline(encodeTime(appendKey(buf, name, inArray, arrayTable), t), inArray, arrayTable), nil - } - switch fv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return appendNewline(encodeInt(appendKey(buf, name, inArray, arrayTable), fv.Int()), inArray, arrayTable), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return appendNewline(encodeUint(appendKey(buf, name, inArray, arrayTable), fv.Uint()), inArray, arrayTable), nil - case reflect.Float32, reflect.Float64: - return appendNewline(encodeFloat(appendKey(buf, name, inArray, arrayTable), fv.Float()), inArray, arrayTable), nil - case reflect.Bool: - return appendNewline(encodeBool(appendKey(buf, name, inArray, arrayTable), fv.Bool()), inArray, arrayTable), nil - case reflect.String: - return appendNewline(encodeString(appendKey(buf, name, inArray, arrayTable), fv.String()), inArray, arrayTable), nil - case reflect.Slice, reflect.Array: - ft := fv.Type().Elem() - for ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - if ft.Kind() == reflect.Struct { - name := tableName(prefix, name) - var err error - for i := 0; i < fv.Len(); i++ { - if buf, err = marshal(append(append(append(buf, '[', '['), name...), ']', ']', '\n'), name, fv.Index(i), false, true); err != nil { - return nil, err - } - } - return buf, nil - } - buf = append(appendKey(buf, name, inArray, arrayTable), '[') - var err error - for i := 0; i < fv.Len(); i++ { - if i != 0 { - buf = append(buf, ',') - } - if buf, err = encodeValue(buf, prefix, name, fv.Index(i), true, false); err != nil { - return nil, err - } - } - return appendNewline(append(buf, ']'), inArray, arrayTable), nil - case reflect.Struct: - name := tableName(prefix, name) - return marshal(append(append(append(buf, '['), name...), ']', '\n'), name, fv, inArray, arrayTable) - case reflect.Interface: - var err error - if buf, err = encodeInterface(appendKey(buf, name, inArray, arrayTable), fv.Interface()); err != nil { - return nil, err - } - return appendNewline(buf, inArray, arrayTable), nil - } - return nil, fmt.Errorf("toml: marshal: unsupported type %v", fv.Kind()) -} - -func appendKey(buf []byte, key string, inArray, arrayTable bool) []byte { - if !inArray { - return append(append(buf, key...), '=') - } - return buf -} - -func appendNewline(buf []byte, inArray, arrayTable bool) []byte { - if !inArray { - return append(buf, 
'\n') - } - return buf -} - -func encodeInterface(buf []byte, v interface{}) ([]byte, error) { - switch v := v.(type) { - case int: - return encodeInt(buf, int64(v)), nil - case int8: - return encodeInt(buf, int64(v)), nil - case int16: - return encodeInt(buf, int64(v)), nil - case int32: - return encodeInt(buf, int64(v)), nil - case int64: - return encodeInt(buf, v), nil - case uint: - return encodeUint(buf, uint64(v)), nil - case uint8: - return encodeUint(buf, uint64(v)), nil - case uint16: - return encodeUint(buf, uint64(v)), nil - case uint32: - return encodeUint(buf, uint64(v)), nil - case uint64: - return encodeUint(buf, v), nil - case float32: - return encodeFloat(buf, float64(v)), nil - case float64: - return encodeFloat(buf, v), nil - case bool: - return encodeBool(buf, v), nil - case string: - return encodeString(buf, v), nil - } - return nil, fmt.Errorf("toml: marshal: unable to detect a type of value `%v'", v) -} - -func encodeInt(buf []byte, i int64) []byte { - return strconv.AppendInt(buf, i, 10) -} - -func encodeUint(buf []byte, u uint64) []byte { - return strconv.AppendUint(buf, u, 10) -} - -func encodeFloat(buf []byte, f float64) []byte { - return strconv.AppendFloat(buf, f, 'e', -1, 64) -} - -func encodeBool(buf []byte, b bool) []byte { - return strconv.AppendBool(buf, b) -} - -func encodeString(buf []byte, s string) []byte { - return strconv.AppendQuote(buf, s) -} - -func encodeTime(buf []byte, t time.Time) []byte { - return append(buf, t.Format(time.RFC3339Nano)...) -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/encode_test.go b/Godeps/_workspace/src/github.com/naoina/toml/encode_test.go deleted file mode 100644 index 17e04fd0a..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/encode_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package toml_test - -import ( - "reflect" - "testing" - "time" - - "github.com/naoina/toml" -) - -func TestMarshal(t *testing.T) { - for _, v := range []struct { - v interface{} - expect string - }{ - {struct{ Name string }{"alice"}, "name=\"alice\"\n"}, - {struct{ Age int }{7}, "age=7\n"}, - {struct { - Name string - Age int - }{"alice", 7}, "name=\"alice\"\nage=7\n"}, - {struct { - Name string `toml:"-"` - Age int - }{"alice", 7}, "age=7\n"}, - {struct { - Name string `toml:"my_name"` - }{"bob"}, "my_name=\"bob\"\n"}, - {struct { - Name string `toml:"my_name,omitempty"` - }{"bob"}, "my_name=\"bob\"\n"}, - {struct { - Name string `toml:",omitempty"` - }{"bob"}, "name=\"bob\"\n"}, - {struct { - Name string `toml:",omitempty"` - }{""}, ""}, - } { - b, err := toml.Marshal(v.v) - var actual interface{} = err - var expect interface{} = nil - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Marshal(%#v) => %#v; want %#v`, v.v, actual, expect) - } - - actual = string(b) - expect = v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Marshal(%#v); v => %#v; want %#v`, v, actual, expect) - } - } -} - -func TestMarshalWhole(t *testing.T) { - for _, v := range []struct { - v interface{} - expect string - }{ - { - testStruct{ - Table: Table{ - Key: "value", - Subtable: Subtable{ - Key: "another value", - }, - Inline: Inline{ - Name: Name{ - First: "Tom", - Last: "Preston-Werner", - }, - Point: Point{ - X: 1, - Y: 2, - }, - }, - }, - X: X{}, - String: String{ - Basic: Basic{ - Basic: "I'm a string. \"You can quote me\". 
Name\tJos\u00E9\nLocation\tSF.", - }, - Multiline: Multiline{ - Key1: "One\nTwo", - Continued: Continued{ - Key1: "The quick brown fox jumps over the lazy dog.", - }, - }, - Literal: Literal{ - Winpath: `C:\Users\nodejs\templates`, - Winpath2: `\\ServerX\admin$\system32\`, - Quoted: `Tom "Dubs" Preston-Werner`, - Regex: `<\i\c*\s*>`, - Multiline: LiteralMultiline{ - Regex2: `I [dw]on't need \d{2} apples`, - Lines: "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n", - }, - }, - }, - Integer: Integer{ - Key1: 99, - Key2: 42, - Key3: 0, - Key4: -17, - Underscores: IntegerUnderscores{ - Key1: 1000, - Key2: 5349221, - Key3: 12345, - }, - }, - Float: Float{ - Fractional: Fractional{ - Key1: 1.0, - Key2: 3.1415, - Key3: -0.01, - }, - Exponent: Exponent{ - Key1: 5e22, - Key2: 1e6, - Key3: -2e-2, - }, - Both: Both{ - Key: 6.626e-34, - }, - Underscores: FloatUnderscores{ - Key1: 9224617.445991228313, - Key2: 1e100, - }, - }, - Boolean: Boolean{ - True: true, - False: false, - }, - Datetime: Datetime{ - Key1: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T07:32:00Z")), - Key2: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00-07:00")), - Key3: mustTime(time.Parse(time.RFC3339Nano, "1979-05-27T00:32:00.999999-07:00")), - }, - Array: Array{ - Key1: []int{1, 2, 3}, - Key2: []string{"red", "yellow", "green"}, - Key3: [][]int{{1, 2}, {3, 4, 5}}, - Key4: [][]interface{}{{int64(1), int64(2)}, {"a", "b", "c"}}, - Key5: []int{1, 2, 3}, - Key6: []int{1, 2}, - }, - Products: []Product{ - {Name: "Hammer", Sku: 738594937}, - {}, - {Name: "Nail", Sku: 284758393, Color: "gray"}, - }, - Fruit: []Fruit{ - { - Name: "apple", - Physical: Physical{ - Color: "red", - Shape: "round", - }, - Variety: []Variety{ - {Name: "red delicious"}, - {Name: "granny smith"}, - }, - }, - { - Name: "banana", - Variety: []Variety{ - {Name: "plantain"}, - }, - }, - }, - }, - `[table] -key="value" -[table.subtable] -key="another value" -[table.inline] -[table.inline.name] -first="Tom" -last="Preston-Werner" -[table.inline.point] -x=1 -y=2 -[x] -[x.y] -[x.y.z] -[x.y.z.w] -[string] -[string.basic] -basic="I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF." -[string.multiline] -key1="One\nTwo" -key2="" -key3="" -[string.multiline.continued] -key1="The quick brown fox jumps over the lazy dog." 
-key2="" -key3="" -[string.literal] -winpath="C:\\Users\\nodejs\\templates" -winpath2="\\\\ServerX\\admin$\\system32\\" -quoted="Tom \"Dubs\" Preston-Werner" -regex="<\\i\\c*\\s*>" -[string.literal.multiline] -regex2="I [dw]on't need \\d{2} apples" -lines="The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n" -[integer] -key1=99 -key2=42 -key3=0 -key4=-17 -[integer.underscores] -key1=1000 -key2=5349221 -key3=12345 -[float] -[float.fractional] -key1=1e+00 -key2=3.1415e+00 -key3=-1e-02 -[float.exponent] -key1=5e+22 -key2=1e+06 -key3=-2e-02 -[float.both] -key=6.626e-34 -[float.underscores] -key1=9.224617445991227e+06 -key2=1e+100 -[boolean] -true=true -false=false -[datetime] -key1=1979-05-27T07:32:00Z -key2=1979-05-27T00:32:00-07:00 -key3=1979-05-27T00:32:00.999999-07:00 -[array] -key1=[1,2,3] -key2=["red","yellow","green"] -key3=[[1,2],[3,4,5]] -key4=[[1,2],["a","b","c"]] -key5=[1,2,3] -key6=[1,2] -[[products]] -name="Hammer" -sku=738594937 -color="" -[[products]] -name="" -sku=0 -color="" -[[products]] -name="Nail" -sku=284758393 -color="gray" -[[fruit]] -name="apple" -[fruit.physical] -color="red" -shape="round" -[[fruit.variety]] -name="red delicious" -[[fruit.variety]] -name="granny smith" -[[fruit]] -name="banana" -[fruit.physical] -color="" -shape="" -[[fruit.variety]] -name="plantain" -`, - }, - } { - b, err := toml.Marshal(v.v) - var actual interface{} = err - var expect interface{} = nil - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Marshal(%#v) => %#v; want %#v`, v.v, actual, expect) - } - actual = string(b) - expect = v.expect - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Marshal(%#v); v => %#v; want %#v`, v.v, actual, expect) - } - - // test for reversible. - dest := testStruct{} - actual = toml.Unmarshal(b, &dest) - expect = nil - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Unmarshal after Marshal => %#v; want %#v`, actual, expect) - } - actual = dest - expect = v.v - if !reflect.DeepEqual(actual, expect) { - t.Errorf(`Unmarshal after Marshal => %#v; want %#v`, v, actual, expect) - } - } -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/error.go b/Godeps/_workspace/src/github.com/naoina/toml/error.go deleted file mode 100644 index 0261b3c2b..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/error.go +++ /dev/null @@ -1,31 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" -) - -func (e *parseError) Line() int { - tokens := e.p.tokenTree.Error() - positions := make([]int, len(tokens)*2) - p := 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - for _, t := range translatePositions(e.p.Buffer, positions) { - if e.p.line < t.line { - e.p.line = t.line - } - } - return e.p.line -} - -type errorOutOfRange struct { - kind reflect.Kind - v interface{} -} - -func (err *errorOutOfRange) Error() string { - return fmt.Sprintf("value %d is out of range for `%v` type", err.v, err.kind) -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/parse.go b/Godeps/_workspace/src/github.com/naoina/toml/parse.go deleted file mode 100644 index e01866625..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/parse.go +++ /dev/null @@ -1,54 +0,0 @@ -package toml - -import ( - "fmt" - - "github.com/naoina/toml/ast" -) - -// Parse returns an AST representation of TOML. -// The toplevel is represented by a table. 
-func Parse(data []byte) (*ast.Table, error) { - d := &parseState{p: &tomlParser{Buffer: string(data)}} - d.init() - - if err := d.parse(); err != nil { - return nil, err - } - - return d.p.toml.table, nil -} - -type parseState struct { - p *tomlParser -} - -func (d *parseState) init() { - d.p.Init() - d.p.toml.init() -} - -func (d *parseState) parse() error { - if err := d.p.Parse(); err != nil { - if err, ok := err.(*parseError); ok { - return fmt.Errorf("toml: line %d: parse error", err.Line()) - } - return err - } - return d.execute() -} - -func (d *parseState) execute() (err error) { - defer func() { - e := recover() - if e != nil { - cerr, ok := e.(convertError) - if !ok { - panic(e) - } - err = cerr.err - } - }() - d.p.Execute() - return nil -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/parse.peg b/Godeps/_workspace/src/github.com/naoina/toml/parse.peg deleted file mode 100644 index 0380a201d..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/parse.peg +++ /dev/null @@ -1,138 +0,0 @@ -package toml - -type tomlParser Peg { - toml -} - -TOML <- Expression (newline Expression)* newline? !. { _ = buffer } - -Expression <- ( - <ws table ws comment?> { p.SetTableString(begin, end) } - / ws keyval ws comment? - / ws comment? - / ws -) - -newline <- <[\r\n]+> { p.AddLineCount(end - begin) } - -ws <- [ \t]* -wsnl <- ( - [ \t] - / <[\r\n]> { p.AddLineCount(end - begin) } -)* - -comment <- '#' <[\t -\0x10FFFF]*> - -keyval <- key ws '=' ws val { p.AddKeyValue() } - -key <- bareKey / quotedKey - -bareKey <- <[0-9A-Za-z\-_]+> { p.SetKey(p.buffer, begin, end) } - -quotedKey <- '"' <basicChar+> '"' { p.SetKey(p.buffer, begin, end) } - -val <- ( - <datetime> { p.SetTime(begin, end) } - / <float> { p.SetFloat64(begin, end) } - / <int> { p.SetInt64(begin, end) } - / <string> { p.SetString(begin, end) } - / <boolean> { p.SetBool(begin, end) } - / <array> { p.SetArray(begin, end) } - / inlineTable -) - -table <- stdTable / arrayTable - -stdTable <- '[' ws <tableKey> ws ']' { p.SetTable(p.buffer, begin, end) } - -arrayTable <- '[[' ws <tableKey> ws ']]' { p.SetArrayTable(p.buffer, begin, end) } - -inlineTable <- ( - '{' { p.StartInlineTable() } - ws inlineTableKeyValues ws - '}' { p.EndInlineTable() } -) - -inlineTableKeyValues <- (keyval inlineTableValSep?)* - -tableKey <- key (tableKeySep key)* - -tableKeySep <- ws '.' ws - -inlineTableValSep <- ws ',' ws - -integer <- [\-+]? int -int <- [1-9] (digit / '_' digit)+ / digit - -float <- integer (frac exp? / frac? exp) -frac <- '.' digit (digit / '_' digit)* -exp <- [eE] [\-+]? 
digit (digit / '_' digit)* - -string <- ( - mlLiteralString - / literalString - / mlBasicString - / basicString -) - -basicString <- <'"' basicChar* '"'> { p.SetBasicString(p.buffer, begin, end) } - -basicChar <- basicUnescaped / escaped -escaped <- escape ([btnfr"/\\] / 'u' hexQuad / 'U' hexQuad hexQuad) - -basicUnescaped <- [ -!#-\[\]-\0x10FFFF] - -escape <- '\\' - -mlBasicString <- '"""' mlBasicBody '"""' { p.SetMultilineString() } - -mlBasicBody <- ( - <basicChar / newline> { p.AddMultilineBasicBody(p.buffer, begin, end) } - / escape newline wsnl -)* - -literalString <- "'" <literalChar*> "'" { p.SetLiteralString(p.buffer, begin, end) } - -literalChar <- [\t -&(-\0x10FFFF] - -mlLiteralString <- "'''" <mlLiteralBody> "'''" { p.SetMultilineLiteralString(p.buffer, begin, end) } - -mlLiteralBody <- (!"'''" (mlLiteralChar / newline))* - -mlLiteralChar <- [\t -\0x10FFFF] - -hexdigit <- [0-9A-Fa-f] -hexQuad <- hexdigit hexdigit hexdigit hexdigit - -boolean <- 'true' / 'false' - -dateFullYear <- digitQuad -dateMonth <- digitDual -dateMDay <- digitDual -timeHour <- digitDual -timeMinute <- digitDual -timeSecond <- digitDual -timeSecfrac <- '.' digit+ -timeNumoffset <- [\-+] timeHour ':' timeMinute -timeOffset <- 'Z' / timeNumoffset -partialTime <- timeHour ':' timeMinute ':' timeSecond timeSecfrac? -fullDate <- dateFullYear '-' dateMonth '-' dateMDay -fullTime <- partialTime timeOffset -datetime <- fullDate 'T' fullTime - -digit <- [0-9] -digitDual <- digit digit -digitQuad <- digitDual digitDual - -array <- ( - '[' { p.StartArray() } - wsnl arrayValues wsnl - ']' -) - -arrayValues <- ( - val { p.AddArrayVal() } - arraySep? (comment? newline)? -)* - -arraySep <- ws ',' wsnl diff --git a/Godeps/_workspace/src/github.com/naoina/toml/parse.peg.go b/Godeps/_workspace/src/github.com/naoina/toml/parse.peg.go deleted file mode 100644 index ce967e968..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/parse.peg.go +++ /dev/null @@ -1,3065 +0,0 @@ -package toml - -import ( - "fmt" - "math" - "sort" - "strconv" -) - -const end_symbol rune = 4 - -/* The rule types inferred from the grammar are below. 
*/ -type pegRule uint8 - -const ( - ruleUnknown pegRule = iota - ruleTOML - ruleExpression - rulenewline - rulews - rulewsnl - rulecomment - rulekeyval - rulekey - rulebareKey - rulequotedKey - ruleval - ruletable - rulestdTable - rulearrayTable - ruleinlineTable - ruleinlineTableKeyValues - ruletableKey - ruletableKeySep - ruleinlineTableValSep - ruleinteger - ruleint - rulefloat - rulefrac - ruleexp - rulestring - rulebasicString - rulebasicChar - ruleescaped - rulebasicUnescaped - ruleescape - rulemlBasicString - rulemlBasicBody - ruleliteralString - ruleliteralChar - rulemlLiteralString - rulemlLiteralBody - rulemlLiteralChar - rulehexdigit - rulehexQuad - ruleboolean - ruledateFullYear - ruledateMonth - ruledateMDay - ruletimeHour - ruletimeMinute - ruletimeSecond - ruletimeSecfrac - ruletimeNumoffset - ruletimeOffset - rulepartialTime - rulefullDate - rulefullTime - ruledatetime - ruledigit - ruledigitDual - ruledigitQuad - rulearray - rulearrayValues - rulearraySep - ruleAction0 - rulePegText - ruleAction1 - ruleAction2 - ruleAction3 - ruleAction4 - ruleAction5 - ruleAction6 - ruleAction7 - ruleAction8 - ruleAction9 - ruleAction10 - ruleAction11 - ruleAction12 - ruleAction13 - ruleAction14 - ruleAction15 - ruleAction16 - ruleAction17 - ruleAction18 - ruleAction19 - ruleAction20 - ruleAction21 - ruleAction22 - ruleAction23 - - rulePre_ - rule_In_ - rule_Suf -) - -var rul3s = [...]string{ - "Unknown", - "TOML", - "Expression", - "newline", - "ws", - "wsnl", - "comment", - "keyval", - "key", - "bareKey", - "quotedKey", - "val", - "table", - "stdTable", - "arrayTable", - "inlineTable", - "inlineTableKeyValues", - "tableKey", - "tableKeySep", - "inlineTableValSep", - "integer", - "int", - "float", - "frac", - "exp", - "string", - "basicString", - "basicChar", - "escaped", - "basicUnescaped", - "escape", - "mlBasicString", - "mlBasicBody", - "literalString", - "literalChar", - "mlLiteralString", - "mlLiteralBody", - "mlLiteralChar", - "hexdigit", - "hexQuad", - "boolean", - "dateFullYear", - "dateMonth", - "dateMDay", - "timeHour", - "timeMinute", - "timeSecond", - "timeSecfrac", - "timeNumoffset", - "timeOffset", - "partialTime", - "fullDate", - "fullTime", - "datetime", - "digit", - "digitDual", - "digitQuad", - "array", - "arrayValues", - "arraySep", - "Action0", - "PegText", - "Action1", - "Action2", - "Action3", - "Action4", - "Action5", - "Action6", - "Action7", - "Action8", - "Action9", - "Action10", - "Action11", - "Action12", - "Action13", - "Action14", - "Action15", - "Action16", - "Action17", - "Action18", - "Action19", - "Action20", - "Action21", - "Action22", - "Action23", - - "Pre_", - "_In_", - "_Suf", -} - -type tokenTree interface { - Print() - PrintSyntax() - PrintSyntaxTree(buffer string) - Add(rule pegRule, begin, end, next, depth int) - Expand(index int) tokenTree - Tokens() <-chan token32 - AST() *node32 - Error() []token32 - trim(length int) -} - -type node32 struct { - token32 - up, next *node32 -} - -func (node *node32) print(depth int, buffer string) { - for node != nil { - for c := 0; c < depth; c++ { - fmt.Printf(" ") - } - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rul3s[node.pegRule], strconv.Quote(string(([]rune(buffer)[node.begin:node.end])))) - if node.up != nil { - node.up.print(depth+1, buffer) - } - node = node.next - } -} - -func (ast *node32) Print(buffer string) { - ast.print(0, buffer) -} - -type element struct { - node *node32 - down *element -} - -/* ${@} bit structure for abstract syntax tree */ -type token16 struct { - pegRule - begin, end, next int16 
-} - -func (t *token16) isZero() bool { - return t.pegRule == ruleUnknown && t.begin == 0 && t.end == 0 && t.next == 0 -} - -func (t *token16) isParentOf(u token16) bool { - return t.begin <= u.begin && t.end >= u.end && t.next > u.next -} - -func (t *token16) getToken32() token32 { - return token32{pegRule: t.pegRule, begin: int32(t.begin), end: int32(t.end), next: int32(t.next)} -} - -func (t *token16) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v %v", rul3s[t.pegRule], t.begin, t.end, t.next) -} - -type tokens16 struct { - tree []token16 - ordered [][]token16 -} - -func (t *tokens16) trim(length int) { - t.tree = t.tree[0:length] -} - -func (t *tokens16) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens16) Order() [][]token16 { - if t.ordered != nil { - return t.ordered - } - - depths := make([]int16, 1, math.MaxInt16) - for i, token := range t.tree { - if token.pegRule == ruleUnknown { - t.tree = t.tree[:i] - break - } - depth := int(token.next) - if length := len(depths); depth >= length { - depths = depths[:depth+1] - } - depths[depth]++ - } - depths = append(depths, 0) - - ordered, pool := make([][]token16, len(depths)), make([]token16, len(t.tree)+len(depths)) - for i, depth := range depths { - depth++ - ordered[i], pool, depths[i] = pool[:depth], pool[depth:], 0 - } - - for i, token := range t.tree { - depth := token.next - token.next = int16(i) - ordered[depth][depths[depth]] = token - depths[depth]++ - } - t.ordered = ordered - return ordered -} - -type state16 struct { - token16 - depths []int16 - leaf bool -} - -func (t *tokens16) AST() *node32 { - tokens := t.Tokens() - stack := &element{node: &node32{token32: <-tokens}} - for token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - return stack.node -} - -func (t *tokens16) PreOrder() (<-chan state16, [][]token16) { - s, ordered := make(chan state16, 6), t.Order() - go func() { - var states [8]state16 - for i, _ := range states { - states[i].depths = make([]int16, len(ordered)) - } - depths, state, depth := make([]int16, len(ordered)), 0, 1 - write := func(t token16, leaf bool) { - S := states[state] - state, S.pegRule, S.begin, S.end, S.next, S.leaf = (state+1)%8, t.pegRule, t.begin, t.end, int16(depth), leaf - copy(S.depths, depths) - s <- S - } - - states[state].token16 = ordered[0][0] - depths[0]++ - state++ - a, b := ordered[depth-1][depths[depth-1]-1], ordered[depth][depths[depth]] - depthFirstSearch: - for { - for { - if i := depths[depth]; i > 0 { - if c, j := ordered[depth][i-1], depths[depth-1]; a.isParentOf(c) && - (j < 2 || !ordered[depth-1][j-2].isParentOf(c)) { - if c.end != b.begin { - write(token16{pegRule: rule_In_, begin: c.end, end: b.begin}, true) - } - break - } - } - - if a.begin < b.begin { - write(token16{pegRule: rulePre_, begin: a.begin, end: b.begin}, true) - } - break - } - - next := depth + 1 - if c := ordered[next][depths[next]]; c.pegRule != ruleUnknown && b.isParentOf(c) { - write(b, false) - depths[depth]++ - depth, a, b = next, b, c - continue - } - - write(b, true) - depths[depth]++ - c, parent := ordered[depth][depths[depth]], true - for { - if c.pegRule != ruleUnknown && a.isParentOf(c) { - b = c - continue depthFirstSearch - } else if parent && b.end != a.end { - 
write(token16{pegRule: rule_Suf, begin: b.end, end: a.end}, true) - } - - depth-- - if depth > 0 { - a, b, c = ordered[depth-1][depths[depth-1]-1], a, ordered[depth][depths[depth]] - parent = a.isParentOf(b) - continue - } - - break depthFirstSearch - } - } - - close(s) - }() - return s, ordered -} - -func (t *tokens16) PrintSyntax() { - tokens, ordered := t.PreOrder() - max := -1 - for token := range tokens { - if !token.leaf { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[36m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[36m%v\x1B[m\n", rul3s[token.pegRule]) - } else if token.begin == token.end { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[31m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[31m%v\x1B[m\n", rul3s[token.pegRule]) - } else { - for c, end := token.begin, token.end; c < end; c++ { - if i := int(c); max+1 < i { - for j := max; j < i; j++ { - fmt.Printf("skip %v %v\n", j, token.String()) - } - max = i - } else if i := int(c); i <= max { - for j := i; j <= max; j++ { - fmt.Printf("dupe %v %v\n", j, token.String()) - } - } else { - max = int(c) - } - fmt.Printf("%v", c) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[34m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[34m%v\x1B[m\n", rul3s[token.pegRule]) - } - fmt.Printf("\n") - } - } -} - -func (t *tokens16) PrintSyntaxTree(buffer string) { - tokens, _ := t.PreOrder() - for token := range tokens { - for c := 0; c < int(token.next); c++ { - fmt.Printf(" ") - } - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rul3s[token.pegRule], strconv.Quote(string(([]rune(buffer)[token.begin:token.end])))) - } -} - -func (t *tokens16) Add(rule pegRule, begin, end, depth, index int) { - t.tree[index] = token16{pegRule: rule, begin: int16(begin), end: int16(end), next: int16(depth)} -} - -func (t *tokens16) Tokens() <-chan token32 { - s := make(chan token32, 16) - go func() { - for _, v := range t.tree { - s <- v.getToken32() - } - close(s) - }() - return s -} - -func (t *tokens16) Error() []token32 { - ordered := t.Order() - length := len(ordered) - tokens, length := make([]token32, length), length-1 - for i, _ := range tokens { - o := ordered[length-i] - if len(o) > 1 { - tokens[i] = o[len(o)-2].getToken32() - } - } - return tokens -} - -/* ${@} bit structure for abstract syntax tree */ -type token32 struct { - pegRule - begin, end, next int32 -} - -func (t *token32) isZero() bool { - return t.pegRule == ruleUnknown && t.begin == 0 && t.end == 0 && t.next == 0 -} - -func (t *token32) isParentOf(u token32) bool { - return t.begin <= u.begin && t.end >= u.end && t.next > u.next -} - -func (t *token32) getToken32() token32 { - return token32{pegRule: t.pegRule, begin: int32(t.begin), end: int32(t.end), next: int32(t.next)} -} - -func (t *token32) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v %v", rul3s[t.pegRule], t.begin, t.end, t.next) -} - -type tokens32 struct { - tree []token32 - ordered [][]token32 -} - -func (t *tokens32) trim(length int) { - t.tree = t.tree[0:length] -} - -func (t *tokens32) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens32) Order() [][]token32 { - if t.ordered != nil { - return t.ordered - } - - depths := make([]int32, 1, math.MaxInt16) - for i, token := range t.tree { - if 
token.pegRule == ruleUnknown { - t.tree = t.tree[:i] - break - } - depth := int(token.next) - if length := len(depths); depth >= length { - depths = depths[:depth+1] - } - depths[depth]++ - } - depths = append(depths, 0) - - ordered, pool := make([][]token32, len(depths)), make([]token32, len(t.tree)+len(depths)) - for i, depth := range depths { - depth++ - ordered[i], pool, depths[i] = pool[:depth], pool[depth:], 0 - } - - for i, token := range t.tree { - depth := token.next - token.next = int32(i) - ordered[depth][depths[depth]] = token - depths[depth]++ - } - t.ordered = ordered - return ordered -} - -type state32 struct { - token32 - depths []int32 - leaf bool -} - -func (t *tokens32) AST() *node32 { - tokens := t.Tokens() - stack := &element{node: &node32{token32: <-tokens}} - for token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - return stack.node -} - -func (t *tokens32) PreOrder() (<-chan state32, [][]token32) { - s, ordered := make(chan state32, 6), t.Order() - go func() { - var states [8]state32 - for i, _ := range states { - states[i].depths = make([]int32, len(ordered)) - } - depths, state, depth := make([]int32, len(ordered)), 0, 1 - write := func(t token32, leaf bool) { - S := states[state] - state, S.pegRule, S.begin, S.end, S.next, S.leaf = (state+1)%8, t.pegRule, t.begin, t.end, int32(depth), leaf - copy(S.depths, depths) - s <- S - } - - states[state].token32 = ordered[0][0] - depths[0]++ - state++ - a, b := ordered[depth-1][depths[depth-1]-1], ordered[depth][depths[depth]] - depthFirstSearch: - for { - for { - if i := depths[depth]; i > 0 { - if c, j := ordered[depth][i-1], depths[depth-1]; a.isParentOf(c) && - (j < 2 || !ordered[depth-1][j-2].isParentOf(c)) { - if c.end != b.begin { - write(token32{pegRule: rule_In_, begin: c.end, end: b.begin}, true) - } - break - } - } - - if a.begin < b.begin { - write(token32{pegRule: rulePre_, begin: a.begin, end: b.begin}, true) - } - break - } - - next := depth + 1 - if c := ordered[next][depths[next]]; c.pegRule != ruleUnknown && b.isParentOf(c) { - write(b, false) - depths[depth]++ - depth, a, b = next, b, c - continue - } - - write(b, true) - depths[depth]++ - c, parent := ordered[depth][depths[depth]], true - for { - if c.pegRule != ruleUnknown && a.isParentOf(c) { - b = c - continue depthFirstSearch - } else if parent && b.end != a.end { - write(token32{pegRule: rule_Suf, begin: b.end, end: a.end}, true) - } - - depth-- - if depth > 0 { - a, b, c = ordered[depth-1][depths[depth-1]-1], a, ordered[depth][depths[depth]] - parent = a.isParentOf(b) - continue - } - - break depthFirstSearch - } - } - - close(s) - }() - return s, ordered -} - -func (t *tokens32) PrintSyntax() { - tokens, ordered := t.PreOrder() - max := -1 - for token := range tokens { - if !token.leaf { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[36m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[36m%v\x1B[m\n", rul3s[token.pegRule]) - } else if token.begin == token.end { - fmt.Printf("%v", token.begin) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[31m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[31m%v\x1B[m\n", 
rul3s[token.pegRule]) - } else { - for c, end := token.begin, token.end; c < end; c++ { - if i := int(c); max+1 < i { - for j := max; j < i; j++ { - fmt.Printf("skip %v %v\n", j, token.String()) - } - max = i - } else if i := int(c); i <= max { - for j := i; j <= max; j++ { - fmt.Printf("dupe %v %v\n", j, token.String()) - } - } else { - max = int(c) - } - fmt.Printf("%v", c) - for i, leaf, depths := 0, int(token.next), token.depths; i < leaf; i++ { - fmt.Printf(" \x1B[34m%v\x1B[m", rul3s[ordered[i][depths[i]-1].pegRule]) - } - fmt.Printf(" \x1B[34m%v\x1B[m\n", rul3s[token.pegRule]) - } - fmt.Printf("\n") - } - } -} - -func (t *tokens32) PrintSyntaxTree(buffer string) { - tokens, _ := t.PreOrder() - for token := range tokens { - for c := 0; c < int(token.next); c++ { - fmt.Printf(" ") - } - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rul3s[token.pegRule], strconv.Quote(string(([]rune(buffer)[token.begin:token.end])))) - } -} - -func (t *tokens32) Add(rule pegRule, begin, end, depth, index int) { - t.tree[index] = token32{pegRule: rule, begin: int32(begin), end: int32(end), next: int32(depth)} -} - -func (t *tokens32) Tokens() <-chan token32 { - s := make(chan token32, 16) - go func() { - for _, v := range t.tree { - s <- v.getToken32() - } - close(s) - }() - return s -} - -func (t *tokens32) Error() []token32 { - ordered := t.Order() - length := len(ordered) - tokens, length := make([]token32, length), length-1 - for i, _ := range tokens { - o := ordered[length-i] - if len(o) > 1 { - tokens[i] = o[len(o)-2].getToken32() - } - } - return tokens -} - -func (t *tokens16) Expand(index int) tokenTree { - tree := t.tree - if index >= len(tree) { - expanded := make([]token32, 2*len(tree)) - for i, v := range tree { - expanded[i] = v.getToken32() - } - return &tokens32{tree: expanded} - } - return nil -} - -func (t *tokens32) Expand(index int) tokenTree { - tree := t.tree - if index >= len(tree) { - expanded := make([]token32, 2*len(tree)) - copy(expanded, tree) - t.tree = expanded - } - return nil -} - -type tomlParser struct { - toml - - Buffer string - buffer []rune - rules [85]func() bool - Parse func(rule ...int) error - Reset func() - tokenTree -} - -type textPosition struct { - line, symbol int -} - -type textPositionMap map[int]textPosition - -func translatePositions(buffer string, positions []int) textPositionMap { - length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 - sort.Ints(positions) - -search: - for i, c := range buffer[0:] { - if c == '\n' { - line, symbol = line+1, 0 - } else { - symbol++ - } - if i == positions[j] { - translations[positions[j]] = textPosition{line, symbol} - for j++; j < length; j++ { - if i != positions[j] { - continue search - } - } - break search - } - } - - return translations -} - -type parseError struct { - p *tomlParser -} - -func (e *parseError) Error() string { - tokens, error := e.p.tokenTree.Error(), "\n" - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - translations := translatePositions(e.p.Buffer, positions) - for _, token := range tokens { - begin, end := int(token.begin), int(token.end) - error += fmt.Sprintf("parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n", - rul3s[token.pegRule], - translations[begin].line, translations[begin].symbol, - translations[end].line, translations[end].symbol, - /*strconv.Quote(*/ e.p.Buffer[begin:end] /*)*/) - } - - return 
error -} - -func (p *tomlParser) PrintSyntaxTree() { - p.tokenTree.PrintSyntaxTree(p.Buffer) -} - -func (p *tomlParser) Highlighter() { - p.tokenTree.PrintSyntax() -} - -func (p *tomlParser) Execute() { - buffer, begin, end := p.Buffer, 0, 0 - for token := range p.tokenTree.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - - case ruleAction0: - _ = buffer - case ruleAction1: - p.SetTableString(begin, end) - case ruleAction2: - p.AddLineCount(end - begin) - case ruleAction3: - p.AddLineCount(end - begin) - case ruleAction4: - p.AddKeyValue() - case ruleAction5: - p.SetKey(p.buffer, begin, end) - case ruleAction6: - p.SetKey(p.buffer, begin, end) - case ruleAction7: - p.SetTime(begin, end) - case ruleAction8: - p.SetFloat64(begin, end) - case ruleAction9: - p.SetInt64(begin, end) - case ruleAction10: - p.SetString(begin, end) - case ruleAction11: - p.SetBool(begin, end) - case ruleAction12: - p.SetArray(begin, end) - case ruleAction13: - p.SetTable(p.buffer, begin, end) - case ruleAction14: - p.SetArrayTable(p.buffer, begin, end) - case ruleAction15: - p.StartInlineTable() - case ruleAction16: - p.EndInlineTable() - case ruleAction17: - p.SetBasicString(p.buffer, begin, end) - case ruleAction18: - p.SetMultilineString() - case ruleAction19: - p.AddMultilineBasicBody(p.buffer, begin, end) - case ruleAction20: - p.SetLiteralString(p.buffer, begin, end) - case ruleAction21: - p.SetMultilineLiteralString(p.buffer, begin, end) - case ruleAction22: - p.StartArray() - case ruleAction23: - p.AddArrayVal() - - } - } - _, _, _ = buffer, begin, end -} - -func (p *tomlParser) Init() { - p.buffer = []rune(p.Buffer) - if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != end_symbol { - p.buffer = append(p.buffer, end_symbol) - } - - var tree tokenTree = &tokens16{tree: make([]token16, math.MaxInt16)} - position, depth, tokenIndex, buffer, _rules := 0, 0, 0, p.buffer, p.rules - - p.Parse = func(rule ...int) error { - r := 1 - if len(rule) > 0 { - r = rule[0] - } - matches := p.rules[r]() - p.tokenTree = tree - if matches { - p.tokenTree.trim(tokenIndex) - return nil - } - return &parseError{p} - } - - p.Reset = func() { - position, tokenIndex, depth = 0, 0, 0 - } - - add := func(rule pegRule, begin int) { - if t := tree.Expand(tokenIndex); t != nil { - tree = t - } - tree.Add(rule, begin, position, depth, tokenIndex) - tokenIndex++ - } - - matchDot := func() bool { - if buffer[position] != end_symbol { - position++ - return true - } - return false - } - - /*matchChar := func(c byte) bool { - if buffer[position] == c { - position++ - return true - } - return false - }*/ - - /*matchRange := func(lower byte, upper byte) bool { - if c := buffer[position]; c >= lower && c <= upper { - position++ - return true - } - return false - }*/ - - _rules = [...]func() bool{ - nil, - /* 0 TOML <- <(Expression (newline Expression)* newline? !. 
Action0)> */ - func() bool { - position0, tokenIndex0, depth0 := position, tokenIndex, depth - { - position1 := position - depth++ - if !_rules[ruleExpression]() { - goto l0 - } - l2: - { - position3, tokenIndex3, depth3 := position, tokenIndex, depth - if !_rules[rulenewline]() { - goto l3 - } - if !_rules[ruleExpression]() { - goto l3 - } - goto l2 - l3: - position, tokenIndex, depth = position3, tokenIndex3, depth3 - } - { - position4, tokenIndex4, depth4 := position, tokenIndex, depth - if !_rules[rulenewline]() { - goto l4 - } - goto l5 - l4: - position, tokenIndex, depth = position4, tokenIndex4, depth4 - } - l5: - { - position6, tokenIndex6, depth6 := position, tokenIndex, depth - if !matchDot() { - goto l6 - } - goto l0 - l6: - position, tokenIndex, depth = position6, tokenIndex6, depth6 - } - { - add(ruleAction0, position) - } - depth-- - add(ruleTOML, position1) - } - return true - l0: - position, tokenIndex, depth = position0, tokenIndex0, depth0 - return false - }, - /* 1 Expression <- <((<(ws table ws comment? (wsnl keyval ws comment?)*)> Action1) / (ws keyval ws comment?) / (ws comment?) / ws)> */ - func() bool { - position8, tokenIndex8, depth8 := position, tokenIndex, depth - { - position9 := position - depth++ - { - position10, tokenIndex10, depth10 := position, tokenIndex, depth - { - position12 := position - depth++ - if !_rules[rulews]() { - goto l11 - } - { - position13 := position - depth++ - { - position14, tokenIndex14, depth14 := position, tokenIndex, depth - { - position16 := position - depth++ - if buffer[position] != rune('[') { - goto l15 - } - position++ - if !_rules[rulews]() { - goto l15 - } - { - position17 := position - depth++ - if !_rules[ruletableKey]() { - goto l15 - } - depth-- - add(rulePegText, position17) - } - if !_rules[rulews]() { - goto l15 - } - if buffer[position] != rune(']') { - goto l15 - } - position++ - { - add(ruleAction13, position) - } - depth-- - add(rulestdTable, position16) - } - goto l14 - l15: - position, tokenIndex, depth = position14, tokenIndex14, depth14 - { - position19 := position - depth++ - if buffer[position] != rune('[') { - goto l11 - } - position++ - if buffer[position] != rune('[') { - goto l11 - } - position++ - if !_rules[rulews]() { - goto l11 - } - { - position20 := position - depth++ - if !_rules[ruletableKey]() { - goto l11 - } - depth-- - add(rulePegText, position20) - } - if !_rules[rulews]() { - goto l11 - } - if buffer[position] != rune(']') { - goto l11 - } - position++ - if buffer[position] != rune(']') { - goto l11 - } - position++ - { - add(ruleAction14, position) - } - depth-- - add(rulearrayTable, position19) - } - } - l14: - depth-- - add(ruletable, position13) - } - if !_rules[rulews]() { - goto l11 - } - { - position22, tokenIndex22, depth22 := position, tokenIndex, depth - if !_rules[rulecomment]() { - goto l22 - } - goto l23 - l22: - position, tokenIndex, depth = position22, tokenIndex22, depth22 - } - l23: - l24: - { - position25, tokenIndex25, depth25 := position, tokenIndex, depth - if !_rules[rulewsnl]() { - goto l25 - } - if !_rules[rulekeyval]() { - goto l25 - } - if !_rules[rulews]() { - goto l25 - } - { - position26, tokenIndex26, depth26 := position, tokenIndex, depth - if !_rules[rulecomment]() { - goto l26 - } - goto l27 - l26: - position, tokenIndex, depth = position26, tokenIndex26, depth26 - } - l27: - goto l24 - l25: - position, tokenIndex, depth = position25, tokenIndex25, depth25 - } - depth-- - add(rulePegText, position12) - } - { - add(ruleAction1, position) - } - goto l10 - 
l11: - position, tokenIndex, depth = position10, tokenIndex10, depth10 - if !_rules[rulews]() { - goto l29 - } - if !_rules[rulekeyval]() { - goto l29 - } - if !_rules[rulews]() { - goto l29 - } - { - position30, tokenIndex30, depth30 := position, tokenIndex, depth - if !_rules[rulecomment]() { - goto l30 - } - goto l31 - l30: - position, tokenIndex, depth = position30, tokenIndex30, depth30 - } - l31: - goto l10 - l29: - position, tokenIndex, depth = position10, tokenIndex10, depth10 - if !_rules[rulews]() { - goto l32 - } - { - position33, tokenIndex33, depth33 := position, tokenIndex, depth - if !_rules[rulecomment]() { - goto l33 - } - goto l34 - l33: - position, tokenIndex, depth = position33, tokenIndex33, depth33 - } - l34: - goto l10 - l32: - position, tokenIndex, depth = position10, tokenIndex10, depth10 - if !_rules[rulews]() { - goto l8 - } - } - l10: - depth-- - add(ruleExpression, position9) - } - return true - l8: - position, tokenIndex, depth = position8, tokenIndex8, depth8 - return false - }, - /* 2 newline <- <(<('\r' / '\n')+> Action2)> */ - func() bool { - position35, tokenIndex35, depth35 := position, tokenIndex, depth - { - position36 := position - depth++ - { - position37 := position - depth++ - { - position40, tokenIndex40, depth40 := position, tokenIndex, depth - if buffer[position] != rune('\r') { - goto l41 - } - position++ - goto l40 - l41: - position, tokenIndex, depth = position40, tokenIndex40, depth40 - if buffer[position] != rune('\n') { - goto l35 - } - position++ - } - l40: - l38: - { - position39, tokenIndex39, depth39 := position, tokenIndex, depth - { - position42, tokenIndex42, depth42 := position, tokenIndex, depth - if buffer[position] != rune('\r') { - goto l43 - } - position++ - goto l42 - l43: - position, tokenIndex, depth = position42, tokenIndex42, depth42 - if buffer[position] != rune('\n') { - goto l39 - } - position++ - } - l42: - goto l38 - l39: - position, tokenIndex, depth = position39, tokenIndex39, depth39 - } - depth-- - add(rulePegText, position37) - } - { - add(ruleAction2, position) - } - depth-- - add(rulenewline, position36) - } - return true - l35: - position, tokenIndex, depth = position35, tokenIndex35, depth35 - return false - }, - /* 3 ws <- <(' ' / '\t')*> */ - func() bool { - { - position46 := position - depth++ - l47: - { - position48, tokenIndex48, depth48 := position, tokenIndex, depth - { - position49, tokenIndex49, depth49 := position, tokenIndex, depth - if buffer[position] != rune(' ') { - goto l50 - } - position++ - goto l49 - l50: - position, tokenIndex, depth = position49, tokenIndex49, depth49 - if buffer[position] != rune('\t') { - goto l48 - } - position++ - } - l49: - goto l47 - l48: - position, tokenIndex, depth = position48, tokenIndex48, depth48 - } - depth-- - add(rulews, position46) - } - return true - }, - /* 4 wsnl <- <((&('\t') '\t') | (&(' ') ' ') | (&('\n' | '\r') (<('\r' / '\n')> Action3)))*> */ - func() bool { - { - position52 := position - depth++ - l53: - { - position54, tokenIndex54, depth54 := position, tokenIndex, depth - { - switch buffer[position] { - case '\t': - if buffer[position] != rune('\t') { - goto l54 - } - position++ - break - case ' ': - if buffer[position] != rune(' ') { - goto l54 - } - position++ - break - default: - { - position56 := position - depth++ - { - position57, tokenIndex57, depth57 := position, tokenIndex, depth - if buffer[position] != rune('\r') { - goto l58 - } - position++ - goto l57 - l58: - position, tokenIndex, depth = position57, tokenIndex57, depth57 - if 
buffer[position] != rune('\n') { - goto l54 - } - position++ - } - l57: - depth-- - add(rulePegText, position56) - } - { - add(ruleAction3, position) - } - break - } - } - - goto l53 - l54: - position, tokenIndex, depth = position54, tokenIndex54, depth54 - } - depth-- - add(rulewsnl, position52) - } - return true - }, - /* 5 comment <- <('#' <('\t' / [ -􏿿])*>)> */ - func() bool { - position60, tokenIndex60, depth60 := position, tokenIndex, depth - { - position61 := position - depth++ - if buffer[position] != rune('#') { - goto l60 - } - position++ - { - position62 := position - depth++ - l63: - { - position64, tokenIndex64, depth64 := position, tokenIndex, depth - { - position65, tokenIndex65, depth65 := position, tokenIndex, depth - if buffer[position] != rune('\t') { - goto l66 - } - position++ - goto l65 - l66: - position, tokenIndex, depth = position65, tokenIndex65, depth65 - if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { - goto l64 - } - position++ - } - l65: - goto l63 - l64: - position, tokenIndex, depth = position64, tokenIndex64, depth64 - } - depth-- - add(rulePegText, position62) - } - depth-- - add(rulecomment, position61) - } - return true - l60: - position, tokenIndex, depth = position60, tokenIndex60, depth60 - return false - }, - /* 6 keyval <- <(key ws '=' ws val Action4)> */ - func() bool { - position67, tokenIndex67, depth67 := position, tokenIndex, depth - { - position68 := position - depth++ - if !_rules[rulekey]() { - goto l67 - } - if !_rules[rulews]() { - goto l67 - } - if buffer[position] != rune('=') { - goto l67 - } - position++ - if !_rules[rulews]() { - goto l67 - } - if !_rules[ruleval]() { - goto l67 - } - { - add(ruleAction4, position) - } - depth-- - add(rulekeyval, position68) - } - return true - l67: - position, tokenIndex, depth = position67, tokenIndex67, depth67 - return false - }, - /* 7 key <- <(bareKey / quotedKey)> */ - func() bool { - position70, tokenIndex70, depth70 := position, tokenIndex, depth - { - position71 := position - depth++ - { - position72, tokenIndex72, depth72 := position, tokenIndex, depth - { - position74 := position - depth++ - { - position75 := position - depth++ - { - switch buffer[position] { - case '_': - if buffer[position] != rune('_') { - goto l73 - } - position++ - break - case '-': - if buffer[position] != rune('-') { - goto l73 - } - position++ - break - case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': - if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l73 - } - position++ - break - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l73 - } - position++ - break - default: - if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l73 - } - position++ - break - } - } - - l76: - { - position77, tokenIndex77, depth77 := position, tokenIndex, depth - { - switch buffer[position] { - case '_': - if buffer[position] != rune('_') { - goto l77 - } - position++ - break - case '-': - if buffer[position] != rune('-') { - goto l77 - } - position++ - break - case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': - if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l77 - } - position++ - break - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l77 - } - position++ 
- break - default: - if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l77 - } - position++ - break - } - } - - goto l76 - l77: - position, tokenIndex, depth = position77, tokenIndex77, depth77 - } - depth-- - add(rulePegText, position75) - } - { - add(ruleAction5, position) - } - depth-- - add(rulebareKey, position74) - } - goto l72 - l73: - position, tokenIndex, depth = position72, tokenIndex72, depth72 - { - position81 := position - depth++ - if buffer[position] != rune('"') { - goto l70 - } - position++ - { - position82 := position - depth++ - if !_rules[rulebasicChar]() { - goto l70 - } - l83: - { - position84, tokenIndex84, depth84 := position, tokenIndex, depth - if !_rules[rulebasicChar]() { - goto l84 - } - goto l83 - l84: - position, tokenIndex, depth = position84, tokenIndex84, depth84 - } - depth-- - add(rulePegText, position82) - } - if buffer[position] != rune('"') { - goto l70 - } - position++ - { - add(ruleAction6, position) - } - depth-- - add(rulequotedKey, position81) - } - } - l72: - depth-- - add(rulekey, position71) - } - return true - l70: - position, tokenIndex, depth = position70, tokenIndex70, depth70 - return false - }, - /* 8 bareKey <- <(<((&('_') '_') | (&('-') '-') | (&('a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z') [a-z]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z') [A-Z]))+> Action5)> */ - nil, - /* 9 quotedKey <- <('"' '"' Action6)> */ - nil, - /* 10 val <- <(( Action7) / ( Action8) / ((&('{') inlineTable) | (&('[') ( Action12)) | (&('f' | 't') ( Action11)) | (&('"' | '\'') ( Action10)) | (&('+' | '-' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') ( Action9))))> */ - func() bool { - position88, tokenIndex88, depth88 := position, tokenIndex, depth - { - position89 := position - depth++ - { - position90, tokenIndex90, depth90 := position, tokenIndex, depth - { - position92 := position - depth++ - { - position93 := position - depth++ - { - position94 := position - depth++ - { - position95 := position - depth++ - { - position96 := position - depth++ - if !_rules[ruledigitDual]() { - goto l91 - } - if !_rules[ruledigitDual]() { - goto l91 - } - depth-- - add(ruledigitQuad, position96) - } - depth-- - add(ruledateFullYear, position95) - } - if buffer[position] != rune('-') { - goto l91 - } - position++ - { - position97 := position - depth++ - if !_rules[ruledigitDual]() { - goto l91 - } - depth-- - add(ruledateMonth, position97) - } - if buffer[position] != rune('-') { - goto l91 - } - position++ - { - position98 := position - depth++ - if !_rules[ruledigitDual]() { - goto l91 - } - depth-- - add(ruledateMDay, position98) - } - depth-- - add(rulefullDate, position94) - } - if buffer[position] != rune('T') { - goto l91 - } - position++ - { - position99 := position - depth++ - { - position100 := position - depth++ - if !_rules[ruletimeHour]() { - goto l91 - } - if buffer[position] != rune(':') { - goto l91 - } - position++ - if !_rules[ruletimeMinute]() { - goto l91 - } - if buffer[position] != rune(':') { - goto l91 - } - position++ - { - position101 := position - depth++ - if !_rules[ruledigitDual]() { - goto l91 - } - depth-- - add(ruletimeSecond, position101) - } - { - position102, tokenIndex102, depth102 := position, tokenIndex, depth - { - 
position104 := position - depth++ - if buffer[position] != rune('.') { - goto l102 - } - position++ - if !_rules[ruledigit]() { - goto l102 - } - l105: - { - position106, tokenIndex106, depth106 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l106 - } - goto l105 - l106: - position, tokenIndex, depth = position106, tokenIndex106, depth106 - } - depth-- - add(ruletimeSecfrac, position104) - } - goto l103 - l102: - position, tokenIndex, depth = position102, tokenIndex102, depth102 - } - l103: - depth-- - add(rulepartialTime, position100) - } - { - position107 := position - depth++ - { - position108, tokenIndex108, depth108 := position, tokenIndex, depth - if buffer[position] != rune('Z') { - goto l109 - } - position++ - goto l108 - l109: - position, tokenIndex, depth = position108, tokenIndex108, depth108 - { - position110 := position - depth++ - { - position111, tokenIndex111, depth111 := position, tokenIndex, depth - if buffer[position] != rune('-') { - goto l112 - } - position++ - goto l111 - l112: - position, tokenIndex, depth = position111, tokenIndex111, depth111 - if buffer[position] != rune('+') { - goto l91 - } - position++ - } - l111: - if !_rules[ruletimeHour]() { - goto l91 - } - if buffer[position] != rune(':') { - goto l91 - } - position++ - if !_rules[ruletimeMinute]() { - goto l91 - } - depth-- - add(ruletimeNumoffset, position110) - } - } - l108: - depth-- - add(ruletimeOffset, position107) - } - depth-- - add(rulefullTime, position99) - } - depth-- - add(ruledatetime, position93) - } - depth-- - add(rulePegText, position92) - } - { - add(ruleAction7, position) - } - goto l90 - l91: - position, tokenIndex, depth = position90, tokenIndex90, depth90 - { - position115 := position - depth++ - { - position116 := position - depth++ - if !_rules[ruleinteger]() { - goto l114 - } - { - position117, tokenIndex117, depth117 := position, tokenIndex, depth - if !_rules[rulefrac]() { - goto l118 - } - { - position119, tokenIndex119, depth119 := position, tokenIndex, depth - if !_rules[ruleexp]() { - goto l119 - } - goto l120 - l119: - position, tokenIndex, depth = position119, tokenIndex119, depth119 - } - l120: - goto l117 - l118: - position, tokenIndex, depth = position117, tokenIndex117, depth117 - { - position121, tokenIndex121, depth121 := position, tokenIndex, depth - if !_rules[rulefrac]() { - goto l121 - } - goto l122 - l121: - position, tokenIndex, depth = position121, tokenIndex121, depth121 - } - l122: - if !_rules[ruleexp]() { - goto l114 - } - } - l117: - depth-- - add(rulefloat, position116) - } - depth-- - add(rulePegText, position115) - } - { - add(ruleAction8, position) - } - goto l90 - l114: - position, tokenIndex, depth = position90, tokenIndex90, depth90 - { - switch buffer[position] { - case '{': - { - position125 := position - depth++ - if buffer[position] != rune('{') { - goto l88 - } - position++ - { - add(ruleAction15, position) - } - if !_rules[rulews]() { - goto l88 - } - { - position127 := position - depth++ - l128: - { - position129, tokenIndex129, depth129 := position, tokenIndex, depth - if !_rules[rulekeyval]() { - goto l129 - } - { - position130, tokenIndex130, depth130 := position, tokenIndex, depth - { - position132 := position - depth++ - if !_rules[rulews]() { - goto l130 - } - if buffer[position] != rune(',') { - goto l130 - } - position++ - if !_rules[rulews]() { - goto l130 - } - depth-- - add(ruleinlineTableValSep, position132) - } - goto l131 - l130: - position, tokenIndex, depth = position130, tokenIndex130, depth130 - } - l131: 
- goto l128 - l129: - position, tokenIndex, depth = position129, tokenIndex129, depth129 - } - depth-- - add(ruleinlineTableKeyValues, position127) - } - if !_rules[rulews]() { - goto l88 - } - if buffer[position] != rune('}') { - goto l88 - } - position++ - { - add(ruleAction16, position) - } - depth-- - add(ruleinlineTable, position125) - } - break - case '[': - { - position134 := position - depth++ - { - position135 := position - depth++ - if buffer[position] != rune('[') { - goto l88 - } - position++ - { - add(ruleAction22, position) - } - if !_rules[rulewsnl]() { - goto l88 - } - { - position137 := position - depth++ - l138: - { - position139, tokenIndex139, depth139 := position, tokenIndex, depth - if !_rules[ruleval]() { - goto l139 - } - { - add(ruleAction23, position) - } - { - position141, tokenIndex141, depth141 := position, tokenIndex, depth - { - position143 := position - depth++ - if !_rules[rulews]() { - goto l141 - } - if buffer[position] != rune(',') { - goto l141 - } - position++ - if !_rules[rulewsnl]() { - goto l141 - } - depth-- - add(rulearraySep, position143) - } - goto l142 - l141: - position, tokenIndex, depth = position141, tokenIndex141, depth141 - } - l142: - { - position144, tokenIndex144, depth144 := position, tokenIndex, depth - { - position146, tokenIndex146, depth146 := position, tokenIndex, depth - if !_rules[rulecomment]() { - goto l146 - } - goto l147 - l146: - position, tokenIndex, depth = position146, tokenIndex146, depth146 - } - l147: - if !_rules[rulenewline]() { - goto l144 - } - goto l145 - l144: - position, tokenIndex, depth = position144, tokenIndex144, depth144 - } - l145: - goto l138 - l139: - position, tokenIndex, depth = position139, tokenIndex139, depth139 - } - depth-- - add(rulearrayValues, position137) - } - if !_rules[rulewsnl]() { - goto l88 - } - if buffer[position] != rune(']') { - goto l88 - } - position++ - depth-- - add(rulearray, position135) - } - depth-- - add(rulePegText, position134) - } - { - add(ruleAction12, position) - } - break - case 'f', 't': - { - position149 := position - depth++ - { - position150 := position - depth++ - { - position151, tokenIndex151, depth151 := position, tokenIndex, depth - if buffer[position] != rune('t') { - goto l152 - } - position++ - if buffer[position] != rune('r') { - goto l152 - } - position++ - if buffer[position] != rune('u') { - goto l152 - } - position++ - if buffer[position] != rune('e') { - goto l152 - } - position++ - goto l151 - l152: - position, tokenIndex, depth = position151, tokenIndex151, depth151 - if buffer[position] != rune('f') { - goto l88 - } - position++ - if buffer[position] != rune('a') { - goto l88 - } - position++ - if buffer[position] != rune('l') { - goto l88 - } - position++ - if buffer[position] != rune('s') { - goto l88 - } - position++ - if buffer[position] != rune('e') { - goto l88 - } - position++ - } - l151: - depth-- - add(ruleboolean, position150) - } - depth-- - add(rulePegText, position149) - } - { - add(ruleAction11, position) - } - break - case '"', '\'': - { - position154 := position - depth++ - { - position155 := position - depth++ - { - position156, tokenIndex156, depth156 := position, tokenIndex, depth - { - position158 := position - depth++ - if buffer[position] != rune('\'') { - goto l157 - } - position++ - if buffer[position] != rune('\'') { - goto l157 - } - position++ - if buffer[position] != rune('\'') { - goto l157 - } - position++ - { - position159 := position - depth++ - { - position160 := position - depth++ - l161: - { - position162, 
tokenIndex162, depth162 := position, tokenIndex, depth - { - position163, tokenIndex163, depth163 := position, tokenIndex, depth - if buffer[position] != rune('\'') { - goto l163 - } - position++ - if buffer[position] != rune('\'') { - goto l163 - } - position++ - if buffer[position] != rune('\'') { - goto l163 - } - position++ - goto l162 - l163: - position, tokenIndex, depth = position163, tokenIndex163, depth163 - } - { - position164, tokenIndex164, depth164 := position, tokenIndex, depth - { - position166 := position - depth++ - { - position167, tokenIndex167, depth167 := position, tokenIndex, depth - if buffer[position] != rune('\t') { - goto l168 - } - position++ - goto l167 - l168: - position, tokenIndex, depth = position167, tokenIndex167, depth167 - if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { - goto l165 - } - position++ - } - l167: - depth-- - add(rulemlLiteralChar, position166) - } - goto l164 - l165: - position, tokenIndex, depth = position164, tokenIndex164, depth164 - if !_rules[rulenewline]() { - goto l162 - } - } - l164: - goto l161 - l162: - position, tokenIndex, depth = position162, tokenIndex162, depth162 - } - depth-- - add(rulemlLiteralBody, position160) - } - depth-- - add(rulePegText, position159) - } - if buffer[position] != rune('\'') { - goto l157 - } - position++ - if buffer[position] != rune('\'') { - goto l157 - } - position++ - if buffer[position] != rune('\'') { - goto l157 - } - position++ - { - add(ruleAction21, position) - } - depth-- - add(rulemlLiteralString, position158) - } - goto l156 - l157: - position, tokenIndex, depth = position156, tokenIndex156, depth156 - { - position171 := position - depth++ - if buffer[position] != rune('\'') { - goto l170 - } - position++ - { - position172 := position - depth++ - l173: - { - position174, tokenIndex174, depth174 := position, tokenIndex, depth - { - position175 := position - depth++ - { - switch buffer[position] { - case '\t': - if buffer[position] != rune('\t') { - goto l174 - } - position++ - break - case ' ', '!', '"', '#', '$', '%', '&': - if c := buffer[position]; c < rune(' ') || c > rune('&') { - goto l174 - } - position++ - break - default: - if c := buffer[position]; c < rune('(') || c > rune('\U0010ffff') { - goto l174 - } - position++ - break - } - } - - depth-- - add(ruleliteralChar, position175) - } - goto l173 - l174: - position, tokenIndex, depth = position174, tokenIndex174, depth174 - } - depth-- - add(rulePegText, position172) - } - if buffer[position] != rune('\'') { - goto l170 - } - position++ - { - add(ruleAction20, position) - } - depth-- - add(ruleliteralString, position171) - } - goto l156 - l170: - position, tokenIndex, depth = position156, tokenIndex156, depth156 - { - position179 := position - depth++ - if buffer[position] != rune('"') { - goto l178 - } - position++ - if buffer[position] != rune('"') { - goto l178 - } - position++ - if buffer[position] != rune('"') { - goto l178 - } - position++ - { - position180 := position - depth++ - l181: - { - position182, tokenIndex182, depth182 := position, tokenIndex, depth - { - position183, tokenIndex183, depth183 := position, tokenIndex, depth - { - position185 := position - depth++ - { - position186, tokenIndex186, depth186 := position, tokenIndex, depth - if !_rules[rulebasicChar]() { - goto l187 - } - goto l186 - l187: - position, tokenIndex, depth = position186, tokenIndex186, depth186 - if !_rules[rulenewline]() { - goto l184 - } - } - l186: - depth-- - add(rulePegText, position185) - } - { - add(ruleAction19, 
position) - } - goto l183 - l184: - position, tokenIndex, depth = position183, tokenIndex183, depth183 - if !_rules[ruleescape]() { - goto l182 - } - if !_rules[rulenewline]() { - goto l182 - } - if !_rules[rulewsnl]() { - goto l182 - } - } - l183: - goto l181 - l182: - position, tokenIndex, depth = position182, tokenIndex182, depth182 - } - depth-- - add(rulemlBasicBody, position180) - } - if buffer[position] != rune('"') { - goto l178 - } - position++ - if buffer[position] != rune('"') { - goto l178 - } - position++ - if buffer[position] != rune('"') { - goto l178 - } - position++ - { - add(ruleAction18, position) - } - depth-- - add(rulemlBasicString, position179) - } - goto l156 - l178: - position, tokenIndex, depth = position156, tokenIndex156, depth156 - { - position190 := position - depth++ - { - position191 := position - depth++ - if buffer[position] != rune('"') { - goto l88 - } - position++ - l192: - { - position193, tokenIndex193, depth193 := position, tokenIndex, depth - if !_rules[rulebasicChar]() { - goto l193 - } - goto l192 - l193: - position, tokenIndex, depth = position193, tokenIndex193, depth193 - } - if buffer[position] != rune('"') { - goto l88 - } - position++ - depth-- - add(rulePegText, position191) - } - { - add(ruleAction17, position) - } - depth-- - add(rulebasicString, position190) - } - } - l156: - depth-- - add(rulestring, position155) - } - depth-- - add(rulePegText, position154) - } - { - add(ruleAction10, position) - } - break - default: - { - position196 := position - depth++ - if !_rules[ruleinteger]() { - goto l88 - } - depth-- - add(rulePegText, position196) - } - { - add(ruleAction9, position) - } - break - } - } - - } - l90: - depth-- - add(ruleval, position89) - } - return true - l88: - position, tokenIndex, depth = position88, tokenIndex88, depth88 - return false - }, - /* 11 table <- <(stdTable / arrayTable)> */ - nil, - /* 12 stdTable <- <('[' ws ws ']' Action13)> */ - nil, - /* 13 arrayTable <- <('[' '[' ws ws (']' ']') Action14)> */ - nil, - /* 14 inlineTable <- <('{' Action15 ws inlineTableKeyValues ws '}' Action16)> */ - nil, - /* 15 inlineTableKeyValues <- <(keyval inlineTableValSep?)*> */ - nil, - /* 16 tableKey <- <(key (tableKeySep key)*)> */ - func() bool { - position203, tokenIndex203, depth203 := position, tokenIndex, depth - { - position204 := position - depth++ - if !_rules[rulekey]() { - goto l203 - } - l205: - { - position206, tokenIndex206, depth206 := position, tokenIndex, depth - { - position207 := position - depth++ - if !_rules[rulews]() { - goto l206 - } - if buffer[position] != rune('.') { - goto l206 - } - position++ - if !_rules[rulews]() { - goto l206 - } - depth-- - add(ruletableKeySep, position207) - } - if !_rules[rulekey]() { - goto l206 - } - goto l205 - l206: - position, tokenIndex, depth = position206, tokenIndex206, depth206 - } - depth-- - add(ruletableKey, position204) - } - return true - l203: - position, tokenIndex, depth = position203, tokenIndex203, depth203 - return false - }, - /* 17 tableKeySep <- <(ws '.' ws)> */ - nil, - /* 18 inlineTableValSep <- <(ws ',' ws)> */ - nil, - /* 19 integer <- <(('-' / '+')? 
int)> */ - func() bool { - position210, tokenIndex210, depth210 := position, tokenIndex, depth - { - position211 := position - depth++ - { - position212, tokenIndex212, depth212 := position, tokenIndex, depth - { - position214, tokenIndex214, depth214 := position, tokenIndex, depth - if buffer[position] != rune('-') { - goto l215 - } - position++ - goto l214 - l215: - position, tokenIndex, depth = position214, tokenIndex214, depth214 - if buffer[position] != rune('+') { - goto l212 - } - position++ - } - l214: - goto l213 - l212: - position, tokenIndex, depth = position212, tokenIndex212, depth212 - } - l213: - { - position216 := position - depth++ - { - position217, tokenIndex217, depth217 := position, tokenIndex, depth - if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l218 - } - position++ - { - position221, tokenIndex221, depth221 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l222 - } - goto l221 - l222: - position, tokenIndex, depth = position221, tokenIndex221, depth221 - if buffer[position] != rune('_') { - goto l218 - } - position++ - if !_rules[ruledigit]() { - goto l218 - } - } - l221: - l219: - { - position220, tokenIndex220, depth220 := position, tokenIndex, depth - { - position223, tokenIndex223, depth223 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l224 - } - goto l223 - l224: - position, tokenIndex, depth = position223, tokenIndex223, depth223 - if buffer[position] != rune('_') { - goto l220 - } - position++ - if !_rules[ruledigit]() { - goto l220 - } - } - l223: - goto l219 - l220: - position, tokenIndex, depth = position220, tokenIndex220, depth220 - } - goto l217 - l218: - position, tokenIndex, depth = position217, tokenIndex217, depth217 - if !_rules[ruledigit]() { - goto l210 - } - } - l217: - depth-- - add(ruleint, position216) - } - depth-- - add(ruleinteger, position211) - } - return true - l210: - position, tokenIndex, depth = position210, tokenIndex210, depth210 - return false - }, - /* 20 int <- <(([1-9] (digit / ('_' digit))+) / digit)> */ - nil, - /* 21 float <- <(integer ((frac exp?) / (frac? exp)))> */ - nil, - /* 22 frac <- <('.' digit (digit / ('_' digit))*)> */ - func() bool { - position227, tokenIndex227, depth227 := position, tokenIndex, depth - { - position228 := position - depth++ - if buffer[position] != rune('.') { - goto l227 - } - position++ - if !_rules[ruledigit]() { - goto l227 - } - l229: - { - position230, tokenIndex230, depth230 := position, tokenIndex, depth - { - position231, tokenIndex231, depth231 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l232 - } - goto l231 - l232: - position, tokenIndex, depth = position231, tokenIndex231, depth231 - if buffer[position] != rune('_') { - goto l230 - } - position++ - if !_rules[ruledigit]() { - goto l230 - } - } - l231: - goto l229 - l230: - position, tokenIndex, depth = position230, tokenIndex230, depth230 - } - depth-- - add(rulefrac, position228) - } - return true - l227: - position, tokenIndex, depth = position227, tokenIndex227, depth227 - return false - }, - /* 23 exp <- <(('e' / 'E') ('-' / '+')? 
digit (digit / ('_' digit))*)> */ - func() bool { - position233, tokenIndex233, depth233 := position, tokenIndex, depth - { - position234 := position - depth++ - { - position235, tokenIndex235, depth235 := position, tokenIndex, depth - if buffer[position] != rune('e') { - goto l236 - } - position++ - goto l235 - l236: - position, tokenIndex, depth = position235, tokenIndex235, depth235 - if buffer[position] != rune('E') { - goto l233 - } - position++ - } - l235: - { - position237, tokenIndex237, depth237 := position, tokenIndex, depth - { - position239, tokenIndex239, depth239 := position, tokenIndex, depth - if buffer[position] != rune('-') { - goto l240 - } - position++ - goto l239 - l240: - position, tokenIndex, depth = position239, tokenIndex239, depth239 - if buffer[position] != rune('+') { - goto l237 - } - position++ - } - l239: - goto l238 - l237: - position, tokenIndex, depth = position237, tokenIndex237, depth237 - } - l238: - if !_rules[ruledigit]() { - goto l233 - } - l241: - { - position242, tokenIndex242, depth242 := position, tokenIndex, depth - { - position243, tokenIndex243, depth243 := position, tokenIndex, depth - if !_rules[ruledigit]() { - goto l244 - } - goto l243 - l244: - position, tokenIndex, depth = position243, tokenIndex243, depth243 - if buffer[position] != rune('_') { - goto l242 - } - position++ - if !_rules[ruledigit]() { - goto l242 - } - } - l243: - goto l241 - l242: - position, tokenIndex, depth = position242, tokenIndex242, depth242 - } - depth-- - add(ruleexp, position234) - } - return true - l233: - position, tokenIndex, depth = position233, tokenIndex233, depth233 - return false - }, - /* 24 string <- <(mlLiteralString / literalString / mlBasicString / basicString)> */ - nil, - /* 25 basicString <- <(<('"' basicChar* '"')> Action17)> */ - nil, - /* 26 basicChar <- <(basicUnescaped / escaped)> */ - func() bool { - position247, tokenIndex247, depth247 := position, tokenIndex, depth - { - position248 := position - depth++ - { - position249, tokenIndex249, depth249 := position, tokenIndex, depth - { - position251 := position - depth++ - { - switch buffer[position] { - case ' ', '!': - if c := buffer[position]; c < rune(' ') || c > rune('!') { - goto l250 - } - position++ - break - case '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[': - if c := buffer[position]; c < rune('#') || c > rune('[') { - goto l250 - } - position++ - break - default: - if c := buffer[position]; c < rune(']') || c > rune('\U0010ffff') { - goto l250 - } - position++ - break - } - } - - depth-- - add(rulebasicUnescaped, position251) - } - goto l249 - l250: - position, tokenIndex, depth = position249, tokenIndex249, depth249 - { - position253 := position - depth++ - if !_rules[ruleescape]() { - goto l247 - } - { - switch buffer[position] { - case 'U': - if buffer[position] != rune('U') { - goto l247 - } - position++ - if !_rules[rulehexQuad]() { - goto l247 - } - if !_rules[rulehexQuad]() { - goto l247 - } - break - case 'u': - if buffer[position] != rune('u') { - goto l247 - } - position++ - if !_rules[rulehexQuad]() { - goto l247 - } - break - case '\\': - if buffer[position] != rune('\\') { - goto l247 - } - position++ - break - case '/': - if buffer[position] != rune('/') { - goto l247 - } - position++ - break - case '"': - if buffer[position] != 
rune('"') { - goto l247 - } - position++ - break - case 'r': - if buffer[position] != rune('r') { - goto l247 - } - position++ - break - case 'f': - if buffer[position] != rune('f') { - goto l247 - } - position++ - break - case 'n': - if buffer[position] != rune('n') { - goto l247 - } - position++ - break - case 't': - if buffer[position] != rune('t') { - goto l247 - } - position++ - break - default: - if buffer[position] != rune('b') { - goto l247 - } - position++ - break - } - } - - depth-- - add(ruleescaped, position253) - } - } - l249: - depth-- - add(rulebasicChar, position248) - } - return true - l247: - position, tokenIndex, depth = position247, tokenIndex247, depth247 - return false - }, - /* 27 escaped <- <(escape ((&('U') ('U' hexQuad hexQuad)) | (&('u') ('u' hexQuad)) | (&('\\') '\\') | (&('/') '/') | (&('"') '"') | (&('r') 'r') | (&('f') 'f') | (&('n') 'n') | (&('t') 't') | (&('b') 'b')))> */ - nil, - /* 28 basicUnescaped <- <((&(' ' | '!') [ -!]) | (&('#' | '$' | '%' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[') [#-[]) | (&(']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô') []-􏿿]))> */ - nil, - /* 29 escape <- <'\\'> */ - func() bool { - position257, tokenIndex257, depth257 := position, tokenIndex, depth - { - position258 := position - depth++ - if buffer[position] != rune('\\') { - goto l257 - } - position++ - depth-- - add(ruleescape, position258) - } - return true - l257: - position, tokenIndex, depth = position257, tokenIndex257, depth257 - return false - }, - /* 30 mlBasicString <- <('"' '"' '"' mlBasicBody ('"' '"' '"') Action18)> */ - nil, - /* 31 mlBasicBody <- <((<(basicChar / newline)> Action19) / (escape newline wsnl))*> */ - nil, - /* 32 literalString <- <('\'' '\'' Action20)> */ - nil, - /* 33 literalChar <- <((&('\t') '\t') | (&(' ' | '!' | '"' | '#' | '$' | '%' | '&') [ -&]) | (&('(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' 
| '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[' | '\\' | ']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | '¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Á' | 'Â' | 'Ã' | 'Ä' | 'Å' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'Ì' | 'Í' | 'Î' | 'Ï' | 'Ð' | 'Ñ' | 'Ò' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ý' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'å' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô') [(-􏿿]))> */ - nil, - /* 34 mlLiteralString <- <('\'' '\'' '\'' ('\'' '\'' '\'') Action21)> */ - nil, - /* 35 mlLiteralBody <- <(!('\'' '\'' '\'') (mlLiteralChar / newline))*> */ - nil, - /* 36 mlLiteralChar <- <('\t' / [ -􏿿])> */ - nil, - /* 37 hexdigit <- <((&('a' | 'b' | 'c' | 'd' | 'e' | 'f') [a-f]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F') [A-F]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]))> */ - func() bool { - position266, tokenIndex266, depth266 := position, tokenIndex, depth - { - position267 := position - depth++ - { - switch buffer[position] { - case 'a', 'b', 'c', 'd', 'e', 'f': - if c := buffer[position]; c < rune('a') || c > rune('f') { - goto l266 - } - position++ - break - case 'A', 'B', 'C', 'D', 'E', 'F': - if c := buffer[position]; c < rune('A') || c > rune('F') { - goto l266 - } - position++ - break - default: - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l266 - } - position++ - break - } - } - - depth-- - add(rulehexdigit, position267) - } - return true - l266: - position, tokenIndex, depth = position266, tokenIndex266, depth266 - return false - }, - /* 38 hexQuad <- <(hexdigit hexdigit hexdigit hexdigit)> */ - func() bool { - position269, tokenIndex269, depth269 := position, tokenIndex, depth - { - position270 := position - depth++ - if !_rules[rulehexdigit]() { - goto l269 - } - if !_rules[rulehexdigit]() { - goto l269 - } - if !_rules[rulehexdigit]() { - goto l269 - } - if !_rules[rulehexdigit]() { - goto l269 - } - depth-- - add(rulehexQuad, position270) - } - return true - l269: - position, tokenIndex, depth = position269, tokenIndex269, depth269 - return false - }, - /* 39 boolean <- <(('t' 'r' 'u' 'e') / ('f' 'a' 'l' 's' 'e'))> */ - nil, - /* 40 dateFullYear <- */ - nil, - /* 41 dateMonth <- */ - nil, - /* 42 dateMDay <- */ - nil, - /* 43 timeHour <- */ - func() bool { - position275, tokenIndex275, depth275 := position, tokenIndex, depth - { - position276 := position - depth++ - if !_rules[ruledigitDual]() { - goto l275 - } - depth-- - add(ruletimeHour, position276) - } - return true - l275: - position, tokenIndex, depth = position275, tokenIndex275, depth275 - return false - }, - /* 44 timeMinute <- */ - func() bool { - position277, tokenIndex277, depth277 := position, 
tokenIndex, depth - { - position278 := position - depth++ - if !_rules[ruledigitDual]() { - goto l277 - } - depth-- - add(ruletimeMinute, position278) - } - return true - l277: - position, tokenIndex, depth = position277, tokenIndex277, depth277 - return false - }, - /* 45 timeSecond <- */ - nil, - /* 46 timeSecfrac <- <('.' digit+)> */ - nil, - /* 47 timeNumoffset <- <(('-' / '+') timeHour ':' timeMinute)> */ - nil, - /* 48 timeOffset <- <('Z' / timeNumoffset)> */ - nil, - /* 49 partialTime <- <(timeHour ':' timeMinute ':' timeSecond timeSecfrac?)> */ - nil, - /* 50 fullDate <- <(dateFullYear '-' dateMonth '-' dateMDay)> */ - nil, - /* 51 fullTime <- <(partialTime timeOffset)> */ - nil, - /* 52 datetime <- <(fullDate 'T' fullTime)> */ - nil, - /* 53 digit <- <[0-9]> */ - func() bool { - position287, tokenIndex287, depth287 := position, tokenIndex, depth - { - position288 := position - depth++ - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l287 - } - position++ - depth-- - add(ruledigit, position288) - } - return true - l287: - position, tokenIndex, depth = position287, tokenIndex287, depth287 - return false - }, - /* 54 digitDual <- <(digit digit)> */ - func() bool { - position289, tokenIndex289, depth289 := position, tokenIndex, depth - { - position290 := position - depth++ - if !_rules[ruledigit]() { - goto l289 - } - if !_rules[ruledigit]() { - goto l289 - } - depth-- - add(ruledigitDual, position290) - } - return true - l289: - position, tokenIndex, depth = position289, tokenIndex289, depth289 - return false - }, - /* 55 digitQuad <- <(digitDual digitDual)> */ - nil, - /* 56 array <- <('[' Action22 wsnl arrayValues wsnl ']')> */ - nil, - /* 57 arrayValues <- <(val Action23 arraySep? (comment? newline)?)*> */ - nil, - /* 58 arraySep <- <(ws ',' wsnl)> */ - nil, - /* 60 Action0 <- <{ _ = buffer }> */ - nil, - nil, - /* 62 Action1 <- <{ p.SetTableString(begin, end) }> */ - nil, - /* 63 Action2 <- <{ p.AddLineCount(end - begin) }> */ - nil, - /* 64 Action3 <- <{ p.AddLineCount(end - begin) }> */ - nil, - /* 65 Action4 <- <{ p.AddKeyValue() }> */ - nil, - /* 66 Action5 <- <{ p.SetKey(p.buffer, begin, end) }> */ - nil, - /* 67 Action6 <- <{ p.SetKey(p.buffer, begin, end) }> */ - nil, - /* 68 Action7 <- <{ p.SetTime(begin, end) }> */ - nil, - /* 69 Action8 <- <{ p.SetFloat64(begin, end) }> */ - nil, - /* 70 Action9 <- <{ p.SetInt64(begin, end) }> */ - nil, - /* 71 Action10 <- <{ p.SetString(begin, end) }> */ - nil, - /* 72 Action11 <- <{ p.SetBool(begin, end) }> */ - nil, - /* 73 Action12 <- <{ p.SetArray(begin, end) }> */ - nil, - /* 74 Action13 <- <{ p.SetTable(p.buffer, begin, end) }> */ - nil, - /* 75 Action14 <- <{ p.SetArrayTable(p.buffer, begin, end) }> */ - nil, - /* 76 Action15 <- <{ p.StartInlineTable() }> */ - nil, - /* 77 Action16 <- <{ p.EndInlineTable() }> */ - nil, - /* 78 Action17 <- <{ p.SetBasicString(p.buffer, begin, end) }> */ - nil, - /* 79 Action18 <- <{ p.SetMultilineString() }> */ - nil, - /* 80 Action19 <- <{ p.AddMultilineBasicBody(p.buffer, begin, end) }> */ - nil, - /* 81 Action20 <- <{ p.SetLiteralString(p.buffer, begin, end) }> */ - nil, - /* 82 Action21 <- <{ p.SetMultilineLiteralString(p.buffer, begin, end) }> */ - nil, - /* 83 Action22 <- <{ p.StartArray() }> */ - nil, - /* 84 Action23 <- <{ p.AddArrayVal() }> */ - nil, - } - p.rules = _rules -} diff --git a/Godeps/_workspace/src/github.com/naoina/toml/testdata/test.toml b/Godeps/_workspace/src/github.com/naoina/toml/testdata/test.toml deleted file mode 100644 index ec119752d..000000000 
--- a/Godeps/_workspace/src/github.com/naoina/toml/testdata/test.toml +++ /dev/null @@ -1,244 +0,0 @@ -################################################################################ -## Comment - -# Speak your mind with the hash symbol. They go from the symbol to the end of -# the line. - - -################################################################################ -## Table - -# Tables (also known as hash tables or dictionaries) are collections of -# key/value pairs. They appear in square brackets on a line by themselves. - -[table] - -key = "value" # Yeah, you can do this. - -# Nested tables are denoted by table names with dots in them. Name your tables -# whatever crap you please, just don't use #, ., [ or ]. - -[table.subtable] - -key = "another value" - -# You don't need to specify all the super-tables if you don't want to. TOML -# knows how to do it for you. - -# [x] you -# [x.y] don't -# [x.y.z] need these -[x.y.z.w] # for this to work - - -################################################################################ -## Inline Table - -# Inline tables provide a more compact syntax for expressing tables. They are -# especially useful for grouped data that can otherwise quickly become verbose. -# Inline tables are enclosed in curly braces `{` and `}`. No newlines are -# allowed between the curly braces unless they are valid within a value. - -[table.inline] - -name = { first = "Tom", last = "Preston-Werner" } -point = { x = 1, y = 2 } - - -################################################################################ -## String - -# There are four ways to express strings: basic, multi-line basic, literal, and -# multi-line literal. All strings must contain only valid UTF-8 characters. - -[string.basic] - -basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF." - -[string.multiline] - -# The following strings are byte-for-byte equivalent: -key1 = "One\nTwo" -key2 = """One\nTwo""" -key3 = """ -One -Two""" - -[string.multiline.continued] - -# The following strings are byte-for-byte equivalent: -key1 = "The quick brown fox jumps over the lazy dog." - -key2 = """ -The quick brown \ - - - fox jumps over \ - the lazy dog.""" - -key3 = """\ - The quick brown \ - fox jumps over \ - the lazy dog.\ - """ - -[string.literal] - -# What you see is what you get. -winpath = 'C:\Users\nodejs\templates' -winpath2 = '\\ServerX\admin$\system32\' -quoted = 'Tom "Dubs" Preston-Werner' -regex = '<\i\c*\s*>' - - -[string.literal.multiline] - -regex2 = '''I [dw]on't need \d{2} apples''' -lines = ''' -The first newline is -trimmed in raw strings. - All other whitespace - is preserved. -''' - - -################################################################################ -## Integer - -# Integers are whole numbers. Positive numbers may be prefixed with a plus sign. -# Negative numbers are prefixed with a minus sign. - -[integer] - -key1 = +99 -key2 = 42 -key3 = 0 -key4 = -17 - -[integer.underscores] - -# For large numbers, you may use underscores to enhance readability. Each -# underscore must be surrounded by at least one digit. -key1 = 1_000 -key2 = 5_349_221 -key3 = 1_2_3_4_5 # valid but inadvisable - - -################################################################################ -## Float - -# A float consists of an integer part (which may be prefixed with a plus or -# minus sign) followed by a fractional part and/or an exponent part. 
- -[float.fractional] - -key1 = +1.0 -key2 = 3.1415 -key3 = -0.01 - -[float.exponent] - -key1 = 5e+22 -key2 = 1e6 -key3 = -2E-2 - -[float.both] - -key = 6.626e-34 - -[float.underscores] - -key1 = 9_224_617.445_991_228_313 -key2 = 1e1_00 - - -################################################################################ -## Boolean - -# Booleans are just the tokens you're used to. Always lowercase. - -[boolean] - -True = true -False = false - - -################################################################################ -## Datetime - -# Datetimes are RFC 3339 dates. - -[datetime] - -key1 = 1979-05-27T07:32:00Z -key2 = 1979-05-27T00:32:00-07:00 -key3 = 1979-05-27T00:32:00.999999-07:00 - - -################################################################################ -## Array - -# Arrays are square brackets with other primitives inside. Whitespace is -# ignored. Elements are separated by commas. Data types may not be mixed. - -[array] - -key1 = [ 1, 2, 3 ] -key2 = [ "red", "yellow", "green" ] -key3 = [ [ 1, 2 ], [3, 4, 5] ] -key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok - -# Arrays can also be multiline. So in addition to ignoring whitespace, arrays -# also ignore newlines between the brackets. Terminating commas are ok before -# the closing bracket. - -key5 = [ - 1, 2, 3 -] -key6 = [ - 1, - 2, # this is ok -] - - -################################################################################ -## Array of Tables - -# These can be expressed by using a table name in double brackets. Each table -# with the same double bracketed name will be an element in the array. The -# tables are inserted in the order encountered. - -[[products]] - -name = "Hammer" -sku = 738594937 - -[[products]] - -[[products]] - -name = "Nail" -sku = 284758393 -color = "gray" - - -# You can create nested arrays of tables as well. - -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" diff --git a/Godeps/_workspace/src/github.com/naoina/toml/util.go b/Godeps/_workspace/src/github.com/naoina/toml/util.go deleted file mode 100644 index dc6a548d7..000000000 --- a/Godeps/_workspace/src/github.com/naoina/toml/util.go +++ /dev/null @@ -1,79 +0,0 @@ -package toml - -import ( - "go/ast" - "reflect" - "strings" - "unicode" -) - -// toCamelCase returns a copy of the string s with all Unicode letters mapped to their camel case. -// It will convert to upper case previous letter of '_' and first letter, and remove letter of '_'. 
-func toCamelCase(s string) string { - if s == "" { - return "" - } - result := make([]rune, 0, len(s)) - upper := false - for _, r := range s { - if r == '_' { - upper = true - continue - } - if upper { - result = append(result, unicode.ToUpper(r)) - upper = false - continue - } - result = append(result, r) - } - result[0] = unicode.ToUpper(result[0]) - return string(result) -} - -const ( - fieldTagName = "toml" -) - -func findField(rv reflect.Value, name string) (field reflect.Value, fieldName string, found bool) { - switch rv.Kind() { - case reflect.Struct: - rt := rv.Type() - for i := 0; i < rt.NumField(); i++ { - ft := rt.Field(i) - if !ast.IsExported(ft.Name) { - continue - } - if col, _ := extractTag(ft.Tag.Get(fieldTagName)); col == name { - return rv.Field(i), ft.Name, true - } - } - for _, name := range []string{ - strings.Title(name), - toCamelCase(name), - strings.ToUpper(name), - } { - if field := rv.FieldByName(name); field.IsValid() { - return field, name, true - } - } - case reflect.Map: - return reflect.New(rv.Type().Elem()).Elem(), name, true - } - return field, "", false -} - -func extractTag(tag string) (col, rest string) { - tags := strings.SplitN(tag, ",", 2) - if len(tags) == 2 { - return strings.TrimSpace(tags[0]), strings.TrimSpace(tags[1]) - } - return strings.TrimSpace(tags[0]), "" -} - -func tableName(prefix, name string) string { - if prefix != "" { - return prefix + string(tableSeparator) + name - } - return name -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/LICENSE b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/LICENSE deleted file mode 100644 index da139850f..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2012, Sam Freiberg -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/README.md b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/README.md deleted file mode 100644 index 63907ffbf..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/README.md +++ /dev/null @@ -1,65 +0,0 @@ -## Overview -This is the start of a library for [Twilio](http://www.twilio.com/). Gotwilio supports making voice calls and sending text messages. 
- -## License -Gotwilio is licensed under a BSD license. - -## Installation -To install gotwilio, simply run `go get github.com/sfreiberg/gotwilio`. - -## SMS Example - - package main - - import ( - "github.com/sfreiberg/gotwilio" - ) - - func main() { - accountSid := "ABC123..........ABC123" - authToken := "ABC123..........ABC123" - twilio := gotwilio.NewTwilioClient(accountSid, authToken) - - from := "+15555555555" - to := "+15555555555" - message := "Welcome to gotwilio!" - twilio.SendSMS(from, to, message, "", "") - } - -## MMS Example - - package main - - import ( - "github.com/sfreiberg/gotwilio" - ) - - func main() { - accountSid := "ABC123..........ABC123" - authToken := "ABC123..........ABC123" - twilio := gotwilio.NewTwilioClient(accountSid, authToken) - - from := "+15555555555" - to := "+15555555555" - message := "Welcome to gotwilio!" - twilio.SendMMS(from, to, message, "http://host/myimage.gif", "", "") - } - -## Voice Example - - package main - - import ( - "github.com/sfreiberg/gotwilio" - ) - - func main() { - accountSid := "ABC123..........ABC123" - authToken := "ABC123..........ABC123" - twilio := gotwilio.NewTwilioClient(accountSid, authToken) - - from := "+15555555555" - to := "+15555555555" - callbackParams := gotwilio.NewCallbackParameters("http://example.com") - twilio.CallWithUrlCallbacks(from, to, callbackParams) - } diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio.go deleted file mode 100644 index 2a966b19e..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package gotwilio is a library for interacting with http://www.twilio.com/ API. -package gotwilio - -import ( - "net/http" - "net/url" - "strings" -) - -// Twilio stores basic information important for connecting to the -// twilio.com REST api such as AccountSid and AuthToken. -type Twilio struct { - AccountSid string - AuthToken string - BaseUrl string - HTTPClient *http.Client -} - -// Exception is a representation of a twilio exception. -type Exception struct { - Status int `json:"status"` // HTTP specific error code - Message string `json:"message"` // HTTP error message - Code int `json:"code"` // Twilio specific error code - MoreInfo string `json:"more_info"` // Additional info from Twilio -} - -// Create a new Twilio struct. -func NewTwilioClient(accountSid, authToken string) *Twilio { - return NewTwilioClientCustomHTTP(accountSid, authToken, nil) -} - -// Create a new Twilio client, optionally using a custom http.Client -func NewTwilioClientCustomHTTP(accountSid, authToken string, HTTPClient *http.Client) *Twilio { - twilioUrl := "https://api.twilio.com/2010-04-01" // Should this be moved into a constant? 
- - if HTTPClient == nil { - HTTPClient = http.DefaultClient - } - - return &Twilio{accountSid, authToken, twilioUrl, HTTPClient} -} - -func (twilio *Twilio) post(formValues url.Values, twilioUrl string) (*http.Response, error) { - req, err := http.NewRequest("POST", twilioUrl, strings.NewReader(formValues.Encode())) - if err != nil { - return nil, err - } - req.SetBasicAuth(twilio.AccountSid, twilio.AuthToken) - req.Header.Add("Content-Type", "application/x-www-form-urlencoded") - - client := twilio.HTTPClient - if client == nil { - client = http.DefaultClient - } - - return client.Do(req) -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio_test.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio_test.go deleted file mode 100644 index 8903683b6..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/gotwilio_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package gotwilio - -import ( - "testing" -) - -var params map[string]string - -func init() { - params = make(map[string]string) - params["SID"] = "AC0f30491286ab4abb4a108abefbd05d8a" - params["TOKEN"] = "1dcf52d7a1f3853ed78f0ee20d056dd0" - params["FROM"] = "+15005550006" - params["TO"] = "+19135551234" -} - -func TestSMS(t *testing.T) { - msg := "Welcome to gotwilio" - twilio := NewTwilioClient(params["SID"], params["TOKEN"]) - _, exc, err := twilio.SendSMS(params["FROM"], params["TO"], msg, "", "") - if err != nil { - t.Fatal(err) - } - - if exc != nil { - t.Fatal(exc) - } -} - -func TestMMS(t *testing.T) { - msg := "Welcome to gotwilio" - twilio := NewTwilioClient(params["SID"], params["TOKEN"]) - _, exc, err := twilio.SendMMS(params["FROM"], params["TO"], msg, "http://www.google.com/images/logo.png", "", "") - if err != nil { - t.Fatal(err) - } - - if exc != nil { - t.Fatal(exc) - } -} - -func TestVoice(t *testing.T) { - callback := NewCallbackParameters("http://example.com") - twilio := NewTwilioClient(params["SID"], params["TOKEN"]) - _, exc, err := twilio.CallWithUrlCallbacks(params["FROM"], params["TO"], callback) - if err != nil { - t.Fatal(err) - } - - if exc != nil { - t.Fatal(exc) - } -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/sms.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/sms.go deleted file mode 100644 index d87e36bd9..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/sms.go +++ /dev/null @@ -1,112 +0,0 @@ -package gotwilio - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "time" -) - -// SmsResponse is returned after a text/sms message is posted to Twilio -type SmsResponse struct { - Sid string `json:"sid"` - DateCreated string `json:"date_created"` - DateUpdate string `json:"date_updated"` - DateSent string `json:"date_sent"` - AccountSid string `json:"account_sid"` - To string `json:"to"` - From string `json:"from"` - MediaUrl string `json:"media_url"` - Body string `json:"body"` - Status string `json:"status"` - Direction string `json:"direction"` - ApiVersion string `json:"api_version"` - Price *float32 `json:"price,omitempty"` - Url string `json:"uri"` -} - -// Returns SmsResponse.DateCreated as a time.Time object -// instead of a string. -func (sms *SmsResponse) DateCreatedAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, sms.DateCreated) -} - -// Returns SmsResponse.DateUpdate as a time.Time object -// instead of a string. 
-func (sms *SmsResponse) DateUpdateAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, sms.DateUpdate) -} - -// Returns SmsResponse.DateSent as a time.Time object -// instead of a string. -func (sms *SmsResponse) DateSentAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, sms.DateSent) -} - -// SendTextMessage uses Twilio to send a text message. -// See http://www.twilio.com/docs/api/rest/sending-sms for more information. -func (twilio *Twilio) SendSMS(from, to, body, statusCallback, applicationSid string) (smsResponse *SmsResponse, exception *Exception, err error) { - formValues := initFormValues(from, to, body, "", statusCallback, applicationSid) - smsResponse, exception, err = twilio.sendMessage(formValues) - return -} - -// SendMultimediaMessage uses Twilio to send a multimedia message. -func (twilio *Twilio) SendMMS(from, to, body, mediaUrl, statusCallback, applicationSid string) (smsResponse *SmsResponse, exception *Exception, err error) { - formValues := initFormValues(from, to, body, mediaUrl, statusCallback, applicationSid) - smsResponse, exception, err = twilio.sendMessage(formValues) - return -} - -// Core method to send message -func (twilio *Twilio) sendMessage(formValues url.Values) (smsResponse *SmsResponse, exception *Exception, err error) { - twilioUrl := twilio.BaseUrl + "/Accounts/" + twilio.AccountSid + "/Messages.json" - - res, err := twilio.post(formValues, twilioUrl) - if err != nil { - return smsResponse, exception, err - } - defer res.Body.Close() - - responseBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return smsResponse, exception, err - } - - if res.StatusCode != http.StatusCreated { - exception = new(Exception) - err = json.Unmarshal(responseBody, exception) - - // We aren't checking the error because we don't actually care. - // It's going to be passed to the client either way. - return smsResponse, exception, err - } - - smsResponse = new(SmsResponse) - err = json.Unmarshal(responseBody, smsResponse) - return smsResponse, exception, err -} - -// Form values initialization -func initFormValues(from, to, body, mediaUrl, statusCallback, applicationSid string) url.Values { - formValues := url.Values{} - - formValues.Set("From", from) - formValues.Set("To", to) - formValues.Set("Body", body) - - if mediaUrl != "" { - formValues.Set("MediaUrl", mediaUrl) - } - - if statusCallback != "" { - formValues.Set("StatusCallback", statusCallback) - } - - if applicationSid != "" { - formValues.Set("ApplicationSid", applicationSid) - } - - return formValues -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util.go deleted file mode 100644 index 81aa1930c..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util.go +++ /dev/null @@ -1,84 +0,0 @@ -package gotwilio - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "errors" - "net/http" - "net/url" - "sort" -) - -// GenerateSignature computes the Twilio signature for verifying the -// authenticity of a request. 
It is based on the specification at: -// https://www.twilio.com/docs/security#validating-requests -func (twilio *Twilio) GenerateSignature(url string, form url.Values) ([]byte, error) { - var buf bytes.Buffer - - buf.WriteString(url) - - keys := make(sort.StringSlice, 0, len(form)) - for k := range form { - keys = append(keys, k) - } - - keys.Sort() - - for _, k := range keys { - buf.WriteString(k) - for _, v := range form[k] { - buf.WriteString(v) - } - } - - mac := hmac.New(sha1.New, []byte(twilio.AuthToken)) - mac.Write(buf.Bytes()) - - var expected bytes.Buffer - coder := base64.NewEncoder(base64.StdEncoding, &expected) - _, err := coder.Write(mac.Sum(nil)) - if err != nil { - return nil, err - } - - err = coder.Close() - if err != nil { - return nil, err - } - - return expected.Bytes(), nil -} - -// CheckRequestSignature checks that the X-Twilio-Signature header on a request -// matches the expected signature defined by the GenerateSignature function. -// -// The baseUrl parameter will be prepended to the request URL. It is useful for -// specifying the protocol and host parts of the server URL hosting your endpoint. -// -// Passing a non-POST request or a request without the X-Twilio-Signature -// header is an error. -func (twilio *Twilio) CheckRequestSignature(r *http.Request, baseURL string) (bool, error) { - if r.Method != "POST" { - return false, errors.New("Checking signatures on non-POST requests is not implemented") - } - - if err := r.ParseForm(); err != nil { - return false, err - } - - url := baseURL + r.URL.String() - - expected, err := twilio.GenerateSignature(url, r.PostForm) - if err != nil { - return false, err - } - - actual := r.Header.Get("X-Twilio-Signature") - if actual == "" { - return false, errors.New("Request does not have a twilio signature header") - } - - return hmac.Equal(expected, []byte(actual)), nil -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util_test.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util_test.go deleted file mode 100644 index 4d53899a4..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/util_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package gotwilio - -import ( - "bytes" - "io/ioutil" - "net/http" - "net/url" - "testing" -) - -const ( - // Magic strings from https://github.com/twilio/twilio-python - testServerURL = "http://www.postbin.org" - testAuthToken = "1c892n40nd03kdnc0112slzkl3091j20" - testValidSignature = "fF+xx6dTinOaCdZ0aIeNkHr/ZAA=" -) - -func TestCheckSignature(t *testing.T) { - twilio := Twilio{ - AuthToken: testAuthToken, - } - - // Magic strings from https://github.com/twilio/twilio-python - u, err := url.Parse("/1ed898x") - if err != nil { - t.Fatal(err) - } - h := http.Header{ - "Content-Type": []string{"application/x-www-form-urlencoded"}, - "X-Twilio-Signature": []string{testValidSignature}, - } - b := bytes.NewBufferString(`FromZip=89449&From=%2B15306666666&` + - `FromCity=SOUTH+LAKE+TAHOE&ApiVersion=2010-04-01&To=%2B15306384866&` + - `CallStatus=ringing&CalledState=CA&FromState=CA&Direction=inbound&` + - `ToCity=OAKLAND&ToZip=94612&CallerCity=SOUTH+LAKE+TAHOE&FromCountry=US&` + - `CallerName=CA+Wireless+Call&CalledCity=OAKLAND&CalledCountry=US&` + - `Caller=%2B15306666666&CallerZip=89449&AccountSid=AC9a9f9392lad99kla0sklakjs90j092j3&` + - `Called=%2B15306384866&CallerCountry=US&CalledZip=94612&CallSid=CAd800bb12c0426a7ea4230e492fef2a4f&` + - `CallerState=CA&ToCountry=US&ToState=CA`) - - r := http.Request{ - Method: "POST", - URL: u, - Header: h, - Body: 
ioutil.NopCloser(b), - } - - valid, err := twilio.CheckRequestSignature(&r, testServerURL) - if err != nil { - t.Fatal(err) - } - if !valid { - t.Fatal("Expected signature to be valid") - } - - h["X-Twilio-Signature"] = []string{"foo"} - valid, err = twilio.CheckRequestSignature(&r, testServerURL) - if err != nil { - t.Fatal(err) - } - if valid { - t.Fatal("Expected signature to be invalid") - } - - delete(h, "X-Twilio-Signature") - valid, err = twilio.CheckRequestSignature(&r, testServerURL) - if err == nil { - t.Fatal("Expected an error verifying a request without a signature header") - } -} diff --git a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/voice.go b/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/voice.go deleted file mode 100644 index 091db41b3..000000000 --- a/Godeps/_workspace/src/github.com/sfreiberg/gotwilio/voice.go +++ /dev/null @@ -1,161 +0,0 @@ -package gotwilio - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - "time" -) - -// These are the paramters to use when you want Twilio to use callback urls. -// See http://www.twilio.com/docs/api/rest/making-calls for more info. -type CallbackParameters struct { - Url string // Required - Method string // Optional - FallbackUrl string // Optional - FallbackMethod string // Optional - StatusCallback string // Optional - StatusCallbackMethod string // Optional - SendDigits string // Optional - IfMachine string // False, Continue or Hangup; http://www.twilio.com/docs/errors/21207 - Timeout int // Optional - Record bool // Optional -} - -// VoiceResponse contains the details about successful voice calls. -type VoiceResponse struct { - Sid string `json:"sid"` - DateCreated string `json:"date_created"` - DateUpdated string `json:"date_updated"` - ParentCallSid string `json:"parent_call_sid"` - AccountSid string `json:"account_sid"` - To string `json:"to"` - ToFormatted string `json:"to_formatted"` - From string `json:"from"` - FromFormatted string `json:"from_formatted"` - PhoneNumberSid string `json:"phone_number_sid"` - Status string `json:"status"` - StartTime string `json:"start_time"` - EndTime string `json:"end_time"` - Duration int `json:"duration"` - Price *float32 `json:"price,omitempty"` - Direction string `json:"direction"` - AnsweredBy string `json:"answered_by"` - ApiVersion string `json:"api_version"` - Annotation string `json:"annotation"` - ForwardedFrom string `json:"forwarded_from"` - GroupSid string `json:"group_sid"` - CallerName string `json:"caller_name"` - Uri string `json:"uri"` - // TODO: handle SubresourceUris -} - -// Returns VoiceResponse.DateCreated as a time.Time object -// instead of a string. -func (vr *VoiceResponse) DateCreatedAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, vr.DateCreated) -} - -// Returns VoiceResponse.DateUpdated as a time.Time object -// instead of a string. -func (vr *VoiceResponse) DateUpdatedAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, vr.DateUpdated) -} - -// Returns VoiceResponse.StartTime as a time.Time object -// instead of a string. -func (vr *VoiceResponse) StartTimeAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, vr.StartTime) -} - -// Returns VoiceResponse.EndTime as a time.Time object -// instead of a string. -func (vr *VoiceResponse) EndTimeAsTime() (time.Time, error) { - return time.Parse(time.RFC1123Z, vr.EndTime) -} - -// Returns a CallbackParameters type with the specified url and -// CallbackParameters.Timeout set to 60. 
-func NewCallbackParameters(url string) *CallbackParameters { - return &CallbackParameters{Url: url, Timeout: 60} -} - -// Place a voice call with a list of callbacks specified. -func (twilio *Twilio) CallWithUrlCallbacks(from, to string, callbackParameters *CallbackParameters) (*VoiceResponse, *Exception, error) { - formValues := url.Values{} - formValues.Set("From", from) - formValues.Set("To", to) - formValues.Set("Url", callbackParameters.Url) - - // Optional values - if callbackParameters.Method != "" { - formValues.Set("Method", callbackParameters.Method) - } - if callbackParameters.FallbackUrl != "" { - formValues.Set("FallbackUrl", callbackParameters.FallbackUrl) - } - if callbackParameters.FallbackMethod != "" { - formValues.Set("FallbackMethod", callbackParameters.FallbackMethod) - } - if callbackParameters.StatusCallback != "" { - formValues.Set("StatusCallback", callbackParameters.StatusCallback) - } - if callbackParameters.StatusCallbackMethod != "" { - formValues.Set("StatusCallbackMethod", callbackParameters.StatusCallbackMethod) - } - if callbackParameters.SendDigits != "" { - formValues.Set("SendDigits", callbackParameters.SendDigits) - } - if callbackParameters.IfMachine != "" { - formValues.Set("IfMachine", callbackParameters.IfMachine) - } - if callbackParameters.Timeout != 0 { - formValues.Set("Timeout", strconv.Itoa(callbackParameters.Timeout)) - } - if callbackParameters.Record { - formValues.Set("Record", "true") - } else { - formValues.Set("Record", "false") - } - - return twilio.voicePost(formValues) -} - -// Place a voice call with an ApplicationSid specified. -func (twilio *Twilio) CallWithApplicationCallbacks(from, to, applicationSid string) (*VoiceResponse, *Exception, error) { - formValues := url.Values{} - formValues.Set("From", from) - formValues.Set("To", to) - formValues.Set("ApplicationSid", applicationSid) - - return twilio.voicePost(formValues) -} - -// This is a private method that has the common bits for making a voice call. -func (twilio *Twilio) voicePost(formValues url.Values) (*VoiceResponse, *Exception, error) { - var voiceResponse *VoiceResponse - var exception *Exception - twilioUrl := twilio.BaseUrl + "/Accounts/" + twilio.AccountSid + "/Calls.json" - - res, err := twilio.post(formValues, twilioUrl) - if err != nil { - return voiceResponse, exception, err - } - defer res.Body.Close() - - decoder := json.NewDecoder(res.Body) - - if res.StatusCode != http.StatusCreated { - exception = new(Exception) - err = decoder.Decode(exception) - - // We aren't checking the error because we don't actually care. - // It's going to be passed to the client either way. - return voiceResponse, exception, err - } - - voiceResponse = new(VoiceResponse) - err = decoder.Decode(voiceResponse) - return voiceResponse, exception, err -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml b/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index c4d88e374..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -sudo: false - -language: go - -go: - - 1.3 - - 1.4 - - tip diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE b/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/README.md b/Godeps/_workspace/src/github.com/spf13/pflag/README.md deleted file mode 100644 index f7d63500f..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,191 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/ogier/pflag - -Run tests by running: - - go test github.com/ogier/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/ogier/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. 
- -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Command line flag syntax - -``` ---flag // boolean flags only ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - -``` -// boolean flags --f --abc - -// non-boolean flags --n 1234 --Ifile - -// mixed --abcs "hello" --abcn1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -```go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags.
aka --old-flag-name == --new-flag-name - -```go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/ogier/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/ogier/pflag -[3]: http://godoc.org/github.com/ogier/pflag diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool.go deleted file mode 100644 index 70e2e0a6b..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,83 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.VarP(newBoolValue(value, p), name, "", usage) -} - -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - f.VarP(newBoolValue(value, p), name, shorthand, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - CommandLine.VarP(newBoolValue(value, p), name, "", usage) -} - -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - CommandLine.VarP(newBoolValue(value, p), name, shorthand, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, "", value, usage) - return p -} - -// Like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. 
-func Bool(name string, value bool, usage string) *bool { - return CommandLine.BoolP(name, "", value, usage) -} - -// Like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - return CommandLine.BoolP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go deleted file mode 100644 index a2e1c5dcc..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "testing" -) - -// This value can be a boolean ("true", "false") or "maybe" -type triStateValue int - -const ( - triStateFalse triStateValue = 0 - triStateTrue triStateValue = 1 - triStateMaybe triStateValue = 2 -) - -const strTriStateMaybe = "maybe" - -func (v *triStateValue) IsBoolFlag() bool { - return true -} - -func (v *triStateValue) Get() interface{} { - return triStateValue(*v) -} - -func (v *triStateValue) Set(s string) error { - if s == strTriStateMaybe { - *v = triStateMaybe - return nil - } - boolVal, err := strconv.ParseBool(s) - if boolVal { - *v = triStateTrue - } else { - *v = triStateFalse - } - return err -} - -func (v *triStateValue) String() string { - if *v == triStateMaybe { - return strTriStateMaybe - } - return fmt.Sprintf("%v", bool(*v == triStateTrue)) -} - -// The type of the flag as required by the pflag.Value interface -func (v *triStateValue) Type() string { - return "version" -} - -func setUpFlagSet(tristate *triStateValue) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - *tristate = triStateFalse - f.VarP(tristate, "tristate", "t", "tristate value (true, maybe or false)") - return f -} - -func TestExplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=true"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestImplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlag(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"-t"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlagExtraArgument(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - // The "maybe" turns into an arg, since short boolean options will only do true/false - err := f.Parse([]string{"-t", "maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } - args := f.Args() - if len(args) != 1 || args[0] != "maybe" { - t.Fatal("expected an extra 'maybe' argument to stick around") - } -} - -func TestExplicitMaybe(t *testing.T) { - var
tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateMaybe { - t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead") - } -} - -func TestExplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=false"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestImplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestInvalidValue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - var buf bytes.Buffer - f.SetOutput(&buf) - err := f.Parse([]string{"--tristate=invalid"}) - if err == nil { - t.Fatal("expected an error but did not get any, tristate has value", tristate) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go b/Godeps/_workspace/src/github.com/spf13/pflag/duration.go deleted file mode 100644 index 66ed7ac93..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,71 +0,0 @@ -package pflag - -import "time" - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. 
-func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// Like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// Like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go deleted file mode 100644 index 9be7a49f2..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// These examples demonstrate more intricate uses of the flag package. -package pflag_test - -import ( - "errors" - "fmt" - "strings" - "time" - - flag "github.com/spf13/pflag" -) - -// Example 1: A single string flag called "species" with default value "gopher". -var species = flag.String("species", "gopher", "the species we are studying") - -// Example 2: A flag with a shorthand letter. -var gopherType = flag.StringP("gopher_type", "g", "pocket", "the variety of gopher") - -// Example 3: A user-defined flag type, a slice of durations. -type interval []time.Duration - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (i *interval) String() string { - return fmt.Sprint(*i) -} - -func (i *interval) Type() string { - return "interval" -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. -func (i *interval) Set(value string) error { - // If we wanted to allow the flag to be set multiple times, - // accumulating values, we would delete this if statement. - // That would permit usages such as - // -deltaT 10s -deltaT 15s - // and other combinations. - if len(*i) > 0 { - return errors.New("interval flag already set") - } - for _, dt := range strings.Split(value, ",") { - duration, err := time.ParseDuration(dt) - if err != nil { - return err - } - *i = append(*i, duration) - } - return nil -} - -// Define a flag to accumulate durations. Because it has a special type, -// we need to use the Var function and therefore create the flag during -// init. - -var intervalFlag interval - -func init() { - // Tie the command-line flag to the intervalFlag variable and - // set a usage message. 
- flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events") -} - -func Example() { - // All the interesting pieces are with the variables declared above, but - // to enable the flag package to see the flags defined there, one must - // execute, typically at the start of main (not init!): - // flag.Parse() - // We don't run it here because this is not a main function and - // the testing suite has already parsed the flags. -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go deleted file mode 100644 index 9318fee00..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "io/ioutil" - "os" -) - -// Additional routines compiled into the package only during testing. - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = &FlagSet{ - name: os.Args[0], - errorHandling: ContinueOnError, - output: ioutil.Discard, - } - Usage = usage -} - -// GetCommandLine returns the default FlagSet. -func GetCommandLine() *FlagSet { - return CommandLine -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go deleted file mode 100644 index 0070b93ee..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,695 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - pflag is a drop-in replacement for Go's flag package, implementing - POSIX/GNU-style --flags. - - pflag is compatible with the GNU extensions to the POSIX recommendations - for command-line options. See - http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - - Usage: - - pflag is a drop-in replacement of Go's native flag package. If you import - pflag under the name "flag" then all code should continue to function - with no changes. - - import flag "github.com/ogier/pflag" - - There is one exception to this: if you directly instantiate the Flag struct - there is one more field "Shorthand" that you will need to set. - Most code never instantiates this struct directly, and instead uses - functions such as String(), BoolVar(), and Var(), and is therefore - unaffected. - - Define flags using flag.String(), Bool(), Int(), etc. - - This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") - If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } - Or you can create custom flags that satisfy the Value interface (with - pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") - For such flags, the default value is just the initial value of the variable. - - After all flags are defined, call - flag.Parse() - to parse the command line into the defined flags. - - Flags may then be used directly. 
If you're using the flags themselves, - they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - - After parsing, the arguments after the flag are available as the - slice flag.Args() or individually as flag.Arg(i). - The arguments are indexed from 0 through flag.NArg()-1. - - The pflag package also defines some new functions that are not in flag, - that give one-letter shorthands for flags. You can use these by appending - 'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP("boolname", "b", true, "help message") - } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") - Shorthand letters can be used with single dashes on the command line. - Boolean shorthand flags can be combined with other shorthand flags. - - Command line flag syntax: - --flag // boolean flags only - --flag=x - - Unlike the flag package, a single dash before an option means something - different than a double dash. Single dashes signify a series of shorthand - letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - - Flag parsing stops after the terminator "--". Unlike the flag package, - flags can be interspersed with arguments anywhere on the command line - before this terminator. - - Integer flags accept 1234, 0664, 0x1234 and may be negative. - Boolean flags (in their long form) accept 1, 0, t, f, true, false, - TRUE, FALSE, True, False. - Duration flags accept any input valid for time.ParseDuration. - - The default set of command-line flags is controlled by - top-level functions. The FlagSet type allows one to define - independent sets of flags, such as to implement subcommands - in a command-line interface. The methods of FlagSet are - analogous to the top-level functions for the command-line - flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - name string - parsed bool - actual map[NormalizedName]*Flag - formal map[NormalizedName]*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - exitOnError bool // does the program exit if there's an error? - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName -} - -// A Flag represents the state of a flag. 
-type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomplete code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - for k, v := range f.formal { - delete(f.formal, k) - nname := f.normalizeFlagName(string(k)) - f.formal[nname] = v - v.Name = string(nname) - } -} - -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(f.formal) { - fn(flag) - } -} - -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(f.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists.
-func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// Mark a flag deprecated in your program -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Deprecated = usageMessage - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// Set sets the value of the named flag. -func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - err := flag.Value.Set(value) - if err != nil { - return err - } - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { - return - } - format := "--%s=%s: %s\n" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = "--%s=%q: %s\n" - } - if len(flag.Shorthand) > 0 { - format = " -%s, " + format - } else { - format = " %s " + format - } - fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) - }) -} - -func (f *FlagSet) FlagUsages() string { - x := new(bytes.Buffer) - - f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { - return - } - format := "--%s=%s: %s\n" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = "--%s=%q: %s\n" - } - if len(flag.Shorthand) > 0 { - format = " -%s, " + format - } else { - format = " %s " + format - } - fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) - }) - - return x.String() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. 
-func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// Like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) -} - -func (f *FlagSet) AddFlag(flag *Flag) { - // Call normalizeFlagName function only once - var normalizedFlagName NormalizedName = f.normalizeFlagName(flag.Name) - - _, alreadythere := f.formal[normalizedFlagName] - if alreadythere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - - if len(flag.Shorthand) == 0 { - return - } - if len(flag.Shorthand) > 1 { - fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) - panic("shorthand is more than one character") - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - old, alreadythere := f.shorthands[c] - if alreadythere { - fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) - panic("shorthand redefinition") - } - f.shorthands[c] = flag -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// Like Var, but accepts a shorthand letter that can be used after a single dash. 
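The Var comment above suggests a flag that turns a comma-separated string into a slice of strings; the following editor-added sketch spells that suggestion out (again assuming the upstream import path, with csvValue as a made-up illustrative name). VarP additionally registers the one-letter shorthand handled by AddFlag above.

package main

import (
    "fmt"
    "strings"

    flag "github.com/spf13/pflag"
)

// csvValue is the comma-separated example described in the Var comment:
// Set decomposes its argument and appends the pieces to the slice.
type csvValue []string

func (c *csvValue) String() string { return strings.Join(*c, ",") }
func (c *csvValue) Set(s string) error {
    *c = append(*c, strings.Split(s, ",")...)
    return nil
}
func (c *csvValue) Type() string { return "csv" }

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)
    var tags csvValue
    fs.VarP(&tags, "tags", "t", "comma-separated list of tags")

    _ = fs.Parse([]string{"--tags=a,b", "-t", "c"})
    fmt.Println(tags) // [a b c]
}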
-func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { - if err := flag.Value.Set(value); err != nil { - return f.failf("invalid argument %q for %s: %v", value, origArg, err) - } - // mark as visited for Visit() - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[f.normalizeFlagName(flag.Name)] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, alreadythere := f.formal[f.normalizeFlagName(name)] - if !alreadythere { - if name == "help" { // special case for nice help message. - f.usage() - return a, ErrHelp - } - err = f.failf("unknown flag: --%s", name) - return - } - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { - // '--flag' (where flag is a bool) - value = "true" - } else { - // '--flag' (where flag was not a bool) - err = f.failf("flag needs an argument: %s", s) - return - } - err = f.setFlag(flag, value, s) - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { - outArgs = args - outShorts = shorthands[1:] - c := shorthands[0] - - flag, alreadythere := f.shorthands[c] - if !alreadythere { - if c == 'h' { // special case for nice help message. - f.usage() - err = ErrHelp - return - } - //TODO continue on error - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - value = shorthands[2:] - outShorts = "" - } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { - value = "true" - } else if len(shorthands) > 1 { - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - value = args[0] - outArgs = args[1:] - } else { - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - err = f.setFlag(flag, value, shorthands) - return -} - -func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { - a = args - shorthands := s[1:] - - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) 
- return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args) - } else { - args, err = f.parseShortArg(s, args) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - err := f.parseArgs(arguments) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// The default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - interspersed: true, - } - return f -} - -// Whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go deleted file mode 100644 index d3c1714b2..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go +++ /dev/null @@ -1,658 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
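A brief editor-added sketch of how SetInterspersed and the "--" terminator behave per parseArgs and Parse above (the import path and flag names are assumptions for illustration):

package main

import (
    "fmt"

    flag "github.com/spf13/pflag"
)

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)
    verbose := fs.BoolP("verbose", "v", false, "verbose output")

    // With interspersing disabled, the first non-flag argument ends flag
    // parsing and everything from there on is kept in Args().
    fs.SetInterspersed(false)
    _ = fs.Parse([]string{"-v", "build", "--target=linux"})
    fmt.Println(*verbose, fs.Args()) // true [build --target=linux]

    // "--" always terminates flag parsing, even when interspersing is on.
    fs2 := flag.NewFlagSet("example2", flag.ContinueOnError)
    _ = fs2.Parse([]string{"--", "-not-a-flag"})
    fmt.Println(fs2.Args()) // [-not-a-flag]
}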
- -package pflag - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "sort" - "strings" - "testing" - "time" -) - -var ( - test_bool = Bool("test_bool", false, "bool value") - test_int = Int("test_int", 0, "int value") - test_int64 = Int64("test_int64", 0, "int64 value") - test_uint = Uint("test_uint", 0, "uint value") - test_uint64 = Uint64("test_uint64", 0, "uint64 value") - test_string = String("test_string", "0", "string value") - test_float64 = Float64("test_float64", 0, "float64 value") - test_duration = Duration("test_duration", 0, "time.Duration value") - normalizeFlagNameInvocations = 0 -) - -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - if len(f.Name) > 5 && f.Name[0:5] == "test_" { - m[f.Name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case f.Name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case f.Name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", f.Name) - } - } - } - VisitAll(visitor) - if len(m) != 8 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - desired = "1" - Visit(visitor) - if len(m) != 8 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestUsage(t *testing.T) { - called := false - ResetForTesting(func() { called = true }) - if GetCommandLine().Parse([]string{"--x"}) == nil { - t.Error("parse did not fail for unknown flag") - } - if !called { - t.Error("did not call Usage for unknown flag") - } -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool("bool", false, "bool value") - bool2Flag := f.Bool("bool2", false, "bool2 value") - bool3Flag := f.Bool("bool3", false, "bool3 value") - intFlag := f.Int("int", 0, "int value") - int8Flag := f.Int8("int8", 0, "int value") - int32Flag := f.Int32("int32", 0, "int value") - int64Flag := f.Int64("int64", 0, "int64 value") - uintFlag := f.Uint("uint", 0, "uint value") - uint8Flag := f.Uint8("uint8", 0, "uint value") - uint16Flag := f.Uint16("uint16", 0, "uint value") - uint32Flag := f.Uint32("uint32", 0, "uint value") - uint64Flag := f.Uint64("uint64", 0, "uint64 value") - stringFlag := f.String("string", "0", "string value") - float32Flag := f.Float32("float32", 0, "float32 value") - float64Flag := f.Float64("float64", 0, "float64 value") - ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value") - maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value") - durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") - extra := "one-extra-argument" - args := []string{ - "--bool", - "--bool2=true", - "--bool3=false", - "--int=22", - "--int8=-8", - "--int32=-32", - "--int64=0x23", - "--uint=24", - "--uint8=8", - "--uint16=16", - "--uint32=32", - "--uint64=25", - "--string=hello", - "--float32=-172e12", - "--float64=2718e28", - "--ip=10.11.12.13", - "--mask=255.255.255.0", - "--duration=2m", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if *bool3Flag != false { - t.Error("bool3 flag should be false, is ", *bool2Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if *int8Flag != -8 { - t.Error("int8 flag should be 0x23, is ", *int8Flag) - } - if *int32Flag != -32 { - t.Error("int32 flag should be 0x23, is ", *int32Flag) - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if *uint8Flag != 8 { - t.Error("uint8 flag should be 8, is ", *uint8Flag) - } - if *uint16Flag != 16 { - t.Error("uint16 flag should be 16, is ", *uint16Flag) - } - if *uint32Flag != 32 { - t.Error("uint32 flag should be 32, is ", *uint32Flag) - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if *float32Flag != -172e12 { - t.Error("float64 flag should be -172e12, is ", *float64Flag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { - t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) - } - if 
!(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { - t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func TestShorthand(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolaFlag := f.BoolP("boola", "a", false, "bool value") - boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") - boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") - booldFlag := f.BoolP("boold", "d", false, "bool4 value") - stringaFlag := f.StringP("stringa", "s", "0", "string value") - stringzFlag := f.StringP("stringz", "z", "0", "string value") - extra := "interspersed-argument" - notaflag := "--i-look-like-a-flag" - args := []string{ - "-ab", - extra, - "-cs", - "hello", - "-z=something", - "-d=true", - "--", - notaflag, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolaFlag != true { - t.Error("boola flag should be true, is ", *boolaFlag) - } - if *boolbFlag != true { - t.Error("boolb flag should be true, is ", *boolbFlag) - } - if *boolcFlag != true { - t.Error("boolc flag should be true, is ", *boolcFlag) - } - if *booldFlag != true { - t.Error("boold flag should be true, is ", *booldFlag) - } - if *stringaFlag != "hello" { - t.Error("stringa flag should be `hello`, is ", *stringaFlag) - } - if *stringzFlag != "something" { - t.Error("stringz flag should be `something`, is ", *stringzFlag) - } - if len(f.Args()) != 2 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } else if f.Args()[1] != notaflag { - t.Errorf("expected argument %q got %q", notaflag, f.Args()[1]) - } -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(GetCommandLine(), t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -func replaceSeparators(name string, from []string, to string) string { - result := name - for _, sep := range from { - result = strings.Replace(result, sep, to, -1) - } - // Type convert to indicate normalization has been done. - return result -} - -func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - name = replaceSeparators(name, seps, ".") - normalizeFlagNameInvocations++ - - return NormalizedName(name) -} - -func testWordSepNormalizedNames(args []string, t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - withDashFlag := f.Bool("with-dash-flag", false, "bool value") - // Set this after some flags have been added and before others. 
- f.SetNormalizeFunc(wordSepNormalizeFunc) - withUnderFlag := f.Bool("with_under_flag", false, "bool value") - withBothFlag := f.Bool("with-both_flag", false, "bool value") - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *withDashFlag != true { - t.Error("withDashFlag flag should be true, is ", *withDashFlag) - } - if *withUnderFlag != true { - t.Error("withUnderFlag flag should be true, is ", *withUnderFlag) - } - if *withBothFlag != true { - t.Error("withBothFlag flag should be true, is ", *withBothFlag) - } -} - -func TestWordSepNormalizedNames(t *testing.T) { - args := []string{ - "--with-dash-flag", - "--with-under-flag", - "--with-both-flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with_dash_flag", - "--with_under_flag", - "--with_both_flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with-dash_flag", - "--with-under_flag", - "--with-both_flag", - } - testWordSepNormalizedNames(args, t) -} - -func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - - oldName := replaceSeparators("old-valid_flag", seps, ".") - newName := replaceSeparators("valid-flag", seps, ".") - - name = replaceSeparators(name, seps, ".") - switch name { - case oldName: - name = newName - break - } - - return NormalizedName(name) -} - -func TestCustomNormalizedNames(t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - - validFlag := f.Bool("valid-flag", false, "bool value") - f.SetNormalizeFunc(aliasAndWordSepFlagNames) - someOtherFlag := f.Bool("some-other-flag", false, "bool value") - - args := []string{"--old_valid_flag", "--some-other_flag"} - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - - if *validFlag != true { - t.Errorf("validFlag is %v even though we set the alias --old_valid_falg", *validFlag) - } - if *someOtherFlag != true { - t.Error("someOtherFlag should be true, is ", *someOtherFlag) - } -} - -// Every flag we add, the name (displayed also in usage) should normalized -func TestNormalizationFuncShouldChangeFlagName(t *testing.T) { - // Test normalization after addition - f := NewFlagSet("normalized", ContinueOnError) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid_flag" { - t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name) - } - - f.SetNormalizeFunc(wordSepNormalizeFunc) - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } - - // Test normalization before addition - f = NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } -} - -// Declare a user-defined flag type. 
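The normalization tests above exercise wordSepNormalizeFunc; as a complement, this editor-added sketch shows the same idea in an ordinary program, folding underscores into dashes so either spelling reaches the same flag (import path and flag names are illustrative assumptions):

package main

import (
    "fmt"
    "strings"

    flag "github.com/spf13/pflag"
)

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)
    fs.SetNormalizeFunc(func(f *flag.FlagSet, name string) flag.NormalizedName {
        // Treat "_" and "-" as equivalent in flag names.
        return flag.NormalizedName(strings.Replace(name, "_", "-", -1))
    })
    level := fs.String("log-level", "info", "log verbosity")

    _ = fs.Parse([]string{"--log_level=debug"})
    fmt.Println(*level) // debug
}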
-type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *flagVar) Type() string { - return "flagVar" -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.VarP(&v, "v", "v", "usage") - if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"--unknown"}) - if out := buf.String(); !strings.Contains(out, "--unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "--before", "subcmd"} - before := Bool("before", false, "") - if err := GetCommandLine().Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = []string{"subcmd", "--after", "args"} - after := Bool("after", false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, "flag", false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"--flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by --flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"--help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. 
- var help bool - fs.BoolVar(&help, "help", false, "help flag") - helpCalled = false - err = fs.Parse([]string{"--help"}) - if err != nil { - t.Fatal("expected no error for defined --help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -func TestNoInterspersed(t *testing.T) { - f := NewFlagSet("test", ContinueOnError) - f.SetInterspersed(false) - f.Bool("true", true, "always true") - f.Bool("false", false, "always false") - err := f.Parse([]string{"--true", "break", "--false"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - args := f.Args() - if len(args) != 2 || args[0] != "break" || args[1] != "--false" { - t.Fatal("expected interspersed options/non-options to fail") - } -} - -func TestTermination(t *testing.T) { - f := NewFlagSet("termination", ContinueOnError) - boolFlag := f.BoolP("bool", "l", false, "bool value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - arg1 := "ls" - arg2 := "-l" - args := []string{ - "--", - arg1, - arg2, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Fatal("expected no error; got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag { - t.Error("expected boolFlag=false, got true") - } - if len(f.Args()) != 2 { - t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) - } - if f.Args()[0] != arg1 { - t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) - } - if f.Args()[1] != arg2 { - t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) - } -} - -func TestDeprecatedFlagInDocs(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - f.MarkDeprecated("badflag", "use --good-flag instead") - - out := new(bytes.Buffer) - f.SetOutput(out) - f.PrintDefaults() - - if strings.Contains(out.String(), "badflag") { - t.Errorf("found deprecated flag in usage!") - } -} - -func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - - err := f.Parse(args) - - outC := make(chan string) - // copy the output in a separate goroutine so printing can't block indefinitely - go func() { - var buf bytes.Buffer - io.Copy(&buf, r) - outC <- buf.String() - }() - - w.Close() - os.Stderr = oldStderr - out := <-outC - - return out, err -} - -func TestDeprecatedFlagUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - usageMsg := "use --good-flag instead" - f.MarkDeprecated("badflag", usageMsg) - - args := []string{"--badflag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -func TestDeprecatedFlagUsageNormalized(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("bad-double_flag", true, "always true") - f.SetNormalizeFunc(wordSepNormalizeFunc) - usageMsg := "use --good-flag instead" - f.MarkDeprecated("bad_double-flag", usageMsg) - - args := []string{"--bad_double_flag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -// Name normalization function should be called only once on flag addition -func TestMultipleNormalizeFlagNameInvocations(t 
*testing.T) { - normalizeFlagNameInvocations = 0 - - f := NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - f.Bool("with_under_flag", false, "bool value") - - if normalizeFlagNameInvocations != 1 { - t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go b/Godeps/_workspace/src/github.com/spf13/pflag/float32.go deleted file mode 100644 index b7ad67d9a..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Like Float32, but accepts a shorthand letter that can be used after a single dash. 
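Each scalar type in the package repeats the four-function pattern seen in float32.go: the Var and VarP forms bind an existing variable, while the bare and P forms allocate one and return its address. A small editor-added sketch (assumed import path, illustrative flag names):

package main

import (
    "fmt"

    flag "github.com/spf13/pflag"
)

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)

    // Bind an existing variable with the VarP form...
    var ratio float32
    fs.Float32VarP(&ratio, "ratio", "r", 1.0, "scaling ratio")

    // ...or let the package allocate one and hand back a pointer.
    timeout := fs.Float64("timeout", 2.5, "timeout in seconds")

    _ = fs.Parse([]string{"-r", "0.5", "--timeout=10"})
    fmt.Println(ratio, *timeout) // 0.5 10
}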
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go b/Godeps/_workspace/src/github.com/spf13/pflag/float64.go deleted file mode 100644 index 031551233..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Like Float64, but accepts a shorthand letter that can be used after a single dash. 
-func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int.go b/Godeps/_workspace/src/github.com/spf13/pflag/int.go deleted file mode 100644 index dca9da6e6..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// Like Int, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// Like Int, but accepts a shorthand letter that can be used after a single dash. 
-func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go b/Godeps/_workspace/src/github.com/spf13/pflag/int32.go deleted file mode 100644 index 18eaacd60..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Like Int32, but accepts a shorthand letter that can be used after a single dash. 
-func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go b/Godeps/_workspace/src/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0114aaaa8..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Like Int64, but accepts a shorthand letter that can be used after a single dash. 
-func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go b/Godeps/_workspace/src/github.com/spf13/pflag/int8.go deleted file mode 100644 index aab1022f9..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go b/Godeps/_workspace/src/github.com/spf13/pflag/ip.go deleted file mode 100644 index efa75fbc3..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,76 +0,0 @@ -package pflag - -import ( - "fmt" - "net" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(s) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. 
-func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go b/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 09b9533e7..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "fmt" - "net" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// Parse IPv4 netmask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - return nil - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// Like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. 
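ip.go and ipmask.go above apply the same pattern to net.IP and net.IPMask values, with ParseIPv4Mask accepting dotted-quad notation. A short editor-added sketch (assumed import path, illustrative flag names):

package main

import (
    "fmt"
    "net"

    flag "github.com/spf13/pflag"
)

func main() {
    fs := flag.NewFlagSet("example", flag.ContinueOnError)

    addr := fs.IP("addr", net.ParseIP("127.0.0.1"), "listen address")
    // ParseIPv4Mask turns "255.255.255.0" into a net.IPMask; net.ParseIP alone cannot.
    mask := fs.IPMask("mask", flag.ParseIPv4Mask("255.255.255.0"), "network mask")

    _ = fs.Parse([]string{"--addr=10.0.0.7", "--mask=255.255.0.0"})
    fmt.Println(*addr, mask.String()) // 10.0.0.7 ffff0000
}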
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/string.go b/Godeps/_workspace/src/github.com/spf13/pflag/string.go deleted file mode 100644 index 362fbf8a8..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,69 +0,0 @@ -package pflag - -import "fmt" - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// Like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// Like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint.go deleted file mode 100644 index c063fe7cb..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// Like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// Like Uint, but accepts a shorthand letter that can be used after a single dash. 
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go deleted file mode 100644 index ab1c1f9ee..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,72 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} -func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go deleted file mode 100644 index db635ae88..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,72 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint16 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} -func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) } -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go deleted file mode 100644 index 99c7e805d..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go b/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go deleted file mode 100644 index 6fef508de..000000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. -func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go deleted file mode 100644 index ccf390c9c..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type ErrBatchCorrupted struct { - Reason string -} - -func (e *ErrBatchCorrupted) Error() string { - return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) -} - -func newErrBatchCorrupted(reason string) error { - return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason}) -} - -const ( - batchHdrLen = 8 + 4 - batchGrowRec = 3000 -) - -type BatchReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -// Batch is a write batch. -type Batch struct { - data []byte - rLen, bLen int - seq uint64 - sync bool -} - -func (b *Batch) grow(n int) { - off := len(b.data) - if off == 0 { - off = batchHdrLen - if b.data != nil { - b.data = b.data[:off] - } - } - if cap(b.data)-off < n { - if b.data == nil { - b.data = make([]byte, off, off+n) - } else { - odata := b.data - div := 1 - if b.rLen > batchGrowRec { - div = b.rLen / batchGrowRec - } - b.data = make([]byte, off, off+n+(off-batchHdrLen)/div) - copy(b.data, odata) - } - } -} - -func (b *Batch) appendRec(kt kType, key, value []byte) { - n := 1 + binary.MaxVarintLen32 + len(key) - if kt == ktVal { - n += binary.MaxVarintLen32 + len(value) - } - b.grow(n) - off := len(b.data) - data := b.data[:off+n] - data[off] = byte(kt) - off += 1 - off += binary.PutUvarint(data[off:], uint64(len(key))) - copy(data[off:], key) - off += len(key) - if kt == ktVal { - off += binary.PutUvarint(data[off:], uint64(len(value))) - copy(data[off:], value) - off += len(value) - } - b.data = data[:off] - b.rLen++ - // Include 8-byte ikey header - b.bLen += len(key) + len(value) + 8 -} - -// Put appends 'put operation' of the given key/value pair to the batch. -// It is safe to modify the contents of the argument after Put returns. -func (b *Batch) Put(key, value []byte) { - b.appendRec(ktVal, key, value) -} - -// Delete appends 'delete operation' of the given key to the batch. -// It is safe to modify the contents of the argument after Delete returns. -func (b *Batch) Delete(key []byte) { - b.appendRec(ktDel, key, nil) -} - -// Dump dumps batch contents. The returned slice can be loaded into the -// batch using Load method. -// The returned slice is not its own copy, so the contents should not be -// modified. -func (b *Batch) Dump() []byte { - return b.encode() -} - -// Load loads given slice into the batch. Previous contents of the batch -// will be discarded. -// The given slice will not be copied and will be used as batch buffer, so -// it is not safe to modify the contents of the slice. -func (b *Batch) Load(data []byte) error { - return b.decode(0, data) -} - -// Replay replays batch contents. -func (b *Batch) Replay(r BatchReplay) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - switch kt { - case ktVal: - r.Put(key, value) - case ktDel: - r.Delete(key) - } - }) -} - -// Len returns number of records in the batch. -func (b *Batch) Len() int { - return b.rLen -} - -// Reset resets the batch. -func (b *Batch) Reset() { - b.data = b.data[:0] - b.seq = 0 - b.rLen = 0 - b.bLen = 0 - b.sync = false -} - -func (b *Batch) init(sync bool) { - b.sync = sync -} - -func (b *Batch) append(p *Batch) { - if p.rLen > 0 { - b.grow(len(p.data) - batchHdrLen) - b.data = append(b.data, p.data[batchHdrLen:]...) - b.rLen += p.rLen - } - if p.sync { - b.sync = true - } -} - -// size returns sums of key/value pair length plus 8-bytes ikey. 
-func (b *Batch) size() int { - return b.bLen -} - -func (b *Batch) encode() []byte { - b.grow(0) - binary.LittleEndian.PutUint64(b.data, b.seq) - binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen)) - - return b.data -} - -func (b *Batch) decode(prevSeq uint64, data []byte) error { - if len(data) < batchHdrLen { - return newErrBatchCorrupted("too short") - } - - b.seq = binary.LittleEndian.Uint64(data) - if b.seq < prevSeq { - return newErrBatchCorrupted("invalid sequence number") - } - b.rLen = int(binary.LittleEndian.Uint32(data[8:])) - if b.rLen < 0 { - return newErrBatchCorrupted("invalid records length") - } - // No need to be precise at this point, it won't be used anyway - b.bLen = len(data) - batchHdrLen - b.data = data - - return nil -} - -func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) { - off := batchHdrLen - for i := 0; i < b.rLen; i++ { - if off >= len(b.data) { - return newErrBatchCorrupted("invalid records length") - } - - kt := kType(b.data[off]) - if kt > ktVal { - return newErrBatchCorrupted("bad record: invalid type") - } - off += 1 - - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid key length") - } - key := b.data[off : off+int(x)] - off += int(x) - var value []byte - if kt == ktVal { - x, n := binary.Uvarint(b.data[off:]) - off += n - if n <= 0 || off+int(x) > len(b.data) { - return newErrBatchCorrupted("bad record: invalid value length") - } - value = b.data[off : off+int(x)] - off += int(x) - } - - f(i, kt, key, value) - } - - return nil -} - -func (b *Batch) memReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Put(ikey, value) - }) -} - -func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error { - if err := b.decode(prevSeq, data); err != nil { - return err - } - return b.memReplay(to) -} - -func (b *Batch) revertMemReplay(to *memdb.DB) error { - return b.decodeRec(func(i int, kt kType, key, value []byte) { - ikey := newIkey(key, b.seq+uint64(i), kt) - to.Delete(ikey) - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go deleted file mode 100644 index 7fc842f4f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type tbRec struct { - kt kType - key, value []byte -} - -type testBatch struct { - rec []*tbRec -} - -func (p *testBatch) Put(key, value []byte) { - p.rec = append(p.rec, &tbRec{ktVal, key, value}) -} - -func (p *testBatch) Delete(key []byte) { - p.rec = append(p.rec, &tbRec{ktDel, key, nil}) -} - -func compareBatch(t *testing.T, b1, b2 *Batch) { - if b1.seq != b2.seq { - t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) - } - if b1.Len() != b2.Len() { - t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) - } - p1, p2 := new(testBatch), new(testBatch) - err := b1.Replay(p1) - if err != nil { - t.Fatal("error when replaying batch 1: ", err) - } - err = b2.Replay(p2) - if err != nil { - t.Fatal("error when replaying batch 2: ", err) - } - for i := range p1.rec { - r1, r2 := p1.rec[i], p2.rec[i] - if r1.kt != r2.kt { - t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) - } - if !bytes.Equal(r1.key, r2.key) { - t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) - } - if r1.kt == ktVal { - if !bytes.Equal(r1.value, r2.value) { - t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) - } - } - } -} - -func TestBatch_EncodeDecode(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("k"), []byte("")) - b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) - b1.Delete([]byte("key10000")) - b1.Delete([]byte("k")) - buf := b1.encode() - b2 := new(Batch) - err := b2.decode(0, buf) - if err != nil { - t.Error("error when decoding batch: ", err) - } - compareBatch(t, b1, b2) -} - -func TestBatch_Append(t *testing.T) { - b1 := new(Batch) - b1.seq = 10009 - b1.Put([]byte("key1"), []byte("value1")) - b1.Put([]byte("key2"), []byte("value2")) - b1.Delete([]byte("key1")) - b1.Put([]byte("foo"), []byte("foovalue")) - b1.Put([]byte("bar"), []byte("barvalue")) - b2a := new(Batch) - b2a.seq = 10009 - b2a.Put([]byte("key1"), []byte("value1")) - b2a.Put([]byte("key2"), []byte("value2")) - b2a.Delete([]byte("key1")) - b2b := new(Batch) - b2b.Put([]byte("foo"), []byte("foovalue")) - b2b.Put([]byte("bar"), []byte("barvalue")) - b2a.append(b2b) - compareBatch(t, b1, b2a) -} - -func TestBatch_Size(t *testing.T) { - b := new(Batch) - for i := 0; i < 2; i++ { - b.Put([]byte("key1"), []byte("value1")) - b.Put([]byte("key2"), []byte("value2")) - b.Delete([]byte("key1")) - b.Put([]byte("foo"), []byte("foovalue")) - b.Put([]byte("bar"), []byte("barvalue")) - mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) - b.memReplay(mem) - if b.size() != mem.Size() { - t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) - } - b.Reset() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go deleted file mode 100644 index 91b426709..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func randomString(r *rand.Rand, n int) []byte { - b := new(bytes.Buffer) - for i := 0; i < n; i++ { - b.WriteByte(' ' + byte(r.Intn(95))) - } - return b.Bytes() -} - -func compressibleStr(r *rand.Rand, frac float32, n int) []byte { - nn := int(float32(n) * frac) - rb := randomString(r, nn) - b := make([]byte, 0, n+nn) - for len(b) < n { - b = append(b, rb...) - } - return b[:n] -} - -type valueGen struct { - src []byte - pos int -} - -func newValueGen(frac float32) *valueGen { - v := new(valueGen) - r := rand.New(rand.NewSource(301)) - v.src = make([]byte, 0, 1048576+100) - for len(v.src) < 1048576 { - v.src = append(v.src, compressibleStr(r, frac, 100)...) - } - return v -} - -func (v *valueGen) get(n int) []byte { - if v.pos+n > len(v.src) { - v.pos = 0 - } - v.pos += n - return v.src[v.pos-n : v.pos] -} - -var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) - -type dbBench struct { - b *testing.B - stor storage.Storage - db *DB - - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions - - keys, values [][]byte -} - -func openDBBench(b *testing.B, noCompress bool) *dbBench { - _, err := os.Stat(benchDB) - if err == nil { - err = os.RemoveAll(benchDB) - if err != nil { - b.Fatal("cannot remove old db: ", err) - } - } - - p := &dbBench{ - b: b, - o: &opt.Options{}, - ro: &opt.ReadOptions{}, - wo: &opt.WriteOptions{}, - } - p.stor, err = storage.OpenFile(benchDB) - if err != nil { - b.Fatal("cannot open stor: ", err) - } - if noCompress { - p.o.Compression = opt.NoCompression - } - - p.db, err = Open(p.stor, p.o) - if err != nil { - b.Fatal("cannot open db: ", err) - } - - runtime.GOMAXPROCS(runtime.NumCPU()) - return p -} - -func (p *dbBench) reopen() { - p.db.Close() - var err error - p.db, err = Open(p.stor, p.o) - if err != nil { - p.b.Fatal("Reopen: got error: ", err) - } -} - -func (p *dbBench) populate(n int) { - p.keys, p.values = make([][]byte, n), make([][]byte, n) - v := newValueGen(0.5) - for i := range p.keys { - p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) - } -} - -func (p *dbBench) randomize() { - m := len(p.keys) - times := m * 2 - r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) - for n := 0; n < times; n++ { - i, j := r1.Int()%m, r2.Int()%m - if i == j { - continue - } - p.keys[i], p.keys[j] = p.keys[j], p.keys[i] - p.values[i], p.values[j] = p.values[j], p.values[i] - } -} - -func (p *dbBench) writes(perBatch int) { - b := p.b - db := p.db - - n := len(p.keys) - m := n / perBatch - if n%perBatch > 0 { - m++ - } - batches := make([]Batch, m) - j := 0 - for i := range batches { - first := true - for ; j < n && ((j+1)%perBatch != 0 || first); j++ { - first = false - batches[i].Put(p.keys[j], p.values[j]) - } - } - runtime.GC() - - b.ResetTimer() - b.StartTimer() - for i := range batches { - err := db.Write(&(batches[i]), p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - } - b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) gc() { - p.keys, p.values = nil, nil - runtime.GC() -} - -func (p *dbBench) puts() { - b := p.b - db := p.db - - b.ResetTimer() - b.StartTimer() - for i := range p.keys { - err := db.Put(p.keys[i], p.values[i], p.wo) - if err != nil { - b.Fatal("put failed: ", err) - } - } - 
b.StopTimer() - b.SetBytes(116) -} - -func (p *dbBench) fill() { - b := p.b - db := p.db - - perBatch := 10000 - batch := new(Batch) - for i, n := 0, len(p.keys); i < n; { - first := true - for ; i < n && ((i+1)%perBatch != 0 || first); i++ { - first = false - batch.Put(p.keys[i], p.values[i]) - } - err := db.Write(batch, p.wo) - if err != nil { - b.Fatal("write failed: ", err) - } - batch.Reset() - } -} - -func (p *dbBench) gets() { - b := p.b - db := p.db - - b.ResetTimer() - for i := range p.keys { - _, err := db.Get(p.keys[i], p.ro) - if err != nil { - b.Error("got error: ", err) - } - } - b.StopTimer() -} - -func (p *dbBench) seeks() { - b := p.b - - iter := p.newIter() - defer iter.Release() - b.ResetTimer() - for i := range p.keys { - if !iter.Seek(p.keys[i]) { - b.Error("value not found for: ", string(p.keys[i])) - } - } - b.StopTimer() -} - -func (p *dbBench) newIter() iterator.Iterator { - iter := p.db.NewIterator(nil, p.ro) - err := iter.Error() - if err != nil { - p.b.Fatal("cannot create iterator: ", err) - } - return iter -} - -func (p *dbBench) close() { - if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { - p.b.Log("Block pool stats: ", bp) - } - p.db.Close() - p.stor.Close() - os.RemoveAll(benchDB) - p.db = nil - p.keys = nil - p.values = nil - runtime.GC() - runtime.GOMAXPROCS(1) -} - -func BenchmarkDBWrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatch(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBWriteBatchUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.writes(1000) - p.close() -} - -func BenchmarkDBWriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBWriteRandomSync(b *testing.B) { - p := openDBBench(b, false) - p.wo.Sync = true - p.populate(b.N) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwrite(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.writes(1) - p.close() -} - -func BenchmarkDBOverwriteRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.writes(1) - p.randomize() - p.writes(1) - p.close() -} - -func BenchmarkDBPut(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.puts() - p.close() -} - -func BenchmarkDBRead(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadGC(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadUncompressed(b *testing.B) { - p := openDBBench(b, true) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - for iter.Next() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverse(b *testing.B) { - 
p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBReadReverseTable(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.reopen() - p.gc() - - iter := p.newIter() - b.ResetTimer() - iter.Last() - for iter.Prev() { - } - iter.Release() - b.StopTimer() - b.SetBytes(116) - p.close() -} - -func BenchmarkDBSeek(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.seeks() - p.close() -} - -func BenchmarkDBSeekRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.seeks() - p.close() -} - -func BenchmarkDBGet(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gets() - p.close() -} - -func BenchmarkDBGetRandom(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.randomize() - p.gets() - p.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go deleted file mode 100644 index c9670de5d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ /dev/null @@ -1,676 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package cache provides interface and implementation of a cache algorithms. -package cache - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Cacher provides interface to implements a caching functionality. -// An implementation must be goroutine-safe. -type Cacher interface { - // Capacity returns cache capacity. - Capacity() int - - // SetCapacity sets cache capacity. - SetCapacity(capacity int) - - // Promote promotes the 'cache node'. - Promote(n *Node) - - // Ban evicts the 'cache node' and prevent subsequent 'promote'. - Ban(n *Node) - - // Evict evicts the 'cache node'. - Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. - EvictAll() - - // Close closes the 'cache tree' - Close() error -} - -// Value is a 'cacheable object'. It may implements util.Releaser, if -// so the the Release method will be called once object is released. -type Value interface{} - -type CacheGetter struct { - Cache *Cache - NS uint64 -} - -func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle { - return g.Cache.Get(g.NS, key, setFunc) -} - -// The hash tables implementation is based on: -// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014. - -const ( - mInitialSize = 1 << 4 - mOverflowThreshold = 1 << 5 - mOverflowGrowThreshold = 1 << 7 -) - -type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool -} - -func (b *mBucket) freeze() []*Node { - b.mu.Lock() - defer b.mu.Unlock() - if !b.frozen { - b.frozen = true - } - return b.node -} - -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. 
- for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { - atomic.AddInt32(&n.ref, 1) - b.mu.Unlock() - return true, false, n - } - } - - // Get only. - if noset { - b.mu.Unlock() - return true, false, nil - } - - // Create node. - n = &Node{ - r: r, - hash: hash, - ns: ns, - key: key, - ref: 1, - } - // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) - b.mu.Unlock() - - // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold - if bLen > mOverflowThreshold { - grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold - } - - // Grow. - if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - - return true, true, n -} - -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { - b.mu.Lock() - - if b.frozen { - b.mu.Unlock() - return - } - - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true - - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) - } - break - } - } - b.mu.Unlock() - - if deleted { - // Call OnDel. - for _, f := range n.onDel { - f() - } - - // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold - if bLen >= mOverflowThreshold { - atomic.AddInt32(&h.overflow, -1) - } - - // Shrink. - if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { - nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), - mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), - } - ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) - if !ok { - panic("BUG: failed swapping head") - } - go nh.initBuckets() - } - } - - return true, deleted -} - -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 - - overflow int32 - growThreshold int32 - shrinkThreshold int32 -} - -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { - return b - } - - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. 
- pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) - } - return b - } - } - - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) -} - -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) - } - atomic.StorePointer(&n.pred, nil) -} - -// Cache is a 'cache map'. -type Cache struct { - mu sync.RWMutex - mHead unsafe.Pointer // *mNode - nodes int32 - size int32 - cacher Cacher - closed bool -} - -// NewCache creates a new 'cache map'. The cacher is optional and -// may be nil. -func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), - mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), - shrinkThreshold: 0, - } - for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) - } - r := &Cache{ - mHead: unsafe.Pointer(h), - cacher: cacher, - } - return r -} - -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) - i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b -} - -func (r *Cache) delete(n *Node) bool { - for { - h, b := r.getBucket(n.hash) - done, deleted := b.delete(r, h, n.hash, n.ns, n.key) - if done { - return deleted - } - } - return false -} - -// Nodes returns number of 'cache node' in the map. -func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) -} - -// Size returns sums of 'cache node' size in the map. -func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) -} - -// Capacity returns cache capacity. -func (r *Cache) Capacity() int { - if r.cacher == nil { - return 0 - } - return r.cacher.Capacity() -} - -// SetCapacity sets cache capacity. -func (r *Cache) SetCapacity(capacity int) { - if r.cacher != nil { - r.cacher.SetCapacity(capacity) - } -} - -// Get gets 'cache node' with the given namespace and key. -// If cache node is not found and setFunc is not nil, Get will atomically creates -// the 'cache node' by calling setFunc. Otherwise Get will returns nil. -// -// The returned 'cache handle' should be released after use by calling Release -// method. -func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return nil - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) - if done { - if n != nil { - n.mu.Lock() - if n.value == nil { - if setFunc == nil { - n.mu.Unlock() - n.unref() - return nil - } - - n.size, n.value = setFunc() - if n.value == nil { - n.size = 0 - n.mu.Unlock() - n.unref() - return nil - } - atomic.AddInt32(&r.size, int32(n.size)) - } - n.mu.Unlock() - if r.cacher != nil { - r.cacher.Promote(n) - } - return &Handle{unsafe.Pointer(n)} - } - - break - } - } - return nil -} - -// Delete removes and ban 'cache node' with the given namespace and key. 
-// A banned 'cache node' will never inserted into the 'cache tree'. Ban -// only attributed to the particular 'cache node', so when a 'cache node' -// is recreated it will not be banned. -// -// If onDel is not nil, then it will be executed if such 'cache node' -// doesn't exist or once the 'cache node' is released. -// -// Delete return true is such 'cache node' exist. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if onDel != nil { - n.mu.Lock() - n.onDel = append(n.onDel, onDel) - n.mu.Unlock() - } - if r.cacher != nil { - r.cacher.Ban(n) - } - n.unref() - return true - } - - break - } - } - - if onDel != nil { - onDel() - } - - return false -} - -// Evict evicts 'cache node' with the given namespace and key. This will -// simply call Cacher.Evict. -// -// Evict return true is such 'cache node' exist. -func (r *Cache) Evict(ns, key uint64) bool { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return false - } - - hash := murmur32(ns, key, 0xf00) - for { - h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, true) - if done { - if n != nil { - if r.cacher != nil { - r.cacher.Evict(n) - } - n.unref() - return true - } - - break - } - } - - return false -} - -// EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. -func (r *Cache) EvictNS(ns uint64) { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictNS(ns) - } -} - -// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. -func (r *Cache) EvictAll() { - r.mu.RLock() - defer r.mu.RUnlock() - if r.closed { - return - } - - if r.cacher != nil { - r.cacher.EvictAll() - } -} - -// Close closes the 'cache map' and releases all 'cache node'. -func (r *Cache) Close() error { - r.mu.Lock() - if !r.closed { - r.closed = true - - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - - h := (*mNode)(r.mHead) - h.initBuckets() - - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil - } - - // Call OnDel. - for _, f := range n.onDel { - f() - } - } - } - } - r.mu.Unlock() - return nil -} - -// Node is a 'cache node'. -type Node struct { - r *Cache - - hash uint32 - ns, key uint64 - - mu sync.Mutex - size int - value Value - - ref int32 - onDel []func() - - CacheData unsafe.Pointer -} - -// NS returns this 'cache node' namespace. -func (n *Node) NS() uint64 { - return n.ns -} - -// Key returns this 'cache node' key. -func (n *Node) Key() uint64 { - return n.key -} - -// Size returns this 'cache node' size. -func (n *Node) Size() int { - return n.size -} - -// Value returns this 'cache node' value. -func (n *Node) Value() Value { - return n.value -} - -// Ref returns this 'cache node' ref counter. -func (n *Node) Ref() int32 { - return atomic.LoadInt32(&n.ref) -} - -// GetHandle returns an handle for this 'cache node'. 
-func (n *Node) GetHandle() *Handle { - if atomic.AddInt32(&n.ref, 1) <= 1 { - panic("BUG: Node.GetHandle on zero ref") - } - return &Handle{unsafe.Pointer(n)} -} - -func (n *Node) unref() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.delete(n) - } -} - -func (n *Node) unrefLocked() { - if atomic.AddInt32(&n.ref, -1) == 0 { - n.r.mu.RLock() - if !n.r.closed { - n.r.delete(n) - } - n.r.mu.RUnlock() - } -} - -type Handle struct { - n unsafe.Pointer // *Node -} - -func (h *Handle) Value() Value { - n := (*Node)(atomic.LoadPointer(&h.n)) - if n != nil { - return n.value - } - return nil -} - -func (h *Handle) Release() { - nPtr := atomic.LoadPointer(&h.n) - if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { - n := (*Node)(nPtr) - n.unrefLocked() - } -} - -func murmur32(ns, key uint64, seed uint32) uint32 { - const ( - m = uint32(0x5bd1e995) - r = 24 - ) - - k1 := uint32(ns >> 32) - k2 := uint32(ns) - k3 := uint32(key >> 32) - k4 := uint32(key) - - k1 *= m - k1 ^= k1 >> r - k1 *= m - - k2 *= m - k2 ^= k2 >> r - k2 *= m - - k3 *= m - k3 ^= k3 >> r - k3 *= m - - k4 *= m - k4 ^= k4 >> r - k4 *= m - - h := seed - - h *= m - h ^= k1 - h *= m - h ^= k2 - h *= m - h ^= k3 - h *= m - h ^= k4 - - h ^= h >> 13 - h *= m - h ^= h >> 15 - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go deleted file mode 100644 index 5575583dc..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package cache - -import ( - "math/rand" - "runtime" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" -) - -type int32o int32 - -func (o *int32o) acquire() { - if atomic.AddInt32((*int32)(o), 1) != 1 { - panic("BUG: invalid ref") - } -} - -func (o *int32o) Release() { - if atomic.AddInt32((*int32)(o), -1) != 0 { - panic("BUG: invalid ref") - } -} - -type releaserFunc struct { - fn func() - value Value -} - -func (r releaserFunc) Release() { - if r.fn != nil { - r.fn() - } -} - -func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { - return c.Get(ns, key, func() (int, Value) { - if relf != nil { - return charge, releaserFunc{relf, value} - } else { - return charge, value - } - }) -} - -func TestCacheMap(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - nsx := []struct { - nobjects, nhandles, concurrent, repeat int - }{ - {10000, 400, 50, 3}, - {100000, 1000, 100, 10}, - } - - var ( - objects [][]int32o - handles [][]unsafe.Pointer - ) - - for _, x := range nsx { - objects = append(objects, make([]int32o, x.nobjects)) - handles = append(handles, make([]unsafe.Pointer, x.nhandles)) - } - - c := NewCache(nil) - - wg := new(sync.WaitGroup) - var done int32 - - for ns, x := range nsx { - for i := 0; i < x.concurrent; i++ { - wg.Add(1) - go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for j := len(objects) * repeat; j >= 0; j-- { - key := uint64(r.Intn(len(objects))) - h := c.Get(uint64(ns), key, func() (int, Value) { - o := &objects[key] - o.acquire() - return 1, o - }) - if v := h.Value().(*int32o); v != &objects[key] { - t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) - } - if objects[key] != 1 { - t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) - } - if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { - h.Release() - } - } - }(ns, i, x.repeat, objects[ns], handles[ns]) - } - - go func(handles []unsafe.Pointer) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for atomic.LoadInt32(&done) == 0 { - i := r.Intn(len(handles)) - h := (*Handle)(atomic.LoadPointer(&handles[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { - h.Release() - } - time.Sleep(time.Millisecond) - } - }(handles[ns]) - } - - go func() { - handles := make([]*Handle, 100000) - for atomic.LoadInt32(&done) == 0 { - for i := range handles { - handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { - return 1, 1 - }) - } - for _, h := range handles { - h.Release() - } - } - }() - - wg.Wait() - - atomic.StoreInt32(&done, 1) - - for _, handles0 := range handles { - for i := range handles0 { - h := (*Handle)(atomic.LoadPointer(&handles0[i])) - if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { - h.Release() - } - } - } - - for ns, objects0 := range objects { - for i, o := range objects0 { - if o != 0 { - t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) - } - } - } -} - -func TestCacheMap_NodesAndSize(t *testing.T) { - c := NewCache(nil) - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } - set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 2, nil) - set(c, 1, 1, 3, 3, nil) - set(c, 2, 1, 4, 1, nil) - if c.Nodes() != 4 { - t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) - } - if c.Size() != 7 
{ - t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) - } -} - -func TestLRUCache_Capacity(t *testing.T) { - c := NewCache(NewLRU(10)) - if c.Capacity() != 10 { - t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) - } - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 2, nil).Release() - set(c, 1, 1, 3, 3, nil).Release() - set(c, 2, 1, 4, 1, nil).Release() - set(c, 2, 2, 5, 1, nil).Release() - set(c, 2, 3, 6, 1, nil).Release() - set(c, 2, 4, 7, 1, nil).Release() - set(c, 2, 5, 8, 1, nil).Release() - if c.Nodes() != 7 { - t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) - } - if c.Size() != 10 { - t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) - } - c.SetCapacity(9) - if c.Capacity() != 9 { - t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) - } - if c.Nodes() != 6 { - t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) - } - if c.Size() != 8 { - t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) - } -} - -func TestCacheMap_NilValue(t *testing.T) { - c := NewCache(NewLRU(10)) - h := c.Get(0, 0, func() (size int, value Value) { - return 1, nil - }) - if h != nil { - t.Error("cache handle is non-nil") - } - if c.Nodes() != 0 { - t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) - } - if c.Size() != 0 { - t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) - } -} - -func TestLRUCache_GetLatency(t *testing.T) { - runtime.GOMAXPROCS(runtime.NumCPU()) - - const ( - concurrentSet = 30 - concurrentGet = 3 - duration = 3 * time.Second - delay = 3 * time.Millisecond - maxkey = 100000 - ) - - var ( - set, getHit, getAll int32 - getMaxLatency, getDuration int64 - ) - - c := NewCache(NewLRU(5000)) - wg := &sync.WaitGroup{} - until := time.Now().Add(duration) - for i := 0; i < concurrentSet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for time.Now().Before(until) { - c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { - time.Sleep(delay) - atomic.AddInt32(&set, 1) - return 1, 1 - }).Release() - } - }(i) - } - for i := 0; i < concurrentGet; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - for { - mark := time.Now() - if mark.Before(until) { - h := c.Get(0, uint64(r.Intn(maxkey)), nil) - latency := int64(time.Now().Sub(mark)) - m := atomic.LoadInt64(&getMaxLatency) - if latency > m { - atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) - } - atomic.AddInt64(&getDuration, latency) - if h != nil { - atomic.AddInt32(&getHit, 1) - h.Release() - } - atomic.AddInt32(&getAll, 1) - } else { - break - } - } - }(i) - } - - wg.Wait() - getAvglatency := time.Duration(getDuration) / time.Duration(getAll) - t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", - set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) - - if getAvglatency > delay/3 { - t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) - } -} - -func TestLRUCache_HitMiss(t *testing.T) { - cases := []struct { - key uint64 - value string - }{ - {1, "vvvvvvvvv"}, - {100, "v1"}, - {0, "v2"}, - {12346, "v3"}, - {777, "v4"}, - {999, "v5"}, - {7654, "v6"}, - {2, "v7"}, - {3, "v8"}, - {9, "v9"}, - } - - setfin := 0 - c := NewCache(NewLRU(1000)) - for i, x := range cases { - set(c, 0, x.key, x.value, len(x.value), func() { - setfin++ - }).Release() - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j <= i { - // should hit - if h == nil { - t.Errorf("case '%d' 
iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - for i, x := range cases { - finalizerOk := false - c.Delete(0, x.key, func() { - finalizerOk = true - }) - - if !finalizerOk { - t.Errorf("case %d delete finalizer not executed", i) - } - - for j, y := range cases { - h := c.Get(0, y.key, nil) - if j > i { - // should hit - if h == nil { - t.Errorf("case '%d' iteration '%d' is miss", i, j) - } else { - if x := h.Value().(releaserFunc).value.(string); x != y.value { - t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) - } - } - } else { - // should miss - if h != nil { - t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) - } - } - if h != nil { - h.Release() - } - } - } - - if setfin != len(cases) { - t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) - } -} - -func TestLRUCache_Eviction(t *testing.T) { - c := NewCache(NewLRU(12)) - o1 := set(c, 0, 1, 1, 1, nil) - set(c, 0, 2, 2, 1, nil).Release() - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - set(c, 0, 5, 5, 1, nil).Release() - if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 - h.Release() - } - set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 - - for _, key := range []uint64{9, 2, 5, 1} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - o1.Release() - for _, key := range []uint64{1, 2, 5} { - h := c.Get(0, key, nil) - if h == nil { - t.Errorf("miss for key '%d'", key) - } else { - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } - for _, key := range []uint64{3, 4, 9} { - h := c.Get(0, key, nil) - if h != nil { - t.Errorf("hit for key '%d'", key) - if x := h.Value().(int); x != int(key) { - t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) - } - h.Release() - } - } -} - -func TestLRUCache_Evict(t *testing.T) { - c := NewCache(NewLRU(6)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - set(c, 1, 1, 4, 1, nil).Release() - set(c, 1, 2, 5, 1, nil).Release() - set(c, 2, 1, 6, 1, nil).Release() - set(c, 2, 2, 7, 1, nil).Release() - - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d.%d return nil", ns, key) - } - } - } - - if ok := c.Evict(0, 1); !ok { - t.Error("first Cache.Evict on #0.1 return false") - } - if ok := c.Evict(0, 1); ok { - t.Error("second Cache.Evict on #0.1 return true") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) - } - - c.EvictNS(1) - if h := c.Get(1, 1, nil); h != nil { - t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) - } - if h := c.Get(1, 2, nil); h != nil { - t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) - } - - c.EvictAll() - for ns := 0; ns < 3; ns++ { - for key := 1; key < 3; key++ { - 
if h := c.Get(uint64(ns), uint64(key), nil); h != nil { - t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) - } - } - } -} - -func TestLRUCache_Delete(t *testing.T) { - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, nil).Release() - set(c, 0, 2, 2, 1, nil).Release() - - if ok := c.Delete(0, 1, delFunc); !ok { - t.Error("Cache.Delete on #1 return false") - } - if h := c.Get(0, 1, nil); h != nil { - t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) - } - if ok := c.Delete(0, 1, delFunc); ok { - t.Error("Cache.Delete on #1 return true") - } - - h2 := c.Get(0, 2, nil) - if h2 == nil { - t.Error("Cache.Get on #2 return nil") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(1) Cache.Delete on #2 return false") - } - if ok := c.Delete(0, 2, delFunc); !ok { - t.Error("(2) Cache.Delete on #2 return false") - } - - set(c, 0, 3, 3, 1, nil).Release() - set(c, 0, 4, 4, 1, nil).Release() - c.Get(0, 2, nil).Release() - - for key := 2; key <= 4; key++ { - if h := c.Get(0, uint64(key), nil); h != nil { - h.Release() - } else { - t.Errorf("Cache.Get on #%d return nil", key) - } - } - - h2.Release() - if h := c.Get(0, 2, nil); h != nil { - t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) - } - - if delFuncCalled != 4 { - t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) - } -} - -func TestLRUCache_Close(t *testing.T) { - relFuncCalled := 0 - relFunc := func() { - relFuncCalled++ - } - delFuncCalled := 0 - delFunc := func() { - delFuncCalled++ - } - - c := NewCache(NewLRU(2)) - set(c, 0, 1, 1, 1, relFunc).Release() - set(c, 0, 2, 2, 1, relFunc).Release() - - h3 := set(c, 0, 3, 3, 1, relFunc) - if h3 == nil { - t.Error("Cache.Get on #3 return nil") - } - if ok := c.Delete(0, 3, delFunc); !ok { - t.Error("Cache.Delete on #3 return false") - } - - c.Close() - - if relFuncCalled != 3 { - t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) - } - if delFuncCalled != 1 { - t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) - } -} - -func BenchmarkLRUCache(b *testing.B) { - c := NewCache(NewLRU(10000)) - - b.SetParallelism(10) - b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for pb.Next() { - key := uint64(r.Intn(1000000)) - c.Get(0, key, func() (int, Value) { - return 1, key - }).Release() - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go deleted file mode 100644 index d9a84cde1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package cache - -import ( - "sync" - "unsafe" -) - -type lruNode struct { - n *Node - h *Handle - ban bool - - next, prev *lruNode -} - -func (n *lruNode) insert(at *lruNode) { - x := at.next - at.next = n - n.prev = at - n.next = x - x.prev = n -} - -func (n *lruNode) remove() { - if n.prev != nil { - n.prev.next = n.next - n.next.prev = n.prev - n.prev = nil - n.next = nil - } else { - panic("BUG: removing removed node") - } -} - -type lru struct { - mu sync.Mutex - capacity int - used int - recent lruNode -} - -func (r *lru) reset() { - r.recent.next = &r.recent - r.recent.prev = &r.recent - r.used = 0 -} - -func (r *lru) Capacity() int { - r.mu.Lock() - defer r.mu.Unlock() - return r.capacity -} - -func (r *lru) SetCapacity(capacity int) { - var evicted []*lruNode - - r.mu.Lock() - r.capacity = capacity - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Promote(n *Node) { - var evicted []*lruNode - - r.mu.Lock() - if n.CacheData == nil { - if n.Size() <= r.capacity { - rn := &lruNode{n: n, h: n.GetHandle()} - rn.insert(&r.recent) - n.CacheData = unsafe.Pointer(rn) - r.used += n.Size() - - for r.used > r.capacity { - rn := r.recent.prev - if rn == nil { - panic("BUG: invalid LRU used or capacity counter") - } - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.insert(&r.recent) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) Ban(n *Node) { - r.mu.Lock() - if n.CacheData == nil { - n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true}) - } else { - rn := (*lruNode)(n.CacheData) - if !rn.ban { - rn.remove() - rn.ban = true - r.used -= rn.n.Size() - r.mu.Unlock() - - rn.h.Release() - rn.h = nil - return - } - } - r.mu.Unlock() -} - -func (r *lru) Evict(n *Node) { - r.mu.Lock() - rn := (*lruNode)(n.CacheData) - if rn == nil || rn.ban { - r.mu.Unlock() - return - } - n.CacheData = nil - r.mu.Unlock() - - rn.h.Release() -} - -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - -// NewLRU create a new LRU-cache. -func NewLRU(capacity int) Cacher { - r := &lru{capacity: capacity} - r.reset() - return r -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go deleted file mode 100644 index d33d5e9c7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import "github.com/syndtr/goleveldb/leveldb/comparer" - -type iComparer struct { - ucmp comparer.Comparer -} - -func (icmp *iComparer) uName() string { - return icmp.ucmp.Name() -} - -func (icmp *iComparer) uCompare(a, b []byte) int { - return icmp.ucmp.Compare(a, b) -} - -func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte { - return icmp.ucmp.Separator(dst, a, b) -} - -func (icmp *iComparer) uSuccessor(dst, b []byte) []byte { - return icmp.ucmp.Successor(dst, b) -} - -func (icmp *iComparer) Name() string { - return icmp.uName() -} - -func (icmp *iComparer) Compare(a, b []byte) int { - x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey()) - if x == 0 { - if m, n := iKey(a).num(), iKey(b).num(); m > n { - x = -1 - } else if m < n { - x = 1 - } - } - return x -} - -func (icmp *iComparer) Separator(dst, a, b []byte) []byte { - ua, ub := iKey(a).ukey(), iKey(b).ukey() - dst = icmp.ucmp.Separator(dst, ua, ub) - if dst == nil { - return nil - } - if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, a[len(a)-8:]...) - } - return dst -} - -func (icmp *iComparer) Successor(dst, b []byte) []byte { - ub := iKey(b).ukey() - dst = icmp.ucmp.Successor(dst, ub) - if dst == nil { - return nil - } - if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 { - dst = append(dst, kMaxNumBytes...) - } else { - // Did not close possibilities that n maybe longer than len(ub). - dst = append(dst, b[len(b)-8:]...) - } - return dst -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go deleted file mode 100644 index 14dddf88d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package comparer - -import "bytes" - -type bytesComparer struct{} - -func (bytesComparer) Compare(a, b []byte) int { - return bytes.Compare(a, b) -} - -func (bytesComparer) Name() string { - return "leveldb.BytewiseComparator" -} - -func (bytesComparer) Separator(dst, a, b []byte) []byte { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && a[i] == b[i]; i++ { - } - if i >= n { - // Do not shorten if one string is a prefix of the other - } else if c := a[i]; c < 0xff && c+1 < b[i] { - dst = append(dst, a[:i+1]...) - dst[i]++ - return dst - } - return nil -} - -func (bytesComparer) Successor(dst, b []byte) []byte { - for i, c := range b { - if c != 0xff { - dst = append(dst, b[:i+1]...) - dst[i]++ - return dst - } - } - return nil -} - -// DefaultComparer are default implementation of the Comparer interface. -// It uses the natural ordering, consistent with bytes.Compare. 
-var DefaultComparer = bytesComparer{} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go deleted file mode 100644 index 14a28f16f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package comparer provides interface and implementation for ordering -// sets of data. -package comparer - -// BasicComparer is the interface that wraps the basic Compare method. -type BasicComparer interface { - // Compare returns -1, 0, or +1 depending on whether a is 'less than', - // 'equal to' or 'greater than' b. The two arguments can only be 'equal' - // if their contents are exactly equal. Furthermore, the empty slice - // must be 'less than' any non-empty slice. - Compare(a, b []byte) int -} - -// Comparer defines a total ordering over the space of []byte keys: a 'less -// than' relationship. -type Comparer interface { - BasicComparer - - // Name returns name of the comparer. - // - // The Level-DB on-disk format stores the comparer name, and opening a - // database with a different comparer from the one it was created with - // will result in an error. - // - // An implementation to a new name whenever the comparer implementation - // changes in a way that will cause the relative ordering of any two keys - // to change. - // - // Names starting with "leveldb." are reserved and should not be used - // by any users of this package. - Name() string - - // Bellow are advanced functions used used to reduce the space requirements - // for internal data structures such as index blocks. - - // Separator appends a sequence of bytes x to dst such that a <= x && x < b, - // where 'less than' is consistent with Compare. An implementation should - // return nil if x equal to a. - // - // Either contents of a or b should not by any means modified. Doing so - // may cause corruption on the internal state. - Separator(dst, a, b []byte) []byte - - // Successor appends a sequence of bytes x to dst such that x >= b, where - // 'less than' is consistent with Compare. An implementation should return - // nil if x equal to b. - // - // Contents of b should not by any means modified. Doing so may cause - // corruption on the internal state. - Successor(dst, b []byte) []byte -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go deleted file mode 100644 index a351874ed..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go +++ /dev/null @@ -1,500 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package leveldb - -import ( - "bytes" - "fmt" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "io" - "math/rand" - "testing" -) - -const ctValSize = 1000 - -type dbCorruptHarness struct { - dbHarness -} - -func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { - h := new(dbCorruptHarness) - h.init(t, o) - return h -} - -func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { - return newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - }) -} - -func (h *dbCorruptHarness) recover() { - p := &h.dbHarness - t := p.t - - var err error - p.db, err = Recover(h.stor, h.o) - if err != nil { - t.Fatal("Repair: got error: ", err) - } -} - -func (h *dbCorruptHarness) build(n int) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := range rnd.Perm(n) { - batch.Reset() - batch.Put(tkey(i), tval(i, ctValSize)) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { - p := &h.dbHarness - t := p.t - db := p.db - - batch := new(Batch) - for i := 0; i < n; i++ { - batch.Reset() - batch.Delete(tkey(rnd.Intn(max))) - err := db.Write(batch, p.wo) - if err != nil { - t.Fatal("write error: ", err) - } - } -} - -func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { - p := &h.dbHarness - t := p.t - - ff, _ := p.stor.GetFiles(ft) - sff := files(ff) - sff.sort() - if fi < 0 { - fi = len(sff) - 1 - } - if fi >= len(sff) { - t.Fatalf("no such file with type %q with index %d", ft, fi) - } - - file := sff[fi] - - r, err := file.Open() - if err != nil { - t.Fatal("cannot open file: ", err) - } - x, err := r.Seek(0, 2) - if err != nil { - t.Fatal("cannot query file size: ", err) - } - m := int(x) - if _, err := r.Seek(0, 0); err != nil { - t.Fatal(err) - } - - if offset < 0 { - if -offset > m { - offset = 0 - } else { - offset = m + offset - } - } - if offset > m { - offset = m - } - if offset+n > m { - n = m - offset - } - - buf := make([]byte, m) - _, err = io.ReadFull(r, buf) - if err != nil { - t.Fatal("cannot read file: ", err) - } - r.Close() - - for i := 0; i < n; i++ { - buf[offset+i] ^= 0x80 - } - - err = file.Remove() - if err != nil { - t.Fatal("cannot remove old file: ", err) - } - w, err := file.Create() - if err != nil { - t.Fatal("cannot create new file: ", err) - } - _, err = w.Write(buf) - if err != nil { - t.Fatal("cannot write new file: ", err) - } - w.Close() -} - -func (h *dbCorruptHarness) removeAll(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - for _, f := range ff { - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } - } -} - -func (h *dbCorruptHarness) removeOne(ft storage.FileType) { - ff, err := h.stor.GetFiles(ft) - if err != nil { - h.t.Fatal("get files: ", err) - } - f := ff[rand.Intn(len(ff))] - h.t.Logf("removing file @%d", f.Num()) - if err := f.Remove(); err != nil { - h.t.Error("remove file: ", err) - } -} - -func (h *dbCorruptHarness) check(min, max int) { - p := 
&h.dbHarness - t := p.t - db := p.db - - var n, badk, badv, missed, good int - iter := db.NewIterator(nil, p.ro) - for iter.Next() { - k := 0 - fmt.Sscanf(string(iter.Key()), "%d", &k) - if k < n { - badk++ - continue - } - missed += k - n - n = k + 1 - if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { - badv++ - } else { - good++ - } - } - err := iter.Error() - iter.Release() - t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", - min, max, good, badk, badv, missed, err) - if good < min || good > max { - t.Errorf("good entries number not in range") - } -} - -func TestCorruptDB_Journal(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.check(100, 100) - h.closeDB() - h.corrupt(storage.TypeJournal, -1, 19, 1) - h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) - - h.openDB() - h.check(36, 36) - - h.close() -} - -func TestCorruptDB_Table(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(100) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(99, 99) - - h.close() -} - -func TestCorruptDB_TableIndex(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10000) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, -2000, 500) - - h.openDB() - h.check(5000, 9999) - - h.close() -} - -func TestCorruptDB_MissingManifest(t *testing.T) { - rnd := rand.New(rand.NewSource(0x0badda7a)) - h := newDbCorruptHarnessWopt(t, &opt.Options{ - BlockCacheCapacity: 100, - Strict: opt.StrictJournalChecksum, - WriteBuffer: 1000 * 60, - }) - - h.build(1000) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.deleteRand(500, 1000, rnd) - h.compactMem() - h.buildShuffled(1000, rnd) - h.compactMem() - h.closeDB() - - h.stor.SetIgnoreOpenErr(storage.TypeManifest) - h.removeAll(storage.TypeManifest) - h.openAssert(false) - h.stor.SetIgnoreOpenErr(0) - - h.recover() - h.check(1000, 1000) - h.build(1000) - h.compactMem() - h.compactRange("", "") - h.closeDB() - - h.recover() - h.check(1000, 1000) - - h.close() -} - -func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.put("foo", "v4") - h.put("foo", "v5") - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "v1") - h.put("foo", "v2") - h.put("foo", "v3") - h.compactMem() - h.put("foo", "v4") - h.put("foo", "v5") - h.compactMem() - h.closeDB() - - h.recover() - h.getVal("foo", "v5") - h.put("foo", "v6") - h.getVal("foo", "v6") - - h.reopenDB() - h.getVal("foo", "v6") - - h.close() -} - -func TestCorruptDB_CorruptedManifest(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("foo", "hello") - h.compactMem() - h.compactRange("", "") - h.closeDB() - h.corrupt(storage.TypeManifest, -1, 0, 1000) - h.openAssert(false) - - h.recover() - h.getVal("foo", "hello") - - h.close() -} - -func TestCorruptDB_CompactionInputError(t *testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.check(9, 9) - - h.build(10000) - h.check(10000, 10000) - - h.close() -} - -func TestCorruptDB_UnrelatedKeys(t 
*testing.T) { - h := newDbCorruptHarness(t) - - h.build(10) - h.compactMem() - h.closeDB() - h.corrupt(storage.TypeTable, -1, 100, 1) - - h.openDB() - h.put(string(tkey(1000)), string(tval(1000, ctValSize))) - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - h.compactMem() - h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) - - h.close() -} - -func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(1, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("a", "v2") - h.put("b", "v2") - h.compactMem() - h.put("a", "v3") - h.put("b", "v3") - h.compactMem() - h.put("c", "v0") - h.put("d", "v0") - h.compactMem() - h.compactRangeAt(0, "", "") - h.closeDB() - - h.recover() - h.getVal("a", "v3") - h.getVal("b", "v3") - h.getVal("c", "v0") - h.getVal("d", "v0") - - h.close() -} - -func TestCorruptDB_MissingTableFiles(t *testing.T) { - h := newDbCorruptHarness(t) - - h.put("a", "v1") - h.put("b", "v1") - h.compactMem() - h.put("c", "v2") - h.put("d", "v2") - h.compactMem() - h.put("e", "v3") - h.put("f", "v3") - h.closeDB() - - h.removeOne(storage.TypeTable) - h.openAssert(false) - - h.close() -} - -func TestCorruptDB_RecoverTable(t *testing.T) { - h := newDbCorruptHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - Filter: filter.NewBloomFilter(10), - }) - - h.build(1000) - h.compactMem() - h.compactRangeAt(0, "", "") - h.compactRangeAt(1, "", "") - seq := h.db.seq - h.closeDB() - h.corrupt(storage.TypeTable, 0, 1000, 1) - h.corrupt(storage.TypeTable, 3, 10000, 1) - // Corrupted filter shouldn't affect recovery. - h.corrupt(storage.TypeTable, 3, 113888, 10) - h.corrupt(storage.TypeTable, -1, 20000, 1) - - h.recover() - if h.db.seq != seq { - t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) - } - h.check(985, 985) - - h.close() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go deleted file mode 100644 index d50a00856..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go +++ /dev/null @@ -1,943 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "io" - "os" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// DB is a LevelDB database. -type DB struct { - // Need 64-bit alignment. - seq uint64 - - // Session. - s *session - - // MemDB. 
- memMu sync.RWMutex - memPool chan *memdb.DB - mem, frozenMem *memDB - journal *journal.Writer - journalWriter storage.Writer - journalFile storage.File - frozenJournalFile storage.File - frozenSeq uint64 - - // Snapshot. - snapsMu sync.Mutex - snapsList *list.List - - // Stats. - aliveSnaps, aliveIters int32 - - // Write. - writeC chan *Batch - writeMergedC chan bool - writeLockC chan struct{} - writeAckC chan error - writeDelay time.Duration - writeDelayN int - journalC chan *Batch - journalAckC chan error - - // Compaction. - tcompCmdC chan cCmd - tcompPauseC chan chan<- struct{} - mcompCmdC chan cCmd - compErrC chan error - compPerErrC chan error - compErrSetC chan error - compStats []cStats - - // Close. - closeW sync.WaitGroup - closeC chan struct{} - closed uint32 - closer io.Closer -} - -func openDB(s *session) (*DB, error) { - s.log("db@open opening") - start := time.Now() - db := &DB{ - s: s, - // Initial sequence - seq: s.stSeqNum, - // MemDB - memPool: make(chan *memdb.DB, 1), - // Snapshot - snapsList: list.New(), - // Write - writeC: make(chan *Batch), - writeMergedC: make(chan bool), - writeLockC: make(chan struct{}, 1), - writeAckC: make(chan error), - journalC: make(chan *Batch), - journalAckC: make(chan error), - // Compaction - tcompCmdC: make(chan cCmd), - tcompPauseC: make(chan chan<- struct{}), - mcompCmdC: make(chan cCmd), - compErrC: make(chan error), - compPerErrC: make(chan error), - compErrSetC: make(chan error), - compStats: make([]cStats, s.o.GetNumLevel()), - // Close - closeC: make(chan struct{}), - } - - if err := db.recoverJournal(); err != nil { - return nil, err - } - - // Remove any obsolete files. - if err := db.checkAndCleanFiles(); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return nil, err - } - - // Doesn't need to be included in the wait group. - go db.compactionError() - go db.mpoolDrain() - - db.closeW.Add(3) - go db.tCompaction() - go db.mCompaction() - go db.jWriter() - - s.logf("db@open done T·%v", time.Since(start)) - - runtime.SetFinalizer(db, (*DB).Close) - return db, nil -} - -// Open opens or creates a DB for the given storage. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist Open will returns -// os.ErrExist error. -// -// Open will return an error with type of ErrCorrupted if corruption -// detected in the DB. Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = s.recover() - if err != nil { - if !os.IsNotExist(err) || s.o.GetErrorIfMissing() { - return - } - err = s.create() - if err != nil { - return - } - } else if s.o.GetErrorIfExist() { - err = os.ErrExist - return - } - - return openDB(s) -} - -// OpenFile opens or creates a DB for the given path. -// The DB will be created if not exist, unless ErrorIfMissing is true. -// Also, if ErrorIfExist is true and the DB exist OpenFile will returns -// os.ErrExist error. -// -// OpenFile uses standard file-system backed storage implementation as -// desribed in the leveldb/storage package. -// -// OpenFile will return an error with type of ErrCorrupted if corruption -// detected in the DB. 
Corrupted DB can be recovered with Recover -// function. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func OpenFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Open(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -// Recover recovers and opens a DB with missing or corrupted manifest files -// for the given storage. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) { - s, err := newSession(stor, o) - if err != nil { - return - } - defer func() { - if err != nil { - s.close() - s.release() - } - }() - - err = recoverTable(s, o) - if err != nil { - return - } - return openDB(s) -} - -// RecoverFile recovers and opens a DB with missing or corrupted manifest files -// for the given path. It will ignore any manifest files, valid or not. -// The DB must already exist or it will returns an error. -// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options. -// -// RecoverFile uses standard file-system backed storage implementation as desribed -// in the leveldb/storage package. -// -// The returned DB instance is goroutine-safe. -// The DB must be closed after use, by calling Close method. -func RecoverFile(path string, o *opt.Options) (db *DB, err error) { - stor, err := storage.OpenFile(path) - if err != nil { - return - } - db, err = Recover(stor, o) - if err != nil { - stor.Close() - } else { - db.closer = stor - } - return -} - -func recoverTable(s *session, o *opt.Options) error { - o = dupOptions(o) - // Mask StrictReader, lets StrictRecovery doing its job. - o.Strict &= ^opt.StrictReader - - // Get all tables and sort it by file number. - tableFiles_, err := s.getFiles(storage.TypeTable) - if err != nil { - return err - } - tableFiles := files(tableFiles_) - tableFiles.sort() - - var ( - maxSeq uint64 - recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int - - // We will drop corrupted table. - strict = o.GetStrict(opt.StrictRecovery) - - rec = &sessionRecord{numLevel: o.GetNumLevel()} - bpool = util.NewBufferPool(o.GetBlockSize() + 5) - ) - buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) { - tmp = s.newTemp() - writer, err := tmp.Create() - if err != nil { - return - } - defer func() { - writer.Close() - if err != nil { - tmp.Remove() - tmp = nil - } - }() - - // Copy entries. - tw := table.NewWriter(writer, o) - for iter.Next() { - key := iter.Key() - if validIkey(key) { - err = tw.Append(key, iter.Value()) - if err != nil { - return - } - } - } - err = iter.Error() - if err != nil { - return - } - err = tw.Close() - if err != nil { - return - } - err = writer.Sync() - if err != nil { - return - } - size = int64(tw.BytesLen()) - return - } - recoverTable := func(file storage.File) error { - s.logf("table@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - var closed bool - defer func() { - if !closed { - reader.Close() - } - }() - - // Get file size. 
- size, err := reader.Seek(0, 2) - if err != nil { - return err - } - - var ( - tSeq uint64 - tgoodKey, tcorruptedKey, tcorruptedBlock int - imin, imax []byte - ) - tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o) - if err != nil { - return err - } - iter := tr.NewIterator(nil, nil) - iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) { - if errors.IsCorrupted(err) { - s.logf("table@recovery block corruption @%d %q", file.Num(), err) - tcorruptedBlock++ - } - }) - - // Scan the table. - for iter.Next() { - key := iter.Key() - _, seq, _, kerr := parseIkey(key) - if kerr != nil { - tcorruptedKey++ - continue - } - tgoodKey++ - if seq > tSeq { - tSeq = seq - } - if imin == nil { - imin = append([]byte{}, key...) - } - imax = append(imax[:0], key...) - } - if err := iter.Error(); err != nil { - iter.Release() - return err - } - iter.Release() - - goodKey += tgoodKey - corruptedKey += tcorruptedKey - corruptedBlock += tcorruptedBlock - - if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) { - droppedTable++ - s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - return nil - } - - if tgoodKey > 0 { - if tcorruptedKey > 0 || tcorruptedBlock > 0 { - // Rebuild the table. - s.logf("table@recovery rebuilding @%d", file.Num()) - iter := tr.NewIterator(nil, nil) - tmp, newSize, err := buildTable(iter) - iter.Release() - if err != nil { - return err - } - closed = true - reader.Close() - if err := file.Replace(tmp); err != nil { - return err - } - size = newSize - } - if tSeq > maxSeq { - maxSeq = tSeq - } - recoveredKey += tgoodKey - // Add table to level 0. - rec.addTable(0, file.Num(), uint64(size), imin, imax) - s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq) - } else { - droppedTable++ - s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size) - } - - return nil - } - - // Recover all tables. - if len(tableFiles) > 0 { - s.logf("table@recovery F·%d", len(tableFiles)) - - // Mark file number as used. - s.markFileNum(tableFiles[len(tableFiles)-1].Num()) - - for _, file := range tableFiles { - if err := recoverTable(file); err != nil { - return err - } - } - - s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq) - } - - // Set sequence number. - rec.setSeqNum(maxSeq) - - // Create new manifest. - if err := s.create(); err != nil { - return err - } - - // Commit. - return s.commit(rec) -} - -func (db *DB) recoverJournal() error { - // Get all tables and sort it by file number. - journalFiles_, err := db.s.getFiles(storage.TypeJournal) - if err != nil { - return err - } - journalFiles := files(journalFiles_) - journalFiles.sort() - - // Discard older journal. - prev := -1 - for i, file := range journalFiles { - if file.Num() >= db.s.stJournalNum { - if prev >= 0 { - i-- - journalFiles[i] = journalFiles[prev] - } - journalFiles = journalFiles[i:] - break - } else if file.Num() == db.s.stPrevJournalNum { - prev = i - } - } - - var jr *journal.Reader - var of storage.File - var mem *memdb.DB - batch := new(Batch) - cm := newCMem(db.s) - buf := new(util.Buffer) - // Options. 
- strict := db.s.o.GetStrict(opt.StrictJournal) - checksum := db.s.o.GetStrict(opt.StrictJournalChecksum) - writeBuffer := db.s.o.GetWriteBuffer() - recoverJournal := func(file storage.File) error { - db.logf("journal@recovery recovering @%d", file.Num()) - reader, err := file.Open() - if err != nil { - return err - } - defer reader.Close() - - // Create/reset journal reader instance. - if jr == nil { - jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum) - } else { - jr.Reset(reader, dropper{db.s, file}, strict, checksum) - } - - // Flush memdb and remove obsolete journal file. - if of != nil { - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - if err := cm.commit(file.Num(), db.seq); err != nil { - return err - } - cm.reset() - of.Remove() - of = nil - } - - // Replay journal to memdb. - mem.Reset() - for { - r, err := jr.Next() - if err != nil { - if err == io.EOF { - break - } - return errors.SetFile(err, file) - } - - buf.Reset() - if _, err := buf.ReadFrom(r); err != nil { - if err == io.ErrUnexpectedEOF { - // This is error returned due to corruption, with strict == false. - continue - } else { - return errors.SetFile(err, file) - } - } - if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil { - if strict || !errors.IsCorrupted(err) { - return errors.SetFile(err, file) - } else { - db.s.logf("journal error: %v (skipped)", err) - // We won't apply sequence number as it might be corrupted. - continue - } - } - - // Save sequence number. - db.seq = batch.seq + uint64(batch.Len()) - - // Flush it if large enough. - if mem.Size() >= writeBuffer { - if err := cm.flush(mem, 0); err != nil { - return err - } - mem.Reset() - } - } - - of = file - return nil - } - - // Recover all journals. - if len(journalFiles) > 0 { - db.logf("journal@recovery F·%d", len(journalFiles)) - - // Mark file number as used. - db.s.markFileNum(journalFiles[len(journalFiles)-1].Num()) - - mem = memdb.New(db.s.icmp, writeBuffer) - for _, file := range journalFiles { - if err := recoverJournal(file); err != nil { - return err - } - } - - // Flush the last journal. - if mem.Len() > 0 { - if err := cm.flush(mem, 0); err != nil { - return err - } - } - } - - // Create a new journal. - if _, err := db.newMem(0); err != nil { - return err - } - - // Commit. - if err := cm.commit(db.journalFile.Num(), db.seq); err != nil { - // Close journal. - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - return err - } - - // Remove the last obsolete journal file. - if of != nil { - of.Remove() - } - - return nil -} - -func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, mv, me := m.mdb.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return nil, ErrNotFound - } - return append([]byte{}, mv...), nil - } - } else if me != ErrNotFound { - return nil, me - } - } - - v := db.s.version() - value, cSched, err := v.get(ikey, ro, false) - v.release() - if cSched { - // Trigger table compaction. 
- db.compSendTrigger(db.tcompCmdC) - } - return -} - -func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) { - ikey := newIkey(key, seq, ktSeek) - - em, fm := db.getMems() - for _, m := range [...]*memDB{em, fm} { - if m == nil { - continue - } - defer m.decref() - - mk, _, me := m.mdb.Find(ikey) - if me == nil { - ukey, _, kt, kerr := parseIkey(mk) - if kerr != nil { - // Shouldn't have had happen. - panic(kerr) - } - if db.s.icmp.uCompare(ukey, key) == 0 { - if kt == ktDel { - return false, nil - } - return true, nil - } - } else if me != ErrNotFound { - return false, me - } - } - - v := db.s.version() - _, cSched, err := v.get(ikey, ro, true) - v.release() - if cSched { - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) - } - if err == nil { - ret = true - } else if err == ErrNotFound { - err = nil - } - return -} - -// Get gets the value for the given key. It returns ErrNotFound if the -// DB does not contains the key. -// -// The returned slice is its own copy, it is safe to modify the contents -// of the returned slice. -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.get(key, se.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = db.ok() - if err != nil { - return - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - return db.has(key, se.seq, ro) -} - -// NewIterator returns an iterator for the latest snapshot of the -// uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - - se := db.acquireSnapshot() - defer db.releaseSnapshot(se) - // Iterator holds 'version' lock, 'version' is immutable so snapshot - // can be released after iterator created. - return db.newIterator(se.seq, slice, ro) -} - -// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot -// is a frozen snapshot of a DB state at a particular point in time. The -// content of snapshot are guaranteed to be consistent. -// -// The snapshot must be released after use, by calling Release method. -func (db *DB) GetSnapshot() (*Snapshot, error) { - if err := db.ok(); err != nil { - return nil, err - } - - return db.newSnapshot(), nil -} - -// GetProperty returns value of the given property name. -// -// Property names: -// leveldb.num-files-at-level{n} -// Returns the number of files at level 'n'. 
-// leveldb.stats -// Returns statistics of the underlying DB. -// leveldb.sstables -// Returns sstables list for each level. -// leveldb.blockpool -// Returns block pool stats. -// leveldb.cachedblock -// Returns size of cached block. -// leveldb.openedtables -// Returns number of opened tables. -// leveldb.alivesnaps -// Returns number of alive snapshots. -// leveldb.aliveiters -// Returns number of alive iterators. -func (db *DB) GetProperty(name string) (value string, err error) { - err = db.ok() - if err != nil { - return - } - - const prefix = "leveldb." - if !strings.HasPrefix(name, prefix) { - return "", errors.New("leveldb: GetProperty: unknown property: " + name) - } - p := name[len(prefix):] - - v := db.s.version() - defer v.release() - - numFilesPrefix := "num-files-at-level" - switch { - case strings.HasPrefix(p, numFilesPrefix): - var level uint - var rest string - n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest) - if n != 1 || int(level) >= db.s.o.GetNumLevel() { - err = errors.New("leveldb: GetProperty: invalid property: " + name) - } else { - value = fmt.Sprint(v.tLen(int(level))) - } - case p == "stats": - value = "Compactions\n" + - " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" + - "-------+------------+---------------+---------------+---------------+---------------\n" - for level, tables := range v.tables { - duration, read, write := db.compStats[level].get() - if len(tables) == 0 && duration == 0 { - continue - } - value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n", - level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(), - float64(read)/1048576.0, float64(write)/1048576.0) - } - case p == "sstables": - for level, tables := range v.tables { - value += fmt.Sprintf("--- level %d ---\n", level) - for _, t := range tables { - value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax) - } - } - case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) - case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) - } else { - value = "" - } - case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) - case p == "alivesnaps": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) - case p == "aliveiters": - value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters)) - default: - err = errors.New("leveldb: GetProperty: unknown property: " + name) - } - - return -} - -// SizeOf calculates approximate sizes of the given key ranges. -// The length of the returned sizes are equal with the length of the given -// ranges. The returned sizes measure storage space usage, so if the user -// data compresses by a factor of ten, the returned sizes will be one-tenth -// the size of the corresponding user data size. -// The results may not include the sizes of recently written data. -func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) { - if err := db.ok(); err != nil { - return nil, err - } - - v := db.s.version() - defer v.release() - - sizes := make(Sizes, 0, len(ranges)) - for _, r := range ranges { - imin := newIkey(r.Start, kMaxSeq, ktSeek) - imax := newIkey(r.Limit, kMaxSeq, ktSeek) - start, err := v.offsetOf(imin) - if err != nil { - return nil, err - } - limit, err := v.offsetOf(imax) - if err != nil { - return nil, err - } - var size uint64 - if limit >= start { - size = limit - start - } - sizes = append(sizes, size) - } - - return sizes, nil -} - -// Close closes the DB. 
This will also releases any outstanding snapshot and -// abort any in-flight compaction. -// -// It is not safe to close a DB until all outstanding iterators are released. -// It is valid to call Close multiple times. Other methods should not be -// called after the DB has been closed. -func (db *DB) Close() error { - if !db.setClosed() { - return ErrClosed - } - - start := time.Now() - db.log("db@close closing") - - // Clear the finalizer. - runtime.SetFinalizer(db, nil) - - // Get compaction error. - var err error - select { - case err = <-db.compErrC: - default: - } - - // Signal all goroutines. - close(db.closeC) - - // Wait for all gorotines to exit. - db.closeW.Wait() - - // Lock writer and closes journal. - db.writeLockC <- struct{}{} - if db.journal != nil { - db.journal.Close() - db.journalWriter.Close() - } - - if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - } - - // Close session. - db.s.close() - db.logf("db@close done T·%v", time.Since(start)) - db.s.release() - - if db.closer != nil { - if err1 := db.closer.Close(); err == nil { - err = err1 - } - } - - // NIL'ing pointers. - db.s = nil - db.mem = nil - db.frozenMem = nil - db.journal = nil - db.journalWriter = nil - db.journalFile = nil - db.frozenJournalFile = nil - db.closer = nil - - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go deleted file mode 100644 index 447407aba..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ /dev/null @@ -1,835 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -var ( - errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") -) - -type cStats struct { - sync.Mutex - duration time.Duration - read uint64 - write uint64 -} - -func (p *cStats) add(n *cStatsStaging) { - p.Lock() - p.duration += n.duration - p.read += n.read - p.write += n.write - p.Unlock() -} - -func (p *cStats) get() (duration time.Duration, read, write uint64) { - p.Lock() - defer p.Unlock() - return p.duration, p.read, p.write -} - -type cStatsStaging struct { - start time.Time - duration time.Duration - on bool - read uint64 - write uint64 -} - -func (p *cStatsStaging) startTimer() { - if !p.on { - p.start = time.Now() - p.on = true - } -} - -func (p *cStatsStaging) stopTimer() { - if p.on { - p.duration += time.Since(p.start) - p.on = false - } -} - -type cMem struct { - s *session - level int - rec *sessionRecord -} - -func newCMem(s *session) *cMem { - return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}} -} - -func (c *cMem) flush(mem *memdb.DB, level int) error { - s := c.s - - // Write memdb to table. - iter := mem.NewIterator(nil) - defer iter.Release() - t, n, err := s.tops.createFrom(iter) - if err != nil { - return err - } - - // Pick level. 
- if level < 0 { - v := s.version() - level = v.pickLevel(t.imin.ukey(), t.imax.ukey()) - v.release() - } - c.rec.addTableFile(level, t) - - s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax) - - c.level = level - return nil -} - -func (c *cMem) reset() { - c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()} -} - -func (c *cMem) commit(journal, seq uint64) error { - c.rec.setJournalNum(journal) - c.rec.setSeqNum(seq) - - // Commit changes. - return c.s.commit(c.rec) -} - -func (db *DB) compactionError() { - var ( - err error - wlocked bool - ) -noerr: - // No error. - for { - select { - case err = <-db.compErrSetC: - switch { - case err == nil: - case errors.IsCorrupted(err): - goto hasperr - default: - goto haserr - } - case _, _ = <-db.closeC: - return - } - } -haserr: - // Transient error. - for { - select { - case db.compErrC <- err: - case err = <-db.compErrSetC: - switch { - case err == nil: - goto noerr - case errors.IsCorrupted(err): - goto hasperr - default: - } - case _, _ = <-db.closeC: - return - } - } -hasperr: - // Persistent error. - for { - select { - case db.compErrC <- err: - case db.compPerErrC <- err: - case db.writeLockC <- struct{}{}: - // Hold write lock, so that write won't pass-through. - wlocked = true - case _, _ = <-db.closeC: - if wlocked { - // We should release the lock or Close will hang. - <-db.writeLockC - } - return - } - } -} - -type compactionTransactCounter int - -func (cnt *compactionTransactCounter) incr() { - *cnt++ -} - -type compactionTransactInterface interface { - run(cnt *compactionTransactCounter) error - revert() error -} - -func (db *DB) compactionTransact(name string, t compactionTransactInterface) { - defer func() { - if x := recover(); x != nil { - if x == errCompactionTransactExiting { - if err := t.revert(); err != nil { - db.logf("%s revert error %q", name, err) - } - } - panic(x) - } - }() - - const ( - backoffMin = 1 * time.Second - backoffMax = 8 * time.Second - backoffMul = 2 * time.Second - ) - var ( - backoff = backoffMin - backoffT = time.NewTimer(backoff) - lastCnt = compactionTransactCounter(0) - - disableBackoff = db.s.o.GetDisableCompactionBackoff() - ) - for n := 0; ; n++ { - // Check wether the DB is closed. - if db.isClosed() { - db.logf("%s exiting", name) - db.compactionExitTransact() - } else if n > 0 { - db.logf("%s retrying N·%d", name, n) - } - - // Execute. - cnt := compactionTransactCounter(0) - err := t.run(&cnt) - if err != nil { - db.logf("%s error I·%d %q", name, cnt, err) - } - - // Set compaction error status. - select { - case db.compErrSetC <- err: - case perr := <-db.compPerErrC: - if err != nil { - db.logf("%s exiting (persistent error %q)", name, perr) - db.compactionExitTransact() - } - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - if err == nil { - return - } - if errors.IsCorrupted(err) { - db.logf("%s exiting (corruption detected)", name) - db.compactionExitTransact() - } - - if !disableBackoff { - // Reset backoff duration if counter is advancing. - if cnt > lastCnt { - backoff = backoffMin - lastCnt = cnt - } - - // Backoff. 
- backoffT.Reset(backoff) - if backoff < backoffMax { - backoff *= backoffMul - if backoff > backoffMax { - backoff = backoffMax - } - } - select { - case <-backoffT.C: - case _, _ = <-db.closeC: - db.logf("%s exiting", name) - db.compactionExitTransact() - } - } - } -} - -type compactionTransactFunc struct { - runFunc func(cnt *compactionTransactCounter) error - revertFunc func() error -} - -func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error { - return t.runFunc(cnt) -} - -func (t *compactionTransactFunc) revert() error { - if t.revertFunc != nil { - return t.revertFunc() - } - return nil -} - -func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) { - db.compactionTransact(name, &compactionTransactFunc{run, revert}) -} - -func (db *DB) compactionExitTransact() { - panic(errCompactionTransactExiting) -} - -func (db *DB) memCompaction() { - mem := db.getFrozenMem() - if mem == nil { - return - } - defer mem.decref() - - c := newCMem(db.s) - stats := new(cStatsStaging) - - db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size())) - - // Don't compact empty memdb. - if mem.mdb.Len() == 0 { - db.logf("mem@flush skipping") - // drop frozen mem - db.dropFrozenMem() - return - } - - // Pause table compaction. - resumeC := make(chan struct{}) - select { - case db.tcompPauseC <- (chan<- struct{})(resumeC): - case <-db.compPerErrC: - close(resumeC) - resumeC = nil - case _, _ = <-db.closeC: - return - } - - db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.flush(mem.mdb, -1) - }, func() error { - for _, r := range c.rec.addedTables { - db.logf("mem@flush revert @%d", r.num) - f := db.s.getTableFile(r.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil - }) - - db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) { - stats.startTimer() - defer stats.stopTimer() - return c.commit(db.journalFile.Num(), db.frozenSeq) - }, nil) - - db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration) - - for _, r := range c.rec.addedTables { - stats.write += r.size - } - db.compStats[c.level].add(stats) - - // Drop frozen mem. - db.dropFrozenMem() - - // Resume table compaction. - if resumeC != nil { - select { - case <-resumeC: - close(resumeC) - case _, _ = <-db.closeC: - return - } - } - - // Trigger table compaction. - db.compSendTrigger(db.tcompCmdC) -} - -type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatsStaging - - snapHasLastUkey bool - snapLastUkey []byte - snapLastSeq uint64 - snapIter int - snapKerrCnt int - snapDropCnt int - - kerrCnt int - dropCnt int - - minSeq uint64 - strict bool - tableSize int - - tw *tWriter -} - -func (b *tableCompactionBuilder) appendKV(key, value []byte) error { - // Create new table if not already. - if b.tw == nil { - // Check for pause event. - if b.db != nil { - select { - case ch := <-b.db.tcompPauseC: - b.db.pauseCompaction(ch) - case _, _ = <-b.db.closeC: - b.db.compactionExitTransact() - default: - } - } - - // Create new table. - var err error - b.tw, err = b.s.tops.create() - if err != nil { - return err - } - } - - // Write key/value into table. 
- return b.tw.append(key, value) -} - -func (b *tableCompactionBuilder) needFlush() bool { - return b.tw.tw.BytesLen() >= b.tableSize -} - -func (b *tableCompactionBuilder) flush() error { - t, err := b.tw.finish() - if err != nil { - return err - } - b.rec.addTableFile(b.c.level+1, t) - b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) - b.tw = nil - return nil -} - -func (b *tableCompactionBuilder) cleanup() { - if b.tw != nil { - b.tw.drop() - b.tw = nil - } -} - -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { - snapResumed := b.snapIter > 0 - hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) - lastSeq := b.snapLastSeq - b.kerrCnt = b.snapKerrCnt - b.dropCnt = b.snapDropCnt - // Restore compaction state. - b.c.restore() - - defer b.cleanup() - - b.stat1.startTimer() - defer b.stat1.stopTimer() - - iter := b.c.newIterator() - defer iter.Release() - for i := 0; iter.Next(); i++ { - // Incr transact counter. - cnt.incr() - - // Skip until last state. - if i < b.snapIter { - continue - } - - resumed := false - if snapResumed { - resumed = true - snapResumed = false - } - - ikey := iter.Key() - ukey, seq, kt, kerr := parseIkey(ikey) - - if kerr == nil { - shouldStop := !resumed && b.c.shouldStopBefore(ikey) - - if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 { - // First occurrence of this user key. - - // Only rotate tables if ukey doesn't hop across. - if b.tw != nil && (shouldStop || b.needFlush()) { - if err := b.flush(); err != nil { - return err - } - - // Creates snapshot of the state. - b.c.save() - b.snapHasLastUkey = hasLastUkey - b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...) - b.snapLastSeq = lastSeq - b.snapIter = i - b.snapKerrCnt = b.kerrCnt - b.snapDropCnt = b.dropCnt - } - - hasLastUkey = true - lastUkey = append(lastUkey[:0], ukey...) - lastSeq = kMaxSeq - } - - switch { - case lastSeq <= b.minSeq: - // Dropped because newer entry for same user key exist - fallthrough // (A) - case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey): - // For this user key: - // (1) there is no data in higher levels - // (2) data in lower levels will have larger seq numbers - // (3) data in layers that are being compacted here and have - // smaller seq numbers will be dropped in the next - // few iterations of this loop (by rule (A) above). - // Therefore this deletion marker is obsolete and can be dropped. - lastSeq = seq - b.dropCnt++ - continue - default: - lastSeq = seq - } - } else { - if b.strict { - return kerr - } - - // Don't drop corrupted keys. - hasLastUkey = false - lastUkey = lastUkey[:0] - lastSeq = kMaxSeq - b.kerrCnt++ - } - - if err := b.appendKV(ikey, iter.Value()); err != nil { - return err - } - } - - if err := iter.Error(); err != nil { - return err - } - - // Finish last table. 
- if b.tw != nil && !b.tw.empty() { - return b.flush() - } - return nil -} - -func (b *tableCompactionBuilder) revert() error { - for _, at := range b.rec.addedTables { - b.s.logf("table@build revert @%d", at.num) - f := b.s.getTableFile(at.num) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} - -func (db *DB) tableCompaction(c *compaction, noTrivial bool) { - defer c.release() - - rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()} - rec.addCompPtr(c.level, c.imax) - - if !noTrivial && c.trivial() { - t := c.tables[0][0] - db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1) - rec.delTable(c.level, t.file.Num()) - rec.addTableFile(c.level+1, t) - db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) { - return db.s.commit(rec) - }, nil) - return - } - - var stats [2]cStatsStaging - for i, tables := range c.tables { - for _, t := range tables { - stats[i].read += t.size - // Insert deleted tables into record - rec.delTable(c.level+i, t.file.Num()) - } - } - sourceSize := int(stats[0].read + stats[1].read) - minSeq := db.minSeq() - db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq) - - b := &tableCompactionBuilder{ - db: db, - s: db.s, - c: c, - rec: rec, - stat1: &stats[1], - minSeq: minSeq, - strict: db.s.o.GetStrict(opt.StrictCompaction), - tableSize: db.s.o.GetCompactionTableSize(c.level + 1), - } - db.compactionTransact("table@build", b) - - // Commit changes - db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) { - stats[1].startTimer() - defer stats[1].stopTimer() - return db.s.commit(rec) - }, nil) - - resultSize := int(stats[1].write) - db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) - - // Save compaction stats - for i := range stats { - db.compStats[c.level+1].add(&stats[i]) - } -} - -func (db *DB) tableRangeCompaction(level int, umin, umax []byte) { - db.logf("table@compaction range L%d %q:%q", level, umin, umax) - - if level >= 0 { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } else { - v := db.s.version() - m := 1 - for i, t := range v.tables[1:] { - if t.overlaps(db.s.icmp, umin, umax, false) { - m = i + 1 - } - } - v.release() - - for level := 0; level < m; level++ { - if c := db.s.getCompactionRange(level, umin, umax); c != nil { - db.tableCompaction(c, true) - } - } - } -} - -func (db *DB) tableAutoCompaction() { - if c := db.s.pickCompaction(); c != nil { - db.tableCompaction(c, false) - } -} - -func (db *DB) tableNeedCompaction() bool { - v := db.s.version() - defer v.release() - return v.needCompaction() -} - -func (db *DB) pauseCompaction(ch chan<- struct{}) { - select { - case ch <- struct{}{}: - case _, _ = <-db.closeC: - db.compactionExitTransact() - } -} - -type cCmd interface { - ack(err error) -} - -type cIdle struct { - ackC chan<- error -} - -func (r cIdle) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -type cRange struct { - level int - min, max []byte - ackC chan<- error -} - -func (r cRange) ack(err error) { - if r.ackC != nil { - defer func() { - recover() - }() - r.ackC <- err - } -} - -// This will trigger auto compation and/or wait for all compaction to be done. 
-func (db *DB) compSendIdle(compC chan<- cCmd) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cIdle{ch}: - case err = <-db.compErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -// This will trigger auto compaction but will not wait for it. -func (db *DB) compSendTrigger(compC chan<- cCmd) { - select { - case compC <- cIdle{}: - default: - } -} - -// Send range compaction request. -func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) { - ch := make(chan error) - defer close(ch) - // Send cmd. - select { - case compC <- cRange{level, min, max, ch}: - case err := <-db.compErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - // Wait cmd. - select { - case err = <-ch: - case err = <-db.compErrC: - case _, _ = <-db.closeC: - return ErrClosed - } - return err -} - -func (db *DB) mCompaction() { - var x cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - select { - case x = <-db.mcompCmdC: - switch x.(type) { - case cIdle: - db.memCompaction() - x.ack(nil) - x = nil - default: - panic("leveldb: unknown command") - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) tCompaction() { - var x cCmd - var ackQ []cCmd - - defer func() { - if x := recover(); x != nil { - if x != errCompactionTransactExiting { - panic(x) - } - } - for i := range ackQ { - ackQ[i].ack(ErrClosed) - ackQ[i] = nil - } - if x != nil { - x.ack(ErrClosed) - } - db.closeW.Done() - }() - - for { - if db.tableNeedCompaction() { - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - default: - } - } else { - for i := range ackQ { - ackQ[i].ack(nil) - ackQ[i] = nil - } - ackQ = ackQ[:0] - select { - case x = <-db.tcompCmdC: - case ch := <-db.tcompPauseC: - db.pauseCompaction(ch) - continue - case _, _ = <-db.closeC: - return - } - } - if x != nil { - switch cmd := x.(type) { - case cIdle: - ackQ = append(ackQ, x) - case cRange: - db.tableRangeCompaction(cmd.level, cmd.min, cmd.max) - x.ack(nil) - default: - panic("leveldb: unknown command") - } - x = nil - } - db.tableAutoCompaction() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go deleted file mode 100644 index 4607e5daf..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
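Editorial note: compSendIdle and the tCompaction loop above coordinate through a small command protocol: the caller sends a command carrying a per-call ack channel, then waits on that channel, the shared compErrC, or closeC. A stripped-down sketch of the same request/acknowledge pattern, with hypothetical names rather than the deleted cIdle/cRange types:

package main

import (
	"errors"
	"fmt"
)

var errClosed = errors.New("worker closed")

// request carries a per-call ack channel, in the spirit of cIdle/cRange above.
type request struct {
	ackC chan error
}

// worker drains cmdC until closeC is closed, acknowledging each request.
func worker(cmdC <-chan request, closeC <-chan struct{}) {
	for {
		select {
		case req := <-cmdC:
			// ... do the work (e.g. run a compaction) ...
			req.ackC <- nil
		case <-closeC:
			return
		}
	}
}

// send submits a request and waits for its acknowledgement or for shutdown.
func send(cmdC chan<- request, closeC <-chan struct{}) error {
	ack := make(chan error, 1)
	select {
	case cmdC <- request{ackC: ack}:
	case <-closeC:
		return errClosed
	}
	select {
	case err := <-ack:
		return err
	case <-closeC:
		return errClosed
	}
}

func main() {
	cmdC := make(chan request)
	closeC := make(chan struct{})
	go worker(cmdC, closeC)
	fmt.Println(send(cmdC, closeC)) // <nil>
	close(closeC)
}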
- -package leveldb - -import ( - "errors" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key") -) - -type memdbReleaser struct { - once sync.Once - m *memDB -} - -func (mr *memdbReleaser) Release() { - mr.once.Do(func() { - mr.m.decref() - }) -} - -func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - em, fm := db.getMems() - v := db.s.version() - - ti := v.getIterators(slice, ro) - n := len(ti) + 2 - i := make([]iterator.Iterator, 0, n) - emi := em.mdb.NewIterator(slice) - emi.SetReleaser(&memdbReleaser{m: em}) - i = append(i, emi) - if fm != nil { - fmi := fm.mdb.NewIterator(slice) - fmi.SetReleaser(&memdbReleaser{m: fm}) - i = append(i, fmi) - } - i = append(i, ti...) - strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader) - mi := iterator.NewMergedIterator(i, db.s.icmp, strict) - mi.SetReleaser(&versionReleaser{v: v}) - return mi -} - -func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter { - var islice *util.Range - if slice != nil { - islice = &util.Range{} - if slice.Start != nil { - islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek) - } - if slice.Limit != nil { - islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek) - } - } - rawIter := db.newRawIterator(islice, ro) - iter := &dbIter{ - db: db, - icmp: db.s.icmp, - iter: rawIter, - seq: seq, - strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader), - key: make([]byte, 0), - value: make([]byte, 0), - } - atomic.AddInt32(&db.aliveIters, 1) - runtime.SetFinalizer(iter, (*dbIter).Release) - return iter -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -// dbIter represent an interator states over a database session. -type dbIter struct { - db *DB - icmp *iComparer - iter iterator.Iterator - seq uint64 - strict bool - - dir dir - key []byte - value []byte - err error - releaser util.Releaser -} - -func (i *dbIter) setErr(err error) { - i.err = err - i.key = nil - i.value = nil -} - -func (i *dbIter) iterErr() { - if err := i.iter.Error(); err != nil { - i.setErr(err) - } -} - -func (i *dbIter) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *dbIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.First() { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.iter.Last() { - return i.prev() - } - i.dir = dirSOI - i.iterErr() - return false -} - -func (i *dbIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ikey := newIkey(key, i.seq, ktSeek) - if i.iter.Seek(ikey) { - i.dir = dirSOI - return i.next() - } - i.dir = dirEOI - i.iterErr() - return false -} - -func (i *dbIter) next() bool { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - if seq <= i.seq { - switch kt { - case ktDel: - // Skip deleted key. - i.key = append(i.key[:0], ukey...) 
- i.dir = dirForward - case ktVal: - if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - i.dir = dirForward - return true - } - } - } - } else if i.strict { - i.setErr(kerr) - break - } - if !i.iter.Next() { - i.dir = dirEOI - i.iterErr() - break - } - } - return false -} - -func (i *dbIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) { - i.dir = dirEOI - i.iterErr() - return false - } - return i.next() -} - -func (i *dbIter) prev() bool { - i.dir = dirBackward - del := true - if i.iter.Valid() { - for { - if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil { - if seq <= i.seq { - if !del && i.icmp.uCompare(ukey, i.key) < 0 { - return true - } - del = (kt == ktDel) - if !del { - i.key = append(i.key[:0], ukey...) - i.value = append(i.value[:0], i.iter.Value()...) - } - } - } else if i.strict { - i.setErr(kerr) - return false - } - if !i.iter.Prev() { - break - } - } - } - if del { - i.dir = dirSOI - i.iterErr() - return false - } - return true -} - -func (i *dbIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - for i.iter.Prev() { - if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil { - if i.icmp.uCompare(ukey, i.key) < 0 { - goto cont - } - } else if i.strict { - i.setErr(kerr) - return false - } - } - i.dir = dirSOI - i.iterErr() - return false - } - -cont: - return i.prev() -} - -func (i *dbIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *dbIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *dbIter) Release() { - if i.dir != dirReleased { - // Clear the finalizer. - runtime.SetFinalizer(i, nil) - - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - - i.dir = dirReleased - i.key = nil - i.value = nil - i.iter.Release() - i.iter = nil - atomic.AddInt32(&i.db.aliveIters, -1) - i.db = nil - } -} - -func (i *dbIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *dbIter) Error() error { - return i.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go deleted file mode 100644 index 0372848ff..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "container/list" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type snapshotElement struct { - seq uint64 - ref int - e *list.Element -} - -// Acquires a snapshot, based on latest sequence. 
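Editorial note: newIterator above registers Release as a finalizer so an iterator that is never released still gives back its resources, and Release clears the finalizer once it has run explicitly. The same idiom in isolation, with a hypothetical type:

package main

import (
	"fmt"
	"runtime"
)

// resource must be released exactly once; the finalizer is only a backstop
// for callers that forget, mirroring dbIter.Release above. Hypothetical type.
type resource struct {
	released bool
}

func newResource() *resource {
	r := &resource{}
	// Backstop: if the caller drops r without calling Release, the GC will.
	runtime.SetFinalizer(r, (*resource).Release)
	return r
}

func (r *resource) Release() {
	if r.released {
		return
	}
	r.released = true
	// Explicit release wins; clear the finalizer so it does not run later.
	runtime.SetFinalizer(r, nil)
	fmt.Println("released")
}

func main() {
	r := newResource()
	r.Release() // prints "released" once; the finalizer is now cleared
}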
-func (db *DB) acquireSnapshot() *snapshotElement { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - seq := db.getSeq() - - if e := db.snapsList.Back(); e != nil { - se := e.Value.(*snapshotElement) - if se.seq == seq { - se.ref++ - return se - } else if seq < se.seq { - panic("leveldb: sequence number is not increasing") - } - } - se := &snapshotElement{seq: seq, ref: 1} - se.e = db.snapsList.PushBack(se) - return se -} - -// Releases given snapshot element. -func (db *DB) releaseSnapshot(se *snapshotElement) { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - se.ref-- - if se.ref == 0 { - db.snapsList.Remove(se.e) - se.e = nil - } else if se.ref < 0 { - panic("leveldb: Snapshot: negative element reference") - } -} - -// Gets minimum sequence that not being snapshoted. -func (db *DB) minSeq() uint64 { - db.snapsMu.Lock() - defer db.snapsMu.Unlock() - - if e := db.snapsList.Front(); e != nil { - return e.Value.(*snapshotElement).seq - } - - return db.getSeq() -} - -// Snapshot is a DB snapshot. -type Snapshot struct { - db *DB - elem *snapshotElement - mu sync.RWMutex - released bool -} - -// Creates new snapshot object. -func (db *DB) newSnapshot() *Snapshot { - snap := &Snapshot{ - db: db, - elem: db.acquireSnapshot(), - } - atomic.AddInt32(&db.aliveSnaps, 1) - runtime.SetFinalizer(snap, (*Snapshot).Release) - return snap -} - -func (snap *Snapshot) String() string { - return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) -} - -// Get gets the value for the given key. It returns ErrNotFound if -// the DB does not contains the key. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.get(key, snap.elem.seq, ro) -} - -// Has returns true if the DB does contains the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { - err = snap.db.ok() - if err != nil { - return - } - snap.mu.RLock() - defer snap.mu.RUnlock() - if snap.released { - err = ErrSnapshotReleased - return - } - return snap.db.has(key, snap.elem.seq, ro) -} - -// NewIterator returns an iterator for the snapshot of the uderlying DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. The resultant key/value pairs are guaranteed to be -// consistent. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// Releasing the snapshot doesn't mean releasing the iterator too, the -// iterator would be still valid until released. -// -// Also read Iterator documentation of the leveldb/iterator package. 
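Editorial note: the doc comment above spells out the snapshot contract: reads and iterators taken from a snapshot see a frozen view, the iterator range may be sliced, and the snapshot and each iterator are released independently. A minimal usage sketch against the public goleveldb API as imported above; the path and keys are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Put([]byte("foo"), []byte("v1"), nil); err != nil {
		log.Fatal(err)
	}

	snap, err := db.GetSnapshot()
	if err != nil {
		log.Fatal(err)
	}
	// Releasing the snapshot does not invalidate iterators taken from it.
	defer snap.Release()

	// Writes made after GetSnapshot are invisible through snap.
	if err := db.Put([]byte("foo"), []byte("v2"), nil); err != nil {
		log.Fatal(err)
	}

	v, err := snap.Get([]byte("foo"), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("snapshot sees %s\n", v) // v1

	// Iterate only keys with prefix "f"; a nil slice would mean the whole DB.
	iter := snap.NewIterator(util.BytesPrefix([]byte("f")), nil)
	defer iter.Release()
	for iter.Next() {
		fmt.Printf("%s -> %s\n", iter.Key(), iter.Value())
	}
	if err := iter.Error(); err != nil {
		log.Fatal(err)
	}
}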
-func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - if err := snap.db.ok(); err != nil { - return iterator.NewEmptyIterator(err) - } - snap.mu.Lock() - defer snap.mu.Unlock() - if snap.released { - return iterator.NewEmptyIterator(ErrSnapshotReleased) - } - // Since iterator already hold version ref, it doesn't need to - // hold snapshot ref. - return snap.db.newIterator(snap.elem.seq, slice, ro) -} - -// Release releases the snapshot. This will not release any returned -// iterators, the iterators would still be valid until released or the -// underlying DB is closed. -// -// Other methods should not be called after the snapshot has been released. -func (snap *Snapshot) Release() { - snap.mu.Lock() - defer snap.mu.Unlock() - - if !snap.released { - // Clear the finalizer. - runtime.SetFinalizer(snap, nil) - - snap.released = true - snap.db.releaseSnapshot(snap.elem) - atomic.AddInt32(&snap.db.aliveSnaps, -1) - snap.db = nil - snap.elem = nil - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go deleted file mode 100644 index 24ecab504..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/memdb" -) - -type memDB struct { - db *DB - mdb *memdb.DB - ref int32 -} - -func (m *memDB) incref() { - atomic.AddInt32(&m.ref, 1) -} - -func (m *memDB) decref() { - if ref := atomic.AddInt32(&m.ref, -1); ref == 0 { - // Only put back memdb with std capacity. - if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() { - m.mdb.Reset() - m.db.mpoolPut(m.mdb) - } - m.db = nil - m.mdb = nil - } else if ref < 0 { - panic("negative memdb ref") - } -} - -// Get latest sequence number. -func (db *DB) getSeq() uint64 { - return atomic.LoadUint64(&db.seq) -} - -// Atomically adds delta to seq. -func (db *DB) addSeq(delta uint64) { - atomic.AddUint64(&db.seq, delta) -} - -func (db *DB) mpoolPut(mem *memdb.DB) { - defer func() { - recover() - }() - select { - case db.memPool <- mem: - default: - } -} - -func (db *DB) mpoolGet() *memdb.DB { - select { - case mem := <-db.memPool: - return mem - default: - return nil - } -} - -func (db *DB) mpoolDrain() { - ticker := time.NewTicker(30 * time.Second) - for { - select { - case <-ticker.C: - select { - case <-db.memPool: - default: - } - case _, _ = <-db.closeC: - close(db.memPool) - return - } - } -} - -// Create new memdb and froze the old one; need external synchronization. -// newMem only called synchronously by the writer. 
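Editorial note: mpoolGet and mpoolPut above implement a small non-blocking pool: a buffered channel drained and filled through select with a default case, so neither getting nor putting a spare memdb ever blocks and only a bounded number of spares is kept. The same idiom in isolation, with hypothetical names:

package main

import "fmt"

// pool keeps at most cap(ch) spare buffers; Get and Put never block.
type pool struct {
	ch chan []byte
}

func newPool(size int) *pool {
	return &pool{ch: make(chan []byte, size)}
}

func (p *pool) Get() []byte {
	select {
	case b := <-p.ch:
		return b
	default:
		return nil // pool empty; the caller allocates a fresh buffer
	}
}

func (p *pool) Put(b []byte) {
	select {
	case p.ch <- b:
	default: // pool full; drop the buffer and let the GC reclaim it
	}
}

func main() {
	p := newPool(1)
	fmt.Println(p.Get() == nil) // true: pool starts empty
	p.Put(make([]byte, 4))
	fmt.Println(len(p.Get())) // 4
}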
-func (db *DB) newMem(n int) (mem *memDB, err error) { - num := db.s.allocFileNum() - file := db.s.getJournalFile(num) - w, err := file.Create() - if err != nil { - db.s.reuseFileNum(num) - return - } - - db.memMu.Lock() - defer db.memMu.Unlock() - - if db.frozenMem != nil { - panic("still has frozen mem") - } - - if db.journal == nil { - db.journal = journal.NewWriter(w) - } else { - db.journal.Reset(w) - db.journalWriter.Close() - db.frozenJournalFile = db.journalFile - } - db.journalWriter = w - db.journalFile = file - db.frozenMem = db.mem - mdb := db.mpoolGet() - if mdb == nil || mdb.Capacity() < n { - mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n)) - } - mem = &memDB{ - db: db, - mdb: mdb, - ref: 2, - } - db.mem = mem - // The seq only incremented by the writer. And whoever called newMem - // should hold write lock, so no need additional synchronization here. - db.frozenSeq = db.seq - return -} - -// Get all memdbs. -func (db *DB) getMems() (e, f *memDB) { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.mem, db.frozenMem -} - -// Get frozen memdb. -func (db *DB) getEffectiveMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.mem == nil { - panic("nil effective mem") - } - db.mem.incref() - return db.mem -} - -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - -// Get frozen memdb. -func (db *DB) getFrozenMem() *memDB { - db.memMu.RLock() - defer db.memMu.RUnlock() - if db.frozenMem != nil { - db.frozenMem.incref() - } - return db.frozenMem -} - -// Drop frozen memdb; assume that frozen memdb isn't nil. -func (db *DB) dropFrozenMem() { - db.memMu.Lock() - if err := db.frozenJournalFile.Remove(); err != nil { - db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err) - } else { - db.logf("journal@remove removed @%d", db.frozenJournalFile.Num()) - } - db.frozenJournalFile = nil - db.frozenMem.decref() - db.frozenMem = nil - db.memMu.Unlock() -} - -// Set closed flag; return true if not already closed. -func (db *DB) setClosed() bool { - return atomic.CompareAndSwapUint32(&db.closed, 0, 1) -} - -// Check whether DB was closed. -func (db *DB) isClosed() bool { - return atomic.LoadUint32(&db.closed) != 0 -} - -// Check read ok status. -func (db *DB) ok() error { - if db.isClosed() { - return ErrClosed - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go deleted file mode 100644 index 0acb567a1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go +++ /dev/null @@ -1,2579 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
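Editorial note: the test file removed below drives the DB through a harness (dbHarness) whose helpers fail the enclosing test directly instead of returning errors, which keeps the long scenarios readable. A compressed, hypothetical sketch of that helper style, assuming only the standard testing package and the public OpenFile/Put/Get API, placed in a _test.go file:

package example

import (
	"bytes"
	"os"
	"testing"

	"github.com/syndtr/goleveldb/leveldb"
)

// harness bundles *testing.T with a DB so helpers can fail the test directly,
// in the spirit of the much larger dbHarness below. Hypothetical sketch.
type harness struct {
	t   *testing.T
	db  *leveldb.DB
	dir string
}

func newHarness(t *testing.T) *harness {
	dir, err := os.MkdirTemp("", "leveldb-harness-example")
	if err != nil {
		t.Fatal("tempdir:", err)
	}
	db, err := leveldb.OpenFile(dir, nil)
	if err != nil {
		t.Fatal("open:", err)
	}
	return &harness{t: t, db: db, dir: dir}
}

func (h *harness) close() {
	h.db.Close()
	os.RemoveAll(h.dir)
}

func (h *harness) put(k, v string) {
	if err := h.db.Put([]byte(k), []byte(v), nil); err != nil {
		h.t.Error("put:", err)
	}
}

func (h *harness) getVal(k, want string) {
	got, err := h.db.Get([]byte(k), nil)
	if err != nil {
		h.t.Error("get:", err)
		return
	}
	if !bytes.Equal(got, []byte(want)) {
		h.t.Errorf("get %q: want %q, got %q", k, want, got)
	}
}

func TestPutGet(t *testing.T) {
	h := newHarness(t)
	defer h.close()
	h.put("foo", "v1")
	h.getVal("foo", "v1")
}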
- -package leveldb - -import ( - "bytes" - "container/list" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func tkey(i int) []byte { - return []byte(fmt.Sprintf("%016d", i)) -} - -func tval(seed, n int) []byte { - r := rand.New(rand.NewSource(int64(seed))) - return randomString(r, n) -} - -type dbHarness struct { - t *testing.T - - stor *testStorage - db *DB - o *opt.Options - ro *opt.ReadOptions - wo *opt.WriteOptions -} - -func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { - h := new(dbHarness) - h.init(t, o) - return h -} - -func newDbHarness(t *testing.T) *dbHarness { - return newDbHarnessWopt(t, &opt.Options{}) -} - -func (h *dbHarness) init(t *testing.T, o *opt.Options) { - h.t = t - h.stor = newTestStorage(t) - h.o = o - h.ro = nil - h.wo = nil - - if err := h.openDB0(); err != nil { - // So that it will come after fatal message. - defer h.stor.Close() - h.t.Fatal("Open (init): got error: ", err) - } -} - -func (h *dbHarness) openDB0() (err error) { - h.t.Log("opening DB") - h.db, err = Open(h.stor, h.o) - return -} - -func (h *dbHarness) openDB() { - if err := h.openDB0(); err != nil { - h.t.Fatal("Open: got error: ", err) - } -} - -func (h *dbHarness) closeDB0() error { - h.t.Log("closing DB") - return h.db.Close() -} - -func (h *dbHarness) closeDB() { - if err := h.closeDB0(); err != nil { - h.t.Error("Close: got error: ", err) - } - h.stor.CloseCheck() - runtime.GC() -} - -func (h *dbHarness) reopenDB() { - h.closeDB() - h.openDB() -} - -func (h *dbHarness) close() { - h.closeDB0() - h.db = nil - h.stor.Close() - h.stor = nil - runtime.GC() -} - -func (h *dbHarness) openAssert(want bool) { - db, err := Open(h.stor, h.o) - if err != nil { - if want { - h.t.Error("Open: assert: got error: ", err) - } else { - h.t.Log("Open: assert: got error (expected): ", err) - } - } else { - if !want { - h.t.Error("Open: assert: expect error") - } - db.Close() - } -} - -func (h *dbHarness) write(batch *Batch) { - if err := h.db.Write(batch, h.wo); err != nil { - h.t.Error("Write: got error: ", err) - } -} - -func (h *dbHarness) put(key, value string) { - if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { - h.t.Error("Put: got error: ", err) - } -} - -func (h *dbHarness) putMulti(n int, low, hi string) { - for i := 0; i < n; i++ { - h.put(low, "begin") - h.put(hi, "end") - h.compactMem() - } -} - -func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) { - t := h.t - db := h.db - - var ( - maxOverlaps uint64 - maxLevel int - ) - v := db.s.version() - for i, tt := range v.tables[1 : len(v.tables)-1] { - level := i + 1 - next := v.tables[level+1] - for _, t := range tt { - r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) - sum := r.size() - if sum > maxOverlaps { - maxOverlaps = sum - maxLevel = level - } - } - } - v.release() - - if maxOverlaps > want { - t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) - } else { - t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) - } -} - -func 
(h *dbHarness) delete(key string) { - t := h.t - db := h.db - - err := db.Delete([]byte(key), h.wo) - if err != nil { - t.Error("Delete: got error: ", err) - } -} - -func (h *dbHarness) assertNumKeys(want int) { - iter := h.db.NewIterator(nil, h.ro) - defer iter.Release() - got := 0 - for iter.Next() { - got++ - } - if err := iter.Error(); err != nil { - h.t.Error("assertNumKeys: ", err) - } - if want != got { - h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) - } -} - -func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { - t := h.t - v, err := db.Get([]byte(key), h.ro) - switch err { - case ErrNotFound: - if expectFound { - t.Errorf("Get: key '%s' not found, want found", key) - } - case nil: - found = true - if !expectFound { - t.Errorf("Get: key '%s' found, want not found", key) - } - default: - t.Error("Get: got error: ", err) - } - return -} - -func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { - return h.getr(h.db, key, expectFound) -} - -func (h *dbHarness) getValr(db Reader, key, value string) { - t := h.t - found, r := h.getr(db, key, true) - if !found { - return - } - rval := string(r) - if rval != value { - t.Errorf("Get: invalid value, got '%s', want '%s'", rval, value) - } -} - -func (h *dbHarness) getVal(key, value string) { - h.getValr(h.db, key, value) -} - -func (h *dbHarness) allEntriesFor(key, want string) { - t := h.t - db := h.db - s := db.s - - ikey := newIkey([]byte(key), kMaxSeq, ktVal) - iter := db.newRawIterator(nil, nil) - if !iter.Seek(ikey) && iter.Error() != nil { - t.Error("AllEntries: error during seek, err: ", iter.Error()) - return - } - res := "[ " - first := true - for iter.Valid() { - if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil { - if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { - break - } - if !first { - res += ", " - } - first = false - switch kt { - case ktVal: - res += string(iter.Value()) - case ktDel: - res += "DEL" - } - } else { - if !first { - res += ", " - } - first = false - res += "CORRUPTED" - } - iter.Next() - } - if !first { - res += " " - } - res += "]" - if res != want { - t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) - } -} - -// Return a string that contains all key,value pairs in order, -// formatted like "(k1->v1)(k2->v2)". 
-func (h *dbHarness) getKeyVal(want string) { - t := h.t - db := h.db - - s, err := db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - res := "" - iter := s.NewIterator(nil, nil) - for iter.Next() { - res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) - } - iter.Release() - - if res != want { - t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) - } - s.Release() -} - -func (h *dbHarness) waitCompaction() { - t := h.t - db := h.db - if err := db.compSendIdle(db.tcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) waitMemCompaction() { - t := h.t - db := h.db - - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } -} - -func (h *dbHarness) compactMem() { - t := h.t - db := h.db - - t.Log("starting memdb compaction") - - db.writeLockC <- struct{}{} - defer func() { - <-db.writeLockC - }() - - if _, err := db.rotateMem(0); err != nil { - t.Error("compaction error: ", err) - } - if err := db.compSendIdle(db.mcompCmdC); err != nil { - t.Error("compaction error: ", err) - } - - if h.totalTables() == 0 { - t.Error("zero tables after mem compaction") - } - - t.Log("memdb compaction done") -} - -func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { - t := h.t - db := h.db - - var _min, _max []byte - if min != "" { - _min = []byte(min) - } - if max != "" { - _max = []byte(max) - } - - t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) - - if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil { - if wanterr { - t.Log("CompactRangeAt: got error (expected): ", err) - } else { - t.Error("CompactRangeAt: got error: ", err) - } - } else if wanterr { - t.Error("CompactRangeAt: expect error") - } - - t.Log("table range compaction done") -} - -func (h *dbHarness) compactRangeAt(level int, min, max string) { - h.compactRangeAtErr(level, min, max, false) -} - -func (h *dbHarness) compactRange(min, max string) { - t := h.t - db := h.db - - t.Logf("starting DB range compaction: min=%q, max=%q", min, max) - - var r util.Range - if min != "" { - r.Start = []byte(min) - } - if max != "" { - r.Limit = []byte(max) - } - if err := db.CompactRange(r); err != nil { - t.Error("CompactRange: got error: ", err) - } - - t.Log("DB range compaction done") -} - -func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) { - t := h.t - db := h.db - - s, err := db.SizeOf([]util.Range{ - {[]byte(start), []byte(limit)}, - }) - if err != nil { - t.Error("SizeOf: got error: ", err) - } - if s.Sum() < low || s.Sum() > hi { - t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d", - shorten(start), shorten(limit), low, hi, s.Sum()) - } -} - -func (h *dbHarness) getSnapshot() (s *Snapshot) { - s, err := h.db.GetSnapshot() - if err != nil { - h.t.Fatal("GetSnapshot: got error: ", err) - } - return -} -func (h *dbHarness) tablesPerLevel(want string) { - res := "" - nz := 0 - v := h.db.s.version() - for level, tt := range v.tables { - if level > 0 { - res += "," - } - res += fmt.Sprint(len(tt)) - if len(tt) > 0 { - nz = len(res) - } - } - v.release() - res = res[:nz] - if res != want { - h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) - } -} - -func (h *dbHarness) totalTables() (n int) { - v := h.db.s.version() - for _, tt := range v.tables { - n += len(tt) - } - v.release() - return -} - -type keyValue interface { - Key() []byte - Value() []byte -} - -func testKeyVal(t 
*testing.T, kv keyValue, want string) { - res := string(kv.Key()) + "->" + string(kv.Value()) - if res != want { - t.Errorf("invalid key/value, want=%q, got=%q", want, res) - } -} - -func numKey(num int) string { - return fmt.Sprintf("key%06d", num) -} - -var _bloom_filter = filter.NewBloomFilter(10) - -func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { - for i := 0; i < 4; i++ { - func() { - switch i { - case 0: - case 1: - if o == nil { - o = &opt.Options{Filter: _bloom_filter} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Filter = _bloom_filter - } - case 2: - if o == nil { - o = &opt.Options{Compression: opt.NoCompression} - } else { - old := o - o = &opt.Options{} - *o = *old - o.Compression = opt.NoCompression - } - } - h := newDbHarnessWopt(t, o) - defer h.close() - switch i { - case 3: - h.reopenDB() - } - f(h) - }() - } -} - -func trun(t *testing.T, f func(h *dbHarness)) { - truno(t, nil, f) -} - -func testAligned(t *testing.T, name string, offset uintptr) { - if offset%8 != 0 { - t.Errorf("field %s offset is not 64-bit aligned", name) - } -} - -func Test_FieldsAligned(t *testing.T) { - p1 := new(DB) - testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) - p2 := new(session) - testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) - testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) - testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) - testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) -} - -func TestDB_Locking(t *testing.T) { - h := newDbHarness(t) - defer h.stor.Close() - h.openAssert(false) - h.closeDB() - h.openAssert(true) -} - -func TestDB_Empty(t *testing.T) { - trun(t, func(h *dbHarness) { - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_ReadWrite(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("bar", "v2") - h.put("foo", "v3") - h.getVal("foo", "v3") - h.getVal("bar", "v2") - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "v2") - }) -} - -func TestDB_PutDeleteGet(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.getVal("foo", "v2") - h.delete("foo") - h.get("foo", false) - - h.reopenDB() - h.get("foo", false) - }) -} - -func TestDB_EmptyBatch(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.get("foo", false) - err := h.db.Write(new(Batch), h.wo) - if err != nil { - t.Error("writing empty batch yield error: ", err) - } - h.get("foo", false) -} - -func TestDB_GetFromFrozen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100}) - defer h.close() - - h.put("foo", "v1") - h.getVal("foo", "v1") - - h.stor.DelaySync(storage.TypeTable) // Block sync calls - h.put("k1", strings.Repeat("x", 100000)) // Fill memtable - h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction - for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { - time.Sleep(10 * time.Microsecond) - } - if h.db.getFrozenMem() == nil { - h.stor.ReleaseSync(storage.TypeTable) - t.Fatal("No frozen mem") - } - h.getVal("foo", "v1") - h.stor.ReleaseSync(storage.TypeTable) // Release sync calls - - h.reopenDB() - h.getVal("foo", "v1") - h.get("k1", true) - h.get("k2", true) -} - -func TestDB_GetFromTable(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.getVal("foo", "v1") - }) -} - -func TestDB_GetSnapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - bar := 
strings.Repeat("b", 200) - h.put("foo", "v1") - h.put(bar, "v1") - - snap, err := h.db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.put("foo", "v2") - h.put(bar, "v2") - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - h.compactMem() - - h.getVal("foo", "v2") - h.getVal(bar, "v2") - h.getValr(snap, "foo", "v1") - h.getValr(snap, bar, "v1") - - snap.Release() - - h.reopenDB() - h.getVal("foo", "v2") - h.getVal(bar, "v2") - }) -} - -func TestDB_GetLevel0Ordering(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 4; i++ { - h.put("bar", fmt.Sprintf("b%d", i)) - h.put("foo", fmt.Sprintf("v%d", i)) - h.compactMem() - } - h.getVal("foo", "v3") - h.getVal("bar", "b3") - - v := h.db.s.version() - t0len := v.tLen(0) - v.release() - if t0len < 2 { - t.Errorf("level-0 tables is less than 2, got %d", t0len) - } - - h.reopenDB() - h.getVal("foo", "v3") - h.getVal("bar", "b3") - }) -} - -func TestDB_GetOrderedByLevels(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.compactMem() - h.compactRange("a", "z") - h.getVal("foo", "v1") - h.put("foo", "v2") - h.compactMem() - h.getVal("foo", "v2") - }) -} - -func TestDB_GetPicksCorrectFile(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange to have multiple files in a non-level-0 level. - h.put("a", "va") - h.compactMem() - h.compactRange("a", "b") - h.put("x", "vx") - h.compactMem() - h.compactRange("x", "y") - h.put("f", "vf") - h.compactMem() - h.compactRange("f", "g") - - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - - h.compactRange("", "") - h.getVal("a", "va") - h.getVal("f", "vf") - h.getVal("x", "vx") - }) -} - -func TestDB_GetEncountersEmptyLevel(t *testing.T) { - trun(t, func(h *dbHarness) { - // Arrange for the following to happen: - // * sstable A in level 0 - // * nothing in level 1 - // * sstable B in level 2 - // Then do enough Get() calls to arrange for an automatic compaction - // of sstable A. A bug would cause the compaction to be marked as - // occuring at level 1 (instead of the correct level 0). - - // Step 1: First place sstables in levels 0 and 2 - for i := 0; ; i++ { - if i >= 100 { - t.Fatal("could not fill levels-0 and level-2") - } - v := h.db.s.version() - if v.tLen(0) > 0 && v.tLen(2) > 0 { - v.release() - break - } - v.release() - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - - h.getVal("a", "begin") - h.getVal("z", "end") - } - - // Step 2: clear level 1 if necessary. 
- h.compactRangeAt(1, "", "") - h.tablesPerLevel("1,0,1") - - h.getVal("a", "begin") - h.getVal("z", "end") - - // Step 3: read a bunch of times - for i := 0; i < 200; i++ { - h.get("missing", false) - } - - // Step 4: Wait for compaction to finish - h.waitCompaction() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - h.getVal("a", "begin") - h.getVal("z", "end") - }) -} - -func TestDB_IterMultiWithDelete(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("a", "va") - h.put("b", "vb") - h.put("c", "vc") - h.delete("b") - h.get("b", false) - - iter := h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - - h.compactMem() - - iter = h.db.NewIterator(nil, nil) - iter.Seek([]byte("c")) - testKeyVal(t, iter, "c->vc") - iter.Prev() - testKeyVal(t, iter, "a->va") - iter.Release() - }) -} - -func TestDB_IteratorPinsRef(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "hello") - - // Get iterator that will yield the current contents of the DB. - iter := h.db.NewIterator(nil, nil) - - // Write to force compactions - h.put("foo", "newvalue1") - for i := 0; i < 100; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - h.put("foo", "newvalue2") - - iter.First() - testKeyVal(t, iter, "foo->hello") - if iter.Next() { - t.Errorf("expect eof") - } - iter.Release() -} - -func TestDB_Recover(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("baz", "v5") - - h.reopenDB() - h.getVal("foo", "v1") - - h.getVal("foo", "v1") - h.getVal("baz", "v5") - h.put("bar", "v2") - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - h.put("foo", "v4") - h.getVal("foo", "v4") - h.getVal("bar", "v2") - h.getVal("baz", "v5") - }) -} - -func TestDB_RecoverWithEmptyJournal(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - h.put("foo", "v2") - - h.reopenDB() - h.reopenDB() - h.put("foo", "v3") - - h.reopenDB() - h.getVal("foo", "v3") - }) -} - -func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { - truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) { - - h.stor.DelaySync(storage.TypeTable) - h.put("big1", strings.Repeat("x", 10000000)) - h.put("big2", strings.Repeat("y", 1000)) - h.put("bar", "v2") - h.stor.ReleaseSync(storage.TypeTable) - - h.reopenDB() - h.getVal("bar", "v2") - h.getVal("big1", strings.Repeat("x", 10000000)) - h.getVal("big2", strings.Repeat("y", 1000)) - }) -} - -func TestDB_MinorCompactionsHappen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000}) - defer h.close() - - n := 500 - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - for i := 0; i < n; i++ { - h.put(key(i), key(i)+strings.Repeat("v", 1000)) - } - - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } - - h.reopenDB() - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) - } -} - -func TestDB_RecoverWithLargeJournal(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("big1", strings.Repeat("1", 200000)) - h.put("big2", strings.Repeat("2", 200000)) - h.put("small3", strings.Repeat("3", 10)) - h.put("small4", strings.Repeat("4", 10)) - h.tablesPerLevel("") - - // Make sure that if we re-open with a small write buffer size that - // we flush table files in the middle of a large journal file. 
- h.o.WriteBuffer = 100000 - h.reopenDB() - h.getVal("big1", strings.Repeat("1", 200000)) - h.getVal("big2", strings.Repeat("2", 200000)) - h.getVal("small3", strings.Repeat("3", 10)) - h.getVal("small4", strings.Repeat("4", 10)) - v := h.db.s.version() - if v.tLen(0) <= 1 { - t.Errorf("tables-0 less than one") - } - v.release() -} - -func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 10000000, - Compression: opt.NoCompression, - }) - defer h.close() - - v := h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - v.release() - - n := 80 - - // Write 8MB (80 values, each 100K) - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } - - // Reopening moves updates to level-0 - h.reopenDB() - h.compactRangeAt(0, "", "") - - v = h.db.s.version() - if v.tLen(0) > 0 { - t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) - } - if v.tLen(1) <= 1 { - t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) - } - v.release() - - for i := 0; i < n; i++ { - h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) - } -} - -func TestDB_RepeatedWritesToSameKey(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000}) - defer h.close() - - h.reopenDB() - - maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger() - - value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) - for i := 0; i < 5*maxTables; i++ { - h.put("key", value) - n := h.totalTables() - if n > maxTables { - t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) - } - } -} - -func TestDB_SparseMerge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - h.putMulti(h.o.GetNumLevel(), "A", "Z") - - // Suppose there is: - // small amount of data with prefix A - // large amount of data with prefix B - // small amount of data with prefix C - // and that recent updates have made small changes to all three prefixes. - // Check that we do not do a compaction that merges all of B in one shot. 
- h.put("A", "va") - value := strings.Repeat("x", 1000) - for i := 0; i < 100000; i++ { - h.put(fmt.Sprintf("B%010d", i), value) - } - h.put("C", "vc") - h.compactMem() - h.compactRangeAt(0, "", "") - h.waitCompaction() - - // Make sparse update - h.put("A", "va2") - h.put("B100", "bvalue2") - h.put("C", "vc2") - h.compactMem() - - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(0, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) - h.compactRangeAt(1, "", "") - h.waitCompaction() - h.maxNextLevelOverlappingBytes(20 * 1048576) -} - -func TestDB_SizeOf(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Compression: opt.NoCompression, - WriteBuffer: 10000000, - }) - defer h.close() - - h.sizeAssert("", "xyz", 0, 0) - h.reopenDB() - h.sizeAssert("", "xyz", 0, 0) - - // Write 8MB (80 values, each 100K) - n := 80 - s1 := 100000 - s2 := 105000 - - for i := 0; i < n; i++ { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) - } - - // 0 because SizeOf() does not account for memtable space - h.sizeAssert("", numKey(50), 0, 0) - - for r := 0; r < 3; r++ { - h.reopenDB() - - for cs := 0; cs < n; cs += 10 { - for i := 0; i < n; i += 10 { - h.sizeAssert("", numKey(i), uint64(s1*i), uint64(s2*i)) - h.sizeAssert("", numKey(i)+".suffix", uint64(s1*(i+1)), uint64(s2*(i+1))) - h.sizeAssert(numKey(i), numKey(i+10), uint64(s1*10), uint64(s2*10)) - } - - h.sizeAssert("", numKey(50), uint64(s1*50), uint64(s2*50)) - h.sizeAssert("", numKey(50)+".suffix", uint64(s1*50), uint64(s2*50)) - - h.compactRangeAt(0, numKey(cs), numKey(cs+9)) - } - - v := h.db.s.version() - if v.tLen(0) != 0 { - t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) - } - if v.tLen(1) == 0 { - t.Error("level-1 tables was zero") - } - v.release() - } -} - -func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - sizes := []uint64{ - 10000, - 10000, - 100000, - 10000, - 100000, - 10000, - 300000, - 10000, - } - - for i, n := range sizes { - h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) - } - - for r := 0; r < 3; r++ { - h.reopenDB() - - var x uint64 - for i, n := range sizes { - y := x - if i > 0 { - y += 1000 - } - h.sizeAssert("", numKey(i), x, y) - x += n - } - - h.sizeAssert(numKey(3), numKey(5), 110000, 111000) - - h.compactRangeAt(0, "", "") - } -} - -func TestDB_Snapshot(t *testing.T) { - trun(t, func(h *dbHarness) { - h.put("foo", "v1") - s1 := h.getSnapshot() - h.put("foo", "v2") - s2 := h.getSnapshot() - h.put("foo", "v3") - s3 := h.getSnapshot() - h.put("foo", "v4") - - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getValr(s3, "foo", "v3") - h.getVal("foo", "v4") - - s3.Release() - h.getValr(s1, "foo", "v1") - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s1.Release() - h.getValr(s2, "foo", "v2") - h.getVal("foo", "v4") - - s2.Release() - h.getVal("foo", "v4") - }) -} - -func TestDB_SnapshotList(t *testing.T) { - db := &DB{snapsList: list.New()} - e0a := db.acquireSnapshot() - e0b := db.acquireSnapshot() - db.seq = 1 - e1 := db.acquireSnapshot() - db.seq = 2 - e2 := db.acquireSnapshot() - - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e0a) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 0 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - 
db.releaseSnapshot(e0b) - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - e2 = db.acquireSnapshot() - if db.minSeq() != 1 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e1) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } - db.releaseSnapshot(e2) - if db.minSeq() != 2 { - t.Fatalf("invalid sequence number, got=%d", db.minSeq()) - } -} - -func TestDB_HiddenValuesAreRemoved(t *testing.T) { - trun(t, func(h *dbHarness) { - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.put("foo", "v2") - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactMem() - h.allEntriesFor("foo", "[ v2, DEL, v1 ]") - h.compactRangeAt(m-2, "", "z") - // DEL eliminated, but v1 remains because we aren't compacting that level - // (DEL can be eliminated because v2 hides v1). - h.allEntriesFor("foo", "[ v2, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). - h.allEntriesFor("foo", "[ v2 ]") - }) -} - -func TestDB_DeletionMarkers2(t *testing.T) { - h := newDbHarness(t) - defer h.close() - s := h.db.s - - h.put("foo", "v1") - h.compactMem() - m := h.o.GetMaxMemCompationLevel() - v := s.version() - num := v.tLen(m) - v.release() - if num != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, num) - } - - // Place a table at level last-1 to prevent merging with preceding mutation - h.put("a", "begin") - h.put("z", "end") - h.compactMem() - v = s.version() - if v.tLen(m) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) - } - if v.tLen(m-1) != 1 { - t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) - } - v.release() - - h.delete("foo") - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactMem() // Moves to level last-2 - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-2, "", "") - // DEL kept: "last" file overlaps - h.allEntriesFor("foo", "[ DEL, v1 ]") - h.compactRangeAt(m-1, "", "") - // Merging last-1 w/ last, so we are the base level for "foo", so - // DEL is removed. (as is v1). 
- h.allEntriesFor("foo", "[ ]") -} - -func TestDB_CompactionTableOpenError(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1}) - defer h.close() - - im := 10 - jm := 10 - for r := 0; r < 2; r++ { - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - h.compactMem() - } - } - - if n := h.totalTables(); n != im*2 { - t.Errorf("total tables is %d, want %d", n, im) - } - - h.stor.SetEmuErr(storage.TypeTable, tsOpOpen) - go h.db.CompactRange(util.Range{}) - if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil { - t.Log("compaction error: ", err) - } - h.closeDB0() - h.openDB() - h.stor.SetEmuErr(0, tsOpOpen) - - for i := 0; i < im; i++ { - for j := 0; j < jm; j++ { - h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) - } - } -} - -func TestDB_OverlapInLevel0(t *testing.T) { - trun(t, func(h *dbHarness) { - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. - h.put("100", "v100") - h.put("999", "v999") - h.compactMem() - h.delete("100") - h.delete("999") - h.compactMem() - h.tablesPerLevel("0,1,1") - - // Make files spanning the following ranges in level-0: - // files[0] 200 .. 900 - // files[1] 300 .. 500 - // Note that files are sorted by min key. - h.put("300", "v300") - h.put("500", "v500") - h.compactMem() - h.put("200", "v200") - h.put("600", "v600") - h.put("900", "v900") - h.compactMem() - h.tablesPerLevel("2,1,1") - - // Compact away the placeholder files we created initially - h.compactRangeAt(1, "", "") - h.compactRangeAt(2, "", "") - h.tablesPerLevel("2") - - // Do a memtable compaction. Before bug-fix, the compaction would - // not detect the overlap with level-0 files and would incorrectly place - // the deletion in a deeper level. 
- h.delete("600") - h.compactMem() - h.tablesPerLevel("3") - h.get("600", false) - }) -} - -func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("b", "v") - h.reopenDB() - h.delete("b") - h.delete("a") - h.reopenDB() - h.delete("a") - h.reopenDB() - h.put("a", "v") - h.reopenDB() - h.reopenDB() - h.getKeyVal("(a->v)") - h.waitCompaction() - h.getKeyVal("(a->v)") -} - -func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("e") - h.put("", "") - h.reopenDB() - h.put("c", "cv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.put("", "") - h.waitCompaction() - h.reopenDB() - h.put("d", "dv") - h.reopenDB() - h.put("", "") - h.reopenDB() - h.delete("d") - h.delete("b") - h.reopenDB() - h.getKeyVal("(->)(c->cv)") - h.waitCompaction() - h.getKeyVal("(->)(c->cv)") -} - -func TestDB_SingleEntryMemCompaction(t *testing.T) { - trun(t, func(h *dbHarness) { - for i := 0; i < 10; i++ { - h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) - h.compactMem() - h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) - h.compactMem() - h.put("k", "v") - h.compactMem() - h.put("", "") - h.compactMem() - h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) - h.compactMem() - } - }) -} - -func TestDB_ManifestWriteError(t *testing.T) { - for i := 0; i < 2; i++ { - func() { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "bar") - h.getVal("foo", "bar") - - // Mem compaction (will succeed) - h.compactMem() - h.getVal("foo", "bar") - v := h.db.s.version() - if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 { - t.Errorf("invalid total tables, want=1 got=%d", n) - } - v.release() - - if i == 0 { - h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite) - } else { - h.stor.SetEmuErr(storage.TypeManifest, tsOpSync) - } - - // Merging compaction (will fail) - h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true) - - h.db.Close() - h.stor.SetEmuErr(0, tsOpWrite) - h.stor.SetEmuErr(0, tsOpSync) - - // Should not lose data - h.openDB() - h.getVal("foo", "bar") - }() - } -} - -func assertErr(t *testing.T, err error, wanterr bool) { - if err != nil { - if wanterr { - t.Log("AssertErr: got error (expected): ", err) - } else { - t.Error("AssertErr: got error: ", err) - } - } else if wanterr { - t.Error("AssertErr: expect error") - } -} - -func TestDB_ClosedIsClosed(t *testing.T) { - h := newDbHarness(t) - db := h.db - - var iter, iter2 iterator.Iterator - var snap *Snapshot - func() { - defer h.close() - - h.put("k", "v") - h.getVal("k", "v") - - iter = db.NewIterator(nil, h.ro) - iter.Seek([]byte("k")) - testKeyVal(t, iter, "k->v") - - var err error - snap, err = db.GetSnapshot() - if err != nil { - t.Fatal("GetSnapshot: got error: ", err) - } - - h.getValr(snap, "k", "v") - - iter2 = snap.NewIterator(nil, h.ro) - iter2.Seek([]byte("k")) - testKeyVal(t, iter2, "k->v") - - h.put("foo", "v2") - h.delete("foo") - - // closing DB - iter.Release() - iter2.Release() - }() - - assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) - _, err := db.Get([]byte("k"), h.ro) - assertErr(t, err, true) - - if iter.Valid() { - t.Errorf("iter.Valid should false") - } - assertErr(t, iter.Error(), false) - testKeyVal(t, iter, "->") - if iter.Seek([]byte("k")) { - t.Errorf("iter.Seek should false") - } - assertErr(t, iter.Error(), true) - - assertErr(t, iter2.Error(), false) - - _, err = snap.Get([]byte("k"), h.ro) - assertErr(t, err, 
true) - - _, err = db.GetSnapshot() - assertErr(t, err, true) - - iter3 := db.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - iter3 = snap.NewIterator(nil, h.ro) - assertErr(t, iter3.Error(), true) - - assertErr(t, db.Delete([]byte("k"), h.wo), true) - - _, err = db.GetProperty("leveldb.stats") - assertErr(t, err, true) - - _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) - assertErr(t, err, true) - - assertErr(t, db.CompactRange(util.Range{}), true) - - assertErr(t, db.Close(), true) -} - -type numberComparer struct{} - -func (numberComparer) num(x []byte) (n int) { - fmt.Sscan(string(x[1:len(x)-1]), &n) - return -} - -func (numberComparer) Name() string { - return "test.NumberComparer" -} - -func (p numberComparer) Compare(a, b []byte) int { - return p.num(a) - p.num(b) -} - -func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } -func (numberComparer) Successor(dst, b []byte) []byte { return nil } - -func TestDB_CustomComparer(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - Comparer: numberComparer{}, - WriteBuffer: 1000, - }) - defer h.close() - - h.put("[10]", "ten") - h.put("[0x14]", "twenty") - for i := 0; i < 2; i++ { - h.getVal("[10]", "ten") - h.getVal("[0xa]", "ten") - h.getVal("[20]", "twenty") - h.getVal("[0x14]", "twenty") - h.get("[15]", false) - h.get("[0xf]", false) - h.compactMem() - h.compactRange("[0]", "[9999]") - } - - for n := 0; n < 2; n++ { - for i := 0; i < 100; i++ { - v := fmt.Sprintf("[%d]", i*10) - h.put(v, v) - } - h.compactMem() - h.compactRange("[0]", "[1000000]") - } -} - -func TestDB_ManualCompaction(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - if h.o.GetMaxMemCompationLevel() != 2 { - t.Fatal("fix test to reflect the config") - } - - h.putMulti(3, "p", "q") - h.tablesPerLevel("1,1,1") - - // Compaction range falls before files - h.compactRange("", "c") - h.tablesPerLevel("1,1,1") - - // Compaction range falls after files - h.compactRange("r", "z") - h.tablesPerLevel("1,1,1") - - // Compaction range overlaps files - h.compactRange("p1", "p9") - h.tablesPerLevel("0,0,1") - - // Populate a different range - h.putMulti(3, "c", "e") - h.tablesPerLevel("1,1,2") - - // Compact just the new range - h.compactRange("b", "f") - h.tablesPerLevel("0,0,2") - - // Compact all - h.putMulti(1, "a", "z") - h.tablesPerLevel("0,1,2") - h.compactRange("", "") - h.tablesPerLevel("0,0,1") -} - -func TestDB_BloomFilter(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - DisableBlockCache: true, - Filter: filter.NewBloomFilter(10), - }) - defer h.close() - - key := func(i int) string { - return fmt.Sprintf("key%06d", i) - } - - const n = 10000 - - // Populate multiple layers - for i := 0; i < n; i++ { - h.put(key(i), key(i)) - } - h.compactMem() - h.compactRange("a", "z") - for i := 0; i < n; i += 100 { - h.put(key(i), key(i)) - } - h.compactMem() - - // Prevent auto compactions triggered by seeks - h.stor.DelaySync(storage.TypeTable) - - // Lookup present keys. Should rarely read from small sstable. - h.stor.SetReadCounter(storage.TypeTable) - for i := 0; i < n; i++ { - h.getVal(key(i), key(i)) - } - cnt := int(h.stor.ReadCounter()) - t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) - - if min, max := n, n+2*n/100; cnt < min || cnt > max { - t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) - } - - // Lookup missing keys. Should rarely read from either sstable. 
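Editorial note: TestDB_BloomFilter above checks that, with a 10-bits-per-key bloom filter, lookups of present keys cost about one sstable read each and lookups of missing keys almost none. Enabling the filter is just an Options setting; a minimal sketch using the public API, with a placeholder path:

package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	o := &opt.Options{
		// 10 bits per key gives roughly a 1% false-positive rate, so most
		// lookups of absent keys skip the sstable read entirely.
		Filter: filter.NewBloomFilter(10),
	}
	db, err := leveldb.OpenFile("/tmp/example-bloom-db", o) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Get([]byte("missing"), nil); err != nil && err != leveldb.ErrNotFound {
		log.Fatal(err)
	}
}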
- h.stor.ResetReadCounter() - for i := 0; i < n; i++ { - h.get(key(i)+".missing", false) - } - cnt = int(h.stor.ReadCounter()) - t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) - if max := 3 * n / 100; cnt > max { - t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) - } - - h.stor.ReleaseSync(storage.TypeTable) -} - -func TestDB_Concurrent(t *testing.T) { - const n, secs, maxkey = 4, 2, 1000 - - runtime.GOMAXPROCS(n) - trun(t, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - var cnt [n]uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - var put, get, found uint - defer func() { - t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", - i, cnt[i], put, get, found, get-found) - closeWg.Done() - }() - - rnd := rand.New(rand.NewSource(int64(1000 + i))) - for atomic.LoadUint32(&stop) == 0 { - x := cnt[i] - - k := rnd.Intn(maxkey) - kstr := fmt.Sprintf("%016d", k) - - if (rnd.Int() % 2) > 0 { - put++ - h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) - } else { - get++ - v, err := h.db.Get([]byte(kstr), h.ro) - if err == nil { - found++ - rk, ri, rx := 0, -1, uint32(0) - fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) - if rk != k { - t.Errorf("invalid key want=%d got=%d", k, rk) - } - if ri < 0 || ri >= n { - t.Error("invalid goroutine number: ", ri) - } else { - tx := atomic.LoadUint32(&(cnt[ri])) - if rx > tx { - t.Errorf("invalid seq number, %d > %d ", rx, tx) - } - } - } else if err != ErrNotFound { - t.Error("Get: got error: ", err) - return - } - } - atomic.AddUint32(&cnt[i], 1) - } - }(i) - } - - time.Sleep(secs * time.Second) - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_Concurrent2(t *testing.T) { - const n, n2 = 4, 4000 - - runtime.GOMAXPROCS(n*2 + 2) - truno(t, &opt.Options{WriteBuffer: 30}, func(h *dbHarness) { - var closeWg sync.WaitGroup - var stop uint32 - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 0; atomic.LoadUint32(&stop) == 0; k++ { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - for i := 0; i < n; i++ { - closeWg.Add(1) - go func(i int) { - for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { - h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) - } - closeWg.Done() - }(i) - } - - cmp := comparer.DefaultComparer - for i := 0; i < n2; i++ { - closeWg.Add(1) - go func(i int) { - it := h.db.NewIterator(nil, nil) - var pk []byte - for it.Next() { - kk := it.Key() - if cmp.Compare(kk, pk) <= 0 { - t.Errorf("iter %d: %q is successor of %q", i, pk, kk) - } - pk = append(pk[:0], kk...) 
- var k, vk, vi int - if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { - t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) - } else if n < 1 { - t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) - } - if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { - t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) - } else if n < 2 { - t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) - } - - if vk != k { - t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) - } - } - if err := it.Error(); err != nil { - t.Errorf("iter %d: Got error: %v", i, err) - } - it.Release() - closeWg.Done() - }(i) - } - - atomic.StoreUint32(&stop, 1) - closeWg.Wait() - }) - - runtime.GOMAXPROCS(1) -} - -func TestDB_CreateReopenDbOnFile(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - stor, err := storage.OpenFile(dbpath) - if err != nil { - t.Fatalf("(%d) cannot open storage: %s", i, err) - } - db, err := Open(stor, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - if err := stor.Close(); err != nil { - t.Fatalf("(%d) cannot close storage: %s", i, err) - } - } -} - -func TestDB_CreateReopenDbOnFile2(t *testing.T) { - dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) - if err := os.RemoveAll(dbpath); err != nil { - t.Fatal("cannot remove old db: ", err) - } - defer os.RemoveAll(dbpath) - - for i := 0; i < 3; i++ { - db, err := OpenFile(dbpath, nil) - if err != nil { - t.Fatalf("(%d) cannot open db: %s", i, err) - } - if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { - t.Fatalf("(%d) cannot write to db: %s", i, err) - } - if err := db.Close(); err != nil { - t.Fatalf("(%d) cannot close db: %s", i, err) - } - } -} - -func TestDB_DeletionMarkersOnMemdb(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("foo", "v1") - h.compactMem() - h.delete("foo") - h.get("foo", false) - h.getKeyVal("") -} - -func TestDB_LeveldbIssue178(t *testing.T) { - nKeys := (opt.DefaultCompactionTableSize / 30) * 5 - key1 := func(i int) string { - return fmt.Sprintf("my_key_%d", i) - } - key2 := func(i int) string { - return fmt.Sprintf("my_key_%d_xxx", i) - } - - // Disable compression since it affects the creation of layers and the - // code below is trying to test against a very specific scenario. - h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression}) - defer h.close() - - // Create first key range. - batch := new(Batch) - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key1(i)), []byte("value for range 1 key")) - } - h.write(batch) - - // Create second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Put([]byte(key2(i)), []byte("value for range 2 key")) - } - h.write(batch) - - // Delete second key range. - batch.Reset() - for i := 0; i < nKeys; i++ { - batch.Delete([]byte(key2(i))) - } - h.write(batch) - h.waitMemCompaction() - - // Run manual compaction. - h.compactRange(key1(0), key1(nKeys-1)) - - // Checking the keys. 
- h.assertNumKeys(nKeys) -} - -func TestDB_LeveldbIssue200(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - h.put("1", "b") - h.put("2", "c") - h.put("3", "d") - h.put("4", "e") - h.put("5", "f") - - iter := h.db.NewIterator(nil, h.ro) - - // Add an element that should not be reflected in the iterator. - h.put("25", "cd") - - iter.Seek([]byte("5")) - assertBytes(t, []byte("5"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("4"), iter.Key()) - iter.Prev() - assertBytes(t, []byte("3"), iter.Key()) - iter.Next() - assertBytes(t, []byte("4"), iter.Key()) - iter.Next() - assertBytes(t, []byte("5"), iter.Key()) -} - -func TestDB_GoleveldbIssue74(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - }) - defer h.close() - - const n, dur = 10000, 5 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(2) - var done uint32 - go func() { - var i int - defer func() { - t.Logf("WRITER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - - b := new(Batch) - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iv := fmt.Sprintf("VAL%010d", i) - for k := 0; k < n; k++ { - key := fmt.Sprintf("KEY%06d", k) - b.Put([]byte(key), []byte(key+iv)) - b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) - } - h.write(b) - - b.Reset() - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) - } else if string(value) != string(key)+iv { - t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) - } - - b.Delete(key) - b.Delete(ptrKey) - } - h.write(b) - iter.Release() - snap.Release() - if k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) - var prevValue string - var k int - for ; iter.Next(); k++ { - ptrKey := iter.Key() - key := iter.Value() - - if _, err := snap.Get(ptrKey, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err) - } - - if value, err := snap.Get(key, nil); err != nil { - t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) - } else if prevValue != "" && string(value) != string(key)+prevValue { - t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) - } else { - prevValue = string(value[len(key):]) - } - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("#%d %d != %d", i, k, n) - } - } - }() - wg.Wait() -} - -func TestDB_GetProperties(t *testing.T) { - h := newDbHarness(t) - defer h.close() - - _, err := h.db.GetProperty("leveldb.num-files-at-level") - if err == nil { - t.Error("GetProperty() failed to detect missing level") - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0") - if err != nil { - t.Error("got unexpected error", err) - } - - _, err = h.db.GetProperty("leveldb.num-files-at-level0x") - if err == nil { - t.Error("GetProperty() failed to detect 
invalid level") - } -} - -func TestDB_GoleveldbIssue72and83(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 1 * opt.MiB, - OpenFilesCacheCapacity: 3, - }) - defer h.close() - - const n, wn, dur = 10000, 100, 30 * time.Second - - runtime.GOMAXPROCS(runtime.NumCPU()) - - randomData := func(prefix byte, i int) []byte { - data := make([]byte, 1+4+32+64+32) - _, err := crand.Reader.Read(data[1 : len(data)-8]) - if err != nil { - panic(err) - } - data[0] = prefix - binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i)) - binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value()) - return data - } - - keys := make([][]byte, n) - for i := range keys { - keys[i] = randomData(1, 0) - } - - until := time.Now().Add(dur) - wg := new(sync.WaitGroup) - wg.Add(3) - var done uint32 - go func() { - i := 0 - defer func() { - t.Logf("WRITER DONE #%d", i) - wg.Done() - }() - - b := new(Batch) - for ; i < wn && atomic.LoadUint32(&done) == 0; i++ { - b.Reset() - for _, k1 := range keys { - k2 := randomData(2, i) - b.Put(k2, randomData(42, i)) - b.Put(k1, k2) - } - if err := h.db.Write(b, h.wo); err != nil { - atomic.StoreUint32(&done, 1) - t.Fatalf("WRITER #%d db.Write: %v", i, err) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER0 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - snap := h.getSnapshot() - seq := snap.elem.seq - if seq == 0 { - snap.Release() - continue - } - iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - writei := int(seq/(n*2) - 1) - var k int - for ; iter.Next(); k++ { - k1 := iter.Key() - k2 := iter.Value() - k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:]) - k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value() - if k1checksum0 != k1checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0) - } - k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:]) - k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value() - if k2checksum0 != k2checksum1 { - t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1) - } - kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:])) - if writei != kwritei { - t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei) - } - if _, err := snap.Get(k2, nil); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2) - } - } - if err := iter.Error(); err != nil { - t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err) - } - iter.Release() - snap.Release() - if k > 0 && k != n { - t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n) - } - } - }() - go func() { - var i int - defer func() { - t.Logf("READER1 DONE #%d", i) - atomic.StoreUint32(&done, 1) - wg.Done() - }() - for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { - iter := h.db.NewIterator(nil, nil) - seq := iter.(*dbIter).seq - if seq == 0 { - iter.Release() - continue - } - writei := int(seq/(n*2) - 1) - var k int - for ok := iter.Last(); ok; ok = iter.Prev() { - k++ - } - if err := iter.Error(); err != nil { - t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) - } - iter.Release() - if m := (writei+1)*n + n; k != m { - t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) - } - } - }() - - wg.Wait() -} - -func TestDB_TransientError(t *testing.T) { - h := 
newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 128 * opt.KiB, - OpenFilesCacheCapacity: 3, - DisableCompactionBackoff: true, - }) - defer h.close() - - const ( - nSnap = 20 - nKey = 10000 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%8d", k) - b.Delete([]byte(key)) - } - h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt) - if err := h.db.Write(b, nil); err != nil { - t.Logf("WRITE #%d error: %v", i, err) - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - for { - if err := h.db.Write(b, nil); err == nil { - break - } else if errors.IsCorrupted(err) { - t.Fatalf("WRITE #%d corrupted: %v", i, err) - } - } - } - } - h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt) - - runtime.GOMAXPROCS(runtime.NumCPU()) - - rnd := rand.New(rand.NewSource(0xecafdaed)) - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(2) - - go func(i int, snap *Snapshot, sk []int) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for _, k := range sk { - key := fmt.Sprintf("KEY%8d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap, rnd.Perm(nKey)) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - iter := snap.NewIterator(nil, nil) - defer iter.Release() - for k := 0; k < nKey; k++ { - if !iter.Next() { - if err := iter.Error(); err != nil { - t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) - } else { - t.Fatalf("READER_ITER #%d K%d eoi", i, k) - } - } - key := fmt.Sprintf("KEY%8d", k) - xkey := iter.Key() - if !bytes.Equal([]byte(key), xkey) { - t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) - } - value := key + vtail - xvalue := iter.Value() - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { - h := newDbHarnessWopt(t, &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 90 * opt.KiB, - CompactionExpandLimitFactor: 1, - }) - defer h.close() - - const ( - nSnap = 190 - nKey = 140 - ) - - var ( - snaps [nSnap]*Snapshot - b = &Batch{} - ) - for i := range snaps { - vtail := fmt.Sprintf("VAL%030d", i) - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Put([]byte(key), []byte(key+vtail)) - } - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - - snaps[i] = h.db.newSnapshot() - b.Reset() - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - b.Delete([]byte(key)) - 
} - if err := h.db.Write(b, nil); err != nil { - t.Fatalf("WRITE #%d error: %v", i, err) - } - } - - h.compactMem() - - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - - h.compactRangeAt(0, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - h.compactRangeAt(1, "", "") - h.waitCompaction() - for level, tables := range h.db.s.stVersion.tables { - for _, table := range tables { - t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax) - } - } - runtime.GOMAXPROCS(runtime.NumCPU()) - - wg := &sync.WaitGroup{} - for i, snap := range snaps { - wg.Add(1) - - go func(i int, snap *Snapshot) { - defer wg.Done() - - vtail := fmt.Sprintf("VAL%030d", i) - for k := 0; k < nKey; k++ { - key := fmt.Sprintf("KEY%08d", k) - xvalue, err := snap.Get([]byte(key), nil) - if err != nil { - t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) - } - value := key + vtail - if !bytes.Equal([]byte(value), xvalue) { - t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) - } - } - }(i, snap) - } - - wg.Wait() -} - -func TestDB_TableCompactionBuilder(t *testing.T) { - stor := newTestStorage(t) - defer stor.Close() - - const nSeq = 99 - - o := &opt.Options{ - WriteBuffer: 112 * opt.KiB, - CompactionTableSize: 43 * opt.KiB, - CompactionExpandLimitFactor: 1, - CompactionGPOverlapsFactor: 1, - DisableBlockCache: true, - } - s, err := newSession(stor, o) - if err != nil { - t.Fatal(err) - } - if err := s.create(); err != nil { - t.Fatal(err) - } - defer s.close() - var ( - seq uint64 - targetSize = 5 * o.CompactionTableSize - value = bytes.Repeat([]byte{'0'}, 100) - ) - for i := 0; i < 2; i++ { - tw, err := s.tops.create() - if err != nil { - t.Fatal(err) - } - for k := 0; tw.tw.BytesLen() < targetSize; k++ { - key := []byte(fmt.Sprintf("%09d", k)) - seq += nSeq - 1 - for x := uint64(0); x < nSeq; x++ { - if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil { - t.Fatal(err) - } - } - } - tf, err := tw.finish() - if err != nil { - t.Fatal(err) - } - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - rec.addTableFile(i, tf) - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - } - - // Build grandparent. - v := s.version() - c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - b := &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize/3 + 961, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - // Build level-1. 
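Editorial aside: the reader goroutines in the tests above work against snapshots taken through the internal newSnapshot helper. For reference, a minimal sketch of the equivalent public API, GetSnapshot/Release, which the earlier tests also exercise; the key literal is illustrative and error handling is elided.

    snap, err := db.GetSnapshot()
    ...
    defer snap.Release()
    // Reads through snap see the database as of GetSnapshot,
    // even while concurrent writers keep mutating db.
    value, err := snap.Get([]byte("KEY00000001"), nil)
    ...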
- v = s.version() - c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Fatal(err) - } - for _, t := range c.tables[0] { - rec.delTable(c.level, t.file.Num()) - } - // Move grandparent to level-3 - for _, t := range v.tables[2] { - rec.delTable(2, t.file.Num()) - rec.addTableFile(3, t) - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - v = s.version() - for level, want := range []bool{false, true, false, true, false} { - got := len(v.tables[level]) > 0 - if want != got { - t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) - } - } - for i, f := range v.tables[1][:len(v.tables[1])-1] { - nf := v.tables[1][i+1] - if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { - t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num()) - } - } - v.release() - - // Compaction with transient error. - v = s.version() - c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...)) - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - b = &tableCompactionBuilder{ - s: s, - c: c, - rec: rec, - stat1: new(cStatsStaging), - minSeq: 0, - strict: true, - tableSize: o.CompactionTableSize, - } - stor.SetEmuErrOnce(storage.TypeTable, tsOpSync) - stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite) - stor.SetEmuRandErrProb(0xf0) - for { - if err := b.run(new(compactionTransactCounter)); err != nil { - t.Logf("(expected) b.run: %v", err) - } else { - break - } - } - if err := s.commit(rec); err != nil { - t.Fatal(err) - } - c.release() - - stor.SetEmuErrOnce(0, tsOpSync) - stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite) - - v = s.version() - if len(v.tables[1]) != len(v.tables[2]) { - t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2])) - } - for i, f0 := range v.tables[1] { - f1 := v.tables[2][i] - iter0 := s.tops.newIterator(f0, nil, nil) - iter1 := s.tops.newIterator(f1, nil, nil) - for j := 0; true; j++ { - next0 := iter0.Next() - next1 := iter1.Next() - if next0 != next1 { - t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) - } - key0 := iter0.Key() - key1 := iter1.Key() - if !bytes.Equal(key0, key1) { - t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) - } - if next0 == false { - break - } - } - iter0.Release() - iter1.Release() - } - v.release() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go deleted file mode 100644 index a8a2bdf72..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// Reader is the interface that wraps basic Get and NewIterator methods. -// This interface implemented by both DB and Snapshot. 
-type Reader interface { - Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) - NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator -} - -type Sizes []uint64 - -// Sum returns sum of the sizes. -func (p Sizes) Sum() (n uint64) { - for _, s := range p { - n += s - } - return n -} - -// Logging. -func (db *DB) log(v ...interface{}) { db.s.log(v...) } -func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) } - -// Check and clean files. -func (db *DB) checkAndCleanFiles() error { - v := db.s.version() - defer v.release() - - tablesMap := make(map[uint64]bool) - for _, tables := range v.tables { - for _, t := range tables { - tablesMap[t.file.Num()] = false - } - } - - files, err := db.s.getFiles(storage.TypeAll) - if err != nil { - return err - } - - var nTables int - var rem []storage.File - for _, f := range files { - keep := true - switch f.Type() { - case storage.TypeManifest: - keep = f.Num() >= db.s.manifestFile.Num() - case storage.TypeJournal: - if db.frozenJournalFile != nil { - keep = f.Num() >= db.frozenJournalFile.Num() - } else { - keep = f.Num() >= db.journalFile.Num() - } - case storage.TypeTable: - _, keep = tablesMap[f.Num()] - if keep { - tablesMap[f.Num()] = true - nTables++ - } - } - - if !keep { - rem = append(rem, f) - } - } - - if nTables != len(tablesMap) { - var missing []*storage.FileInfo - for num, present := range tablesMap { - if !present { - missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num}) - db.logf("db@janitor table missing @%d", num) - } - } - return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing}) - } - - db.logf("db@janitor F·%d G·%d", len(files), len(rem)) - for _, f := range rem { - db.logf("db@janitor removing %s-%d", f.Type(), f.Num()) - if err := f.Remove(); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go deleted file mode 100644 index e1cf30c53..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "time" - - "github.com/syndtr/goleveldb/leveldb/memdb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (db *DB) writeJournal(b *Batch) error { - w, err := db.journal.Next() - if err != nil { - return err - } - if _, err := w.Write(b.encode()); err != nil { - return err - } - if err := db.journal.Flush(); err != nil { - return err - } - if b.sync { - return db.journalWriter.Sync() - } - return nil -} - -func (db *DB) jWriter() { - defer db.closeW.Done() - for { - select { - case b := <-db.journalC: - if b != nil { - db.journalAckC <- db.writeJournal(b) - } - case _, _ = <-db.closeC: - return - } - } -} - -func (db *DB) rotateMem(n int) (mem *memDB, err error) { - // Wait for pending memdb compaction. - err = db.compSendIdle(db.mcompCmdC) - if err != nil { - return - } - - // Create new memdb and journal. - mem, err = db.newMem(n) - if err != nil { - return - } - - // Schedule memdb compaction. 
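Editorial aside: the Reader interface at the top of the removed db_util.go is satisfied by both *DB and *Snapshot, which makes it convenient for read-only helpers that should run equally well against a live database or a frozen snapshot. A minimal sketch under that assumption (the function name and prefix are illustrative; the leveldb and leveldb/util imports are assumed):

    // countPrefix works for any leveldb.Reader, i.e. *leveldb.DB or *leveldb.Snapshot.
    func countPrefix(r leveldb.Reader, prefix []byte) (int, error) {
        iter := r.NewIterator(util.BytesPrefix(prefix), nil)
        defer iter.Release()
        n := 0
        for iter.Next() {
            n++
        }
        return n, iter.Error()
    }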
- db.compSendTrigger(db.mcompCmdC) - return -} - -func (db *DB) flush(n int) (mem *memDB, nn int, err error) { - delayed := false - flush := func() (retry bool) { - v := db.s.version() - defer v.release() - mem = db.getEffectiveMem() - defer func() { - if retry { - mem.decref() - mem = nil - } - }() - nn = mem.mdb.Free() - switch { - case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed: - delayed = true - time.Sleep(time.Millisecond) - case nn >= n: - return false - case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger(): - delayed = true - err = db.compSendIdle(db.tcompCmdC) - if err != nil { - return false - } - default: - // Allow memdb to grow if it has no entry. - if mem.mdb.Len() == 0 { - nn = n - } else { - mem.decref() - mem, err = db.rotateMem(n) - if err == nil { - nn = mem.mdb.Free() - } else { - nn = 0 - } - } - return false - } - return true - } - start := time.Now() - for flush() { - } - if delayed { - db.writeDelay += time.Since(start) - db.writeDelayN++ - } else if db.writeDelayN > 0 { - db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay) - db.writeDelay = 0 - db.writeDelayN = 0 - } - return -} - -// Write apply the given batch to the DB. The batch will be applied -// sequentially. -// -// It is safe to modify the contents of the arguments after Write returns. -func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) { - err = db.ok() - if err != nil || b == nil || b.Len() == 0 { - return - } - - b.init(wo.GetSync()) - - // The write happen synchronously. - select { - case db.writeC <- b: - if <-db.writeMergedC { - return <-db.writeAckC - } - case db.writeLockC <- struct{}{}: - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - return ErrClosed - } - - merged := 0 - danglingMerge := false - defer func() { - if danglingMerge { - db.writeMergedC <- false - } else { - <-db.writeLockC - } - for i := 0; i < merged; i++ { - db.writeAckC <- err - } - }() - - mem, memFree, err := db.flush(b.size()) - if err != nil { - return - } - defer mem.decref() - - // Calculate maximum size of the batch. - m := 1 << 20 - if x := b.size(); x <= 128<<10 { - m = x + (128 << 10) - } - m = minInt(m, memFree) - - // Merge with other batch. -drain: - for b.size() < m && !b.sync { - select { - case nb := <-db.writeC: - if b.size()+nb.size() <= m { - b.append(nb) - db.writeMergedC <- true - merged++ - } else { - danglingMerge = true - break drain - } - default: - break drain - } - } - - // Set batch first seq number relative from last seq. - b.seq = db.seq + 1 - - // Write journal concurrently if it is large enough. - if b.size() >= (128 << 10) { - // Push the write batch to the journal writer - select { - case db.journalC <- b: - // Write into memdb - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - case err = <-db.compPerErrC: - return - case _, _ = <-db.closeC: - err = ErrClosed - return - } - // Wait for journal writer - select { - case err = <-db.journalAckC: - if err != nil { - // Revert memdb if error detected - if berr := b.revertMemReplay(mem.mdb); berr != nil { - panic(berr) - } - return - } - case _, _ = <-db.closeC: - err = ErrClosed - return - } - } else { - err = db.writeJournal(b) - if err != nil { - return - } - if berr := b.memReplay(mem.mdb); berr != nil { - panic(berr) - } - } - - // Set last seq number. - db.addSeq(uint64(b.Len())) - - if b.size() >= memFree { - db.rotateMem(0) - } - return -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. 
-// -// It is safe to modify the contents of the arguments after Put returns. -func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Put(key, value) - return db.Write(b, wo) -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error { - b := new(Batch) - b.Delete(key) - return db.Write(b, wo) -} - -func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { - iter := mem.NewIterator(nil) - defer iter.Release() - return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) && - (min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0)) -} - -// CompactRange compacts the underlying DB for the given key range. -// In particular, deleted and overwritten versions are discarded, -// and the data is rearranged to reduce the cost of operations -// needed to access the data. This operation should typically only -// be invoked by users who understand the underlying implementation. -// -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -// Therefore if both is nil then it will compact entire DB. -func (db *DB) CompactRange(r util.Range) error { - if err := db.ok(); err != nil { - return err - } - - // Lock writer. - select { - case db.writeLockC <- struct{}{}: - case err := <-db.compPerErrC: - return err - case _, _ = <-db.closeC: - return ErrClosed - } - - // Check for overlaps in memdb. - mem := db.getEffectiveMem() - defer mem.decref() - if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) { - // Memdb compaction. - if _, err := db.rotateMem(0); err != nil { - <-db.writeLockC - return err - } - <-db.writeLockC - if err := db.compSendIdle(db.mcompCmdC); err != nil { - return err - } - } else { - <-db.writeLockC - } - - // Table compaction. - return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go deleted file mode 100644 index 53f13bb24..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package leveldb provides implementation of LevelDB key/value database. -// -// Create or open a database: -// -// db, err := leveldb.OpenFile("path/to/db", nil) -// ... -// defer db.Close() -// ... -// -// Read or modify the database content: -// -// // Remember that the contents of the returned slice should not be modified. -// data, err := db.Get([]byte("key"), nil) -// ... -// err = db.Put([]byte("key"), []byte("value"), nil) -// ... -// err = db.Delete([]byte("key"), nil) -// ... -// -// Iterate over database content: -// -// iter := db.NewIterator(nil, nil) -// for iter.Next() { -// // Remember that the contents of the returned slice should not be modified, and -// // only valid until the next call to Next. -// key := iter.Key() -// value := iter.Value() -// ... -// } -// iter.Release() -// err = iter.Error() -// ... 
-// -// Iterate over subset of database content with a particular prefix: -// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Seek-then-Iterate: -// -// iter := db.NewIterator(nil, nil) -// for ok := iter.Seek(key); ok; ok = iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Iterate over subset of database content: -// -// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil) -// for iter.Next() { -// // Use key/value. -// ... -// } -// iter.Release() -// err = iter.Error() -// ... -// -// Batch writes: -// -// batch := new(leveldb.Batch) -// batch.Put([]byte("foo"), []byte("value")) -// batch.Put([]byte("bar"), []byte("another value")) -// batch.Delete([]byte("baz")) -// err = db.Write(batch, nil) -// ... -// -// Use bloom filter: -// -// o := &opt.Options{ -// Filter: filter.NewBloomFilter(10), -// } -// db, err := leveldb.OpenFile("path/to/db", o) -// ... -// defer db.Close() -// ... -package leveldb diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go deleted file mode 100644 index 29d0d2f27..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrSnapshotReleased = errors.New("leveldb: snapshot released") - ErrIterReleased = errors.New("leveldb: iterator released") - ErrClosed = errors.New("leveldb: closed") -) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go deleted file mode 100644 index 84b5d6b7b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package errors provides common error types used throughout leveldb. -package errors - -import ( - "errors" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = New("leveldb: not found") - ErrReleased = util.ErrReleased - ErrHasReleaser = util.ErrHasReleaser -) - -// New returns an error that formats as the given text. -func New(text string) error { - return errors.New(text) -} - -// ErrCorrupted is the type that wraps errors that indicate corruption in -// the database. -type ErrCorrupted struct { - File *storage.FileInfo - Err error -} - -func (e *ErrCorrupted) Error() string { - if e.File != nil { - return fmt.Sprintf("%v [file=%v]", e.Err, e.File) - } else { - return e.Err.Error() - } -} - -// NewErrCorrupted creates new ErrCorrupted error. -func NewErrCorrupted(f storage.File, err error) error { - return &ErrCorrupted{storage.NewFileInfo(f), err} -} - -// IsCorrupted returns a boolean indicating whether the error is indicating -// a corruption. 
-func IsCorrupted(err error) bool { - switch err.(type) { - case *ErrCorrupted: - return true - } - return false -} - -// ErrMissingFiles is the type that indicating a corruption due to missing -// files. -type ErrMissingFiles struct { - Files []*storage.FileInfo -} - -func (e *ErrMissingFiles) Error() string { return "file missing" } - -// SetFile sets 'file info' of the given error with the given file. -// Currently only ErrCorrupted is supported, otherwise will do nothing. -func SetFile(err error, f storage.File) error { - switch x := err.(type) { - case *ErrCorrupted: - x.File = storage.NewFileInfo(f) - return x - } - return err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go deleted file mode 100644 index b328ece4e..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Leveldb external", func() { - o := &opt.Options{ - DisableBlockCache: true, - BlockRestartInterval: 5, - BlockSize: 80, - Compression: opt.NoCompression, - OpenFilesCacheCapacity: -1, - Strict: opt.StrictAll, - WriteBuffer: 1000, - CompactionTableSize: 2000, - } - - Describe("write test", func() { - It("should do write correctly", func(done Done) { - db := newTestingDB(o, nil, nil) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), - } - testutil.DoDBTesting(&t) - db.TestClose() - done <- true - }, 20.0) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. - db := newTestingDB(o, nil, nil) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - err := db.TestPut(key, value) - Expect(err).NotTo(HaveOccurred()) - }) - - return db - }, func(db testutil.DB) { - db.(*testingDB).TestClose() - }) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go deleted file mode 100644 index 37c1e146b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
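Editorial aside: the removed errors helpers above distinguish three cases a caller usually wants to treat differently — a plain miss (leveldb.ErrNotFound), corruption (errors.IsCorrupted), and everything else. A minimal sketch of that classification, with error handling elided in the package-documentation style; the retry policy is illustrative, and the leveldb and leveldb/errors imports are assumed.

    value, err := db.Get([]byte("key"), nil)
    switch {
    case err == nil:
        // use value
    case err == leveldb.ErrNotFound:
        // key simply absent; usually not treated as a failure
    case errors.IsCorrupted(err):
        // stop using the store and repair or restore it
    default:
        // other I/O error; safe to retry or surface to the caller
    }
    ...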
- -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" -) - -type iFilter struct { - filter.Filter -} - -func (f iFilter) Contains(filter, key []byte) bool { - return f.Filter.Contains(filter, iKey(key).ukey()) -} - -func (f iFilter) NewGenerator() filter.FilterGenerator { - return iFilterGenerator{f.Filter.NewGenerator()} -} - -type iFilterGenerator struct { - filter.FilterGenerator -} - -func (g iFilterGenerator) Add(key []byte) { - g.FilterGenerator.Add(iKey(key).ukey()) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go deleted file mode 100644 index bab0e9970..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -func bloomHash(key []byte) uint32 { - return util.Hash(key, 0xbc9f1d34) -} - -type bloomFilter int - -// The bloom filter serializes its parameters and is backward compatible -// with respect to them. Therefor, its parameters are not added to its -// name. -func (bloomFilter) Name() string { - return "leveldb.BuiltinBloomFilter" -} - -func (f bloomFilter) Contains(filter, key []byte) bool { - nBytes := len(filter) - 1 - if nBytes < 1 { - return false - } - nBits := uint32(nBytes * 8) - - // Use the encoded k so that we can read filters generated by - // bloom filters created using different parameters. - k := filter[nBytes] - if k > 30 { - // Reserved for potentially new encodings for short bloom filters. - // Consider it a match. - return true - } - - kh := bloomHash(key) - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < k; j++ { - bitpos := kh % nBits - if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 { - return false - } - kh += delta - } - return true -} - -func (f bloomFilter) NewGenerator() FilterGenerator { - // Round down to reduce probing cost a little bit. - k := uint8(f * 69 / 100) // 0.69 =~ ln(2) - if k < 1 { - k = 1 - } else if k > 30 { - k = 30 - } - return &bloomFilterGenerator{ - n: int(f), - k: k, - } -} - -type bloomFilterGenerator struct { - n int - k uint8 - - keyHashes []uint32 -} - -func (g *bloomFilterGenerator) Add(key []byte) { - // Use double-hashing to generate a sequence of hash values. - // See analysis in [Kirsch,Mitzenmacher 2006]. - g.keyHashes = append(g.keyHashes, bloomHash(key)) -} - -func (g *bloomFilterGenerator) Generate(b Buffer) { - // Compute bloom filter size (in both bits and bytes) - nBits := uint32(len(g.keyHashes) * g.n) - // For small n, we can see a very high false positive rate. Fix it - // by enforcing a minimum bloom filter length. - if nBits < 64 { - nBits = 64 - } - nBytes := (nBits + 7) / 8 - nBits = nBytes * 8 - - dest := b.Alloc(int(nBytes) + 1) - dest[nBytes] = g.k - for _, kh := range g.keyHashes { - delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits - for j := uint8(0); j < g.k; j++ { - bitpos := kh % nBits - dest[bitpos/8] |= (1 << (bitpos % 8)) - kh += delta - } - } - - g.keyHashes = g.keyHashes[:0] -} - -// NewBloomFilter creates a new initialized bloom filter for given -// bitsPerKey. 
-// -// Since bitsPerKey is persisted individually for each bloom filter -// serialization, bloom filters are backwards compatible with respect to -// changing bitsPerKey. This means that no big performance penalty will -// be experienced when changing the parameter. See documentation for -// opt.Options.Filter for more information. -func NewBloomFilter(bitsPerKey int) Filter { - return bloomFilter(bitsPerKey) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go deleted file mode 100644 index 1fb56f071..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package filter - -import ( - "encoding/binary" - "github.com/syndtr/goleveldb/leveldb/util" - "testing" -) - -type harness struct { - t *testing.T - - bloom Filter - generator FilterGenerator - filter []byte -} - -func newHarness(t *testing.T) *harness { - bloom := NewBloomFilter(10) - return &harness{ - t: t, - bloom: bloom, - generator: bloom.NewGenerator(), - } -} - -func (h *harness) add(key []byte) { - h.generator.Add(key) -} - -func (h *harness) addNum(key uint32) { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - h.add(b[:]) -} - -func (h *harness) build() { - b := &util.Buffer{} - h.generator.Generate(b) - h.filter = b.Bytes() -} - -func (h *harness) reset() { - h.filter = nil -} - -func (h *harness) filterLen() int { - return len(h.filter) -} - -func (h *harness) assert(key []byte, want, silent bool) bool { - got := h.bloom.Contains(h.filter, key) - if !silent && got != want { - h.t.Errorf("assert on '%v' failed got '%v', want '%v'", key, got, want) - } - return got -} - -func (h *harness) assertNum(key uint32, want, silent bool) bool { - var b [4]byte - binary.LittleEndian.PutUint32(b[:], key) - return h.assert(b[:], want, silent) -} - -func TestBloomFilter_Empty(t *testing.T) { - h := newHarness(t) - h.build() - h.assert([]byte("hello"), false, false) - h.assert([]byte("world"), false, false) -} - -func TestBloomFilter_Small(t *testing.T) { - h := newHarness(t) - h.add([]byte("hello")) - h.add([]byte("world")) - h.build() - h.assert([]byte("hello"), true, false) - h.assert([]byte("world"), true, false) - h.assert([]byte("x"), false, false) - h.assert([]byte("foo"), false, false) -} - -func nextN(n int) int { - switch { - case n < 10: - n += 1 - case n < 100: - n += 10 - case n < 1000: - n += 100 - default: - n += 1000 - } - return n -} - -func TestBloomFilter_VaryingLengths(t *testing.T) { - h := newHarness(t) - var mediocre, good int - for n := 1; n < 10000; n = nextN(n) { - h.reset() - for i := 0; i < n; i++ { - h.addNum(uint32(i)) - } - h.build() - - got := h.filterLen() - want := (n * 10 / 8) + 40 - if got > want { - t.Errorf("filter len test failed, '%d' > '%d'", got, want) - } - - for i := 0; i < n; i++ { - h.assertNum(uint32(i), true, false) - } - - var rate float32 - for i := 0; i < 10000; i++ { - if h.assertNum(uint32(i+1000000000), true, true) { - rate++ - } - } - rate /= 10000 - if rate > 0.02 { - t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n) - } - if rate > 0.0125 { - mediocre++ - } else { - good++ - } - } - t.Logf("false positive rate: %d good, %d mediocre", good, mediocre) - if mediocre > good/5 { - 
t.Error("mediocre false positive rate is more than expected") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go deleted file mode 100644 index 7a925c5a8..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/filter/filter.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package filter provides interface and implementation of probabilistic -// data structure. -// -// The filter is resposible for creating small filter from a set of keys. -// These filter will then used to test whether a key is a member of the set. -// In many cases, a filter can cut down the number of disk seeks from a -// handful to a single disk seek per DB.Get call. -package filter - -// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods. -type Buffer interface { - // Alloc allocs n bytes of slice from the buffer. This also advancing - // write offset. - Alloc(n int) []byte - - // Write appends the contents of p to the buffer. - Write(p []byte) (n int, err error) - - // WriteByte appends the byte c to the buffer. - WriteByte(c byte) error -} - -// Filter is the filter. -type Filter interface { - // Name returns the name of this policy. - // - // Note that if the filter encoding changes in an incompatible way, - // the name returned by this method must be changed. Otherwise, old - // incompatible filters may be passed to methods of this type. - Name() string - - // NewGenerator creates a new filter generator. - NewGenerator() FilterGenerator - - // Contains returns true if the filter contains the given key. - // - // The filter are filters generated by the filter generator. - Contains(filter, key []byte) bool -} - -// FilterGenerator is the filter generator. -type FilterGenerator interface { - // Add adds a key to the filter generator. - // - // The key may become invalid after call to this method end, therefor - // key must be copied if implementation require keeping key for later - // use. The key should not modified directly, doing so may cause - // undefined results. - Add(key []byte) - - // Generate generates filters based on keys passed so far. After call - // to Generate the filter generator maybe resetted, depends on implementation. - Generate(b Buffer) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go deleted file mode 100644 index e76657e5e..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/go13_bench_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build go1.3 - -package leveldb - -import ( - "sync/atomic" - "testing" -) - -func BenchmarkDBReadConcurrent(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - for pb.Next() && iter.Next() { - } - }) -} - -func BenchmarkDBReadConcurrent2(b *testing.B) { - p := openDBBench(b, false) - p.populate(b.N) - p.fill() - p.gc() - defer p.close() - - b.ResetTimer() - b.SetBytes(116) - - var dir uint32 - b.RunParallel(func(pb *testing.PB) { - iter := p.newIter() - defer iter.Release() - if atomic.AddUint32(&dir, 1)%2 == 0 { - for pb.Next() && iter.Next() { - } - } else { - if pb.Next() && iter.Last() { - for pb.Next() && iter.Prev() { - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go deleted file mode 100644 index a23ab05f7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/util" -) - -// BasicArray is the interface that wraps basic Len and Search method. -type BasicArray interface { - // Len returns length of the array. - Len() int - - // Search finds smallest index that point to a key that is greater - // than or equal to the given key. - Search(key []byte) int -} - -// Array is the interface that wraps BasicArray and basic Index method. -type Array interface { - BasicArray - - // Index returns key/value pair with index of i. - Index(i int) (key, value []byte) -} - -// Array is the interface that wraps BasicArray and basic Get method. -type ArrayIndexer interface { - BasicArray - - // Get returns a new data iterator with index of i. 
- Get(i int) Iterator -} - -type basicArrayIterator struct { - util.BasicReleaser - array BasicArray - pos int - err error -} - -func (i *basicArrayIterator) Valid() bool { - return i.pos >= 0 && i.pos < i.array.Len() && !i.Released() -} - -func (i *basicArrayIterator) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.array.Len() == 0 { - i.pos = -1 - return false - } - i.pos = 0 - return true -} - -func (i *basicArrayIterator) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = n - 1 - return true -} - -func (i *basicArrayIterator) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - n := i.array.Len() - if n == 0 { - i.pos = 0 - return false - } - i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true -} - -func (i *basicArrayIterator) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos++ - if n := i.array.Len(); i.pos >= n { - i.pos = n - return false - } - return true -} - -func (i *basicArrayIterator) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.pos-- - if i.pos < 0 { - i.pos = -1 - return false - } - return true -} - -func (i *basicArrayIterator) Error() error { return i.err } - -type arrayIterator struct { - basicArrayIterator - array Array - pos int - key, value []byte -} - -func (i *arrayIterator) updateKV() { - if i.pos == i.basicArrayIterator.pos { - return - } - i.pos = i.basicArrayIterator.pos - if i.Valid() { - i.key, i.value = i.array.Index(i.pos) - } else { - i.key = nil - i.value = nil - } -} - -func (i *arrayIterator) Key() []byte { - i.updateKV() - return i.key -} - -func (i *arrayIterator) Value() []byte { - i.updateKV() - return i.value -} - -type arrayIteratorIndexer struct { - basicArrayIterator - array ArrayIndexer -} - -func (i *arrayIteratorIndexer) Get() Iterator { - if i.Valid() { - return i.array.Get(i.basicArrayIterator.pos) - } - return nil -} - -// NewArrayIterator returns an iterator from the given array. -func NewArrayIterator(array Array) Iterator { - return &arrayIterator{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - pos: -1, - } -} - -// NewArrayIndexer returns an index iterator from the given array. -func NewArrayIndexer(array ArrayIndexer) IteratorIndexer { - return &arrayIteratorIndexer{ - basicArrayIterator: basicArrayIterator{array: array, pos: -1}, - array: array, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go deleted file mode 100644 index 1ed6d07cb..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Array iterator", func() { - It("Should iterates and seeks correctly", func() { - // Build key/value. - kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3) - - // Test the iterator. 
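Editorial aside: the Array/BasicArray contract removed above (Len; Search returning the smallest index whose key is greater than or equal to the argument; Index returning the pair at i) is small enough to implement over any sorted in-memory slice. A minimal, self-contained sketch with an illustrative sortedKeys type — the keys and values are synthesized, not taken from the vendored tests:

    package main

    import (
        "fmt"
        "sort"

        "github.com/syndtr/goleveldb/leveldb/iterator"
    )

    // sortedKeys implements iterator.Array over an ascending string slice.
    type sortedKeys []string

    func (a sortedKeys) Len() int { return len(a) }

    // Search returns the smallest index i such that a[i] >= key.
    func (a sortedKeys) Search(key []byte) int {
        return sort.SearchStrings(a, string(key))
    }

    func (a sortedKeys) Index(i int) (key, value []byte) {
        return []byte(a[i]), []byte("value-of-" + a[i])
    }

    func main() {
        it := iterator.NewArrayIterator(sortedKeys{"apple", "banana", "cherry"})
        defer it.Release()
        for ok := it.Seek([]byte("banana")); ok; ok = it.Next() {
            fmt.Printf("%s = %s\n", it.Key(), it.Value())
        }
    }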
- t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewArrayIterator(kv), - } - testutil.DoIteratorTesting(&t) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go deleted file mode 100644 index 939adbb93..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// IteratorIndexer is the interface that wraps CommonIterator and basic Get -// method. IteratorIndexer provides index for indexed iterator. -type IteratorIndexer interface { - CommonIterator - - // Get returns a new data iterator for the current position, or nil if - // done. - Get() Iterator -} - -type indexedIterator struct { - util.BasicReleaser - index IteratorIndexer - strict bool - - data Iterator - err error - errf func(err error) - closed bool -} - -func (i *indexedIterator) setData() { - if i.data != nil { - i.data.Release() - } - i.data = i.index.Get() -} - -func (i *indexedIterator) clearData() { - if i.data != nil { - i.data.Release() - } - i.data = nil -} - -func (i *indexedIterator) indexErr() { - if err := i.index.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - i.err = err - } -} - -func (i *indexedIterator) dataErr() bool { - if err := i.data.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *indexedIterator) Valid() bool { - return i.data != nil && i.data.Valid() -} - -func (i *indexedIterator) First() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.First() { - i.indexErr() - i.clearData() - return false - } - i.setData() - return i.Next() -} - -func (i *indexedIterator) Last() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Last() { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - return true -} - -func (i *indexedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - if !i.index.Seek(key) { - i.indexErr() - i.clearData() - return false - } - i.setData() - if !i.data.Seek(key) { - if i.dataErr() { - return false - } - i.clearData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Next() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Next(): - if i.dataErr() { - return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Next() { - i.indexErr() - return false - } - i.setData() - return i.Next() - } - return true -} - -func (i *indexedIterator) Prev() bool { - if i.err != nil { - return false - } else if i.Released() { - i.err = ErrIterReleased - return false - } - - switch { - case i.data != nil && !i.data.Prev(): - if i.dataErr() { - 
return false - } - i.clearData() - fallthrough - case i.data == nil: - if !i.index.Prev() { - i.indexErr() - return false - } - i.setData() - if !i.data.Last() { - if i.dataErr() { - return false - } - i.clearData() - return i.Prev() - } - } - return true -} - -func (i *indexedIterator) Key() []byte { - if i.data == nil { - return nil - } - return i.data.Key() -} - -func (i *indexedIterator) Value() []byte { - if i.data == nil { - return nil - } - return i.data.Value() -} - -func (i *indexedIterator) Release() { - i.clearData() - i.index.Release() - i.BasicReleaser.Release() -} - -func (i *indexedIterator) Error() error { - if i.err != nil { - return i.err - } - if err := i.index.Error(); err != nil { - return err - } - return nil -} - -func (i *indexedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewIndexedIterator returns an 'indexed iterator'. An index is iterator -// that returns another iterator, a 'data iterator'. A 'data iterator' is the -// iterator that contains actual key/value pairs. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'indexed iterator', otherwise the iterator will -// continue to the next 'data iterator'. Corruption on 'index iterator' will not be -// ignored and will halt the iterator. -func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator { - return &indexedIterator{index: index, strict: strict} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go deleted file mode 100644 index 72a797892..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - "sort" - - . "github.com/onsi/ginkgo" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -type keyValue struct { - key []byte - testutil.KeyValue -} - -type keyValueIndex []keyValue - -func (x keyValueIndex) Search(key []byte) int { - return sort.Search(x.Len(), func(i int) bool { - return comparer.DefaultComparer.Compare(x[i].key, key) >= 0 - }) -} - -func (x keyValueIndex) Len() int { return len(x) } -func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil } -func (x keyValueIndex) Get(i int) Iterator { return NewArrayIterator(x[i]) } - -var _ = testutil.Defer(func() { - Describe("Indexed iterator", func() { - Test := func(n ...int) func() { - if len(n) == 0 { - rnd := testutil.NewRand() - n = make([]int, rnd.Intn(17)+3) - for i := range n { - n[i] = rnd.Intn(19) + 1 - } - } - - return func() { - It("Should iterates and seeks correctly", func(done Done) { - // Build key/value. - index := make(keyValueIndex, len(n)) - sum := 0 - for _, x := range n { - sum += x - } - kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4) - for i, j := 0, 0; i < len(n); i++ { - for x := n[i]; x > 0; x-- { - key, value := kv.Index(j) - index[i].key = key - index[i].Put(key, value) - j++ - } - } - - // Test the iterator. 
- t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewIndexedIterator(NewArrayIndexer(index), true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with 100 keys", Test(100)) - Describe("with 50-50 keys", Test(50, 50)) - Describe("with 50-1 keys", Test(50, 1)) - Describe("with 50-1-50 keys", Test(50, 1, 50)) - Describe("with 1-50 keys", Test(1, 50)) - Describe("with random N-keys", Test()) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go deleted file mode 100644 index c2522860b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package iterator provides interface and implementation to traverse over -// contents of a database. -package iterator - -import ( - "errors" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrIterReleased = errors.New("leveldb/iterator: iterator released") -) - -// IteratorSeeker is the interface that wraps the 'seeks method'. -type IteratorSeeker interface { - // First moves the iterator to the first key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - First() bool - - // Last moves the iterator to the last key/value pair. If the iterator - // only contains one key/value pair then First and Last whould moves - // to the same key/value pair. - // It returns whether such pair exist. - Last() bool - - // Seek moves the iterator to the first key/value pair whose key is greater - // than or equal to the given key. - // It returns whether such pair exist. - // - // It is safe to modify the contents of the argument after Seek returns. - Seek(key []byte) bool - - // Next moves the iterator to the next key/value pair. - // It returns whether the iterator is exhausted. - Next() bool - - // Prev moves the iterator to the previous key/value pair. - // It returns whether the iterator is exhausted. - Prev() bool -} - -// CommonIterator is the interface that wraps common interator methods. -type CommonIterator interface { - IteratorSeeker - - // util.Releaser is the interface that wraps basic Release method. - // When called Release will releases any resources associated with the - // iterator. - util.Releaser - - // util.ReleaseSetter is the interface that wraps the basic SetReleaser - // method. - util.ReleaseSetter - - // TODO: Remove this when ready. - Valid() bool - - // Error returns any accumulated error. Exhausting all the key/value pairs - // is not considered to be an error. - Error() error -} - -// Iterator iterates over a DB's key/value pairs in key order. -// -// When encouter an error any 'seeks method' will return false and will -// yield no key/value pairs. The error can be queried by calling the Error -// method. Calling Release is still necessary. -// -// An iterator must be released after use, but it is not necessary to read -// an iterator until exhaustion. -// Also, an iterator is not necessarily goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. 
-type Iterator interface { - CommonIterator - - // Key returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Key() []byte - - // Value returns the key of the current key/value pair, or nil if done. - // The caller should not modify the contents of the returned slice, and - // its contents may change on the next call to any 'seeks method'. - Value() []byte -} - -// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback -// method. -// -// ErrorCallbackSetter implemented by indexed and merged iterator. -type ErrorCallbackSetter interface { - // SetErrorCallback allows set an error callback of the coresponding - // iterator. Use nil to clear the callback. - SetErrorCallback(f func(err error)) -} - -type emptyIterator struct { - util.BasicReleaser - err error -} - -func (i *emptyIterator) rErr() { - if i.err == nil && i.Released() { - i.err = ErrIterReleased - } -} - -func (*emptyIterator) Valid() bool { return false } -func (i *emptyIterator) First() bool { i.rErr(); return false } -func (i *emptyIterator) Last() bool { i.rErr(); return false } -func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false } -func (i *emptyIterator) Next() bool { i.rErr(); return false } -func (i *emptyIterator) Prev() bool { i.rErr(); return false } -func (*emptyIterator) Key() []byte { return nil } -func (*emptyIterator) Value() []byte { return nil } -func (i *emptyIterator) Error() error { return i.err } - -// NewEmptyIterator creates an empty iterator. The err parameter can be -// nil, but if not nil the given err will be returned by Error method. -func NewEmptyIterator(err error) Iterator { - return &emptyIterator{err: err} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go deleted file mode 100644 index 5ef8d5baf..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package iterator_test - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestIterator(t *testing.T) { - testutil.RunSuite(t, "Iterator Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go deleted file mode 100644 index 1a7e29df8..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
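For reference, the Iterator contract removed above (First/Next for traversal, Key/Value for access, Release and Error for cleanup and failure reporting) can be exercised with a small helper. The sketch below assumes the upstream github.com/syndtr/goleveldb/leveldb/iterator import path and is illustrative only, not part of the vendored tree being deleted:

import "github.com/syndtr/goleveldb/leveldb/iterator"

// drainIterator walks any iterator.Iterator from its first entry to
// exhaustion. Keys and values are copied because, per the contract above,
// the returned slices may change on the next 'seeks method' call. The
// iterator is always released, and Error separates real failures from
// plain exhaustion (which is not an error).
func drainIterator(it iterator.Iterator) (keys, values [][]byte, err error) {
	defer it.Release()
	for ok := it.First(); ok; ok = it.Next() {
		keys = append(keys, append([]byte(nil), it.Key()...))
		values = append(values, append([]byte(nil), it.Value()...))
	}
	return keys, values, it.Error()
}

For example, drainIterator(iterator.NewEmptyIterator(nil)) returns empty slices and a nil error.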
- -package iterator - -import ( - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type mergedIterator struct { - cmp comparer.Comparer - iters []Iterator - strict bool - - keys [][]byte - index int - dir dir - err error - errf func(err error) - releaser util.Releaser -} - -func assertKey(key []byte) []byte { - if key == nil { - panic("leveldb/iterator: nil key") - } - return key -} - -func (i *mergedIterator) iterErr(iter Iterator) bool { - if err := iter.Error(); err != nil { - if i.errf != nil { - i.errf(err) - } - if i.strict || !errors.IsCorrupted(err) { - i.err = err - return true - } - } - return false -} - -func (i *mergedIterator) Valid() bool { - return i.err == nil && i.dir > dirEOI -} - -func (i *mergedIterator) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.First(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirEOI - return i.prev() -} - -func (i *mergedIterator) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - for x, iter := range i.iters { - switch { - case iter.Seek(key): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - i.dir = dirSOI - return i.next() -} - -func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirEOI - return false - } - i.dir = dirForward - return true -} - -func (i *mergedIterator) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirSOI: - return i.First() - case dirBackward: - key := append([]byte{}, i.keys[i.index]...) 
- if !i.Seek(key) { - return false - } - return i.Next() - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Next(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.next() -} - -func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { - i.dir = dirSOI - return false - } - i.dir = dirBackward - return true -} - -func (i *mergedIterator) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - switch i.dir { - case dirEOI: - return i.Last() - case dirForward: - key := append([]byte{}, i.keys[i.index]...) - for x, iter := range i.iters { - if x == i.index { - continue - } - seek := iter.Seek(key) - switch { - case seek && iter.Prev(), !seek && iter.Last(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - } - } - - x := i.index - iter := i.iters[x] - switch { - case iter.Prev(): - i.keys[x] = assertKey(iter.Key()) - case i.iterErr(iter): - return false - default: - i.keys[x] = nil - } - return i.prev() -} - -func (i *mergedIterator) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.keys[i.index] -} - -func (i *mergedIterator) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.iters[i.index].Value() -} - -func (i *mergedIterator) Release() { - if i.dir != dirReleased { - i.dir = dirReleased - for _, iter := range i.iters { - iter.Release() - } - i.iters = nil - i.keys = nil - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *mergedIterator) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *mergedIterator) Error() error { - return i.err -} - -func (i *mergedIterator) SetErrorCallback(f func(err error)) { - i.errf = f -} - -// NewMergedIterator returns an iterator that merges its input. Walking the -// resultant iterator will return all key/value pairs of all input iterators -// in strictly increasing key order, as defined by cmp. -// The input's key ranges may overlap, but there are assumed to be no duplicate -// keys: if iters[i] contains a key k then iters[j] will not contain that key k. -// None of the iters may be nil. -// -// If strict is true the any 'corruption errors' (i.e errors.IsCorrupted(err) == true) -// won't be ignored and will halt 'merged iterator', otherwise the iterator will -// continue to the next 'input iterator'. -func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { - return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go deleted file mode 100644 index e523b63e4..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. 
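NewMergedIterator above merges several input iterators into one key-ordered stream, assuming no key appears in more than one input. A minimal sketch of how it might be used, assuming the upstream goleveldb import paths and the memdb.DB.NewIterator method that appears later in the deleted memdb package:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// Two sources with disjoint keys, since the merged iterator assumes
	// no duplicate keys across its inputs.
	a := memdb.New(comparer.DefaultComparer, 0)
	a.Put([]byte("a"), []byte("1"))
	a.Put([]byte("c"), []byte("3"))
	b := memdb.New(comparer.DefaultComparer, 0)
	b.Put([]byte("b"), []byte("2"))

	it := iterator.NewMergedIterator(
		[]iterator.Iterator{a.NewIterator(nil), b.NewIterator(nil)},
		comparer.DefaultComparer,
		true, // strict: corruption errors halt the merged iterator
	)
	defer it.Release()
	for ok := it.First(); ok; ok = it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value()) // a=1, b=2, c=3
	}
}

With strict set to false, a corrupted input iterator would be skipped rather than halting the merge, as the NewMergedIterator comment above describes.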
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package iterator_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - . "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -var _ = testutil.Defer(func() { - Describe("Merged iterator", func() { - Test := func(filled int, empty int) func() { - return func() { - It("Should iterates and seeks correctly", func(done Done) { - rnd := testutil.NewRand() - - // Build key/value. - filledKV := make([]testutil.KeyValue, filled) - kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4) - kv.Iterate(func(i int, key, value []byte) { - filledKV[rnd.Intn(filled)].Put(key, value) - }) - - // Create itearators. - iters := make([]Iterator, filled+empty) - for i := range iters { - if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) { - filled-- - Expect(filledKV[filled].Len()).ShouldNot(BeZero()) - iters[i] = NewArrayIterator(filledKV[filled]) - } else { - empty-- - iters[i] = NewEmptyIterator(nil) - } - } - - // Test the iterator. - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: NewMergedIterator(iters, comparer.DefaultComparer, true), - } - testutil.DoIteratorTesting(&t) - done <- true - }, 1.5) - } - } - - Describe("with three, all filled iterators", Test(3, 0)) - Describe("with one filled, one empty iterators", Test(1, 1)) - Describe("with one filled, two empty iterators", Test(1, 2)) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go deleted file mode 100644 index 6519ec660..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -// Package journal reads and writes sequences of journals. Each journal is a stream -// of bytes that completes before the next journal starts. -// -// When reading, call Next to obtain an io.Reader for the next journal. Next will -// return io.EOF when there are no more journals. It is valid to call Next -// without reading the current journal to exhaustion. -// -// When writing, call Next to obtain an io.Writer for the next journal. Calling -// Next finishes the current journal. Call Close to finish the final journal. -// -// Optionally, call Flush to finish the current journal and flush the underlying -// writer without starting a new journal. To start a new journal after flushing, -// call Next. -// -// Neither Readers or Writers are safe to use concurrently. 
-// -// Example code: -// func read(r io.Reader) ([]string, error) { -// var ss []string -// journals := journal.NewReader(r, nil, true, true) -// for { -// j, err := journals.Next() -// if err == io.EOF { -// break -// } -// if err != nil { -// return nil, err -// } -// s, err := ioutil.ReadAll(j) -// if err != nil { -// return nil, err -// } -// ss = append(ss, string(s)) -// } -// return ss, nil -// } -// -// func write(w io.Writer, ss []string) error { -// journals := journal.NewWriter(w) -// for _, s := range ss { -// j, err := journals.Next() -// if err != nil { -// return err -// } -// if _, err := j.Write([]byte(s)), err != nil { -// return err -// } -// } -// return journals.Close() -// } -// -// The wire format is that the stream is divided into 32KiB blocks, and each -// block contains a number of tightly packed chunks. Chunks cannot cross block -// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a -// block must be zero. -// -// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4 -// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type) -// followed by a payload. The checksum is over the chunk type and the payload. -// -// There are four chunk types: whether the chunk is the full journal, or the -// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal -// has one first chunk, zero or more middle chunks, and one last chunk. -// -// The wire format allows for limited recovery in the face of data corruption: -// on a format error (such as a checksum mismatch), the reader moves to the -// next block and looks for the next full or first chunk. -package journal - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// These constants are part of the wire format and should not be changed. -const ( - fullChunkType = 1 - firstChunkType = 2 - middleChunkType = 3 - lastChunkType = 4 -) - -const ( - blockSize = 32 * 1024 - headerSize = 7 -) - -type flusher interface { - Flush() error -} - -// ErrCorrupted is the error type that generated by corrupted block or chunk. -type ErrCorrupted struct { - Size int - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size) -} - -// Dropper is the interface that wrap simple Drop method. The Drop -// method will be called when the journal reader dropping a block or chunk. -type Dropper interface { - Drop(err error) -} - -// Reader reads journals from an underlying io.Reader. -type Reader struct { - // r is the underlying reader. - r io.Reader - // the dropper. - dropper Dropper - // strict flag. - strict bool - // checksum flag. - checksum bool - // seq is the sequence number of the current journal. - seq int - // buf[i:j] is the unread portion of the current chunk's payload. - // The low bound, i, excludes the chunk header. - i, j int - // n is the number of bytes of buf that are valid. Once reading has started, - // only the final block can have n < blockSize. - n int - // last is whether the current chunk is the last chunk of the journal. - last bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewReader returns a new reader. The dropper may be nil, and if -// strict is true then corrupted or invalid chunk will halt the journal -// reader entirely. 
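The journal wire format documented above packs each chunk behind a 7-byte header: a 4-byte little-endian checksum over the type byte and payload, a 2-byte little-endian payload length, and a 1-byte chunk type. A sketch of decoding one such header, mirroring what nextChunk below does with r.buf (the helper name is illustrative, not part of the package):

import "encoding/binary"

// parseChunkHeader decodes the chunk header at offset off within a block,
// following the layout documented above: checksum, length, chunk type.
func parseChunkHeader(block []byte, off int) (checksum uint32, length int, chunkType byte) {
	checksum = binary.LittleEndian.Uint32(block[off : off+4])
	length = int(binary.LittleEndian.Uint16(block[off+4 : off+6]))
	chunkType = block[off+6]
	return
}

The payload then occupies block[off+7 : off+7+length], and the stored checksum covers block[off+6 : off+7+length], i.e. the type byte plus the payload, which is exactly the range nextChunk verifies with util.NewCRC.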
-func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader { - return &Reader{ - r: r, - dropper: dropper, - strict: strict, - checksum: checksum, - last: true, - } -} - -var errSkip = errors.New("leveldb/journal: skipped") - -func (r *Reader) corrupt(n int, reason string, skip bool) error { - if r.dropper != nil { - r.dropper.Drop(&ErrCorrupted{n, reason}) - } - if r.strict && !skip { - r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason}) - return r.err - } - return errSkip -} - -// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the -// next block into the buffer if necessary. -func (r *Reader) nextChunk(first bool) error { - for { - if r.j+headerSize <= r.n { - checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4]) - length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6]) - chunkType := r.buf[r.j+6] - - if checksum == 0 && length == 0 && chunkType == 0 { - // Drop entire block. - m := r.n - r.j - r.i = r.n - r.j = r.n - return r.corrupt(m, "zero header", false) - } else { - m := r.n - r.j - r.i = r.j + headerSize - r.j = r.j + headerSize + int(length) - if r.j > r.n { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "chunk length overflows block", false) - } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() { - // Drop entire block. - r.i = r.n - r.j = r.n - return r.corrupt(m, "checksum mismatch", false) - } - } - if first && chunkType != fullChunkType && chunkType != firstChunkType { - m := r.j - r.i - r.i = r.j - // Report the error, but skip it. - return r.corrupt(m+headerSize, "orphan chunk", true) - } - r.last = chunkType == fullChunkType || chunkType == lastChunkType - return nil - } - - // The last block. - if r.n < blockSize && r.n > 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - - // Read block. - n, err := io.ReadFull(r.r, r.buf[:]) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return err - } - if n == 0 { - if !first { - return r.corrupt(0, "missing chunk part", false) - } - r.err = io.EOF - return r.err - } - r.i, r.j, r.n = 0, 0, n - } -} - -// Next returns a reader for the next journal. It returns io.EOF if there are no -// more journals. The reader returned becomes stale after the next Next call, -// and should no longer be used. If strict is false, the reader will returns -// io.ErrUnexpectedEOF error when found corrupted journal. -func (r *Reader) Next() (io.Reader, error) { - r.seq++ - if r.err != nil { - return nil, r.err - } - r.i = r.j - for { - if err := r.nextChunk(true); err == nil { - break - } else if err != errSkip { - return nil, err - } - } - return &singleReader{r, r.seq, nil}, nil -} - -// Reset resets the journal reader, allows reuse of the journal reader. Reset returns -// last accumulated error. 
-func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error { - r.seq++ - err := r.err - r.r = reader - r.dropper = dropper - r.strict = strict - r.checksum = checksum - r.i = 0 - r.j = 0 - r.n = 0 - r.last = true - r.err = nil - return err -} - -type singleReader struct { - r *Reader - seq int - err error -} - -func (x *singleReader) Read(p []byte) (int, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - n := copy(p, r.buf[r.i:r.j]) - r.i += n - return n, nil -} - -func (x *singleReader) ReadByte() (byte, error) { - r := x.r - if r.seq != x.seq { - return 0, errors.New("leveldb/journal: stale reader") - } - if x.err != nil { - return 0, x.err - } - if r.err != nil { - return 0, r.err - } - for r.i == r.j { - if r.last { - return 0, io.EOF - } - x.err = r.nextChunk(false) - if x.err != nil { - if x.err == errSkip { - x.err = io.ErrUnexpectedEOF - } - return 0, x.err - } - } - c := r.buf[r.i] - r.i++ - return c, nil -} - -// Writer writes journals to an underlying io.Writer. -type Writer struct { - // w is the underlying writer. - w io.Writer - // seq is the sequence number of the current journal. - seq int - // f is w as a flusher. - f flusher - // buf[i:j] is the bytes that will become the current chunk. - // The low bound, i, includes the chunk header. - i, j int - // buf[:written] has already been written to w. - // written is zero unless Flush has been called. - written int - // first is whether the current chunk is the first chunk of the journal. - first bool - // pending is whether a chunk is buffered but not yet written. - pending bool - // err is any accumulated error. - err error - // buf is the buffer. - buf [blockSize]byte -} - -// NewWriter returns a new Writer. -func NewWriter(w io.Writer) *Writer { - f, _ := w.(flusher) - return &Writer{ - w: w, - f: f, - } -} - -// fillHeader fills in the header for the pending chunk. -func (w *Writer) fillHeader(last bool) { - if w.i+headerSize > w.j || w.j > blockSize { - panic("leveldb/journal: bad writer state") - } - if last { - if w.first { - w.buf[w.i+6] = fullChunkType - } else { - w.buf[w.i+6] = lastChunkType - } - } else { - if w.first { - w.buf[w.i+6] = firstChunkType - } else { - w.buf[w.i+6] = middleChunkType - } - } - binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value()) - binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize)) -} - -// writeBlock writes the buffered block to the underlying writer, and reserves -// space for the next chunk's header. -func (w *Writer) writeBlock() { - _, w.err = w.w.Write(w.buf[w.written:]) - w.i = 0 - w.j = headerSize - w.written = 0 -} - -// writePending finishes the current journal and writes the buffer to the -// underlying writer. -func (w *Writer) writePending() { - if w.err != nil { - return - } - if w.pending { - w.fillHeader(true) - w.pending = false - } - _, w.err = w.w.Write(w.buf[w.written:w.j]) - w.written = w.j -} - -// Close finishes the current journal and closes the writer. 
-func (w *Writer) Close() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - w.err = errors.New("leveldb/journal: closed Writer") - return nil -} - -// Flush finishes the current journal, writes to the underlying writer, and -// flushes it if that writer implements interface{ Flush() error }. -func (w *Writer) Flush() error { - w.seq++ - w.writePending() - if w.err != nil { - return w.err - } - if w.f != nil { - w.err = w.f.Flush() - return w.err - } - return nil -} - -// Reset resets the journal writer, allows reuse of the journal writer. Reset -// will also closes the journal writer if not already. -func (w *Writer) Reset(writer io.Writer) (err error) { - w.seq++ - if w.err == nil { - w.writePending() - err = w.err - } - w.w = writer - w.f, _ = writer.(flusher) - w.i = 0 - w.j = 0 - w.written = 0 - w.first = false - w.pending = false - w.err = nil - return -} - -// Next returns a writer for the next journal. The writer returned becomes stale -// after the next Close, Flush or Next call, and should no longer be used. -func (w *Writer) Next() (io.Writer, error) { - w.seq++ - if w.err != nil { - return nil, w.err - } - if w.pending { - w.fillHeader(true) - } - w.i = w.j - w.j = w.j + headerSize - // Check if there is room in the block for the header. - if w.j > blockSize { - // Fill in the rest of the block with zeroes. - for k := w.i; k < blockSize; k++ { - w.buf[k] = 0 - } - w.writeBlock() - if w.err != nil { - return nil, w.err - } - } - w.first = true - w.pending = true - return singleWriter{w, w.seq}, nil -} - -type singleWriter struct { - w *Writer - seq int -} - -func (x singleWriter) Write(p []byte) (int, error) { - w := x.w - if w.seq != x.seq { - return 0, errors.New("leveldb/journal: stale writer") - } - if w.err != nil { - return 0, w.err - } - n0 := len(p) - for len(p) > 0 { - // Write a block, if it is full. - if w.j == blockSize { - w.fillHeader(false) - w.writeBlock() - if w.err != nil { - return 0, w.err - } - w.first = false - } - // Copy bytes into the buffer. - n := copy(w.buf[w.j:], p) - w.j += n - p = p[n:] - } - return n0, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go deleted file mode 100644 index 0fcf22599..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135 -// License, authors and contributors informations can be found at bellow URLs respectively: -// https://code.google.com/p/leveldb-go/source/browse/LICENSE -// https://code.google.com/p/leveldb-go/source/browse/AUTHORS -// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS - -package journal - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "strings" - "testing" -) - -type dropper struct { - t *testing.T -} - -func (d dropper) Drop(err error) { - d.t.Log(err) -} - -func short(s string) string { - if len(s) < 64 { - return s - } - return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) -} - -// big returns a string of length n, composed of repetitions of partial. 
-func big(partial string, n int) string { - return strings.Repeat(partial, n/len(partial)+1)[:n] -} - -func TestEmpty(t *testing.T) { - buf := new(bytes.Buffer) - r := NewReader(buf, dropper{t}, true, true) - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { - buf := new(bytes.Buffer) - - reset() - w := NewWriter(buf) - for { - s, ok := gen() - if !ok { - break - } - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write([]byte(s)); err != nil { - t.Fatal(err) - } - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - reset() - r := NewReader(buf, dropper{t}, true, true) - for { - s, ok := gen() - if !ok { - break - } - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - x, err := ioutil.ReadAll(rr) - if err != nil { - t.Fatal(err) - } - if string(x) != s { - t.Fatalf("got %q, want %q", short(string(x)), short(s)) - } - } - if _, err := r.Next(); err != io.EOF { - t.Fatalf("got %v, want %v", err, io.EOF) - } -} - -func testLiterals(t *testing.T, s []string) { - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == len(s) { - return "", false - } - i++ - return s[i-1], true - } - testGenerator(t, reset, gen) -} - -func TestMany(t *testing.T) { - const n = 1e5 - var i int - reset := func() { - i = 0 - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return fmt.Sprintf("%d.", i-1), true - } - testGenerator(t, reset, gen) -} - -func TestRandom(t *testing.T) { - const n = 1e2 - var ( - i int - r *rand.Rand - ) - reset := func() { - i, r = 0, rand.New(rand.NewSource(0)) - } - gen := func() (string, bool) { - if i == n { - return "", false - } - i++ - return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true - } - testGenerator(t, reset, gen) -} - -func TestBasic(t *testing.T) { - testLiterals(t, []string{ - strings.Repeat("a", 1000), - strings.Repeat("b", 97270), - strings.Repeat("c", 8000), - }) -} - -func TestBoundary(t *testing.T) { - for i := blockSize - 16; i < blockSize+16; i++ { - s0 := big("abcd", i) - for j := blockSize - 16; j < blockSize+16; j++ { - s1 := big("ABCDE", j) - testLiterals(t, []string{s0, s1}) - testLiterals(t, []string{s0, "", s1}) - testLiterals(t, []string{s0, "x", s1}) - } - } -} - -func TestFlush(t *testing.T) { - buf := new(bytes.Buffer) - w := NewWriter(buf) - // Write a couple of records. Everything should still be held - // in the record.Writer buffer, so that buf.Len should be 0. - w0, _ := w.Next() - w0.Write([]byte("0")) - w1, _ := w.Next() - w1.Write([]byte("11")) - if got, want := buf.Len(), 0; got != want { - t.Fatalf("buffer length #0: got %d want %d", got, want) - } - // Flush the record.Writer buffer, which should yield 17 bytes. - // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #1: got %d want %d", got, want) - } - // Do another write, one that isn't large enough to complete the block. - // The write should not have flowed through to buf. - w2, _ := w.Next() - w2.Write(bytes.Repeat([]byte("2"), 10000)) - if got, want := buf.Len(), 17; got != want { - t.Fatalf("buffer length #2: got %d want %d", got, want) - } - // Flushing should get us up to 10024 bytes written. - // 10024 = 17 + 7 + 10000. 
- if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 10024; got != want { - t.Fatalf("buffer length #3: got %d want %d", got, want) - } - // Do a bigger write, one that completes the current block. - // We should now have 32768 bytes (a complete block), without - // an explicit flush. - w3, _ := w.Next() - w3.Write(bytes.Repeat([]byte("3"), 40000)) - if got, want := buf.Len(), 32768; got != want { - t.Fatalf("buffer length #4: got %d want %d", got, want) - } - // Flushing should get us up to 50038 bytes written. - // 50038 = 10024 + 2*7 + 40000. There are two headers because - // the one record was split into two chunks. - if err := w.Flush(); err != nil { - t.Fatal(err) - } - if got, want := buf.Len(), 50038; got != want { - t.Fatalf("buffer length #5: got %d want %d", got, want) - } - // Check that reading those records give the right lengths. - r := NewReader(buf, dropper{t}, true, true) - wants := []int64{1, 2, 10000, 40000} - for i, want := range wants { - rr, _ := r.Next() - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #%d: %v", i, err) - } - if n != want { - t.Fatalf("read #%d: got %d bytes want %d", i, n, want) - } - } -} - -func TestNonExhaustiveRead(t *testing.T) { - const n = 100 - buf := new(bytes.Buffer) - p := make([]byte, 10) - rnd := rand.New(rand.NewSource(1)) - - w := NewWriter(buf) - for i := 0; i < n; i++ { - length := len(p) + rnd.Intn(3*blockSize) - s := string(uint8(i)) + "123456789abcdefgh" - ww, _ := w.Next() - ww.Write([]byte(big(s, length))) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - for i := 0; i < n; i++ { - rr, _ := r.Next() - _, err := io.ReadFull(rr, p) - if err != nil { - t.Fatal(err) - } - want := string(uint8(i)) + "123456789" - if got := string(p); got != want { - t.Fatalf("read #%d: got %q want %q", i, got, want) - } - } -} - -func TestStaleReader(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w0.Write([]byte("0")) - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1.Write([]byte("11")) - if err := w.Close(); err != nil { - t.Fatal(err) - } - - r := NewReader(buf, dropper{t}, true, true) - r0, err := r.Next() - if err != nil { - t.Fatal(err) - } - r1, err := r.Next() - if err != nil { - t.Fatal(err) - } - p := make([]byte, 1) - if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale read #0: unexpected error: %v", err) - } - if _, err := r1.Read(p); err != nil { - t.Fatalf("fresh read #1: got %v want nil error", err) - } - if p[0] != '1' { - t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) - } -} - -func TestStaleWriter(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - w0, err := w.Next() - if err != nil { - t.Fatal(err) - } - w1, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #0: unexpected error: %v", err) - } - if _, err := w1.Write([]byte("11")); err != nil { - t.Fatalf("fresh write #1: got %v want nil error", err) - } - if err := w.Flush(); err != nil { - t.Fatalf("flush: %v", err) - } - if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { - t.Fatalf("stale write #1: unexpected error: %v", err) - } -} - -func TestCorrupt_MissingLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := 
NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Cut the last block. - b := buf.Bytes()[:blockSize] - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read. - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if n != blockSize-1024 { - t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) - } - - // Second read. - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedFirstBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #0. - for i := 0; i < 1024; i++ { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (third record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. 
- ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #1. - for i := 0; i < 1024; i++ { - b[blockSize+i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - // Third read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 2; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_CorruptedLastBlock(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - // Fourth record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { - t.Fatalf("write #3: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting block #3. - for i := len(b) - 1; i > len(b)-1024; i-- { - b[i] = '1' - } - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). 
- rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize - headerSize); n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - // Third read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #2: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #2: got %d bytes want %d", n, want) - } - - // Fourth read (fourth record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #3: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). - rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (second record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != io.ErrUnexpectedEOF { - t.Fatalf("read #1: unexpected error: %v", err) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} - -func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { - buf := new(bytes.Buffer) - - w := NewWriter(buf) - - // First record. - ww, err := w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { - t.Fatalf("write #0: unexpected error: %v", err) - } - - // Second record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { - t.Fatalf("write #1: unexpected error: %v", err) - } - - // Third record. - ww, err = w.Next() - if err != nil { - t.Fatal(err) - } - if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { - t.Fatalf("write #2: unexpected error: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - // Corrupting record #1. - x := blockSize/2 + headerSize - binary.LittleEndian.PutUint16(b[x+4:], 0xffff) - - r := NewReader(bytes.NewReader(b), dropper{t}, false, true) - - // First read (first record). 
- rr, err := r.Next() - if err != nil { - t.Fatal(err) - } - n, err := io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #0: %v", err) - } - if want := int64(blockSize / 2); n != want { - t.Fatalf("read #0: got %d bytes want %d", n, want) - } - - // Second read (third record). - rr, err = r.Next() - if err != nil { - t.Fatal(err) - } - n, err = io.Copy(ioutil.Discard, rr) - if err != nil { - t.Fatalf("read #1: %v", err) - } - if want := int64(blockSize-headerSize) + 1; n != want { - t.Fatalf("read #1: got %d bytes want %d", n, want) - } - - if _, err := r.Next(); err != io.EOF { - t.Fatalf("last next: unexpected error: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go deleted file mode 100644 index 572ae8150..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "encoding/binary" - "fmt" - - "github.com/syndtr/goleveldb/leveldb/errors" -) - -type ErrIkeyCorrupted struct { - Ikey []byte - Reason string -} - -func (e *ErrIkeyCorrupted) Error() string { - return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason) -} - -func newErrIkeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason}) -} - -type kType int - -func (kt kType) String() string { - switch kt { - case ktDel: - return "d" - case ktVal: - return "v" - } - return "x" -} - -// Value types encoded as the last component of internal keys. -// Don't modify; this value are saved to disk. -const ( - ktDel kType = iota - ktVal -) - -// ktSeek defines the kType that should be passed when constructing an -// internal key for seeking to a particular sequence number (since we -// sort sequence numbers in decreasing order and the value type is -// embedded as the low 8 bits in the sequence number in internal keys, -// we need to use the highest-numbered ValueType, not the lowest). -const ktSeek = ktVal - -const ( - // Maximum value possible for sequence number; the 8-bits are - // used by value type, so its can packed together in single - // 64-bit integer. - kMaxSeq uint64 = (uint64(1) << 56) - 1 - // Maximum value possible for packed sequence number and type. - kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek) -) - -// Maximum number encoded in bytes. 
-var kMaxNumBytes = make([]byte, 8) - -func init() { - binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum) -} - -type iKey []byte - -func newIkey(ukey []byte, seq uint64, kt kType) iKey { - if seq > kMaxSeq { - panic("leveldb: invalid sequence number") - } else if kt > ktVal { - panic("leveldb: invalid type") - } - - ik := make(iKey, len(ukey)+8) - copy(ik, ukey) - binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt)) - return ik -} - -func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) { - if len(ik) < 8 { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length") - } - num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type") - } - ukey = ik[:len(ik)-8] - return -} - -func validIkey(ik []byte) bool { - _, _, _, err := parseIkey(ik) - return err == nil -} - -func (ik iKey) assert() { - if ik == nil { - panic("leveldb: nil iKey") - } - if len(ik) < 8 { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik))) - } -} - -func (ik iKey) ukey() []byte { - ik.assert() - return ik[:len(ik)-8] -} - -func (ik iKey) num() uint64 { - ik.assert() - return binary.LittleEndian.Uint64(ik[len(ik)-8:]) -} - -func (ik iKey) parseNum() (seq uint64, kt kType) { - num := ik.num() - seq, kt = uint64(num>>8), kType(num&0xff) - if kt > ktVal { - panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) - } - return -} - -func (ik iKey) String() string { - if ik == nil { - return "" - } - - if ukey, seq, kt, err := parseIkey(ik); err == nil { - return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq) - } else { - return "" - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go deleted file mode 100644 index 30eadf784..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
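An internal key, as newIkey and parseIkey above show, is the user key followed by eight little-endian bytes packing the sequence number and key type as (seq<<8)|type. A small stand-alone sketch of that packing (helper names are illustrative, not from the package):

import "encoding/binary"

// packInternalKey mirrors newIkey above: keyType is 0 for a deletion
// marker (ktDel) and 1 for a value (ktVal).
func packInternalKey(ukey []byte, seq uint64, keyType uint64) []byte {
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|keyType)
	return ik
}

// unpackInternalKey mirrors parseIkey above, without the corruption checks.
func unpackInternalKey(ik []byte) (ukey []byte, seq uint64, keyType uint64) {
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	return ik[:len(ik)-8], num >> 8, num & 0xff
}

For instance, packInternalKey([]byte("foo"), 100, 1) appends the little-endian encoding of (100<<8)|1 = 0x6401 to "foo", and unpackInternalKey recovers ("foo", 100, 1).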
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var defaultIComparer = &iComparer{comparer.DefaultComparer} - -func ikey(key string, seq uint64, kt kType) iKey { - return newIkey([]byte(key), uint64(seq), kt) -} - -func shortSep(a, b []byte) []byte { - dst := make([]byte, len(a)) - dst = defaultIComparer.Separator(dst[:0], a, b) - if dst == nil { - return a - } - return dst -} - -func shortSuccessor(b []byte) []byte { - dst := make([]byte, len(b)) - dst = defaultIComparer.Successor(dst[:0], b) - if dst == nil { - return b - } - return dst -} - -func testSingleKey(t *testing.T, key string, seq uint64, kt kType) { - ik := ikey(key, seq, kt) - - if !bytes.Equal(ik.ukey(), []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - - rseq, rt := ik.parseNum() - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - - if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil { - if !bytes.Equal(rukey, []byte(key)) { - t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) - } - if rseq != seq { - t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) - } - if rt != kt { - t.Errorf("type does not equal, got %v, want %v", rt, kt) - } - } else { - t.Errorf("key error: %v", kerr) - } -} - -func TestIkey_EncodeDecode(t *testing.T) { - keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} - seqs := []uint64{ - 1, 2, 3, - (1 << 8) - 1, 1 << 8, (1 << 8) + 1, - (1 << 16) - 1, 1 << 16, (1 << 16) + 1, - (1 << 32) - 1, 1 << 32, (1 << 32) + 1, - } - for _, key := range keys { - for _, seq := range seqs { - testSingleKey(t, key, seq, ktVal) - testSingleKey(t, "hello", 1, ktDel) - } - } -} - -func assertBytes(t *testing.T, want, got []byte) { - if !bytes.Equal(got, want) { - t.Errorf("assert failed, got %v, want %v", got, want) - } -} - -func TestIkeyShortSeparator(t *testing.T) { - // When user keys are same - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 99, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 101, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktVal))) - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foo", 100, ktDel))) - - // When user keys are misordered - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("bar", 99, ktVal))) - - // When user keys are different, but correctly ordered - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSep(ikey("foo", 100, ktVal), - ikey("hello", 200, ktVal))) - - // When start user key is prefix of limit user key - assertBytes(t, ikey("foo", 100, ktVal), - shortSep(ikey("foo", 100, ktVal), - ikey("foobar", 200, ktVal))) - - // When limit user key is prefix of start user key - assertBytes(t, ikey("foobar", 100, ktVal), - shortSep(ikey("foobar", 100, ktVal), - ikey("foo", 200, ktVal))) -} - -func TestIkeyShortestSuccessor(t *testing.T) { - assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek), - shortSuccessor(ikey("foo", 100, ktVal))) - assertBytes(t, ikey("\xff\xff", 100, ktVal), - shortSuccessor(ikey("\xff\xff", 100, ktVal))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go 
b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go deleted file mode 100644 index fefa007a7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package leveldb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestLevelDB(t *testing.T) { - testutil.RunSuite(t, "LevelDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go deleted file mode 100644 index b05084caa..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - "encoding/binary" - "math/rand" - "testing" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -func BenchmarkPut(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkPutRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) - } - - b.ResetTimer() - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } -} - -func BenchmarkGet(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := range buf { - p.Get(buf[i][:]) - } -} - -func BenchmarkGetRandom(b *testing.B) { - buf := make([][4]byte, b.N) - for i := range buf { - binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) - } - - p := New(comparer.DefaultComparer, 0) - for i := range buf { - p.Put(buf[i][:], nil) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p.Get(buf[rand.Int()%b.N][:]) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go deleted file mode 100644 index e5398873b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package memdb provides in-memory key/value database implementation. 
-package memdb - -import ( - "math/rand" - "sync" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrIterReleased = errors.New("leveldb/memdb: iterator released") -) - -const tMaxHeight = 12 - -type dbIter struct { - util.BasicReleaser - p *DB - slice *util.Range - node int - forward bool - key, value []byte - err error -} - -func (i *dbIter) fill(checkStart, checkLimit bool) bool { - if i.node != 0 { - n := i.p.nodeData[i.node] - m := n + i.p.nodeData[i.node+nKey] - i.key = i.p.kvData[n:m] - if i.slice != nil { - switch { - case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0: - fallthrough - case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0: - i.node = 0 - goto bail - } - } - i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]] - return true - } -bail: - i.key = nil - i.value = nil - return false -} - -func (i *dbIter) Valid() bool { - return i.node != 0 -} - -func (i *dbIter) First() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil { - i.node, _ = i.p.findGE(i.slice.Start, false) - } else { - i.node = i.p.nodeData[nNext] - } - return i.fill(false, true) -} - -func (i *dbIter) Last() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Limit != nil { - i.node = i.p.findLT(i.slice.Limit) - } else { - i.node = i.p.findLast() - } - return i.fill(true, false) -} - -func (i *dbIter) Seek(key []byte) bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 { - key = i.slice.Start - } - i.node, _ = i.p.findGE(key, false) - return i.fill(false, true) -} - -func (i *dbIter) Next() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if !i.forward { - return i.First() - } - return false - } - i.forward = true - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.nodeData[i.node+nNext] - return i.fill(false, true) -} - -func (i *dbIter) Prev() bool { - if i.Released() { - i.err = ErrIterReleased - return false - } - - if i.node == 0 { - if i.forward { - return i.Last() - } - return false - } - i.forward = false - i.p.mu.RLock() - defer i.p.mu.RUnlock() - i.node = i.p.findLT(i.key) - return i.fill(true, false) -} - -func (i *dbIter) Key() []byte { - return i.key -} - -func (i *dbIter) Value() []byte { - return i.value -} - -func (i *dbIter) Error() error { return i.err } - -func (i *dbIter) Release() { - if !i.Released() { - i.p = nil - i.node = 0 - i.key = nil - i.value = nil - i.BasicReleaser.Release() - } -} - -const ( - nKV = iota - nKey - nVal - nHeight - nNext -) - -// DB is an in-memory key/value database. 
-type DB struct { - cmp comparer.BasicComparer - rnd *rand.Rand - - mu sync.RWMutex - kvData []byte - // Node data: - // [0] : KV offset - // [1] : Key length - // [2] : Value length - // [3] : Height - // [3..height] : Next nodes - nodeData []int - prevNode [tMaxHeight]int - maxHeight int - n int - kvSize int -} - -func (p *DB) randHeight() (h int) { - const branching = 4 - h = 1 - for h < tMaxHeight && p.rnd.Int()%branching == 0 { - h++ - } - return -} - -func (p *DB) findGE(key []byte, prev bool) (int, bool) { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - cmp := 1 - if next != 0 { - o := p.nodeData[next] - cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) - } - if cmp < 0 { - // Keep searching in this list - node = next - } else { - if prev { - p.prevNode[h] = node - } else if cmp == 0 { - return next, true - } - if h == 0 { - return next, cmp == 0 - } - h-- - } - } -} - -func (p *DB) findLT(key []byte) int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - o := p.nodeData[next] - if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -func (p *DB) findLast() int { - node := 0 - h := p.maxHeight - 1 - for { - next := p.nodeData[node+nNext+h] - if next == 0 { - if h == 0 { - break - } - h-- - } else { - node = next - } - } - return node -} - -// Put sets the value for the given key. It overwrites any previous value -// for that key; a DB is not a multi-map. -// -// It is safe to modify the contents of the arguments after Put returns. -func (p *DB) Put(key []byte, value []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - if node, exact := p.findGE(key, true); exact { - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - p.nodeData[node] = kvOffset - m := p.nodeData[node+nVal] - p.nodeData[node+nVal] = len(value) - p.kvSize += len(value) - m - return nil - } - - h := p.randHeight() - if h > p.maxHeight { - for i := p.maxHeight; i < h; i++ { - p.prevNode[i] = 0 - } - p.maxHeight = h - } - - kvOffset := len(p.kvData) - p.kvData = append(p.kvData, key...) - p.kvData = append(p.kvData, value...) - // Node - node := len(p.nodeData) - p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h) - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData = append(p.nodeData, p.nodeData[m]) - p.nodeData[m] = node - } - - p.kvSize += len(key) + len(value) - p.n++ - return nil -} - -// Delete deletes the value for the given key. It returns ErrNotFound if -// the DB does not contain the key. -// -// It is safe to modify the contents of the arguments after Delete returns. -func (p *DB) Delete(key []byte) error { - p.mu.Lock() - defer p.mu.Unlock() - - node, exact := p.findGE(key, true) - if !exact { - return ErrNotFound - } - - h := p.nodeData[node+nHeight] - for i, n := range p.prevNode[:h] { - m := n + 4 + i - p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i] - } - - p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal] - p.n-- - return nil -} - -// Contains returns true if the given key are in the DB. -// -// It is safe to modify the contents of the arguments after Contains returns. -func (p *DB) Contains(key []byte) bool { - p.mu.RLock() - _, exact := p.findGE(key, false) - p.mu.RUnlock() - return exact -} - -// Get gets the value for the given key. It returns error.ErrNotFound if the -// DB does not contain the key. 
-// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Get returns. -func (p *DB) Get(key []byte) (value []byte, err error) { - p.mu.RLock() - if node, exact := p.findGE(key, false); exact { - o := p.nodeData[node] + p.nodeData[node+nKey] - value = p.kvData[o : o+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// -// The caller should not modify the contents of the returned slice, but -// it is safe to modify the contents of the argument after Find returns. -func (p *DB) Find(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node, _ := p.findGE(key, false); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -// NewIterator returns an iterator of the DB. -// The returned iterator is not goroutine-safe, but it is safe to use -// multiple iterators concurrently, with each in a dedicated goroutine. -// It is also safe to use an iterator concurrently with modifying its -// underlying DB. However, the resultant key/value pairs are not guaranteed -// to be a consistent snapshot of the DB at a particular point in time. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// DB. And a nil Range.Limit is treated as a key after all keys in -// the DB. -// -// The iterator must be released after use, by calling Release method. -// -// Also read Iterator documentation of the leveldb/iterator package. -func (p *DB) NewIterator(slice *util.Range) iterator.Iterator { - return &dbIter{p: p, slice: slice} -} - -// Capacity returns keys/values buffer capacity. -func (p *DB) Capacity() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) -} - -// Size returns sum of keys and values length. Note that deleted -// key/value will not be accouted for, but it will still consume -// the buffer, since the buffer is append only. -func (p *DB) Size() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.kvSize -} - -// Free returns keys/values free buffer before need to grow. -func (p *DB) Free() int { - p.mu.RLock() - defer p.mu.RUnlock() - return cap(p.kvData) - len(p.kvData) -} - -// Len returns the number of entries in the DB. -func (p *DB) Len() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.n -} - -// Reset resets the DB to initial empty state. Allows reuse the buffer. -func (p *DB) Reset() { - p.rnd = rand.New(rand.NewSource(0xdeadbeef)) - p.maxHeight = 1 - p.n = 0 - p.kvSize = 0 - p.kvData = p.kvData[:0] - p.nodeData = p.nodeData[:4+tMaxHeight] - p.nodeData[nKV] = 0 - p.nodeData[nKey] = 0 - p.nodeData[nVal] = 0 - p.nodeData[nHeight] = tMaxHeight - for n := 0; n < tMaxHeight; n++ { - p.nodeData[4+n] = 0 - p.prevNode[n] = 0 - } -} - -// New creates a new initalized in-memory key/value DB. The capacity -// is the initial key/value buffer capacity. The capacity is advisory, -// not enforced. -// -// The returned DB instance is goroutine-safe. 
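The comments above spell out the whole public surface of this in-memory DB: New, Put, Get, Find, Contains, Delete, NewIterator with an optional util.Range, plus the size accessors. A minimal usage sketch, assuming the upstream goleveldb import paths rather than the vendored Godeps copies:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// The capacity argument is only an advisory initial buffer size.
	db := memdb.New(comparer.DefaultComparer, 1024)

	db.Put([]byte("apple"), []byte("1"))
	db.Put([]byte("banana"), []byte("2"))
	db.Put([]byte("cherry"), []byte("3"))

	if v, err := db.Get([]byte("banana")); err == nil {
		fmt.Printf("banana = %s\n", v)
	}

	// Keys in [apple, cherry); a nil Range would cover the whole DB.
	it := db.NewIterator(&util.Range{Start: []byte("apple"), Limit: []byte("cherry")})
	for it.Next() {
		fmt.Printf("%s = %s\n", it.Key(), it.Value())
	}
	it.Release()
}

Note that Next on a fresh iterator behaves like First (the dbIter code above falls through to First when no node has been visited yet), which is why the loop needs no explicit First call.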
-func New(cmp comparer.BasicComparer, capacity int) *DB { - p := &DB{ - cmp: cmp, - rnd: rand.New(rand.NewSource(0xdeadbeef)), - maxHeight: 1, - kvData: make([]byte, 0, capacity), - nodeData: make([]int, 4+tMaxHeight), - } - p.nodeData[nHeight] = tMaxHeight - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go deleted file mode 100644 index 18c304b7f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package memdb - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestMemDB(t *testing.T) { - testutil.RunSuite(t, "MemDB Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go deleted file mode 100644 index 5dd6dbc7b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package memdb - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLT(key); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestFindLast() (rkey, value []byte, err error) { - p.mu.RLock() - if node := p.findLast(); node != 0 { - n := p.nodeData[node] - m := n + p.nodeData[node+nKey] - rkey = p.kvData[n:m] - value = p.kvData[m : m+p.nodeData[node+nVal]] - } else { - err = ErrNotFound - } - p.mu.RUnlock() - return -} - -func (p *DB) TestPut(key []byte, value []byte) error { - p.Put(key, value) - return nil -} - -func (p *DB) TestDelete(key []byte) error { - p.Delete(key) - return nil -} - -func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return p.Find(key) -} - -func (p *DB) TestGet(key []byte) (value []byte, err error) { - return p.Get(key) -} - -func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { - return p.NewIterator(slice) -} - -var _ = testutil.Defer(func() { - Describe("Memdb", func() { - Describe("write test", func() { - It("should do write correctly", func() { - db := New(comparer.DefaultComparer, 0) - t := testutil.DBTesting{ - DB: db, - Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), - PostFn: func(t *testutil.DBTesting) { - Expect(db.Len()).Should(Equal(t.Present.Len())) - Expect(db.Size()).Should(Equal(t.Present.Size())) - switch t.Act { - case testutil.DBPut, testutil.DBOverwrite: - Expect(db.Contains(t.ActKey)).Should(BeTrue()) - default: - Expect(db.Contains(t.ActKey)).Should(BeFalse()) - } - }, - } - testutil.DoDBTesting(&t) - }) - }) - - Describe("read test", func() { - testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { - // Building the DB. 
- db := New(comparer.DefaultComparer, 0) - kv.IterateShuffled(nil, func(i int, key, value []byte) { - db.Put(key, value) - }) - - if kv.Len() > 1 { - It("Should find correct keys with findLT", func() { - testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { - key_, key, _ := kv.IndexInexact(i + 1) - expectedKey, expectedValue := kv.Index(i) - - // Using key that exist. - rkey, rvalue, err := db.TestFindLT(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) - Expect(rkey).Should(Equal(expectedKey), "Key") - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) - - // Using key that doesn't exist. - rkey, rvalue, err = db.TestFindLT(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey) - Expect(rkey).Should(Equal(expectedKey)) - Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey) - }) - }) - } - - if kv.Len() > 0 { - It("Should find last key with findLast", func() { - key, value := kv.Index(kv.Len() - 1) - rkey, rvalue, err := db.TestFindLast() - Expect(err).ShouldNot(HaveOccurred()) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value)) - }) - } - - return db - }, nil, nil) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go deleted file mode 100644 index 86828f47b..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ /dev/null @@ -1,624 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package opt provides sets of options used by LevelDB. -package opt - -import ( - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "math" -) - -const ( - KiB = 1024 - MiB = KiB * 1024 - GiB = MiB * 1024 -) - -var ( - DefaultBlockCacher = LRUCacher - DefaultBlockCacheCapacity = 8 * MiB - DefaultBlockRestartInterval = 16 - DefaultBlockSize = 4 * KiB - DefaultCompactionExpandLimitFactor = 25 - DefaultCompactionGPOverlapsFactor = 10 - DefaultCompactionL0Trigger = 4 - DefaultCompactionSourceLimitFactor = 1 - DefaultCompactionTableSize = 2 * MiB - DefaultCompactionTableSizeMultiplier = 1.0 - DefaultCompactionTotalSize = 10 * MiB - DefaultCompactionTotalSizeMultiplier = 10.0 - DefaultCompressionType = SnappyCompression - DefaultOpenFilesCacher = LRUCacher - DefaultOpenFilesCacheCapacity = 500 - DefaultMaxMemCompationLevel = 2 - DefaultNumLevel = 7 - DefaultWriteBuffer = 4 * MiB - DefaultWriteL0PauseTrigger = 12 - DefaultWriteL0SlowdownTrigger = 8 -) - -// Cacher is a caching algorithm. -type Cacher interface { - New(capacity int) cache.Cacher -} - -type CacherFunc struct { - NewFunc func(capacity int) cache.Cacher -} - -func (f *CacherFunc) New(capacity int) cache.Cacher { - if f.NewFunc != nil { - return f.NewFunc(capacity) - } - return nil -} - -func noCacher(int) cache.Cacher { return nil } - -var ( - // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} - - // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} -) - -// Compression is the 'sorted table' block compression algorithm to use. 
-type Compression uint - -func (c Compression) String() string { - switch c { - case DefaultCompression: - return "default" - case NoCompression: - return "none" - case SnappyCompression: - return "snappy" - } - return "invalid" -} - -const ( - DefaultCompression Compression = iota - NoCompression - SnappyCompression - nCompression -) - -// Strict is the DB 'strict level'. -type Strict uint - -const ( - // If present then a corrupted or invalid chunk or block in manifest - // journal will cause an error instead of being dropped. - // This will prevent database with corrupted manifest to be opened. - StrictManifest Strict = 1 << iota - - // If present then journal chunk checksum will be verified. - StrictJournalChecksum - - // If present then a corrupted or invalid chunk or block in journal - // will cause an error instead of being dropped. - // This will prevent database with corrupted journal to be opened. - StrictJournal - - // If present then 'sorted table' block checksum will be verified. - // This has effect on both 'read operation' and compaction. - StrictBlockChecksum - - // If present then a corrupted 'sorted table' will fails compaction. - // The database will enter read-only mode. - StrictCompaction - - // If present then a corrupted 'sorted table' will halts 'read operation'. - StrictReader - - // If present then leveldb.Recover will drop corrupted 'sorted table'. - StrictRecovery - - // This only applicable for ReadOptions, if present then this ReadOptions - // 'strict level' will override global ones. - StrictOverride - - // StrictAll enables all strict flags. - StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery - - // DefaultStrict is the default strict flags. Specify any strict flags - // will override default strict flags as whole (i.e. not OR'ed). - DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader - - // NoStrict disables all strict flags. Override default strict flags. - NoStrict = ^StrictAll -) - -// Options holds the optional parameters for the DB at large. -type Options struct { - // AltFilters defines one or more 'alternative filters'. - // 'alternative filters' will be used during reads if a filter block - // does not match with the 'effective filter'. - // - // The default value is nil - AltFilters []filter.Filter - - // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - BlockCacher Cacher - - // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. - // Use -1 for zero, this has same effect with specifying NoCacher to BlockCacher. - // - // The default value is 8MiB. - BlockCacheCapacity int - - // BlockRestartInterval is the number of keys between restart points for - // delta encoding of keys. - // - // The default value is 16. - BlockRestartInterval int - - // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' - // block. - // - // The default value is 4KiB. - BlockSize int - - // CompactionExpandLimitFactor limits compaction size after expanded. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 25. - CompactionExpandLimitFactor int - - // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a - // single 'sorted table' generates. 
- // This will be multiplied by table size limit at grandparent level. - // - // The default value is 10. - CompactionGPOverlapsFactor int - - // CompactionL0Trigger defines number of 'sorted table' at level-0 that will - // trigger compaction. - // - // The default value is 4. - CompactionL0Trigger int - - // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to - // level-0. - // This will be multiplied by table size limit at compaction target level. - // - // The default value is 1. - CompactionSourceLimitFactor int - - // CompactionTableSize limits size of 'sorted table' that compaction generates. - // The limits for each level will be calculated as: - // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. - // - // The default value is 2MiB. - CompactionTableSize int - - // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. - // - // The default value is 1. - CompactionTableSizeMultiplier float64 - - // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTableSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTableSizeMultiplierPerLevel []float64 - - // CompactionTotalSize limits total size of 'sorted table' for each level. - // The limits for each level will be calculated as: - // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) - // The multiplier for each level can also fine-tuned using - // CompactionTotalSizeMultiplierPerLevel. - // - // The default value is 10MiB. - CompactionTotalSize int - - // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. - // - // The default value is 10. - CompactionTotalSizeMultiplier float64 - - // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for - // CompactionTotalSize. - // Use zero to skip a level. - // - // The default value is nil. - CompactionTotalSizeMultiplierPerLevel []float64 - - // Comparer defines a total ordering over the space of []byte keys: a 'less - // than' relationship. The same comparison algorithm must be used for reads - // and writes over the lifetime of the DB. - // - // The default value uses the same ordering as bytes.Compare. - Comparer comparer.Comparer - - // Compression defines the 'sorted table' block compression to use. - // - // The default value (DefaultCompression) uses snappy compression. - Compression Compression - - // DisableBlockCache allows disable use of cache.Cache functionality on - // 'sorted table' block. - // - // The default value is false. - DisableBlockCache bool - - // DisableCompactionBackoff allows disable compaction retry backoff. - // - // The default value is false. - DisableCompactionBackoff bool - - // ErrorIfExist defines whether an error should returned if the DB already - // exist. - // - // The default value is false. - ErrorIfExist bool - - // ErrorIfMissing defines whether an error should returned if the DB is - // missing. If false then the database will be created if missing, otherwise - // an error will be returned. - // - // The default value is false. - ErrorIfMissing bool - - // Filter defines an 'effective filter' to use. An 'effective filter' - // if defined will be used to generate per-table filter block. - // The filter name will be stored on disk. - // During reads LevelDB will try to find matching filter from - // 'effective filter' and 'alternative filters'. 
- // - // Filter can be changed after a DB has been created. It is recommended - // to put old filter to the 'alternative filters' to mitigate lack of - // filter during transition period. - // - // A filter is used to reduce disk reads when looking for a specific key. - // - // The default value is nil. - Filter filter.Filter - - // MaxMemCompationLevel defines maximum level a newly compacted 'memdb' - // will be pushed into if doesn't creates overlap. This should less than - // NumLevel. Use -1 for level-0. - // - // The default is 2. - MaxMemCompationLevel int - - // NumLevel defines number of database level. The level shouldn't changed - // between opens, or the database will panic. - // - // The default is 7. - NumLevel int - - // OpenFilesCacher provides cache algorithm for open files caching. - // Specify NoCacher to disable caching algorithm. - // - // The default value is LRUCacher. - OpenFilesCacher Cacher - - // OpenFilesCacheCapacity defines the capacity of the open files caching. - // Use -1 for zero, this has same effect with specifying NoCacher to OpenFilesCacher. - // - // The default value is 500. - OpenFilesCacheCapacity int - - // Strict defines the DB strict level. - Strict Strict - - // WriteBuffer defines maximum size of a 'memdb' before flushed to - // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk - // unsorted journal. - // - // LevelDB may held up to two 'memdb' at the same time. - // - // The default value is 4MiB. - WriteBuffer int - - // WriteL0StopTrigger defines number of 'sorted table' at level-0 that will - // pause write. - // - // The default value is 12. - WriteL0PauseTrigger int - - // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that - // will trigger write slowdown. - // - // The default value is 8. 
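Every zero-valued field falls back to the package defaults listed above, which the Get* accessors that follow implement; note that an explicit Strict value replaces DefaultStrict as a whole rather than OR-ing into it. A short sketch of overriding a few fields and reading back the effective values, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	o := &opt.Options{
		BlockCacheCapacity:     16 * opt.MiB,      // default is 8 MiB
		Compression:            opt.NoCompression, // default is snappy
		WriteBuffer:            8 * opt.MiB,       // default is 4 MiB
		OpenFilesCacheCapacity: 200,               // default is 500
		// Replaces DefaultStrict entirely, so list every flag wanted.
		Strict: opt.DefaultStrict | opt.StrictManifest,
	}

	fmt.Println(o.GetBlockCacheCapacity())       // 16777216
	fmt.Println(o.GetCompression())              // none
	fmt.Println(o.GetWriteBuffer())              // 8388608
	fmt.Println(o.GetStrict(opt.StrictManifest)) // true
}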
- WriteL0SlowdownTrigger int -} - -func (o *Options) GetAltFilters() []filter.Filter { - if o == nil { - return nil - } - return o.AltFilters -} - -func (o *Options) GetBlockCacher() Cacher { - if o == nil || o.BlockCacher == nil { - return DefaultBlockCacher - } else if o.BlockCacher == NoCacher { - return nil - } - return o.BlockCacher -} - -func (o *Options) GetBlockCacheCapacity() int { - if o == nil || o.BlockCacheCapacity <= 0 { - return DefaultBlockCacheCapacity - } else if o.BlockCacheCapacity == -1 { - return 0 - } - return o.BlockCacheCapacity -} - -func (o *Options) GetBlockRestartInterval() int { - if o == nil || o.BlockRestartInterval <= 0 { - return DefaultBlockRestartInterval - } - return o.BlockRestartInterval -} - -func (o *Options) GetBlockSize() int { - if o == nil || o.BlockSize <= 0 { - return DefaultBlockSize - } - return o.BlockSize -} - -func (o *Options) GetCompactionExpandLimit(level int) int { - factor := DefaultCompactionExpandLimitFactor - if o != nil && o.CompactionExpandLimitFactor > 0 { - factor = o.CompactionExpandLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionGPOverlaps(level int) int { - factor := DefaultCompactionGPOverlapsFactor - if o != nil && o.CompactionGPOverlapsFactor > 0 { - factor = o.CompactionGPOverlapsFactor - } - return o.GetCompactionTableSize(level+2) * factor -} - -func (o *Options) GetCompactionL0Trigger() int { - if o == nil || o.CompactionL0Trigger == 0 { - return DefaultCompactionL0Trigger - } - return o.CompactionL0Trigger -} - -func (o *Options) GetCompactionSourceLimit(level int) int { - factor := DefaultCompactionSourceLimitFactor - if o != nil && o.CompactionSourceLimitFactor > 0 { - factor = o.CompactionSourceLimitFactor - } - return o.GetCompactionTableSize(level+1) * factor -} - -func (o *Options) GetCompactionTableSize(level int) int { - var ( - base = DefaultCompactionTableSize - mult float64 - ) - if o != nil { - if o.CompactionTableSize > 0 { - base = o.CompactionTableSize - } - if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTableSizeMultiplierPerLevel[level] - } else if o.CompactionTableSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level)) - } - return int(float64(base) * mult) -} - -func (o *Options) GetCompactionTotalSize(level int) int64 { - var ( - base = DefaultCompactionTotalSize - mult float64 - ) - if o != nil { - if o.CompactionTotalSize > 0 { - base = o.CompactionTotalSize - } - if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 { - mult = o.CompactionTotalSizeMultiplierPerLevel[level] - } else if o.CompactionTotalSizeMultiplier > 0 { - mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level)) - } - } - if mult == 0 { - mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level)) - } - return int64(float64(base) * mult) -} - -func (o *Options) GetComparer() comparer.Comparer { - if o == nil || o.Comparer == nil { - return comparer.DefaultComparer - } - return o.Comparer -} - -func (o *Options) GetCompression() Compression { - if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression { - return DefaultCompressionType - } - return o.Compression -} - -func (o *Options) GetDisableCompactionBackoff() bool { - if o == nil { - return false - } - 
return o.DisableCompactionBackoff -} - -func (o *Options) GetErrorIfExist() bool { - if o == nil { - return false - } - return o.ErrorIfExist -} - -func (o *Options) GetErrorIfMissing() bool { - if o == nil { - return false - } - return o.ErrorIfMissing -} - -func (o *Options) GetFilter() filter.Filter { - if o == nil { - return nil - } - return o.Filter -} - -func (o *Options) GetMaxMemCompationLevel() int { - level := DefaultMaxMemCompationLevel - if o != nil { - if o.MaxMemCompationLevel > 0 { - level = o.MaxMemCompationLevel - } else if o.MaxMemCompationLevel == -1 { - level = 0 - } - } - if level >= o.GetNumLevel() { - return o.GetNumLevel() - 1 - } - return level -} - -func (o *Options) GetNumLevel() int { - if o == nil || o.NumLevel <= 0 { - return DefaultNumLevel - } - return o.NumLevel -} - -func (o *Options) GetOpenFilesCacher() Cacher { - if o == nil || o.OpenFilesCacher == nil { - return DefaultOpenFilesCacher - } - if o.OpenFilesCacher == NoCacher { - return nil - } - return o.OpenFilesCacher -} - -func (o *Options) GetOpenFilesCacheCapacity() int { - if o == nil || o.OpenFilesCacheCapacity <= 0 { - return DefaultOpenFilesCacheCapacity - } else if o.OpenFilesCacheCapacity == -1 { - return 0 - } - return o.OpenFilesCacheCapacity -} - -func (o *Options) GetStrict(strict Strict) bool { - if o == nil || o.Strict == 0 { - return DefaultStrict&strict != 0 - } - return o.Strict&strict != 0 -} - -func (o *Options) GetWriteBuffer() int { - if o == nil || o.WriteBuffer <= 0 { - return DefaultWriteBuffer - } - return o.WriteBuffer -} - -func (o *Options) GetWriteL0PauseTrigger() int { - if o == nil || o.WriteL0PauseTrigger == 0 { - return DefaultWriteL0PauseTrigger - } - return o.WriteL0PauseTrigger -} - -func (o *Options) GetWriteL0SlowdownTrigger() int { - if o == nil || o.WriteL0SlowdownTrigger == 0 { - return DefaultWriteL0SlowdownTrigger - } - return o.WriteL0SlowdownTrigger -} - -// ReadOptions holds the optional parameters for 'read operation'. The -// 'read operation' includes Get, Find and NewIterator. -type ReadOptions struct { - // DontFillCache defines whether block reads for this 'read operation' - // should be cached. If false then the block will be cached. This does - // not affects already cached block. - // - // The default value is false. - DontFillCache bool - - // Strict will be OR'ed with global DB 'strict level' unless StrictOverride - // is present. Currently only StrictReader that has effect here. - Strict Strict -} - -func (ro *ReadOptions) GetDontFillCache() bool { - if ro == nil { - return false - } - return ro.DontFillCache -} - -func (ro *ReadOptions) GetStrict(strict Strict) bool { - if ro == nil { - return false - } - return ro.Strict&strict != 0 -} - -// WriteOptions holds the optional parameters for 'write operation'. The -// 'write operation' includes Write, Put and Delete. -type WriteOptions struct { - // Sync is whether to sync underlying writes from the OS buffer cache - // through to actual disk, if applicable. Setting Sync can result in - // slower writes. - // - // If false, and the machine crashes, then some recent writes may be lost. - // Note that if it is just the process that crashes (and the machine does - // not) then no writes will be lost. - // - // In other words, Sync being false has the same semantics as a write - // system call. Sync being true means write followed by fsync. - // - // The default value is false. 
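With the formulas documented above, the defaults compound to fixed 2 MiB output tables (multiplier 1.0) and a per-level total budget that grows tenfold per level (10 MiB, 100 MiB, 1000 MiB, ...). A small sketch that prints the effective limits; a nil *Options is deliberately valid here, since every accessor falls back to the defaults:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	var o *opt.Options // nil selects the documented defaults

	for level := 0; level < o.GetNumLevel(); level++ {
		fmt.Printf("L%d: table %d MiB, total %d MiB\n",
			level,
			o.GetCompactionTableSize(level)/opt.MiB, // 2 MiB at every level (multiplier 1.0)
			o.GetCompactionTotalSize(level)/opt.MiB, // 10, 100, 1000, ... MiB (multiplier 10.0)
		)
	}
}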
- Sync bool -} - -func (wo *WriteOptions) GetSync() bool { - if wo == nil { - return false - } - return wo.Sync -} - -func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { - if ro.GetStrict(StrictOverride) { - return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go deleted file mode 100644 index a3d84ef60..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func dupOptions(o *opt.Options) *opt.Options { - newo := &opt.Options{} - if o != nil { - *newo = *o - } - if newo.Strict == 0 { - newo.Strict = opt.DefaultStrict - } - return newo -} - -func (s *session) setOptions(o *opt.Options) { - no := dupOptions(o) - // Alternative filters. - if filters := o.GetAltFilters(); len(filters) > 0 { - no.AltFilters = make([]filter.Filter, len(filters)) - for i, filter := range filters { - no.AltFilters[i] = &iFilter{filter} - } - } - // Comparer. - s.icmp = &iComparer{o.GetComparer()} - no.Comparer = s.icmp - // Filter. - if filter := o.GetFilter(); filter != nil { - no.Filter = &iFilter{filter} - } - - s.o = &cachedOptions{Options: no} - s.o.cache() -} - -type cachedOptions struct { - *opt.Options - - compactionExpandLimit []int - compactionGPOverlaps []int - compactionSourceLimit []int - compactionTableSize []int - compactionTotalSize []int64 -} - -func (co *cachedOptions) cache() { - numLevel := co.Options.GetNumLevel() - - co.compactionExpandLimit = make([]int, numLevel) - co.compactionGPOverlaps = make([]int, numLevel) - co.compactionSourceLimit = make([]int, numLevel) - co.compactionTableSize = make([]int, numLevel) - co.compactionTotalSize = make([]int64, numLevel) - - for level := 0; level < numLevel; level++ { - co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level) - co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level) - co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level) - co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level) - co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level) - } -} - -func (co *cachedOptions) GetCompactionExpandLimit(level int) int { - return co.compactionExpandLimit[level] -} - -func (co *cachedOptions) GetCompactionGPOverlaps(level int) int { - return co.compactionGPOverlaps[level] -} - -func (co *cachedOptions) GetCompactionSourceLimit(level int) int { - return co.compactionSourceLimit[level] -} - -func (co *cachedOptions) GetCompactionTableSize(level int) int { - return co.compactionTableSize[level] -} - -func (co *cachedOptions) GetCompactionTotalSize(level int) int64 { - return co.compactionTotalSize[level] -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go deleted file mode 100644 index b3906f7fc..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. 
-// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "io" - "os" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type ErrManifestCorrupted struct { - Field string - Reason string -} - -func (e *ErrManifestCorrupted) Error() string { - return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason) -} - -func newErrManifestCorrupted(f storage.File, field, reason string) error { - return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason}) -} - -// session represent a persistent database session. -type session struct { - // Need 64-bit alignment. - stNextFileNum uint64 // current unused file number - stJournalNum uint64 // current journal file number; need external synchronization - stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb - stSeqNum uint64 // last mem compacted seq; need external synchronization - stTempFileNum uint64 - - stor storage.Storage - storLock util.Releaser - o *cachedOptions - icmp *iComparer - tops *tOps - - manifest *journal.Writer - manifestWriter storage.Writer - manifestFile storage.File - - stCompPtrs []iKey // compaction pointers; need external synchronization - stVersion *version // current version - vmu sync.Mutex -} - -// Creates new initialized session instance. -func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) { - if stor == nil { - return nil, os.ErrInvalid - } - storLock, err := stor.Lock() - if err != nil { - return - } - s = &session{ - stor: stor, - storLock: storLock, - stCompPtrs: make([]iKey, o.GetNumLevel()), - } - s.setOptions(o) - s.tops = newTableOps(s) - s.setVersion(newVersion(s)) - s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed") - return -} - -// Close session. -func (s *session) close() { - s.tops.close() - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - s.manifest = nil - s.manifestWriter = nil - s.manifestFile = nil - s.stVersion = nil -} - -// Release session lock. -func (s *session) release() { - s.storLock.Release() -} - -// Create a new database session; need external synchronization. -func (s *session) create() error { - // create manifest - return s.newManifest(nil, nil) -} - -// Recover a database session; need external synchronization. -func (s *session) recover() (err error) { - defer func() { - if os.IsNotExist(err) { - // Don't return os.ErrNotExist if the underlying storage contains - // other files that belong to LevelDB. So the DB won't get trashed. 
- if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 { - err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}} - } - } - }() - - m, err := s.stor.GetManifest() - if err != nil { - return - } - - reader, err := m.Open() - if err != nil { - return - } - defer reader.Close() - strict := s.o.GetStrict(opt.StrictManifest) - jr := journal.NewReader(reader, dropper{s, m}, strict, true) - - staging := s.stVersion.newStaging() - rec := &sessionRecord{numLevel: s.o.GetNumLevel()} - for { - var r io.Reader - r, err = jr.Next() - if err != nil { - if err == io.EOF { - err = nil - break - } - return errors.SetFile(err, m) - } - - err = rec.decode(r) - if err == nil { - // save compact pointers - for _, r := range rec.compPtrs { - s.stCompPtrs[r.level] = iKey(r.ikey) - } - // commit record to version staging - staging.commit(rec) - } else { - err = errors.SetFile(err, m) - if strict || !errors.IsCorrupted(err) { - return - } else { - s.logf("manifest error: %v (skipped)", errors.SetFile(err, m)) - } - } - rec.resetCompPtrs() - rec.resetAddedTables() - rec.resetDeletedTables() - } - - switch { - case !rec.has(recComparer): - return newErrManifestCorrupted(m, "comparer", "missing") - case rec.comparer != s.icmp.uName(): - return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer)) - case !rec.has(recNextFileNum): - return newErrManifestCorrupted(m, "next-file-num", "missing") - case !rec.has(recJournalNum): - return newErrManifestCorrupted(m, "journal-file-num", "missing") - case !rec.has(recSeqNum): - return newErrManifestCorrupted(m, "seq-num", "missing") - } - - s.manifestFile = m - s.setVersion(staging.finish()) - s.setNextFileNum(rec.nextFileNum) - s.recordCommited(rec) - return nil -} - -// Commit session; need external synchronization. -func (s *session) commit(r *sessionRecord) (err error) { - v := s.version() - defer v.release() - - // spawn new version based on current version - nv := v.spawn(r) - - if s.manifest == nil { - // manifest journal writer not yet created, create one - err = s.newManifest(r, nv) - } else { - err = s.flushManifest(r) - } - - // finally, apply new version if no error rise - if err == nil { - s.setVersion(nv) - } - - return -} - -// Pick a compaction based on current state; need external synchronization. -func (s *session) pickCompaction() *compaction { - v := s.version() - - var level int - var t0 tFiles - if v.cScore >= 1 { - level = v.cLevel - cptr := s.stCompPtrs[level] - tables := v.tables[level] - for _, t := range tables { - if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 { - t0 = append(t0, t) - break - } - } - if len(t0) == 0 { - t0 = append(t0, tables[0]) - } - } else { - if p := atomic.LoadPointer(&v.cSeek); p != nil { - ts := (*tSet)(p) - level = ts.level - t0 = append(t0, ts.table) - } else { - v.release() - return nil - } - } - - return newCompaction(s, v, level, t0) -} - -// Create compaction from given level and range; need external synchronization. -func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction { - v := s.version() - - t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0) - if len(t0) == 0 { - v.release() - return nil - } - - // Avoid compacting too much in one shot in case the range is large. - // But we cannot do this for level-0 since level-0 files can overlap - // and we must not pick one file and drop another older file if the - // two files overlap. 
- if level > 0 { - limit := uint64(v.s.o.GetCompactionSourceLimit(level)) - total := uint64(0) - for i, t := range t0 { - total += t.size - if total >= limit { - s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1) - t0 = t0[:i+1] - break - } - } - } - - return newCompaction(s, v, level, t0) -} - -func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction { - c := &compaction{ - s: s, - v: v, - level: level, - tables: [2]tFiles{t0, nil}, - maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)), - tPtrs: make([]int, s.o.GetNumLevel()), - } - c.expand() - c.save() - return c -} - -// compaction represent a compaction state. -type compaction struct { - s *session - v *version - - level int - tables [2]tFiles - maxGPOverlaps uint64 - - gp tFiles - gpi int - seenKey bool - gpOverlappedBytes uint64 - imin, imax iKey - tPtrs []int - released bool - - snapGPI int - snapSeenKey bool - snapGPOverlappedBytes uint64 - snapTPtrs []int -} - -func (c *compaction) save() { - c.snapGPI = c.gpi - c.snapSeenKey = c.seenKey - c.snapGPOverlappedBytes = c.gpOverlappedBytes - c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...) -} - -func (c *compaction) restore() { - c.gpi = c.snapGPI - c.seenKey = c.snapSeenKey - c.gpOverlappedBytes = c.snapGPOverlappedBytes - c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...) -} - -func (c *compaction) release() { - if !c.released { - c.released = true - c.v.release() - } -} - -// Expand compacted tables; need external synchronization. -func (c *compaction) expand() { - limit := uint64(c.s.o.GetCompactionExpandLimit(c.level)) - vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1] - - t0, t1 := c.tables[0], c.tables[1] - imin, imax := t0.getRange(c.s.icmp) - // We expand t0 here just incase ukey hop across tables. - t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0) - if len(t0) != len(c.tables[0]) { - imin, imax = t0.getRange(c.s.icmp) - } - t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false) - // Get entire range covered by compaction. - amin, amax := append(t0, t1...).getRange(c.s.icmp) - - // See if we can grow the number of inputs in "level" without - // changing the number of "level+1" files we pick up. - if len(t1) > 0 { - exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0) - if len(exp0) > len(t0) && t1.size()+exp0.size() < limit { - xmin, xmax := exp0.getRange(c.s.icmp) - exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) - if len(exp1) == len(t1) { - c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) - imin, imax = xmin, xmax - t0, t1 = exp0, exp1 - amin, amax = append(t0, t1...).getRange(c.s.icmp) - } - } - } - - // Compute the set of grandparent files that overlap this compaction - // (parent == level+1; grandparent == level+2) - if c.level+2 < c.s.o.GetNumLevel() { - c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false) - } - - c.tables[0], c.tables[1] = t0, t1 - c.imin, c.imax = imin, imax -} - -// Check whether compaction is trivial. 
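For a sense of scale, the three per-level limits used by the compaction code above (the source limit in getCompactionRange, the expand limit in expand, and maxGPOverlaps in newCompaction) all derive from GetCompactionTableSize. With an all-default configuration they work out as in this quick sketch, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	var o *opt.Options // nil selects the documented defaults
	const level = 1
	fmt.Println(o.GetCompactionSourceLimit(level) / opt.MiB) // 2: factor 1 x the 2 MiB table size at level+1
	fmt.Println(o.GetCompactionGPOverlaps(level) / opt.MiB)  // 20: factor 10 x the table size at level+2
	fmt.Println(o.GetCompactionExpandLimit(level) / opt.MiB) // 50: factor 25 x the table size at level+1
}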
-func (c *compaction) trivial() bool { - return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps -} - -func (c *compaction) baseLevelForKey(ukey []byte) bool { - for level, tables := range c.v.tables[c.level+2:] { - for c.tPtrs[level] < len(tables) { - t := tables[c.tPtrs[level]] - if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 { - // We've advanced far enough. - if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - // Key falls in this file's range, so definitely not base level. - return false - } - break - } - c.tPtrs[level]++ - } - } - return true -} - -func (c *compaction) shouldStopBefore(ikey iKey) bool { - for ; c.gpi < len(c.gp); c.gpi++ { - gp := c.gp[c.gpi] - if c.s.icmp.Compare(ikey, gp.imax) <= 0 { - break - } - if c.seenKey { - c.gpOverlappedBytes += gp.size - } - } - c.seenKey = true - - if c.gpOverlappedBytes > c.maxGPOverlaps { - // Too much overlap for current output; start new output. - c.gpOverlappedBytes = 0 - return true - } - return false -} - -// Creates an iterator. -func (c *compaction) newIterator() iterator.Iterator { - // Creates iterator slice. - icap := len(c.tables) - if c.level == 0 { - // Special case for level-0 - icap = len(c.tables[0]) + 1 - } - its := make([]iterator.Iterator, 0, icap) - - // Options. - ro := &opt.ReadOptions{ - DontFillCache: true, - Strict: opt.StrictOverride, - } - strict := c.s.o.GetStrict(opt.StrictCompaction) - if strict { - ro.Strict |= opt.StrictReader - } - - for i, tables := range c.tables { - if len(tables) == 0 { - continue - } - - // Level-0 is not sorted and may overlaps each other. - if c.level+i == 0 { - for _, t := range tables { - its = append(its, c.s.tops.newIterator(t, nil, ro)) - } - } else { - it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict) - its = append(its, it) - } - } - - return iterator.NewMergedIterator(its, c.s.icmp, strict) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go deleted file mode 100644 index 1bdcc68f5..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "bufio" - "encoding/binary" - "io" - "strings" - - "github.com/syndtr/goleveldb/leveldb/errors" -) - -type byteReader interface { - io.Reader - io.ByteReader -} - -// These numbers are written to disk and should not be changed. 
-const ( - recComparer = 1 - recJournalNum = 2 - recNextFileNum = 3 - recSeqNum = 4 - recCompPtr = 5 - recDelTable = 6 - recAddTable = 7 - // 8 was used for large value refs - recPrevJournalNum = 9 -) - -type cpRecord struct { - level int - ikey iKey -} - -type atRecord struct { - level int - num uint64 - size uint64 - imin iKey - imax iKey -} - -type dtRecord struct { - level int - num uint64 -} - -type sessionRecord struct { - numLevel int - - hasRec int - comparer string - journalNum uint64 - prevJournalNum uint64 - nextFileNum uint64 - seqNum uint64 - compPtrs []cpRecord - addedTables []atRecord - deletedTables []dtRecord - - scratch [binary.MaxVarintLen64]byte - err error -} - -func (p *sessionRecord) has(rec int) bool { - return p.hasRec&(1<= uint64(p.numLevel) { - p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"}) - return 0 - } - return int(x) -} - -func (p *sessionRecord) decode(r io.Reader) error { - br, ok := r.(byteReader) - if !ok { - br = bufio.NewReader(r) - } - p.err = nil - for p.err == nil { - rec := p.readUvarintMayEOF("field-header", br, true) - if p.err != nil { - if p.err == io.EOF { - return nil - } - return p.err - } - switch rec { - case recComparer: - x := p.readBytes("comparer", br) - if p.err == nil { - p.setComparer(string(x)) - } - case recJournalNum: - x := p.readUvarint("journal-num", br) - if p.err == nil { - p.setJournalNum(x) - } - case recPrevJournalNum: - x := p.readUvarint("prev-journal-num", br) - if p.err == nil { - p.setPrevJournalNum(x) - } - case recNextFileNum: - x := p.readUvarint("next-file-num", br) - if p.err == nil { - p.setNextFileNum(x) - } - case recSeqNum: - x := p.readUvarint("seq-num", br) - if p.err == nil { - p.setSeqNum(x) - } - case recCompPtr: - level := p.readLevel("comp-ptr.level", br) - ikey := p.readBytes("comp-ptr.ikey", br) - if p.err == nil { - p.addCompPtr(level, iKey(ikey)) - } - case recAddTable: - level := p.readLevel("add-table.level", br) - num := p.readUvarint("add-table.num", br) - size := p.readUvarint("add-table.size", br) - imin := p.readBytes("add-table.imin", br) - imax := p.readBytes("add-table.imax", br) - if p.err == nil { - p.addTable(level, num, size, imin, imax) - } - case recDelTable: - level := p.readLevel("del-table.level", br) - num := p.readUvarint("del-table.num", br) - if p.err == nil { - p.delTable(level, num) - } - } - } - - return p.err -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go deleted file mode 100644 index c0c035ae3..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
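The decode loop above reads a uvarint field tag (the rec* constants) followed, for the numeric fields, by a uvarint payload, so a well-formed manifest record for those fields is just alternating varints. A self-contained sketch of that byte layout; the matching writer is sessionRecord.encode, which is not shown in this hunk:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendUvarint appends x to dst in varint form, as the manifest decoder expects.
func appendUvarint(dst []byte, x uint64) []byte {
	var tmp [binary.MaxVarintLen64]byte
	return append(dst, tmp[:binary.PutUvarint(tmp[:], x)]...)
}

func main() {
	var rec []byte
	rec = appendUvarint(rec, 2) // recJournalNum tag
	rec = appendUvarint(rec, 42)
	rec = appendUvarint(rec, 3) // recNextFileNum tag
	rec = appendUvarint(rec, 43)
	rec = appendUvarint(rec, 4) // recSeqNum tag
	rec = appendUvarint(rec, 100000)

	fmt.Printf("% x\n", rec) // 02 2a 03 2b 04 a0 8d 06
}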
- -package leveldb - -import ( - "bytes" - "testing" - - "github.com/syndtr/goleveldb/leveldb/opt" -) - -func decodeEncode(v *sessionRecord) (res bool, err error) { - b := new(bytes.Buffer) - err = v.encode(b) - if err != nil { - return - } - v2 := &sessionRecord{numLevel: opt.DefaultNumLevel} - err = v.decode(b) - if err != nil { - return - } - b2 := new(bytes.Buffer) - err = v2.encode(b2) - if err != nil { - return - } - return bytes.Equal(b.Bytes(), b2.Bytes()), nil -} - -func TestSessionRecord_EncodeDecode(t *testing.T) { - big := uint64(1) << 50 - v := &sessionRecord{numLevel: opt.DefaultNumLevel} - i := uint64(0) - test := func() { - res, err := decodeEncode(v) - if err != nil { - t.Fatalf("error when testing encode/decode sessionRecord: %v", err) - } - if !res { - t.Error("encode/decode test failed at iteration:", i) - } - } - - for ; i < 4; i++ { - test() - v.addTable(3, big+300+i, big+400+i, - newIkey([]byte("foo"), big+500+1, ktVal), - newIkey([]byte("zoo"), big+600+1, ktDel)) - v.delTable(4, big+700+i) - v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal)) - } - - v.setComparer("foo") - v.setJournalNum(big + 100) - v.setPrevJournalNum(big + 99) - v.setNextFileNum(big + 200) - v.setSeqNum(big + 1000) - test() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go deleted file mode 100644 index 007c02cde..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/journal" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -// Logging. - -type dropper struct { - s *session - file storage.File -} - -func (d dropper) Drop(err error) { - if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason) - } else { - d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err) - } -} - -func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) } -func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) } - -// File utils. - -func (s *session) getJournalFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeJournal) -} - -func (s *session) getTableFile(num uint64) storage.File { - return s.stor.GetFile(num, storage.TypeTable) -} - -func (s *session) getFiles(t storage.FileType) ([]storage.File, error) { - return s.stor.GetFiles(t) -} - -func (s *session) newTemp() storage.File { - num := atomic.AddUint64(&s.stTempFileNum, 1) - 1 - return s.stor.GetFile(num, storage.TypeTemp) -} - -func (s *session) tableFileFromRecord(r atRecord) *tFile { - return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax) -} - -// Session state. - -// Get current version. This will incr version ref, must call -// version.release (exactly once) after use. -func (s *session) version() *version { - s.vmu.Lock() - defer s.vmu.Unlock() - s.stVersion.ref++ - return s.stVersion -} - -// Set current version to v. -func (s *session) setVersion(v *version) { - s.vmu.Lock() - v.ref = 1 // Holds by session. - if old := s.stVersion; old != nil { - v.ref++ // Holds by old version. 
- old.next = v - old.releaseNB() - } - s.stVersion = v - s.vmu.Unlock() -} - -// Get current unused file number. -func (s *session) nextFileNum() uint64 { - return atomic.LoadUint64(&s.stNextFileNum) -} - -// Set current unused file number to num. -func (s *session) setNextFileNum(num uint64) { - atomic.StoreUint64(&s.stNextFileNum, num) -} - -// Mark file number as used. -func (s *session) markFileNum(num uint64) { - nextFileNum := num + 1 - for { - old, x := s.stNextFileNum, nextFileNum - if old > x { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Allocate a file number. -func (s *session) allocFileNum() uint64 { - return atomic.AddUint64(&s.stNextFileNum, 1) - 1 -} - -// Reuse given file number. -func (s *session) reuseFileNum(num uint64) { - for { - old, x := s.stNextFileNum, num - if old != x+1 { - x = old - } - if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) { - break - } - } -} - -// Manifest related utils. - -// Fill given session record obj with current states; need external -// synchronization. -func (s *session) fillRecord(r *sessionRecord, snapshot bool) { - r.setNextFileNum(s.nextFileNum()) - - if snapshot { - if !r.has(recJournalNum) { - r.setJournalNum(s.stJournalNum) - } - - if !r.has(recSeqNum) { - r.setSeqNum(s.stSeqNum) - } - - for level, ik := range s.stCompPtrs { - if ik != nil { - r.addCompPtr(level, ik) - } - } - - r.setComparer(s.icmp.uName()) - } -} - -// Mark if record has been committed, this will update session state; -// need external synchronization. -func (s *session) recordCommited(r *sessionRecord) { - if r.has(recJournalNum) { - s.stJournalNum = r.journalNum - } - - if r.has(recPrevJournalNum) { - s.stPrevJournalNum = r.prevJournalNum - } - - if r.has(recSeqNum) { - s.stSeqNum = r.seqNum - } - - for _, p := range r.compPtrs { - s.stCompPtrs[p.level] = iKey(p.ikey) - } -} - -// Create a new manifest file; need external synchronization. -func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { - num := s.allocFileNum() - file := s.stor.GetFile(num, storage.TypeManifest) - writer, err := file.Create() - if err != nil { - return - } - jw := journal.NewWriter(writer) - - if v == nil { - v = s.version() - defer v.release() - } - if rec == nil { - rec = &sessionRecord{numLevel: s.o.GetNumLevel()} - } - s.fillRecord(rec, true) - v.fillRecord(rec) - - defer func() { - if err == nil { - s.recordCommited(rec) - if s.manifest != nil { - s.manifest.Close() - } - if s.manifestWriter != nil { - s.manifestWriter.Close() - } - if s.manifestFile != nil { - s.manifestFile.Remove() - } - s.manifestFile = file - s.manifestWriter = writer - s.manifest = jw - } else { - writer.Close() - file.Remove() - s.reuseFileNum(num) - } - }() - - w, err := jw.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = jw.Flush() - if err != nil { - return - } - err = s.stor.SetManifest(file) - return -} - -// Flush record to disk. 
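markFileNum above raises stNextFileNum to at least num+1 with a compare-and-swap loop so that a file number observed during recovery is never handed out again by allocFileNum. A standalone sketch of the same raise-to-a-minimum pattern, written with an explicit atomic load and illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
)

// storeMax atomically raises *addr to at least val, never lowering it.
func storeMax(addr *uint64, val uint64) {
	for {
		old := atomic.LoadUint64(addr)
		if old >= val {
			return
		}
		if atomic.CompareAndSwapUint64(addr, old, val) {
			return
		}
	}
}

func main() {
	var next uint64 = 5
	storeMax(&next, 12) // a file numbered 11 was seen, so the next number must be >= 12
	storeMax(&next, 3)  // lower values never win
	fmt.Println(next)   // 12
}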
-func (s *session) flushManifest(rec *sessionRecord) (err error) { - s.fillRecord(rec, false) - w, err := s.manifest.Next() - if err != nil { - return - } - err = rec.encode(w) - if err != nil { - return - } - err = s.manifest.Flush() - if err != nil { - return - } - err = s.manifestWriter.Sync() - if err != nil { - return - } - s.recordCommited(rec) - return -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go deleted file mode 100644 index 46cc9d070..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reservefs. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -var errFileOpen = errors.New("leveldb/storage: file still open") - -type fileLock interface { - release() error -} - -type fileStorageLock struct { - fs *fileStorage -} - -func (lock *fileStorageLock) Release() { - fs := lock.fs - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.slock == lock { - fs.slock = nil - } - return -} - -// fileStorage is a file-system backed storage. -type fileStorage struct { - path string - - mu sync.Mutex - flock fileLock - slock *fileStorageLock - logw *os.File - buf []byte - // Opened file counter; if open < 0 means closed. - open int - day int -} - -// OpenFile returns a new filesytem-backed storage implementation with the given -// path. This also hold a file lock, so any subsequent attempt to open the same -// path will fail. -// -// The storage must be closed after use, by calling Close method. -func OpenFile(path string) (Storage, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - flock, err := newFileLock(filepath.Join(path, "LOCK")) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - flock.release() - } - }() - - rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old")) - logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, err - } - - fs := &fileStorage{path: path, flock: flock, logw: logw} - runtime.SetFinalizer(fs, (*fileStorage).Close) - return fs, nil -} - -func (fs *fileStorage) Lock() (util.Releaser, error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - if fs.slock != nil { - return nil, ErrLocked - } - fs.slock = &fileStorageLock{fs: fs} - return fs.slock, nil -} - -func itoa(buf []byte, i int, wid int) []byte { - var u uint = uint(i) - if u == 0 && wid <= 1 { - return append(buf, '0') - } - - // Assemble decimal in reverse order. - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - return append(buf, b[bp:]...) 
-} - -func (fs *fileStorage) printDay(t time.Time) { - if fs.day == t.Day() { - return - } - fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) -} - -func (fs *fileStorage) doLog(t time.Time, str string) { - fs.printDay(t) - hour, min, sec := t.Clock() - msec := t.Nanosecond() / 1e3 - // time - fs.buf = itoa(fs.buf[:0], hour, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, min, 2) - fs.buf = append(fs.buf, ':') - fs.buf = itoa(fs.buf, sec, 2) - fs.buf = append(fs.buf, '.') - fs.buf = itoa(fs.buf, msec, 6) - fs.buf = append(fs.buf, ' ') - // write - fs.buf = append(fs.buf, []byte(str)...) - fs.buf = append(fs.buf, '\n') - fs.logw.Write(fs.buf) -} - -func (fs *fileStorage) Log(str string) { - t := time.Now() - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return - } - fs.doLog(t, str) -} - -func (fs *fileStorage) log(str string) { - fs.doLog(time.Now(), str) -} - -func (fs *fileStorage) GetFile(num uint64, t FileType) File { - return &file{fs: fs, num: num, t: t} -} - -func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - f := &file{fs: fs} - for _, fn := range fnn { - if f.parse(fn) && (f.t&t) != 0 { - ff = append(ff, f) - f = &file{fs: fs} - } - } - return -} - -func (fs *fileStorage) GetManifest() (f File, err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return nil, ErrClosed - } - dir, err := os.Open(fs.path) - if err != nil { - return - } - fnn, err := dir.Readdirnames(0) - // Close the dir first before checking for Readdirnames error. - if err := dir.Close(); err != nil { - fs.log(fmt.Sprintf("close dir: %v", err)) - } - if err != nil { - return - } - // Find latest CURRENT file. - var rem []string - var pend bool - var cerr error - for _, fn := range fnn { - if strings.HasPrefix(fn, "CURRENT") { - pend1 := len(fn) > 7 - // Make sure it is valid name for a CURRENT file, otherwise skip it. - if pend1 { - if fn[7] != '.' || len(fn) < 9 { - fs.log(fmt.Sprintf("skipping %s: invalid file name", fn)) - continue - } - if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil { - fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1)) - continue - } - } - path := filepath.Join(fs.path, fn) - r, e1 := os.OpenFile(path, os.O_RDONLY, 0) - if e1 != nil { - return nil, e1 - } - b, e1 := ioutil.ReadAll(r) - if e1 != nil { - r.Close() - return nil, e1 - } - f1 := &file{fs: fs} - if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) { - fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn)) - if pend1 { - rem = append(rem, fn) - } - if !pend1 || cerr == nil { - cerr = fmt.Errorf("leveldb/storage: corrupted or incomplete %s file", fn) - } - } else if f != nil && f1.Num() < f.Num() { - fs.log(fmt.Sprintf("skipping %s: obsolete", fn)) - if pend1 { - rem = append(rem, fn) - } - } else { - f = f1 - pend = pend1 - } - if err := r.Close(); err != nil { - fs.log(fmt.Sprintf("close %s: %v", fn, err)) - } - } - } - // Don't remove any files if there is no valid CURRENT file. 
- if f == nil { - if cerr != nil { - err = cerr - } else { - err = os.ErrNotExist - } - return - } - // Rename pending CURRENT file to an effective CURRENT. - if pend { - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num()) - if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil { - fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err)) - } - } - // Remove obsolete or incomplete pending CURRENT files. - for _, fn := range rem { - path := filepath.Join(fs.path, fn) - if err := os.Remove(path); err != nil { - fs.log(fmt.Sprintf("remove %s: %v", fn, err)) - } - } - return -} - -func (fs *fileStorage) SetManifest(f File) (err error) { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - f2, ok := f.(*file) - if !ok || f2.t != TypeManifest { - return ErrInvalidFile - } - defer func() { - if err != nil { - fs.log(fmt.Sprintf("CURRENT: %v", err)) - } - }() - path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num()) - w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, f2.name()) - // Close the file first. - if err := w.Close(); err != nil { - fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err)) - } - if err != nil { - return err - } - return rename(path, filepath.Join(fs.path, "CURRENT")) -} - -func (fs *fileStorage) Close() error { - fs.mu.Lock() - defer fs.mu.Unlock() - if fs.open < 0 { - return ErrClosed - } - // Clear the finalizer. - runtime.SetFinalizer(fs, nil) - - if fs.open > 0 { - fs.log(fmt.Sprintf("refuse to close, %d files still open", fs.open)) - return fmt.Errorf("leveldb/storage: cannot close, %d files still open", fs.open) - } - fs.open = -1 - e1 := fs.logw.Close() - err := fs.flock.release() - if err == nil { - err = e1 - } - return err -} - -type fileWrap struct { - *os.File - f *file -} - -func (fw fileWrap) Sync() error { - if err := fw.File.Sync(); err != nil { - return err - } - if fw.f.Type() == TypeManifest { - // Also sync parent directory if file type is manifest. - // See: https://code.google.com/p/leveldb/issues/detail?id=190. 
- if err := syncDir(fw.f.fs.path); err != nil { - return err - } - } - return nil -} - -func (fw fileWrap) Close() error { - f := fw.f - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if !f.open { - return ErrClosed - } - f.open = false - f.fs.open-- - err := fw.File.Close() - if err != nil { - f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err)) - } - return err -} - -type file struct { - fs *fileStorage - num uint64 - t FileType - open bool -} - -func (f *file) Open() (Reader, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_RDONLY, 0) - if err != nil { - if f.hasOldName() && os.IsNotExist(err) { - of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0) - if err == nil { - goto ok - } - } - return nil, err - } -ok: - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Create() (Writer, error) { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return nil, ErrClosed - } - if f.open { - return nil, errFileOpen - } - of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, err - } - f.open = true - f.fs.open++ - return fileWrap{of, f}, nil -} - -func (f *file) Replace(newfile File) error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - newfile2, ok := newfile.(*file) - if !ok { - return ErrInvalidFile - } - if f.open || newfile2.open { - return errFileOpen - } - return rename(newfile2.path(), f.path()) -} - -func (f *file) Type() FileType { - return f.t -} - -func (f *file) Num() uint64 { - return f.num -} - -func (f *file) Remove() error { - f.fs.mu.Lock() - defer f.fs.mu.Unlock() - if f.fs.open < 0 { - return ErrClosed - } - if f.open { - return errFileOpen - } - err := os.Remove(f.path()) - if err != nil { - f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err)) - } - // Also try remove file with old name, just in case. 
- if f.hasOldName() { - if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) { - f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err)) - err = e1 - } - } - return err -} - -func (f *file) hasOldName() bool { - return f.t == TypeTable -} - -func (f *file) oldName() string { - switch f.t { - case TypeTable: - return fmt.Sprintf("%06d.sst", f.num) - } - return f.name() -} - -func (f *file) oldPath() string { - return filepath.Join(f.fs.path, f.oldName()) -} - -func (f *file) name() string { - switch f.t { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", f.num) - case TypeJournal: - return fmt.Sprintf("%06d.log", f.num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", f.num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", f.num) - default: - panic("invalid file type") - } -} - -func (f *file) path() string { - return filepath.Join(f.fs.path, f.name()) -} - -func (f *file) parse(name string) bool { - var num uint64 - var tail string - _, err := fmt.Sscanf(name, "%d.%s", &num, &tail) - if err == nil { - switch tail { - case "log": - f.t = TypeJournal - case "ldb", "sst": - f.t = TypeTable - case "tmp": - f.t = TypeTemp - default: - return false - } - f.num = num - return true - } - n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &num, &tail) - if n == 1 { - f.t = TypeManifest - f.num = num - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go deleted file mode 100644 index 42940d769..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "os" - "path/filepath" -) - -type plan9FileLock struct { - f *os.File -} - -func (fl *plan9FileLock) release() error { - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644) - if err != nil { - return - } - fl = &plan9FileLock{f: f} - return -} - -func rename(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err := os.Remove(newpath); err != nil { - return err - } - } - - _, fname := filepath.Split(newpath) - return os.Rename(oldpath, fname) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go deleted file mode 100644 index 102031bfd..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build solaris - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - flock := syscall.Flock_t{ - Type: syscall.F_UNLCK, - Start: 0, - Len: 0, - Whence: 1, - } - if lock { - flock.Type = syscall.F_WRLCK - } - return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go deleted file mode 100644 index 92abcbb7d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "fmt" - "os" - "path/filepath" - "testing" -) - -var cases = []struct { - oldName []string - name string - ftype FileType - num uint64 -}{ - {nil, "000100.log", TypeJournal, 100}, - {nil, "000000.log", TypeJournal, 0}, - {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, - {nil, "MANIFEST-000002", TypeManifest, 2}, - {nil, "MANIFEST-000007", TypeManifest, 7}, - {nil, "18446744073709551615.log", TypeJournal, 18446744073709551615}, - {nil, "000100.tmp", TypeTemp, 100}, -} - -var invalidCases = []string{ - "", - "foo", - "foo-dx-100.log", - ".log", - "", - "manifest", - "CURREN", - "CURRENTX", - "MANIFES", - "MANIFEST", - "MANIFEST-", - "XMANIFEST-3", - "MANIFEST-3x", - "LOC", - "LOCKx", - "LO", - "LOGx", - "18446744073709551616.log", - "184467440737095516150.log", - "100", - "100.", - "100.lop", -} - -func TestFileStorage_CreateFileName(t *testing.T) { - for _, c := range cases { - f := &file{num: c.num, t: c.ftype} - if f.name() != c.name { - t.Errorf("invalid filename got '%s', want '%s'", f.name(), c.name) - } - } -} - -func TestFileStorage_ParseFileName(t *testing.T) { - for _, c := range cases { - for _, name := range append([]string{c.name}, c.oldName...) 
{ - f := new(file) - if !f.parse(name) { - t.Errorf("cannot parse filename '%s'", name) - continue - } - if f.Type() != c.ftype { - t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, f.Type(), c.ftype) - } - if f.Num() != c.num { - t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, f.Num(), c.num) - } - } - } -} - -func TestFileStorage_InvalidFileName(t *testing.T) { - for _, name := range invalidCases { - f := new(file) - if f.parse(name) { - t.Errorf("filename '%s' should be invalid", name) - } - } -} - -func TestFileStorage_Locking(t *testing.T) { - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestfd-%d", os.Getuid())) - - _, err := os.Stat(path) - if err == nil { - err = os.RemoveAll(path) - if err != nil { - t.Fatal("RemoveAll: got error: ", err) - } - } - - p1, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(1): got error: ", err) - } - - defer os.RemoveAll(path) - - p2, err := OpenFile(path) - if err != nil { - t.Logf("OpenFile(2): got error: %s (expected)", err) - } else { - p2.Close() - p1.Close() - t.Fatal("OpenFile(2): expect error") - } - - p1.Close() - - p3, err := OpenFile(path) - if err != nil { - t.Fatal("OpenFile(3): got error: ", err) - } - defer p3.Close() - - l, err := p3.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = p3.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = p3.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go deleted file mode 100644 index d0a604b7a..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build darwin dragonfly freebsd linux netbsd openbsd - -package storage - -import ( - "os" - "syscall" -) - -type unixFileLock struct { - f *os.File -} - -func (fl *unixFileLock) release() error { - if err := setFileLock(fl.f, false); err != nil { - return err - } - return fl.f.Close() -} - -func newFileLock(path string) (fl fileLock, err error) { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return - } - err = setFileLock(f, true) - if err != nil { - f.Close() - return - } - fl = &unixFileLock{f: f} - return -} - -func setFileLock(f *os.File, lock bool) error { - how := syscall.LOCK_UN - if lock { - how = syscall.LOCK_EX - } - return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB) -} - -func rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -func syncDir(name string) error { - f, err := os.Open(name) - if err != nil { - return err - } - defer f.Close() - if err := f.Sync(); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go deleted file mode 100644 index 50c3c454e..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package storage - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procMoveFileExW = modkernel32.NewProc("MoveFileExW") -) - -const ( - _MOVEFILE_REPLACE_EXISTING = 1 -) - -type windowsFileLock struct { - fd syscall.Handle -} - -func (fl *windowsFileLock) release() error { - return syscall.Close(fl.fd) -} - -func newFileLock(path string) (fl fileLock, err error) { - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return - } - fd, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.CREATE_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0) - if err != nil { - return - } - fl = &windowsFileLock{fd: fd} - return -} - -func moveFileEx(from *uint16, to *uint16, flags uint32) error { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil -} - -func rename(oldpath, newpath string) error { - from, err := syscall.UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := syscall.UTF16PtrFromString(newpath) - if err != nil { - return err - } - return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING) -} - -func syncDir(name string) error { return nil } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go deleted file mode 100644 index fc1c8165d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "bytes" - "os" - "sync" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 3 - -type memStorageLock struct { - ms *memStorage -} - -func (lock *memStorageLock) Release() { - ms := lock.ms - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock == lock { - ms.slock = nil - } - return -} - -// memStorage is a memory-backed storage. -type memStorage struct { - mu sync.Mutex - slock *memStorageLock - files map[uint64]*memFile - manifest *memFilePtr -} - -// NewMemStorage returns a new memory-backed storage implementation. -func NewMemStorage() Storage { - return &memStorage{ - files: make(map[uint64]*memFile), - } -} - -func (ms *memStorage) Lock() (util.Releaser, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.slock != nil { - return nil, ErrLocked - } - ms.slock = &memStorageLock{ms: ms} - return ms.slock, nil -} - -func (*memStorage) Log(str string) {} - -func (ms *memStorage) GetFile(num uint64, t FileType) File { - return &memFilePtr{ms: ms, num: num, t: t} -} - -func (ms *memStorage) GetFiles(t FileType) ([]File, error) { - ms.mu.Lock() - var ff []File - for x, _ := range ms.files { - num, mt := x>>typeShift, FileType(x)&TypeAll - if mt&t == 0 { - continue - } - ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt}) - } - ms.mu.Unlock() - return ff, nil -} - -func (ms *memStorage) GetManifest() (File, error) { - ms.mu.Lock() - defer ms.mu.Unlock() - if ms.manifest == nil { - return nil, os.ErrNotExist - } - return ms.manifest, nil -} - -func (ms *memStorage) SetManifest(f File) error { - fm, ok := f.(*memFilePtr) - if !ok || fm.t != TypeManifest { - return ErrInvalidFile - } - ms.mu.Lock() - ms.manifest = fm - ms.mu.Unlock() - return nil -} - -func (*memStorage) Close() error { return nil } - -type memReader struct { - *bytes.Reader - m *memFile -} - -func (mr *memReader) Close() error { - return mr.m.Close() -} - -type memFile struct { - bytes.Buffer - ms *memStorage - open bool -} - -func (*memFile) Sync() error { return nil } -func (m *memFile) Close() error { - m.ms.mu.Lock() - m.open = false - m.ms.mu.Unlock() - return nil -} - -type memFilePtr struct { - ms *memStorage - num uint64 - t FileType -} - -func (p *memFilePtr) x() uint64 { - return p.Num()< -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package storage - -import ( - "bytes" - "testing" -) - -func TestMemStorage(t *testing.T) { - m := NewMemStorage() - - l, err := m.Lock() - if err != nil { - t.Fatal("storage lock failed(1): ", err) - } - _, err = m.Lock() - if err == nil { - t.Fatal("expect error for second storage lock attempt") - } else { - t.Logf("storage lock got error: %s (expected)", err) - } - l.Release() - _, err = m.Lock() - if err != nil { - t.Fatal("storage lock failed(2): ", err) - } - - f := m.GetFile(1, TypeTable) - if f.Num() != 1 && f.Type() != TypeTable { - t.Fatal("invalid file number and type") - } - w, _ := f.Create() - w.Write([]byte("abc")) - w.Close() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 1 { - t.Fatal("invalid GetFiles len") - } - buf := new(bytes.Buffer) - r, err := f.Open() - if err != nil { - t.Fatal("Open: got error: ", err) - } - buf.ReadFrom(r) - r.Close() - if got := buf.String(); got != "abc" { - t.Fatalf("Read: invalid value, want=abc got=%s", got) - } - if _, err := f.Open(); err != nil { - t.Fatal("Open: got error: ", err) - } - if _, err := m.GetFile(1, TypeTable).Open(); err == nil { - t.Fatal("expecting error") - } - f.Remove() - if ff, _ := m.GetFiles(TypeAll); len(ff) != 0 { - t.Fatal("invalid GetFiles len", len(ff)) - } - if _, err := f.Open(); err == nil { - t.Fatal("expecting error") - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go deleted file mode 100644 index 85dd70b06..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package storage provides storage abstraction for LevelDB. -package storage - -import ( - "errors" - "fmt" - "io" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -type FileType uint32 - -const ( - TypeManifest FileType = 1 << iota - TypeJournal - TypeTable - TypeTemp - - TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp -) - -func (t FileType) String() string { - switch t { - case TypeManifest: - return "manifest" - case TypeJournal: - return "journal" - case TypeTable: - return "table" - case TypeTemp: - return "temp" - } - return fmt.Sprintf("", t) -} - -var ( - ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument") - ErrLocked = errors.New("leveldb/storage: already locked") - ErrClosed = errors.New("leveldb/storage: closed") -) - -// Syncer is the interface that wraps basic Sync method. -type Syncer interface { - // Sync commits the current contents of the file to stable storage. - Sync() error -} - -// Reader is the interface that groups the basic Read, Seek, ReadAt and Close -// methods. -type Reader interface { - io.ReadSeeker - io.ReaderAt - io.Closer -} - -// Writer is the interface that groups the basic Write, Sync and Close -// methods. -type Writer interface { - io.WriteCloser - Syncer -} - -// File is the file. A file instance must be goroutine-safe. -type File interface { - // Open opens the file for read. Returns os.ErrNotExist error - // if the file does not exist. - // Returns ErrClosed if the underlying storage is closed. - Open() (r Reader, err error) - - // Create creates the file for writting. Truncate the file if - // already exist. - // Returns ErrClosed if the underlying storage is closed. 
- Create() (w Writer, err error) - - // Replace replaces file with newfile. - // Returns ErrClosed if the underlying storage is closed. - Replace(newfile File) error - - // Type returns the file type - Type() FileType - - // Num returns the file number. - Num() uint64 - - // Remove removes the file. - // Returns ErrClosed if the underlying storage is closed. - Remove() error -} - -// Storage is the storage. A storage instance must be goroutine-safe. -type Storage interface { - // Lock locks the storage. Any subsequent attempt to call Lock will fail - // until the last lock released. - // After use the caller should call the Release method. - Lock() (l util.Releaser, err error) - - // Log logs a string. This is used for logging. An implementation - // may write to a file, stdout or simply do nothing. - Log(str string) - - // GetFile returns a file for the given number and type. GetFile will never - // returns nil, even if the underlying storage is closed. - GetFile(num uint64, t FileType) File - - // GetFiles returns a slice of files that match the given file types. - // The file types may be OR'ed together. - GetFiles(t FileType) ([]File, error) - - // GetManifest returns a manifest file. Returns os.ErrNotExist if manifest - // file does not exist. - GetManifest() (File, error) - - // SetManifest sets the given file as manifest file. The given file should - // be a manifest file type or error will be returned. - SetManifest(f File) error - - // Close closes the storage. It is valid to call Close multiple times. - // Other methods should not be called after the storage has been closed. - Close() error -} - -// FileInfo wraps basic file info. -type FileInfo struct { - Type FileType - Num uint64 -} - -func (fi FileInfo) String() string { - switch fi.Type { - case TypeManifest: - return fmt.Sprintf("MANIFEST-%06d", fi.Num) - case TypeJournal: - return fmt.Sprintf("%06d.log", fi.Num) - case TypeTable: - return fmt.Sprintf("%06d.ldb", fi.Num) - case TypeTemp: - return fmt.Sprintf("%06d.tmp", fi.Num) - default: - return fmt.Sprintf("%#x-%d", fi.Type, fi.Num) - } -} - -// NewFileInfo creates new FileInfo from the given File. It will returns nil -// if File is nil. -func NewFileInfo(f File) *FileInfo { - if f == nil { - return nil - } - return &FileInfo{f.Type(), f.Num()} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go deleted file mode 100644 index dc1f1fb54..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENE file. 
- -package leveldb - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -const typeShift = 4 - -var ( - tsErrInvalidFile = errors.New("leveldb.testStorage: invalid file for argument") - tsErrFileOpen = errors.New("leveldb.testStorage: file still open") -) - -var ( - tsFSEnv = os.Getenv("GOLEVELDB_USEFS") - tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR") - tsKeepFS = tsFSEnv == "2" - tsFS = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1" - tsMU = &sync.Mutex{} - tsNum = 0 -) - -type tsOp uint - -const ( - tsOpOpen tsOp = iota - tsOpCreate - tsOpRead - tsOpReadAt - tsOpWrite - tsOpSync - - tsOpNum -) - -type tsLock struct { - ts *testStorage - r util.Releaser -} - -func (l tsLock) Release() { - l.r.Release() - l.ts.t.Log("I: storage lock released") -} - -type tsReader struct { - tf tsFile - storage.Reader -} - -func (tr tsReader) Read(b []byte) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpRead) { - return 0, errors.New("leveldb.testStorage: emulated read error") - } - n, err = tr.Reader.Read(b) - if err != nil && err != io.EOF { - ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err) - } - return -} - -func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) { - ts := tr.tf.ts - ts.countRead(tr.tf.Type()) - if tr.tf.shouldErrLocked(tsOpReadAt) { - return 0, errors.New("leveldb.testStorage: emulated readAt error") - } - n, err = tr.Reader.ReadAt(b, off) - if err != nil && err != io.EOF { - ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err) - } - return -} - -func (tr tsReader) Close() (err error) { - err = tr.Reader.Close() - tr.tf.close("reader", err) - return -} - -type tsWriter struct { - tf tsFile - storage.Writer -} - -func (tw tsWriter) Write(b []byte) (n int, err error) { - if tw.tf.shouldErrLocked(tsOpWrite) { - return 0, errors.New("leveldb.testStorage: emulated write error") - } - n, err = tw.Writer.Write(b) - if err != nil { - tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err) - } - return -} - -func (tw tsWriter) Sync() (err error) { - ts := tw.tf.ts - ts.mu.Lock() - for ts.emuDelaySync&tw.tf.Type() != 0 { - ts.cond.Wait() - } - ts.mu.Unlock() - if tw.tf.shouldErrLocked(tsOpSync) { - return errors.New("leveldb.testStorage: emulated sync error") - } - err = tw.Writer.Sync() - if err != nil { - tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err) - } - return -} - -func (tw tsWriter) Close() (err error) { - err = tw.Writer.Close() - tw.tf.close("writer", err) - return -} - -type tsFile struct { - ts *testStorage - storage.File -} - -func (tf tsFile) x() uint64 { - return tf.Num()<>typeShift, storage.FileType(x)&storage.TypeAll - ts.t.Errorf("E: * num=%d type=%v writer=%v", num, tt, writer) - } - } - ts.mu.Unlock() -} - -func newTestStorage(t *testing.T) *testStorage { - var stor storage.Storage - var closeFn func() error - if tsFS { - for { - tsMU.Lock() - num := tsNum - tsNum++ - tsMU.Unlock() - tempdir := tsTempdir - if tempdir == "" { - tempdir = os.TempDir() - } - path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); err != nil { - stor, err = storage.OpenFile(path) - if err != nil { - t.Fatalf("F: cannot create storage: %v", err) - 
} - t.Logf("I: storage created: %s", path) - closeFn = func() error { - for _, name := range []string{"LOG.old", "LOG"} { - f, err := os.Open(filepath.Join(path, name)) - if err != nil { - continue - } - if log, err := ioutil.ReadAll(f); err != nil { - t.Logf("---------------------- %s ----------------------", name) - t.Logf("cannot read log: %v", err) - t.Logf("---------------------- %s ----------------------", name) - } else if len(log) > 0 { - t.Logf("---------------------- %s ----------------------\n%s", name, string(log)) - t.Logf("---------------------- %s ----------------------", name) - } - f.Close() - } - if t.Failed() { - t.Logf("testing failed, test DB preserved at %s", path) - return nil - } - if tsKeepFS { - return nil - } - return os.RemoveAll(path) - } - - break - } - } - } else { - stor = storage.NewMemStorage() - } - ts := &testStorage{ - t: t, - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - emuErrOnceMap: make(map[uint64]uint), - emuRandErrProb: 0x999, - emuRandRand: rand.New(rand.NewSource(0xfacedead)), - } - ts.cond.L = &ts.mu - return ts -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go deleted file mode 100644 index 3e8df6af5..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/table" - "github.com/syndtr/goleveldb/leveldb/util" -) - -// tFile holds basic information about a table. -type tFile struct { - file storage.File - seekLeft int32 - size uint64 - imin, imax iKey -} - -// Returns true if given key is after largest key of this table. -func (t *tFile) after(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 -} - -// Returns true if given key is before smallest key of this table. -func (t *tFile) before(icmp *iComparer, ukey []byte) bool { - return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 -} - -// Returns true if given key range overlaps with this table key range. -func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { - return !t.after(icmp, umin) && !t.before(icmp, umax) -} - -// Cosumes one seek and return current seeks left. -func (t *tFile) consumeSeek() int32 { - return atomic.AddInt32(&t.seekLeft, -1) -} - -// Creates new tFile. -func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile { - f := &tFile{ - file: file, - size: size, - imin: imin, - imax: imax, - } - - // We arrange to automatically compact this file after - // a certain number of seeks. Let's assume: - // (1) One seek costs 10ms - // (2) Writing or reading 1MB costs 10ms (100MB/s) - // (3) A compaction of 1MB does 25MB of IO: - // 1MB read from this level - // 10-12MB read from next level (boundaries may be misaligned) - // 10-12MB written to next level - // This implies that 25 seeks cost the same as the compaction - // of 1MB of data. I.e., one seek costs approximately the - // same as the compaction of 40KB of data. 
We are a little - // conservative and allow approximately one seek for every 16KB - // of data before triggering a compaction. - f.seekLeft = int32(size / 16384) - if f.seekLeft < 100 { - f.seekLeft = 100 - } - - return f -} - -// tFiles hold multiple tFile. -type tFiles []*tFile - -func (tf tFiles) Len() int { return len(tf) } -func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } - -func (tf tFiles) nums() string { - x := "[ " - for i, f := range tf { - if i != 0 { - x += ", " - } - x += fmt.Sprint(f.file.Num()) - } - x += " ]" - return x -} - -// Returns true if i smallest key is less than j. -// This used for sort by key in ascending order. -func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { - a, b := tf[i], tf[j] - n := icmp.Compare(a.imin, b.imin) - if n == 0 { - return a.file.Num() < b.file.Num() - } - return n < 0 -} - -// Returns true if i file number is greater than j. -// This used for sort by file number in descending order. -func (tf tFiles) lessByNum(i, j int) bool { - return tf[i].file.Num() > tf[j].file.Num() -} - -// Sorts tables by key in ascending order. -func (tf tFiles) sortByKey(icmp *iComparer) { - sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) -} - -// Sorts tables by file number in descending order. -func (tf tFiles) sortByNum() { - sort.Sort(&tFilesSortByNum{tFiles: tf}) -} - -// Returns sum of all tables size. -func (tf tFiles) size() (sum uint64) { - for _, t := range tf { - sum += t.size - } - return sum -} - -// Searches smallest index of tables whose its smallest -// key is after or equal with given key. -func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imin, ikey) >= 0 - }) -} - -// Searches smallest index of tables whose its largest -// key is after or equal with given key. -func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int { - return sort.Search(len(tf), func(i int) bool { - return icmp.Compare(tf[i].imax, ikey) >= 0 - }) -} - -// Returns true if given key range overlaps with one or more -// tables key range. If unsorted is true then binary search will not be used. -func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { - if unsorted { - // Check against all files. - for _, t := range tf { - if t.overlaps(icmp, umin, umax) { - return true - } - } - return false - } - - i := 0 - if len(umin) > 0 { - // Find the earliest possible internal key for min. - i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek)) - } - if i >= len(tf) { - // Beginning of range is after all files, so no overlap. - return false - } - return !tf[i].before(icmp, umax) -} - -// Returns tables whose its key range overlaps with given key range. -// Range will be expanded if ukey found hop across tables. -// If overlapped is true then the search will be restarted if umax -// expanded. -// The dst content will be overwritten. -func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { - dst = dst[:0] - for i := 0; i < len(tf); { - t := tf[i] - if t.overlaps(icmp, umin, umax) { - if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { - umin = t.imin.ukey() - dst = dst[:0] - i = 0 - continue - } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { - umax = t.imax.ukey() - // Restart search if it is overlapped. - if overlapped { - dst = dst[:0] - i = 0 - continue - } - } - - dst = append(dst, t) - } - i++ - } - - return dst -} - -// Returns tables key range. 
-func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) { - for i, t := range tf { - if i == 0 { - imin, imax = t.imin, t.imax - continue - } - if icmp.Compare(t.imin, imin) < 0 { - imin = t.imin - } - if icmp.Compare(t.imax, imax) > 0 { - imax = t.imax - } - } - - return -} - -// Creates iterator index from tables. -func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { - if slice != nil { - var start, limit int - if slice.Start != nil { - start = tf.searchMax(icmp, iKey(slice.Start)) - } - if slice.Limit != nil { - limit = tf.searchMin(icmp, iKey(slice.Limit)) - } else { - limit = tf.Len() - } - tf = tf[start:limit] - } - return iterator.NewArrayIndexer(&tFilesArrayIndexer{ - tFiles: tf, - tops: tops, - icmp: icmp, - slice: slice, - ro: ro, - }) -} - -// Tables iterator index. -type tFilesArrayIndexer struct { - tFiles - tops *tOps - icmp *iComparer - slice *util.Range - ro *opt.ReadOptions -} - -func (a *tFilesArrayIndexer) Search(key []byte) int { - return a.searchMax(a.icmp, iKey(key)) -} - -func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { - if i == 0 || i == a.Len()-1 { - return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) - } - return a.tops.newIterator(a.tFiles[i], nil, a.ro) -} - -// Helper type for sortByKey. -type tFilesSortByKey struct { - tFiles - icmp *iComparer -} - -func (x *tFilesSortByKey) Less(i, j int) bool { - return x.lessByKey(x.icmp, i, j) -} - -// Helper type for sortByNum. -type tFilesSortByNum struct { - tFiles -} - -func (x *tFilesSortByNum) Less(i, j int) bool { - return x.lessByNum(i, j) -} - -// Table operations. -type tOps struct { - s *session - cache *cache.Cache - bcache *cache.Cache - bpool *util.BufferPool -} - -// Creates an empty table and returns table writer. -func (t *tOps) create() (*tWriter, error) { - file := t.s.getTableFile(t.s.allocFileNum()) - fw, err := file.Create() - if err != nil { - return nil, err - } - return &tWriter{ - t: t, - file: file, - w: fw, - tw: table.NewWriter(fw, t.s.o.Options), - }, nil -} - -// Builds table from src iterator. -func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { - w, err := t.create() - if err != nil { - return - } - - defer func() { - if err != nil { - w.drop() - } - }() - - for src.Next() { - err = w.append(src.Key(), src.Value()) - if err != nil { - return - } - } - err = src.Error() - if err != nil { - return - } - - n = w.tw.EntriesLen() - f, err = w.finish() - return -} - -// Opens table. It returns a cache handle, which should -// be released after use. -func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { - num := f.file.Num() - ch = t.cache.Get(0, num, func() (size int, value cache.Value) { - var r storage.Reader - r, err = f.file.Open() - if err != nil { - return 0, nil - } - - var bcache *cache.CacheGetter - if t.bcache != nil { - bcache = &cache.CacheGetter{Cache: t.bcache, NS: num} - } - - var tr *table.Reader - tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options) - if err != nil { - r.Close() - return 0, nil - } - return 1, tr - - }) - if ch == nil && err == nil { - err = ErrClosed - } - return -} - -// Finds key/value pair whose key is greater than or equal to the -// given key. 
-func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).Find(key, true, ro) -} - -// Finds key that is greater than or equal to the given key. -func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { - ch, err := t.open(f) - if err != nil { - return nil, err - } - defer ch.Release() - return ch.Value().(*table.Reader).FindKey(key, true, ro) -} - -// Returns approximate offset of the given key. -func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) { - ch, err := t.open(f) - if err != nil { - return - } - defer ch.Release() - offset_, err := ch.Value().(*table.Reader).OffsetOf(key) - return uint64(offset_), err -} - -// Creates an iterator from the given table. -func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - ch, err := t.open(f) - if err != nil { - return iterator.NewEmptyIterator(err) - } - iter := ch.Value().(*table.Reader).NewIterator(slice, ro) - iter.SetReleaser(ch) - return iter -} - -// Removes table from persistent storage. It waits until -// no one use the the table. -func (t *tOps) remove(f *tFile) { - num := f.file.Num() - t.cache.Delete(0, num, func() { - if err := f.file.Remove(); err != nil { - t.s.logf("table@remove removing @%d %q", num, err) - } else { - t.s.logf("table@remove removed @%d", num) - } - if t.bcache != nil { - t.bcache.EvictNS(num) - } - }) -} - -// Closes the table ops instance. It will close all tables, -// regadless still used or not. -func (t *tOps) close() { - t.bpool.Close() - t.cache.Close() - if t.bcache != nil { - t.bcache.Close() - } -} - -// Creates new initialized table ops instance. -func newTableOps(s *session) *tOps { - var ( - cacher cache.Cacher - bcache *cache.Cache - ) - if s.o.GetOpenFilesCacheCapacity() > 0 { - cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) - } - if !s.o.DisableBlockCache { - var bcacher cache.Cacher - if s.o.GetBlockCacheCapacity() > 0 { - bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity()) - } - bcache = cache.NewCache(bcacher) - } - return &tOps{ - s: s, - cache: cache.NewCache(cacher), - bcache: bcache, - bpool: util.NewBufferPool(s.o.GetBlockSize() + 5), - } -} - -// tWriter wraps the table writer. It keep track of file descriptor -// and added key range. -type tWriter struct { - t *tOps - - file storage.File - w storage.Writer - tw *table.Writer - - first, last []byte -} - -// Append key/value pair to the table. -func (w *tWriter) append(key, value []byte) error { - if w.first == nil { - w.first = append([]byte{}, key...) - } - w.last = append(w.last[:0], key...) - return w.tw.Append(key, value) -} - -// Returns true if the table is empty. -func (w *tWriter) empty() bool { - return w.first == nil -} - -// Closes the storage.Writer. -func (w *tWriter) close() { - if w.w != nil { - w.w.Close() - w.w = nil - } -} - -// Finalizes the table and returns table file. -func (w *tWriter) finish() (f *tFile, err error) { - defer w.close() - err = w.tw.Close() - if err != nil { - return - } - err = w.w.Sync() - if err != nil { - return - } - f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last)) - return -} - -// Drops the table. 
-func (w *tWriter) drop() { - w.close() - w.file.Remove() - w.t.s.reuseFileNum(w.file.Num()) - w.file = nil - w.tw = nil - w.first = nil - w.last = nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go deleted file mode 100644 index 00e6f9eea..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type blockTesting struct { - tr *Reader - b *block -} - -func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.tr.newBlockIter(t.b, nil, slice, false) -} - -var _ = testutil.Defer(func() { - Describe("Block", func() { - Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting { - // Building the block. - bw := &blockWriter{ - restartInterval: restartInterval, - scratch: make([]byte, 30), - } - kv.Iterate(func(i int, key, value []byte) { - bw.append(key, value) - }) - bw.finish() - - // Opening the block. - data := bw.buf.Bytes() - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - return &blockTesting{ - tr: &Reader{cmp: comparer.DefaultComparer}, - b: &block{ - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - }, - } - } - - Describe("read test", func() { - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - kv := &testutil.KeyValue{} - Text := func() string { - return fmt.Sprintf("and %d keys", kv.Len()) - } - - Test := func() { - // Make block. - br := Build(kv, restartInterval) - // Do testing. - testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) - } - - Describe(Text(), Test) - - kv.PutString("", "empty") - Describe(Text(), Test) - - kv.PutString("a1", "foo") - Describe(Text(), Test) - - kv.PutString("a2", "v") - Describe(Text(), Test) - - kv.PutString("a3qqwrkks", "hello") - Describe(Text(), Test) - - kv.PutString("a4", "bar") - Describe(Text(), Test) - - kv.PutString("a5111111", "v5") - kv.PutString("a6", "") - kv.PutString("a7", "v7") - kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") - kv.PutString("b", "v9") - kv.PutString("c9", "v9") - kv.PutString("c91", "v9") - kv.PutString("d0", "v9") - Describe(Text(), Test) - }) - } - }) - - Describe("out-of-bound slice test", func() { - kv := &testutil.KeyValue{} - kv.PutString("k1", "v1") - kv.PutString("k2", "v2") - kv.PutString("k3abcdefgg", "v3") - kv.PutString("k4", "v4") - kv.PutString("k5", "v5") - for restartInterval := 1; restartInterval <= 5; restartInterval++ { - Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { - // Make block. 
- bt := Build(kv, restartInterval) - - Test := func(r *util.Range) func(done Done) { - return func(done Done) { - iter := bt.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := testutil.IteratorTesting{ - KeyValue: kv.Clone(), - Iter: iter, - } - - testutil.DoIteratorTesting(&t) - iter.Release() - done <- true - } - } - - It("Should do iterations and seeks correctly #0", - Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) - - It("Should do iterations and seeks correctly #1", - Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) - }) - } - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go deleted file mode 100644 index 6f38e84b3..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "encoding/binary" - "fmt" - "io" - "sort" - "strings" - "sync" - - "github.com/syndtr/gosnappy/snappy" - - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - ErrNotFound = errors.ErrNotFound - ErrReaderReleased = errors.New("leveldb/table: reader released") - ErrIterReleased = errors.New("leveldb/table: iterator released") -) - -type ErrCorrupted struct { - Pos int64 - Size int64 - Kind string - Reason string -} - -func (e *ErrCorrupted) Error() string { - return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason) -} - -func max(x, y int) int { - if x > y { - return x - } - return y -} - -type block struct { - bpool *util.BufferPool - bh blockHandle - data []byte - restartsLen int - restartsOffset int -} - -func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) { - index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) - offset += 1 // shared always zero, since this is a restart point - v1, n1 := binary.Uvarint(b.data[offset:]) // key length - _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length - m := offset + n1 + n2 - return cmp.Compare(b.data[m:m+int(v1)], key) > 0 - }) + rstart - 1 - if index < rstart { - // The smallest key is greater-than key sought. 
- index = rstart - } - offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) - return -} - -func (b *block) restartIndex(rstart, rlimit, offset int) int { - return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset - }) + rstart - 1 -} - -func (b *block) restartOffset(index int) int { - return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:])) -} - -func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) { - if offset >= b.restartsOffset { - if offset != b.restartsOffset { - err = &ErrCorrupted{Reason: "entries offset not aligned"} - } - return - } - v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length - v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length - v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length - m := n0 + n1 + n2 - n = m + int(v1) + int(v2) - if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset { - err = &ErrCorrupted{Reason: "entries corrupted"} - return - } - key = b.data[offset+m : offset+m+int(v1)] - value = b.data[offset+m+int(v1) : offset+n] - nShared = int(v0) - return -} - -func (b *block) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type dir int - -const ( - dirReleased dir = iota - 1 - dirSOI - dirEOI - dirBackward - dirForward -) - -type blockIter struct { - tr *Reader - block *block - blockReleaser util.Releaser - releaser util.Releaser - key, value []byte - offset int - // Previous offset, only filled by Next. - prevOffset int - prevNode []int - prevKeys []byte - restartIndex int - // Iterator direction. - dir dir - // Restart index slice range. - riStart int - riLimit int - // Offset slice range. - offsetStart int - offsetRealStart int - offsetLimit int - // Error. 
- err error -} - -func (i *blockIter) sErr(err error) { - i.err = err - i.key = nil - i.value = nil - i.prevNode = nil - i.prevKeys = nil -} - -func (i *blockIter) reset() { - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.restartIndex = i.riStart - i.offset = i.offsetStart - i.dir = dirSOI - i.key = i.key[:0] - i.value = nil -} - -func (i *blockIter) isFirst() bool { - switch i.dir { - case dirForward: - return i.prevOffset == i.offsetRealStart - case dirBackward: - return len(i.prevNode) == 1 && i.restartIndex == i.riStart - } - return false -} - -func (i *blockIter) isLast() bool { - switch i.dir { - case dirForward, dirBackward: - return i.offset == i.offsetLimit - } - return false -} - -func (i *blockIter) First() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirSOI - return i.Next() -} - -func (i *blockIter) Last() bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - i.dir = dirEOI - return i.Prev() -} - -func (i *blockIter) Seek(key []byte) bool { - if i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key) - if err != nil { - i.sErr(err) - return false - } - i.restartIndex = ri - i.offset = max(i.offsetStart, offset) - if i.dir == dirSOI || i.dir == dirEOI { - i.dir = dirForward - } - for i.Next() { - if i.tr.cmp.Compare(i.key, key) >= 0 { - return true - } - } - return false -} - -func (i *blockIter) Next() bool { - if i.dir == dirEOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - if i.dir == dirSOI { - i.restartIndex = i.riStart - i.offset = i.offsetStart - } else if i.dir == dirBackward { - i.prevNode = i.prevNode[:0] - i.prevKeys = i.prevKeys[:0] - } - for i.offset < i.offsetRealStart { - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.offset += n - } - if i.offset >= i.offsetLimit { - i.dir = dirEOI - if i.offset != i.offsetLimit { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - } - return false - } - key, value, nShared, n, err := i.block.entry(i.offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if n == 0 { - i.dir = dirEOI - return false - } - i.key = append(i.key[:nShared], key...) - i.value = value - i.prevOffset = i.offset - i.offset += n - i.dir = dirForward - return true -} - -func (i *blockIter) Prev() bool { - if i.dir == dirSOI || i.err != nil { - return false - } else if i.dir == dirReleased { - i.err = ErrIterReleased - return false - } - - var ri int - if i.dir == dirForward { - // Change direction. - i.offset = i.prevOffset - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset) - i.dir = dirBackward - } else if i.dir == dirEOI { - // At the end of iterator. 
- i.restartIndex = i.riLimit - i.offset = i.offsetLimit - if i.offset == i.offsetRealStart { - i.dir = dirSOI - return false - } - ri = i.riLimit - 1 - i.dir = dirBackward - } else if len(i.prevNode) == 1 { - // This is the end of a restart range. - i.offset = i.prevNode[0] - i.prevNode = i.prevNode[:0] - if i.restartIndex == i.riStart { - i.dir = dirSOI - return false - } - i.restartIndex-- - ri = i.restartIndex - } else { - // In the middle of restart range, get from cache. - n := len(i.prevNode) - 3 - node := i.prevNode[n:] - i.prevNode = i.prevNode[:n] - // Get the key. - ko := node[0] - i.key = append(i.key[:0], i.prevKeys[ko:]...) - i.prevKeys = i.prevKeys[:ko] - // Get the value. - vo := node[1] - vl := vo + node[2] - i.value = i.block.data[vo:vl] - i.offset = vl - return true - } - // Build entries cache. - i.key = i.key[:0] - i.value = nil - offset := i.block.restartOffset(ri) - if offset == i.offset { - ri -= 1 - if ri < 0 { - i.dir = dirSOI - return false - } - offset = i.block.restartOffset(ri) - } - i.prevNode = append(i.prevNode, offset) - for { - key, value, nShared, n, err := i.block.entry(offset) - if err != nil { - i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err)) - return false - } - if offset >= i.offsetRealStart { - if i.value != nil { - // Appends 3 variables: - // 1. Previous keys offset - // 2. Value offset in the data block - // 3. Value length - i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value)) - i.prevKeys = append(i.prevKeys, i.key...) - } - i.value = value - } - i.key = append(i.key[:nShared], key...) - offset += n - // Stop if target offset reached. - if offset >= i.offset { - if offset != i.offset { - i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned")) - return false - } - - break - } - } - i.restartIndex = ri - i.offset = offset - return true -} - -func (i *blockIter) Key() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.key -} - -func (i *blockIter) Value() []byte { - if i.err != nil || i.dir <= dirEOI { - return nil - } - return i.value -} - -func (i *blockIter) Release() { - if i.dir != dirReleased { - i.tr = nil - i.block = nil - i.prevNode = nil - i.prevKeys = nil - i.key = nil - i.value = nil - i.dir = dirReleased - if i.blockReleaser != nil { - i.blockReleaser.Release() - i.blockReleaser = nil - } - if i.releaser != nil { - i.releaser.Release() - i.releaser = nil - } - } -} - -func (i *blockIter) SetReleaser(releaser util.Releaser) { - if i.dir == dirReleased { - panic(util.ErrReleased) - } - if i.releaser != nil && releaser != nil { - panic(util.ErrHasReleaser) - } - i.releaser = releaser -} - -func (i *blockIter) Valid() bool { - return i.err == nil && (i.dir == dirBackward || i.dir == dirForward) -} - -func (i *blockIter) Error() error { - return i.err -} - -type filterBlock struct { - bpool *util.BufferPool - data []byte - oOffset int - baseLg uint - filtersNum int -} - -func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool { - i := int(offset >> b.baseLg) - if i < b.filtersNum { - o := b.data[b.oOffset+i*4:] - n := int(binary.LittleEndian.Uint32(o)) - m := int(binary.LittleEndian.Uint32(o[4:])) - if n < m && m <= b.oOffset { - return filter.Contains(b.data[n:m], key) - } else if n == m { - return false - } - } - return true -} - -func (b *filterBlock) Release() { - b.bpool.Put(b.data) - b.bpool = nil - b.data = nil -} - -type indexIter struct { - *blockIter - tr *Reader - slice *util.Range - // Options - fillCache bool -} - -func (i 
*indexIter) Get() iterator.Iterator { - value := i.Value() - if value == nil { - return nil - } - dataBH, n := decodeBlockHandle(value) - if n == 0 { - return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle")) - } - - var slice *util.Range - if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) { - slice = i.slice - } - return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache) -} - -// Reader is a table reader. -type Reader struct { - mu sync.RWMutex - fi *storage.FileInfo - reader io.ReaderAt - cache *cache.CacheGetter - err error - bpool *util.BufferPool - // Options - o *opt.Options - cmp comparer.Comparer - filter filter.Filter - verifyChecksum bool - - dataEnd int64 - metaBH, indexBH, filterBH blockHandle - indexBlock *block - filterBlock *filterBlock -} - -func (r *Reader) blockKind(bh blockHandle) string { - switch bh.offset { - case r.metaBH.offset: - return "meta-block" - case r.indexBH.offset: - return "index-block" - case r.filterBH.offset: - if r.filterBH.length > 0 { - return "filter-block" - } - } - return "data-block" -} - -func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error { - return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}} -} - -func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error { - return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason) -} - -func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error { - if cerr, ok := err.(*ErrCorrupted); ok { - cerr.Pos = int64(bh.offset) - cerr.Size = int64(bh.length) - cerr.Kind = r.blockKind(bh) - return &errors.ErrCorrupted{File: r.fi, Err: cerr} - } - return err -} - -func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) { - data := r.bpool.Get(int(bh.length + blockTrailerLen)) - if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF { - return nil, err - } - - if verifyChecksum { - n := bh.length + 1 - checksum0 := binary.LittleEndian.Uint32(data[n:]) - checksum1 := util.NewCRC(data[:n]).Value() - if checksum0 != checksum1 { - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1)) - } - } - - switch data[bh.length] { - case blockTypeNoCompression: - data = data[:bh.length] - case blockTypeSnappyCompression: - decLen, err := snappy.DecodedLen(data[:bh.length]) - if err != nil { - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - decData := r.bpool.Get(decLen) - decData, err = snappy.Decode(decData, data[:bh.length]) - r.bpool.Put(data) - if err != nil { - r.bpool.Put(decData) - return nil, r.newErrCorruptedBH(bh, err.Error()) - } - data = decData - default: - r.bpool.Put(data) - return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length])) - } - return data, nil -} - -func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) { - data, err := r.readRawBlock(bh, verifyChecksum) - if err != nil { - return nil, err - } - restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) - b := &block{ - bpool: r.bpool, - bh: bh, - data: data, - restartsLen: restartsLen, - restartsOffset: len(data) - (restartsLen+1)*4, - } - return b, nil -} - -func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if 
fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *block - b, err = r.readBlock(bh, verifyChecksum) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*block) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readBlock(bh, verifyChecksum) - return b, b, err -} - -func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) { - data, err := r.readRawBlock(bh, true) - if err != nil { - return nil, err - } - n := len(data) - if n < 5 { - return nil, r.newErrCorruptedBH(bh, "too short") - } - m := n - 5 - oOffset := int(binary.LittleEndian.Uint32(data[m:])) - if oOffset > m { - return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset") - } - b := &filterBlock{ - bpool: r.bpool, - data: data, - oOffset: oOffset, - baseLg: uint(data[n-1]), - filtersNum: (m - oOffset) / 4, - } - return b, nil -} - -func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) { - if r.cache != nil { - var ( - err error - ch *cache.Handle - ) - if fillCache { - ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) { - var b *filterBlock - b, err = r.readFilterBlock(bh) - if err != nil { - return 0, nil - } - return cap(b.data), b - }) - } else { - ch = r.cache.Get(bh.offset, nil) - } - if ch != nil { - b, ok := ch.Value().(*filterBlock) - if !ok { - ch.Release() - return nil, nil, errors.New("leveldb/table: inconsistent block type") - } - return b, ch, err - } else if err != nil { - return nil, nil, err - } - } - - b, err := r.readFilterBlock(bh) - return b, b, err -} - -func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) { - if r.indexBlock == nil { - return r.readBlockCached(r.indexBH, true, fillCache) - } - return r.indexBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) { - if r.filterBlock == nil { - return r.readFilterBlockCached(r.filterBH, fillCache) - } - return r.filterBlock, util.NoopReleaser{}, nil -} - -func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter { - bi := &blockIter{ - tr: r, - block: b, - blockReleaser: bReleaser, - // Valid key should never be nil. 
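Another illustrative aside: readFilterBlock above recovers the filter layout from the block's last five bytes (a uint32 pointing at the per-filter offset array plus one base-Lg byte), and contains() maps a data-block file offset to a filter index via offset >> baseLg. A self-contained sketch under the assumption of a single fake 4-byte filter datum; parseFilterBlock and filterRange are my own names, and the datum bytes are placeholders, not a real bloom filter.

package main

import (
    "encoding/binary"
    "fmt"
)

type filterBlock struct {
    data       []byte
    oOffset    int
    baseLg     uint
    filtersNum int
}

// parseFilterBlock mirrors readFilterBlock above: the 4 bytes before the
// final base-Lg byte give the start of the per-filter offset array.
func parseFilterBlock(data []byte) (*filterBlock, error) {
    n := len(data)
    if n < 5 {
        return nil, fmt.Errorf("filter block too short")
    }
    m := n - 5
    oOffset := int(binary.LittleEndian.Uint32(data[m:]))
    if oOffset > m {
        return nil, fmt.Errorf("invalid data-offsets offset")
    }
    return &filterBlock{
        data:       data,
        oOffset:    oOffset,
        baseLg:     uint(data[n-1]),
        filtersNum: (m - oOffset) / 4,
    }, nil
}

// filterRange returns the byte range of the filter datum covering a data
// block that starts at the given file offset, as contains() does before
// handing that slice to filter.Contains.
func (b *filterBlock) filterRange(offset uint64) (start, end int, ok bool) {
    i := int(offset >> b.baseLg)
    if i >= b.filtersNum {
        return 0, 0, false
    }
    o := b.data[b.oOffset+i*4:]
    return int(binary.LittleEndian.Uint32(o)), int(binary.LittleEndian.Uint32(o[4:])), true
}

func main() {
    data := []byte{0xde, 0xad, 0xbe, 0xef} // placeholder filter datum
    // Offset array: start of datum 0, then end-of-data, which doubles as the
    // data-offsets offset that parseFilterBlock reads back.
    for _, o := range []uint32{0, 4} {
        var u [4]byte
        binary.LittleEndian.PutUint32(u[:], o)
        data = append(data, u[:]...)
    }
    data = append(data, 11) // base Lg: one filter per 2 KiB of data offsets

    fb, err := parseFilterBlock(data)
    fmt.Println(err, fb.filtersNum)   // <nil> 1
    fmt.Println(fb.filterRange(100))  // 0 4 true
    fmt.Println(fb.filterRange(4096)) // 0 0 false
}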
- key: make([]byte, 0), - dir: dirSOI, - riStart: 0, - riLimit: b.restartsLen, - offsetStart: 0, - offsetRealStart: 0, - offsetLimit: b.restartsOffset, - } - if slice != nil { - if slice.Start != nil { - if bi.Seek(slice.Start) { - bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset) - bi.offsetStart = b.restartOffset(bi.riStart) - bi.offsetRealStart = bi.prevOffset - } else { - bi.riStart = b.restartsLen - bi.offsetStart = b.restartsOffset - bi.offsetRealStart = b.restartsOffset - } - } - if slice.Limit != nil { - if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) { - bi.offsetLimit = bi.prevOffset - bi.riLimit = bi.restartIndex + 1 - } - } - bi.reset() - if bi.offsetStart > bi.offsetLimit { - bi.sErr(errors.New("leveldb/table: invalid slice range")) - } - } - return bi -} - -func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - return r.newBlockIter(b, rel, slice, false) -} - -func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - return r.getDataIter(dataBH, slice, verifyChecksum, fillCache) -} - -// NewIterator creates an iterator from the table. -// -// Slice allows slicing the iterator to only contains keys in the given -// range. A nil Range.Start is treated as a key before all keys in the -// table. And a nil Range.Limit is treated as a key after all keys in -// the table. -// -// The returned iterator is not goroutine-safe and should be released -// when not used. -// -// Also read Iterator documentation of the leveldb/iterator package. 
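For orientation, a usage sketch against the vendored API shown in this file: build a small table with the package's Writer, reopen it with NewReader (fi, cache and bpool may be nil, as the tests below demonstrate), then iterate a key range and look up an inexact key with Find. The keys and values here are made up and error handling is abbreviated; this is not a definitive client, just a sketch.

package main

import (
    "bytes"
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/table"
    "github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
    o := &opt.Options{BlockSize: 1024, Compression: opt.NoCompression}

    // Build a tiny table in memory.
    buf := &bytes.Buffer{}
    tw := table.NewWriter(buf, o)
    for _, kv := range [][2]string{{"k01", "v1"}, {"k02", "v2"}, {"k03", "v3"}, {"k04", "v4"}} {
        if err := tw.Append([]byte(kv[0]), []byte(kv[1])); err != nil {
            panic(err)
        }
    }
    if err := tw.Close(); err != nil {
        panic(err)
    }

    // Reopen the table from the written bytes.
    tr, err := table.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
    if err != nil {
        panic(err)
    }

    // Iterate only keys in [k02, k04): a nil Start or Limit would mean
    // "before all keys" / "after all keys" respectively.
    it := tr.NewIterator(&util.Range{Start: []byte("k02"), Limit: []byte("k04")}, nil)
    for it.Next() {
        fmt.Printf("%s = %s\n", it.Key(), it.Value()) // k02 = v2, k03 = v3
    }
    it.Release()

    // Find returns the first key >= the argument, so an inexact key still hits.
    rkey, rvalue, err := tr.Find([]byte("k02a"), false, nil)
    fmt.Println(string(rkey), string(rvalue), err) // k03 v3 <nil>

    tr.Release()
}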
-func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - return iterator.NewEmptyIterator(r.err) - } - - fillCache := !ro.GetDontFillCache() - indexBlock, rel, err := r.getIndexBlock(fillCache) - if err != nil { - return iterator.NewEmptyIterator(err) - } - index := &indexIter{ - blockIter: r.newBlockIter(indexBlock, rel, slice, true), - tr: r, - slice: slice, - fillCache: !ro.GetDontFillCache(), - } - return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader)) -} - -func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.getIndexBlock(true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if !index.Seek(key) { - err = index.Error() - if err == nil { - err = ErrNotFound - } - return - } - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - if filtered && r.filter != nil { - filterBlock, frel, ferr := r.getFilterBlock(true) - if ferr == nil { - if !filterBlock.contains(r.filter, dataBH.offset, key) { - frel.Release() - return nil, nil, ErrNotFound - } - frel.Release() - } else if !errors.IsCorrupted(ferr) { - err = ferr - return - } - } - data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache()) - defer data.Release() - if !data.Seek(key) { - err = data.Error() - if err == nil { - err = ErrNotFound - } - return - } - // Don't use block buffer, no need to copy the buffer. - rkey = data.Key() - if !noValue { - if r.bpool == nil { - value = data.Value() - } else { - // Use block buffer, and since the buffer will be recycled, the buffer - // need to be copied. - value = append([]byte{}, data.Value()...) - } - } - return -} - -// Find finds key/value pair whose key is greater than or equal to the -// given key. It returns ErrNotFound if the table doesn't contain -// such pair. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such pair doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) { - return r.find(key, filtered, ro, false) -} - -// Find finds key that is greater than or equal to the given key. -// It returns ErrNotFound if the table doesn't contain such key. -// If filtered is true then the nearest 'block' will be checked against -// 'filter data' (if present) and will immediately return ErrNotFound if -// 'filter data' indicates that such key doesn't exist. -// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) { - rkey, _, err = r.find(key, filtered, ro, true) - return -} - -// Get gets the value for the given key. It returns errors.ErrNotFound -// if the table does not contain the key. 
-// -// The caller may modify the contents of the returned slice as it is its -// own copy. -// It is safe to modify the contents of the argument after Find returns. -func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - rkey, value, err := r.find(key, false, ro, false) - if err == nil && r.cmp.Compare(rkey, key) != 0 { - value = nil - err = ErrNotFound - } - return -} - -// OffsetOf returns approximate offset for the given key. -// -// It is safe to modify the contents of the argument after Get returns. -func (r *Reader) OffsetOf(key []byte) (offset int64, err error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.err != nil { - err = r.err - return - } - - indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true) - if err != nil { - return - } - defer rel.Release() - - index := r.newBlockIter(indexBlock, nil, nil, true) - defer index.Release() - if index.Seek(key) { - dataBH, n := decodeBlockHandle(index.Value()) - if n == 0 { - r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle") - return - } - offset = int64(dataBH.offset) - return - } - err = index.Error() - if err == nil { - offset = r.dataEnd - } - return -} - -// Release implements util.Releaser. -// It also close the file if it is an io.Closer. -func (r *Reader) Release() { - r.mu.Lock() - defer r.mu.Unlock() - - if closer, ok := r.reader.(io.Closer); ok { - closer.Close() - } - if r.indexBlock != nil { - r.indexBlock.Release() - r.indexBlock = nil - } - if r.filterBlock != nil { - r.filterBlock.Release() - r.filterBlock = nil - } - r.reader = nil - r.cache = nil - r.bpool = nil - r.err = ErrReaderReleased -} - -// NewReader creates a new initialized table reader for the file. -// The fi, cache and bpool is optional and can be nil. -// -// The returned table reader instance is goroutine-safe. -func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) { - if f == nil { - return nil, errors.New("leveldb/table: nil file") - } - - r := &Reader{ - fi: fi, - reader: f, - cache: cache, - bpool: bpool, - o: o, - cmp: o.GetComparer(), - verifyChecksum: o.GetStrict(opt.StrictBlockChecksum), - } - - if size < footerLen { - r.err = r.newErrCorrupted(0, size, "table", "too small") - return r, nil - } - - footerPos := size - footerLen - var footer [footerLen]byte - if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF { - return nil, err - } - if string(footer[footerLen-len(magic):footerLen]) != magic { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number") - return r, nil - } - - var n int - // Decode the metaindex block handle. - r.metaBH, n = decodeBlockHandle(footer[:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle") - return r, nil - } - - // Decode the index block handle. - r.indexBH, n = decodeBlockHandle(footer[n:]) - if n == 0 { - r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle") - return r, nil - } - - // Read metaindex block. - metaBlock, err := r.readBlock(r.metaBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - - // Set data end. - r.dataEnd = int64(r.metaBH.offset) - - // Read metaindex. 
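An aside on the 48-byte footer handled just above in NewReader: it carries the metaindex and index block handles as uvarints, zero padding, and the 8-byte magic. A small sketch that builds a synthetic footer and decodes it the same way; the handle values are arbitrary, and the constants repeat the ones defined in table.go further down.

package main

import (
    "encoding/binary"
    "fmt"
)

const footerLen = 48

var magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"

func main() {
    // Encode a synthetic footer: metaindex handle {offset 512, length 37},
    // index handle {offset 549, length 90}, padding, then the magic.
    var footer [footerLen]byte
    n := binary.PutUvarint(footer[:], 512)
    n += binary.PutUvarint(footer[n:], 37)
    n += binary.PutUvarint(footer[n:], 549)
    binary.PutUvarint(footer[n:], 90)
    copy(footer[footerLen-len(magic):], magic)

    // Decode it back, mirroring the footer handling in NewReader above.
    if string(footer[footerLen-len(magic):]) != magic {
        panic("bad magic number")
    }
    metaOff, n0 := binary.Uvarint(footer[:])
    metaLen, n1 := binary.Uvarint(footer[n0:])
    idxOff, n2 := binary.Uvarint(footer[n0+n1:])
    idxLen, _ := binary.Uvarint(footer[n0+n1+n2:])
    fmt.Println(metaOff, metaLen, idxOff, idxLen) // 512 37 549 90
}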
- metaIter := r.newBlockIter(metaBlock, nil, nil, true) - for metaIter.Next() { - key := string(metaIter.Key()) - if !strings.HasPrefix(key, "filter.") { - continue - } - fn := key[7:] - if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn { - r.filter = f0 - } else { - for _, f0 := range o.GetAltFilters() { - if f0.Name() == fn { - r.filter = f0 - break - } - } - } - if r.filter != nil { - filterBH, n := decodeBlockHandle(metaIter.Value()) - if n == 0 { - continue - } - r.filterBH = filterBH - // Update data end. - r.dataEnd = int64(filterBH.offset) - break - } - } - metaIter.Release() - metaBlock.Release() - - // Cache index and filter block locally, since we don't have global cache. - if cache == nil { - r.indexBlock, err = r.readBlock(r.indexBH, true) - if err != nil { - if errors.IsCorrupted(err) { - r.err = err - return r, nil - } else { - return nil, err - } - } - if r.filter != nil { - r.filterBlock, err = r.readFilterBlock(r.filterBH) - if err != nil { - if !errors.IsCorrupted(err) { - return nil, err - } - - // Don't use filter then. - r.filter = nil - } - } - } - - return r, nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go deleted file mode 100644 index beacdc1f0..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package table allows read and write sorted key/value. -package table - -import ( - "encoding/binary" -) - -/* -Table: - -Table is consist of one or more data blocks, an optional filter block -a metaindex block, an index block and a table footer. Metaindex block -is a special block used to keep parameters of the table, such as filter -block name and its block handle. Index block is a special block used to -keep record of data blocks offset and length, index block use one as -restart interval. The key used by index block are the last key of preceding -block, shorter separator of adjacent blocks or shorter successor of the -last key of the last block. Filter block is an optional block contains -sequence of filter data generated by a filter generator. - -Table data structure: - + optional - / - +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+ - | data block 1 | ... | data block n | filter block | metaindex block | index block | footer | - +--------------+--------------+--------------+--------------+-----------------+-------------+--------+ - - Each block followed by a 5-bytes trailer contains compression type and checksum. - -Table block trailer: - - +---------------------------+-------------------+ - | compression type (1-byte) | checksum (4-byte) | - +---------------------------+-------------------+ - - The checksum is a CRC-32 computed using Castagnoli's polynomial. Compression - type also included in the checksum. - -Table footer: - - +------------------- 40-bytes -------------------+ - / \ - +------------------------+--------------------+------+-----------------+ - | metaindex block handle / index block handle / ---- | magic (8-bytes) | - +------------------------+--------------------+------+-----------------+ - - The magic are first 64-bit of SHA-1 sum of "http://code.google.com/p/leveldb/". - -NOTE: All fixed-length integer are little-endian. 
-*/ - -/* -Block: - -Block is consist of one or more key/value entries and a block trailer. -Block entry shares key prefix with its preceding key until a restart -point reached. A block should contains at least one restart point. -First restart point are always zero. - -Block data structure: - - + restart point + restart point (depends on restart interval) - / / - +---------------+---------------+---------------+---------------+---------+ - | block entry 1 | block entry 2 | ... | block entry n | trailer | - +---------------+---------------+---------------+---------------+---------+ - -Key/value entry: - - +---- key len ----+ - / \ - +-------+---------+-----------+---------+--------------------+--------------+----------------+ - | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) | - +-----------------+---------------------+--------------------+--------------+----------------+ - - Block entry shares key prefix with its preceding key: - Conditions: - restart_interval=2 - entry one : key=deck,value=v1 - entry two : key=dock,value=v2 - entry three: key=duck,value=v3 - The entries will be encoded as follow: - - + restart point (offset=0) + restart point (offset=16) - / / - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" | - +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+ - \ / \ / \ / - +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+ - - The block trailer will contains two restart points: - - +------------+-----------+--------+ - | 0 | 16 | 2 | - +------------+-----------+---+----+ - \ / \ - +-- restart points --+ + restart points length - -Block trailer: - - +-- 4-bytes --+ - / \ - +-----------------+-----------------+-----------------+------------------------------+ - | restart point 1 | .... | restart point n | restart points len (4-bytes) | - +-----------------+-----------------+-----------------+------------------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -/* -Filter block: - -Filter block consist of one or more filter data and a filter block trailer. -The trailer contains filter data offsets, a trailer offset and a 1-byte base Lg. - -Filter block data structure: - - + offset 1 + offset 2 + offset n + trailer offset - / / / / - +---------------+---------------+---------------+---------+ - | filter data 1 | ... | filter data n | trailer | - +---------------+---------------+---------------+---------+ - -Filter block trailer: - - +- 4-bytes -+ - / \ - +---------------+---------------+---------------+-------------------------------+------------------+ - | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) | - +-------------- +---------------+---------------+-------------------------------+------------------+ - - -NOTE: All fixed-length integer are little-endian. -*/ - -const ( - blockTrailerLen = 5 - footerLen = 48 - - magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb" - - // The block type gives the per-block compression format. - // These constants are part of the file format and should not be changed. 
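A standalone sketch of the deck/dock/duck encoding worked through in the block notes above, following the same append logic as blockWriter in writer.go further down (the variable names here are my own). With restart_interval=2 the third entry is the second restart point; for these key and value lengths it works out to byte offset 17 (the first entry occupies 9 bytes and the second 8), one past the 16 shown in the diagram above.

package main

import (
    "encoding/binary"
    "fmt"
)

func sharedPrefixLen(a, b []byte) int {
    i, n := 0, len(a)
    if n > len(b) {
        n = len(b)
    }
    for i < n && a[i] == b[i] {
        i++
    }
    return i
}

func main() {
    const restartInterval = 2
    entries := [][2]string{{"deck", "v1"}, {"dock", "v2"}, {"duck", "v3"}}

    var (
        buf      []byte
        restarts []uint32
        prevKey  []byte
        tmp      = make([]byte, binary.MaxVarintLen64)
    )

    // Encode the entries, sharing key prefixes between restart points.
    for i, e := range entries {
        key, value := []byte(e[0]), []byte(e[1])
        shared := 0
        if i%restartInterval == 0 {
            restarts = append(restarts, uint32(len(buf)))
        } else {
            shared = sharedPrefixLen(prevKey, key)
        }
        for _, v := range []uint64{uint64(shared), uint64(len(key) - shared), uint64(len(value))} {
            n := binary.PutUvarint(tmp, v)
            buf = append(buf, tmp[:n]...)
        }
        buf = append(buf, key[shared:]...)
        buf = append(buf, value...)
        prevKey = key
    }

    // Append the block trailer: each restart offset, then the restart count,
    // as blockWriter.finish does in writer.go.
    for _, x := range append(restarts, uint32(len(restarts))) {
        var u [4]byte
        binary.LittleEndian.PutUint32(u[:], x)
        buf = append(buf, u[:]...)
    }

    fmt.Printf("entries+trailer: % x\n", buf)
    fmt.Println("restart offsets:", restarts) // [0 17] for these key/value lengths
}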
- blockTypeNoCompression = 0 - blockTypeSnappyCompression = 1 - - // Generate new filter every 2KB of data - filterBaseLg = 11 - filterBase = 1 << filterBaseLg -) - -type blockHandle struct { - offset, length uint64 -} - -func decodeBlockHandle(src []byte) (blockHandle, int) { - offset, n := binary.Uvarint(src) - length, m := binary.Uvarint(src[n:]) - if n == 0 || m == 0 { - return blockHandle{}, 0 - } - return blockHandle{offset, length}, n + m -} - -func encodeBlockHandle(dst []byte, b blockHandle) int { - n := binary.PutUvarint(dst, b.offset) - m := binary.PutUvarint(dst[n:], b.length) - return n + m -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go deleted file mode 100644 index 6465da6e3..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package table - -import ( - "testing" - - "github.com/syndtr/goleveldb/leveldb/testutil" -) - -func TestTable(t *testing.T) { - testutil.RunSuite(t, "Table Suite") -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go deleted file mode 100644 index 4b59b31f5..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package table - -import ( - "bytes" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tableWrapper struct { - *Reader -} - -func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { - return t.Reader.Find(key, false, nil) -} - -func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { - return t.Reader.Get(key, nil) -} - -func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.Reader.NewIterator(slice, nil) -} - -var _ = testutil.Defer(func() { - Describe("Table", func() { - Describe("approximate offset test", func() { - var ( - buf = &bytes.Buffer{} - o = &opt.Options{ - BlockSize: 1024, - Compression: opt.NoCompression, - } - ) - - // Building the table. 
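A quick round-trip check of the handle encoding above: decodeBlockHandle and encodeBlockHandle pack a block's (offset, length) pair as two uvarints. The helper names encode/decode and the sample values below are my own.

package main

import (
    "encoding/binary"
    "fmt"
)

type blockHandle struct {
    offset, length uint64
}

// Same encoding as encodeBlockHandle/decodeBlockHandle above.
func encode(dst []byte, b blockHandle) int {
    n := binary.PutUvarint(dst, b.offset)
    return n + binary.PutUvarint(dst[n:], b.length)
}

func decode(src []byte) (blockHandle, int) {
    offset, n := binary.Uvarint(src)
    length, m := binary.Uvarint(src[n:])
    if n == 0 || m == 0 {
        return blockHandle{}, 0
    }
    return blockHandle{offset, length}, n + m
}

func main() {
    var buf [2 * binary.MaxVarintLen64]byte
    in := blockHandle{offset: 1 << 20, length: 4096}
    n := encode(buf[:], in)
    out, m := decode(buf[:n])
    fmt.Println(n, m, out == in) // 5 5 true
}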
- tw := NewWriter(buf, o) - tw.Append([]byte("k01"), []byte("hello")) - tw.Append([]byte("k02"), []byte("hello2")) - tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) - tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) - tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) - tw.Append([]byte("k06"), []byte("hello3")) - tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) - err := tw.Close() - - It("Should be able to approximate offset of a key correctly", func() { - Expect(err).ShouldNot(HaveOccurred()) - - tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - Expect(err).ShouldNot(HaveOccurred()) - CheckOffset := func(key string, expect, threshold int) { - offset, err := tr.OffsetOf([]byte(key)) - Expect(err).ShouldNot(HaveOccurred()) - Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) - } - - CheckOffset("k0", 0, 0) - CheckOffset("k01a", 0, 0) - CheckOffset("k02", 0, 0) - CheckOffset("k03", 0, 0) - CheckOffset("k04", 10000, 1000) - CheckOffset("k04a", 210000, 1000) - CheckOffset("k05", 210000, 1000) - CheckOffset("k06", 510000, 1000) - CheckOffset("k07", 510000, 1000) - CheckOffset("xyz", 610000, 2000) - }) - }) - - Describe("read test", func() { - Build := func(kv testutil.KeyValue) testutil.DB { - o := &opt.Options{ - BlockSize: 512, - BlockRestartInterval: 3, - } - buf := &bytes.Buffer{} - - // Building the table. - tw := NewWriter(buf, o) - kv.Iterate(func(i int, key, value []byte) { - tw.Append(key, value) - }) - tw.Close() - - // Opening the table. - tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o) - return tableWrapper{tr} - } - Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { - return func() { - db := Build(*kv) - if body != nil { - body(db.(tableWrapper).Reader) - } - testutil.KeyValueTesting(nil, *kv, db, nil, nil) - } - } - - testutil.AllKeyValueTesting(nil, Build, nil, nil) - Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { - It("should have correct blocks number", func() { - indexBlock, err := r.readBlock(r.indexBH, true) - Expect(err).To(BeNil()) - Expect(indexBlock.restartsLen).Should(Equal(9)) - }) - })) - }) - }) -}) diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go deleted file mode 100644 index 274c95fad..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package table - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/syndtr/gosnappy/snappy" - - "github.com/syndtr/goleveldb/leveldb/comparer" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func sharedPrefixLen(a, b []byte) int { - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for i < n && a[i] == b[i] { - i++ - } - return i -} - -type blockWriter struct { - restartInterval int - buf util.Buffer - nEntries int - prevKey []byte - restarts []uint32 - scratch []byte -} - -func (w *blockWriter) append(key, value []byte) { - nShared := 0 - if w.nEntries%w.restartInterval == 0 { - w.restarts = append(w.restarts, uint32(w.buf.Len())) - } else { - nShared = sharedPrefixLen(w.prevKey, key) - } - n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) - n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) - w.prevKey = append(w.prevKey[:0], key...) - w.nEntries++ -} - -func (w *blockWriter) finish() { - // Write restarts entry. - if w.nEntries == 0 { - // Must have at least one restart entry. - w.restarts = append(w.restarts, 0) - } - w.restarts = append(w.restarts, uint32(len(w.restarts))) - for _, x := range w.restarts { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } -} - -func (w *blockWriter) reset() { - w.buf.Reset() - w.nEntries = 0 - w.restarts = w.restarts[:0] -} - -func (w *blockWriter) bytesLen() int { - restartsLen := len(w.restarts) - if restartsLen == 0 { - restartsLen = 1 - } - return w.buf.Len() + 4*restartsLen + 4 -} - -type filterWriter struct { - generator filter.FilterGenerator - buf util.Buffer - nKeys int - offsets []uint32 -} - -func (w *filterWriter) add(key []byte) { - if w.generator == nil { - return - } - w.generator.Add(key) - w.nKeys++ -} - -func (w *filterWriter) flush(offset uint64) { - if w.generator == nil { - return - } - for x := int(offset / filterBase); x > len(w.offsets); { - w.generate() - } -} - -func (w *filterWriter) finish() { - if w.generator == nil { - return - } - // Generate last keys. - - if w.nKeys > 0 { - w.generate() - } - w.offsets = append(w.offsets, uint32(w.buf.Len())) - for _, x := range w.offsets { - buf4 := w.buf.Alloc(4) - binary.LittleEndian.PutUint32(buf4, x) - } - w.buf.WriteByte(filterBaseLg) -} - -func (w *filterWriter) generate() { - // Record offset. - w.offsets = append(w.offsets, uint32(w.buf.Len())) - // Generate filters. - if w.nKeys > 0 { - w.generator.Generate(&w.buf) - w.nKeys = 0 - } -} - -// Writer is a table writer. -type Writer struct { - writer io.Writer - err error - // Options - cmp comparer.Comparer - filter filter.Filter - compression opt.Compression - blockSize int - - dataBlock blockWriter - indexBlock blockWriter - filterBlock filterWriter - pendingBH blockHandle - offset uint64 - nEntries int - // Scratch allocated enough for 5 uvarint. Block writer should not use - // first 20-bytes since it will be used to encode block handle, which - // then passed to the block writer itself. - scratch [50]byte - comparerScratch []byte - compressionScratch []byte -} - -func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) { - // Compress the buffer if necessary. - var b []byte - if compression == opt.SnappyCompression { - // Allocate scratch enough for compression and block trailer. 
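A sketch of the 5-byte block trailer that writeBlock appends here (and that table.go describes earlier in this diff): a compression-type byte followed by a little-endian CRC-32 over the block contents and the type byte, using Castagnoli's polynomial. The appendTrailer helper and payload are my own; note that the vendored util.NewCRC may additionally mask the raw CRC in the LevelDB style, so treat the exact checksum value produced here as an assumption rather than the byte-for-byte on-disk value.

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

const (
    blockTypeNoCompression = 0
    blockTrailerLen        = 5 // 1-byte compression type + 4-byte checksum
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// appendTrailer appends the compression-type byte, then a little-endian
// CRC-32 (Castagnoli) covering the block contents plus that type byte.
// The vendored util.NewCRC may apply extra masking on top of this raw CRC.
func appendTrailer(block []byte, compressionType byte) []byte {
    block = append(block, compressionType)
    sum := crc32.Checksum(block, castagnoli)
    var u [4]byte
    binary.LittleEndian.PutUint32(u[:], sum)
    return append(block, u[:]...)
}

func main() {
    payload := []byte("some block contents") // placeholder, not a real encoded block
    out := appendTrailer(payload, blockTypeNoCompression)
    fmt.Println(len(out) == len(payload)+blockTrailerLen) // true
    fmt.Printf("trailer: % x\n", out[len(payload):])
}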
- if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n { - w.compressionScratch = make([]byte, n) - } - var compressed []byte - compressed, err = snappy.Encode(w.compressionScratch, buf.Bytes()) - if err != nil { - return - } - n := len(compressed) - b = compressed[:n+blockTrailerLen] - b[n] = blockTypeSnappyCompression - } else { - tmp := buf.Alloc(blockTrailerLen) - tmp[0] = blockTypeNoCompression - b = buf.Bytes() - } - - // Calculate the checksum. - n := len(b) - 4 - checksum := util.NewCRC(b[:n]).Value() - binary.LittleEndian.PutUint32(b[n:], checksum) - - // Write the buffer to the file. - _, err = w.writer.Write(b) - if err != nil { - return - } - bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)} - w.offset += uint64(len(b)) - return -} - -func (w *Writer) flushPendingBH(key []byte) { - if w.pendingBH.length == 0 { - return - } - var separator []byte - if len(key) == 0 { - separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey) - } else { - separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key) - } - if separator == nil { - separator = w.dataBlock.prevKey - } else { - w.comparerScratch = separator - } - n := encodeBlockHandle(w.scratch[:20], w.pendingBH) - // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) - // Reset prev key of the data block. - w.dataBlock.prevKey = w.dataBlock.prevKey[:0] - // Clear pending block handle. - w.pendingBH = blockHandle{} -} - -func (w *Writer) finishBlock() error { - w.dataBlock.finish() - bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - return err - } - w.pendingBH = bh - // Reset the data block. - w.dataBlock.reset() - // Flush the filter block. - w.filterBlock.flush(w.offset) - return nil -} - -// Append appends key/value pair to the table. The keys passed must -// be in increasing order. -// -// It is safe to modify the contents of the arguments after Append returns. -func (w *Writer) Append(key, value []byte) error { - if w.err != nil { - return w.err - } - if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 { - w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key) - return w.err - } - - w.flushPendingBH(key) - // Append key/value pair to the data block. - w.dataBlock.append(key, value) - // Add key to the filter block. - w.filterBlock.add(key) - - // Finish the data block if block size target reached. - if w.dataBlock.bytesLen() >= w.blockSize { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.nEntries++ - return nil -} - -// BlocksLen returns number of blocks written so far. -func (w *Writer) BlocksLen() int { - n := w.indexBlock.nEntries - if w.pendingBH.length > 0 { - // Includes the pending block. - n++ - } - return n -} - -// EntriesLen returns number of entries added so far. -func (w *Writer) EntriesLen() int { - return w.nEntries -} - -// BytesLen returns number of bytes written so far. -func (w *Writer) BytesLen() int { - return int(w.offset) -} - -// Close will finalize the table. Calling Append is not possible -// after Close, but calling BlocksLen, EntriesLen and BytesLen -// is still possible. -func (w *Writer) Close() error { - if w.err != nil { - return w.err - } - - // Write the last data block. Or empty data block if there - // aren't any data blocks at all. 
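Append above rejects keys that do not arrive in strictly increasing order and poisons the writer with that error. A tiny sketch against the vendored API; the keys are made up and the options are left at their defaults.

package main

import (
    "bytes"
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/opt"
    "github.com/syndtr/goleveldb/leveldb/table"
)

func main() {
    tw := table.NewWriter(&bytes.Buffer{}, &opt.Options{})
    fmt.Println(tw.Append([]byte("b"), []byte("1"))) // <nil>
    fmt.Println(tw.Append([]byte("a"), []byte("2"))) // keys are not in increasing order: "b", "a"
    // Once an Append fails, the writer keeps returning that stored error.
}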
- if w.dataBlock.nEntries > 0 || w.nEntries == 0 { - if err := w.finishBlock(); err != nil { - w.err = err - return w.err - } - } - w.flushPendingBH(nil) - - // Write the filter block. - var filterBH blockHandle - w.filterBlock.finish() - if buf := &w.filterBlock.buf; buf.Len() > 0 { - filterBH, w.err = w.writeBlock(buf, opt.NoCompression) - if w.err != nil { - return w.err - } - } - - // Write the metaindex block. - if filterBH.length > 0 { - key := []byte("filter." + w.filter.Name()) - n := encodeBlockHandle(w.scratch[:20], filterBH) - w.dataBlock.append(key, w.scratch[:n]) - } - w.dataBlock.finish() - metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the index block. - w.indexBlock.finish() - indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression) - if err != nil { - w.err = err - return w.err - } - - // Write the table footer. - footer := w.scratch[:footerLen] - for i := range footer { - footer[i] = 0 - } - n := encodeBlockHandle(footer, metaindexBH) - encodeBlockHandle(footer[n:], indexBH) - copy(footer[footerLen-len(magic):], magic) - if _, err := w.writer.Write(footer); err != nil { - w.err = err - return w.err - } - w.offset += footerLen - - w.err = errors.New("leveldb/table: writer is closed") - return nil -} - -// NewWriter creates a new initialized table writer for the file. -// -// Table writer is not goroutine-safe. -func NewWriter(f io.Writer, o *opt.Options) *Writer { - w := &Writer{ - writer: f, - cmp: o.GetComparer(), - filter: o.GetFilter(), - compression: o.GetCompression(), - blockSize: o.GetBlockSize(), - comparerScratch: make([]byte, 0), - } - // data block - w.dataBlock.restartInterval = o.GetBlockRestartInterval() - // The first 20-bytes are used for encoding block handle. - w.dataBlock.scratch = w.scratch[20:] - // index block - w.indexBlock.restartInterval = 1 - w.indexBlock.scratch = w.scratch[20:] - // filter block - if w.filter != nil { - w.filterBlock.generator = w.filter.NewGenerator() - w.filterBlock.flush(0) - } - return w -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go deleted file mode 100644 index ec3f177a1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type DB interface{} - -type Put interface { - TestPut(key []byte, value []byte) error -} - -type Delete interface { - TestDelete(key []byte) error -} - -type Find interface { - TestFind(key []byte) (rkey, rvalue []byte, err error) -} - -type Get interface { - TestGet(key []byte) (value []byte, err error) -} - -type Has interface { - TestHas(key []byte) (ret bool, err error) -} - -type NewIterator interface { - TestNewIterator(slice *util.Range) iterator.Iterator -} - -type DBAct int - -func (a DBAct) String() string { - switch a { - case DBNone: - return "none" - case DBPut: - return "put" - case DBOverwrite: - return "overwrite" - case DBDelete: - return "delete" - case DBDeleteNA: - return "delete_na" - } - return "unknown" -} - -const ( - DBNone DBAct = iota - DBPut - DBOverwrite - DBDelete - DBDeleteNA -) - -type DBTesting struct { - Rand *rand.Rand - DB interface { - Get - Put - Delete - } - PostFn func(t *DBTesting) - Deleted, Present KeyValue - Act, LastAct DBAct - ActKey, LastActKey []byte -} - -func (t *DBTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *DBTesting) setAct(act DBAct, key []byte) { - t.LastAct, t.Act = t.Act, act - t.LastActKey, t.ActKey = t.ActKey, key -} - -func (t *DBTesting) text() string { - return fmt.Sprintf("last action was <%v> %q, <%v> %q", t.LastAct, t.LastActKey, t.Act, t.ActKey) -} - -func (t *DBTesting) Text() string { - return "DBTesting " + t.text() -} - -func (t *DBTesting) TestPresentKV(key, value []byte) { - rvalue, err := t.DB.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Get on key %q, %s", key, t.text()) - Expect(rvalue).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllPresent() { - t.Present.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestPresentKV(key, value) - }) -} - -func (t *DBTesting) TestDeletedKey(key []byte) { - _, err := t.DB.TestGet(key) - Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text()) -} - -func (t *DBTesting) TestAllDeleted() { - t.Deleted.IterateShuffled(t.Rand, func(i int, key, value []byte) { - t.TestDeletedKey(key) - }) -} - -func (t *DBTesting) TestAll() { - dn := t.Deleted.Len() - pn := t.Present.Len() - ShuffledIndex(t.Rand, dn+pn, 1, func(i int) { - if i >= dn { - key, value := t.Present.Index(i - dn) - t.TestPresentKV(key, value) - } else { - t.TestDeletedKey(t.Deleted.KeyAt(i)) - } - }) -} - -func (t *DBTesting) Put(key, value []byte) { - if new := t.Present.PutU(key, value); new { - t.setAct(DBPut, key) - } else { - t.setAct(DBOverwrite, key) - } - t.Deleted.Delete(key) - err := t.DB.TestPut(key, value) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestPresentKV(key, value) - t.post() -} - -func (t *DBTesting) PutRandom() bool { - if t.Deleted.Len() > 0 { - i := t.Rand.Intn(t.Deleted.Len()) - key, value := t.Deleted.Index(i) - t.Put(key, value) - return true - } - return false -} - -func (t *DBTesting) Delete(key []byte) { - if exist, value := t.Present.Delete(key); exist { - t.setAct(DBDelete, key) - t.Deleted.PutU(key, value) - } else { - t.setAct(DBDeleteNA, key) - } - err := t.DB.TestDelete(key) - Expect(err).ShouldNot(HaveOccurred(), t.Text()) - t.TestDeletedKey(key) - t.post() -} - -func (t *DBTesting) DeleteRandom() bool { - if t.Present.Len() > 0 { - i := t.Rand.Intn(t.Present.Len()) - 
t.Delete(t.Present.KeyAt(i)) - return true - } - return false -} - -func (t *DBTesting) RandomAct(round int) { - for i := 0; i < round; i++ { - if t.Rand.Int()%2 == 0 { - t.PutRandom() - } else { - t.DeleteRandom() - } - } -} - -func DoDBTesting(t *DBTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - - t.DeleteRandom() - t.PutRandom() - t.DeleteRandom() - t.DeleteRandom() - for i := t.Deleted.Len() / 2; i >= 0; i-- { - t.PutRandom() - } - t.RandomAct((t.Deleted.Len() + t.Present.Len()) * 10) - - // Additional iterator testing - if db, ok := t.DB.(NewIterator); ok { - iter := db.TestNewIterator(nil) - Expect(iter.Error()).NotTo(HaveOccurred()) - - it := IteratorTesting{ - KeyValue: t.Present, - Iter: iter, - } - - DoIteratorTesting(&it) - iter.Release() - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go deleted file mode 100644 index 82f3d0e81..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func RunSuite(t GinkgoTestingT, name string) { - RunDefer() - - SynchronizedBeforeSuite(func() []byte { - RunDefer("setup") - return nil - }, func(data []byte) {}) - SynchronizedAfterSuite(func() { - RunDefer("teardown") - }, func() {}) - - RegisterFailHandler(Fail) - RunSpecs(t, name) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go deleted file mode 100644 index df6d9db6a..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/iter.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -type IterAct int - -func (a IterAct) String() string { - switch a { - case IterNone: - return "none" - case IterFirst: - return "first" - case IterLast: - return "last" - case IterPrev: - return "prev" - case IterNext: - return "next" - case IterSeek: - return "seek" - case IterSOI: - return "soi" - case IterEOI: - return "eoi" - } - return "unknown" -} - -const ( - IterNone IterAct = iota - IterFirst - IterLast - IterPrev - IterNext - IterSeek - IterSOI - IterEOI -) - -type IteratorTesting struct { - KeyValue - Iter iterator.Iterator - Rand *rand.Rand - PostFn func(t *IteratorTesting) - Pos int - Act, LastAct IterAct - - once bool -} - -func (t *IteratorTesting) init() { - if !t.once { - t.Pos = -1 - t.once = true - } -} - -func (t *IteratorTesting) post() { - if t.PostFn != nil { - t.PostFn(t) - } -} - -func (t *IteratorTesting) setAct(act IterAct) { - t.LastAct, t.Act = t.Act, act -} - -func (t *IteratorTesting) text() string { - return fmt.Sprintf("at pos %d and last action was <%v> -> <%v>", t.Pos, t.LastAct, t.Act) -} - -func (t *IteratorTesting) Text() string { - return "IteratorTesting is " + t.text() -} - -func (t *IteratorTesting) IsFirst() bool { - t.init() - return t.Len() > 0 && t.Pos == 0 -} - -func (t *IteratorTesting) IsLast() bool { - t.init() - return t.Len() > 0 && t.Pos == t.Len()-1 -} - -func (t *IteratorTesting) TestKV() { - t.init() - key, value := t.Index(t.Pos) - Expect(t.Iter.Key()).NotTo(BeNil()) - Expect(t.Iter.Key()).Should(Equal(key), "Key is invalid, %s", t.text()) - Expect(t.Iter.Value()).Should(Equal(value), "Value for key %q, %s", key, t.text()) -} - -func (t *IteratorTesting) First() { - t.init() - t.setAct(IterFirst) - - ok := t.Iter.First() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = 0 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Last() { - t.init() - t.setAct(IterLast) - - ok := t.Iter.Last() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Len() > 0 { - t.Pos = t.Len() - 1 - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = 0 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Next() { - t.init() - t.setAct(IterNext) - - ok := t.Iter.Next() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos < t.Len()-1 { - t.Pos++ - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = t.Len() - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Prev() { - t.init() - t.setAct(IterPrev) - - ok := t.Iter.Prev() - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if t.Pos > 0 { - t.Pos-- - Expect(ok).Should(BeTrue(), t.Text()) - t.TestKV() - } else { - t.Pos = -1 - Expect(ok).ShouldNot(BeTrue(), t.Text()) - } - t.post() -} - -func (t *IteratorTesting) Seek(i int) { - t.init() - t.setAct(IterSeek) - - key, _ := t.Index(i) - oldKey, _ := t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q, to pos %d, %s", oldKey, key, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekInexact(i int) { - t.init() - t.setAct(IterSeek) - var key0 []byte - key1, _ := t.Index(i) - if i > 0 { - key0, _ = t.Index(i - 1) - } - key := BytesSeparator(key0, key1) - oldKey, _ := 
t.IndexOrNil(t.Pos) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key1, i, t.text())) - - t.Pos = i - t.TestKV() - t.post() -} - -func (t *IteratorTesting) SeekKey(key []byte) { - t.init() - t.setAct(IterSeek) - oldKey, _ := t.IndexOrNil(t.Pos) - i := t.Search(key) - - ok := t.Iter.Seek(key) - Expect(t.Iter.Error()).ShouldNot(HaveOccurred()) - if i < t.Len() { - key_, _ := t.Index(i) - Expect(ok).Should(BeTrue(), fmt.Sprintf("Seek from key %q to %q (%q), to pos %d, %s", oldKey, key, key_, i, t.text())) - t.Pos = i - t.TestKV() - } else { - Expect(ok).ShouldNot(BeTrue(), fmt.Sprintf("Seek from key %q to %q, %s", oldKey, key, t.text())) - } - - t.Pos = i - t.post() -} - -func (t *IteratorTesting) SOI() { - t.init() - t.setAct(IterSOI) - Expect(t.Pos).Should(BeNumerically("<=", 0), t.Text()) - for i := 0; i < 3; i++ { - t.Prev() - } - t.post() -} - -func (t *IteratorTesting) EOI() { - t.init() - t.setAct(IterEOI) - Expect(t.Pos).Should(BeNumerically(">=", t.Len()-1), t.Text()) - for i := 0; i < 3; i++ { - t.Next() - } - t.post() -} - -func (t *IteratorTesting) WalkPrev(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos > 0; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically("<", old), t.Text()) - } -} - -func (t *IteratorTesting) WalkNext(fn func(t *IteratorTesting)) { - t.init() - for old := t.Pos; t.Pos < t.Len()-1; old = t.Pos { - fn(t) - Expect(t.Pos).Should(BeNumerically(">", old), t.Text()) - } -} - -func (t *IteratorTesting) PrevAll() { - t.WalkPrev(func(t *IteratorTesting) { - t.Prev() - }) -} - -func (t *IteratorTesting) NextAll() { - t.WalkNext(func(t *IteratorTesting) { - t.Next() - }) -} - -func DoIteratorTesting(t *IteratorTesting) { - if t.Rand == nil { - t.Rand = NewRand() - } - t.SOI() - t.NextAll() - t.First() - t.SOI() - t.NextAll() - t.EOI() - t.PrevAll() - t.Last() - t.EOI() - t.PrevAll() - t.SOI() - - t.NextAll() - t.PrevAll() - t.NextAll() - t.Last() - t.PrevAll() - t.First() - t.NextAll() - t.EOI() - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.SeekInexact(i) - }) - - ShuffledIndex(t.Rand, t.Len(), 1, func(i int) { - t.Seek(i) - if i%2 != 0 { - t.PrevAll() - t.SOI() - } else { - t.NextAll() - t.EOI() - } - }) - - for _, key := range []string{"", "foo", "bar", "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"} { - t.SeekKey([]byte(key)) - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go deleted file mode 100644 index 471d5708c..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kv.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package testutil - -import ( - "fmt" - "math/rand" - "sort" - "strings" - - "github.com/syndtr/goleveldb/leveldb/util" -) - -type KeyValueEntry struct { - key, value []byte -} - -type KeyValue struct { - entries []KeyValueEntry - nbytes int -} - -func (kv *KeyValue) Put(key, value []byte) { - if n := len(kv.entries); n > 0 && cmp.Compare(kv.entries[n-1].key, key) >= 0 { - panic(fmt.Sprintf("Put: keys are not in increasing order: %q, %q", kv.entries[n-1].key, key)) - } - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - kv.nbytes += len(key) + len(value) -} - -func (kv *KeyValue) PutString(key, value string) { - kv.Put([]byte(key), []byte(value)) -} - -func (kv *KeyValue) PutU(key, value []byte) bool { - if i, exist := kv.Get(key); !exist { - if i < kv.Len() { - kv.entries = append(kv.entries[:i+1], kv.entries[i:]...) - kv.entries[i] = KeyValueEntry{key, value} - } else { - kv.entries = append(kv.entries, KeyValueEntry{key, value}) - } - kv.nbytes += len(key) + len(value) - return true - } else { - kv.nbytes += len(value) - len(kv.ValueAt(i)) - kv.entries[i].value = value - } - return false -} - -func (kv *KeyValue) PutUString(key, value string) bool { - return kv.PutU([]byte(key), []byte(value)) -} - -func (kv *KeyValue) Delete(key []byte) (exist bool, value []byte) { - i, exist := kv.Get(key) - if exist { - value = kv.entries[i].value - kv.DeleteIndex(i) - } - return -} - -func (kv *KeyValue) DeleteIndex(i int) bool { - if i < kv.Len() { - kv.nbytes -= len(kv.KeyAt(i)) + len(kv.ValueAt(i)) - kv.entries = append(kv.entries[:i], kv.entries[i+1:]...) - return true - } - return false -} - -func (kv KeyValue) Len() int { - return len(kv.entries) -} - -func (kv *KeyValue) Size() int { - return kv.nbytes -} - -func (kv KeyValue) KeyAt(i int) []byte { - return kv.entries[i].key -} - -func (kv KeyValue) ValueAt(i int) []byte { - return kv.entries[i].value -} - -func (kv KeyValue) Index(i int) (key, value []byte) { - if i < 0 || i >= len(kv.entries) { - panic(fmt.Sprintf("Index #%d: out of range", i)) - } - return kv.entries[i].key, kv.entries[i].value -} - -func (kv KeyValue) IndexInexact(i int) (key_, key, value []byte) { - key, value = kv.Index(i) - var key0 []byte - var key1 = kv.KeyAt(i) - if i > 0 { - key0 = kv.KeyAt(i - 1) - } - key_ = BytesSeparator(key0, key1) - return -} - -func (kv KeyValue) IndexOrNil(i int) (key, value []byte) { - if i >= 0 && i < len(kv.entries) { - return kv.entries[i].key, kv.entries[i].value - } - return nil, nil -} - -func (kv KeyValue) IndexString(i int) (key, value string) { - key_, _value := kv.Index(i) - return string(key_), string(_value) -} - -func (kv KeyValue) Search(key []byte) int { - return sort.Search(kv.Len(), func(i int) bool { - return cmp.Compare(kv.KeyAt(i), key) >= 0 - }) -} - -func (kv KeyValue) SearchString(key string) int { - return kv.Search([]byte(key)) -} - -func (kv KeyValue) Get(key []byte) (i int, exist bool) { - i = kv.Search(key) - if i < kv.Len() && cmp.Compare(kv.KeyAt(i), key) == 0 { - exist = true - } - return -} - -func (kv KeyValue) GetString(key string) (i int, exist bool) { - return kv.Get([]byte(key)) -} - -func (kv KeyValue) Iterate(fn func(i int, key, value []byte)) { - for i, x := range kv.entries { - fn(i, x.key, x.value) - } -} - -func (kv KeyValue) IterateString(fn func(i int, key, value string)) { - kv.Iterate(func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateShuffled(rnd *rand.Rand, fn func(i int, key, value []byte)) { - ShuffledIndex(rnd, kv.Len(), 
1, func(i int) { - fn(i, kv.entries[i].key, kv.entries[i].value) - }) -} - -func (kv KeyValue) IterateShuffledString(rnd *rand.Rand, fn func(i int, key, value string)) { - kv.IterateShuffled(rnd, func(i int, key, value []byte) { - fn(i, string(key), string(value)) - }) -} - -func (kv KeyValue) IterateInexact(fn func(i int, key_, key, value []byte)) { - for i := range kv.entries { - key_, key, value := kv.IndexInexact(i) - fn(i, key_, key, value) - } -} - -func (kv KeyValue) IterateInexactString(fn func(i int, key_, key, value string)) { - kv.IterateInexact(func(i int, key_, key, value []byte) { - fn(i, string(key_), string(key), string(value)) - }) -} - -func (kv KeyValue) Clone() KeyValue { - return KeyValue{append([]KeyValueEntry{}, kv.entries...), kv.nbytes} -} - -func (kv KeyValue) Slice(start, limit int) KeyValue { - if start < 0 || limit > kv.Len() { - panic(fmt.Sprintf("Slice %d .. %d: out of range", start, limit)) - } else if limit < start { - panic(fmt.Sprintf("Slice %d .. %d: invalid range", start, limit)) - } - return KeyValue{append([]KeyValueEntry{}, kv.entries[start:limit]...), kv.nbytes} -} - -func (kv KeyValue) SliceKey(start, limit []byte) KeyValue { - start_ := 0 - limit_ := kv.Len() - if start != nil { - start_ = kv.Search(start) - } - if limit != nil { - limit_ = kv.Search(limit) - } - return kv.Slice(start_, limit_) -} - -func (kv KeyValue) SliceKeyString(start, limit string) KeyValue { - return kv.SliceKey([]byte(start), []byte(limit)) -} - -func (kv KeyValue) SliceRange(r *util.Range) KeyValue { - if r != nil { - return kv.SliceKey(r.Start, r.Limit) - } - return kv.Clone() -} - -func (kv KeyValue) Range(start, limit int) (r util.Range) { - if kv.Len() > 0 { - if start == kv.Len() { - r.Start = BytesAfter(kv.KeyAt(start - 1)) - } else { - r.Start = kv.KeyAt(start) - } - } - if limit < kv.Len() { - r.Limit = kv.KeyAt(limit) - } - return -} - -func KeyValue_EmptyKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("", "v") - return kv -} - -func KeyValue_EmptyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "") - kv.PutString("abcd", "") - return kv -} - -func KeyValue_OneKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("abc", "v") - return kv -} - -func KeyValue_BigValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("big1", strings.Repeat("1", 200000)) - return kv -} - -func KeyValue_SpecialKey() *KeyValue { - kv := &KeyValue{} - kv.PutString("\xff\xff", "v3") - return kv -} - -func KeyValue_MultipleKeyValue() *KeyValue { - kv := &KeyValue{} - kv.PutString("a", "v") - kv.PutString("aa", "v1") - kv.PutString("aaa", "v2") - kv.PutString("aaacccccccccc", "v2") - kv.PutString("aaaccccccccccd", "v3") - kv.PutString("aaaccccccccccf", "v4") - kv.PutString("aaaccccccccccfg", "v5") - kv.PutString("ab", "v6") - kv.PutString("abc", "v7") - kv.PutString("abcd", "v8") - kv.PutString("accccccccccccccc", "v9") - kv.PutString("b", "v10") - kv.PutString("bb", "v11") - kv.PutString("bc", "v12") - kv.PutString("c", "v13") - kv.PutString("c1", "v13") - kv.PutString("czzzzzzzzzzzzzz", "v14") - kv.PutString("fffffffffffffff", "v15") - kv.PutString("g11", "v15") - kv.PutString("g111", "v15") - kv.PutString("g111\xff", "v15") - kv.PutString("zz", "v16") - kv.PutString("zzzzzzz", "v16") - kv.PutString("zzzzzzzzzzzzzzzz", "v16") - return kv -} - -var keymap = []byte("012345678ABCDEFGHIJKLMNOPQRSTUVWXYabcdefghijklmnopqrstuvwxy") - -func KeyValue_Generate(rnd *rand.Rand, n, minlen, maxlen, vminlen, vmaxlen int) *KeyValue { - if rnd == nil { - rnd = NewRand() - } - if 
maxlen < minlen { - panic("max len should >= min len") - } - - rrand := func(min, max int) int { - if min == max { - return max - } - return rnd.Intn(max-min) + min - } - - kv := &KeyValue{} - endC := byte(len(keymap) - 1) - gen := make([]byte, 0, maxlen) - for i := 0; i < n; i++ { - m := rrand(minlen, maxlen) - last := gen - retry: - gen = last[:m] - if k := len(last); m > k { - for j := k; j < m; j++ { - gen[j] = 0 - } - } else { - for j := m - 1; j >= 0; j-- { - c := last[j] - if c == endC { - continue - } - gen[j] = c + 1 - for j += 1; j < m; j++ { - gen[j] = 0 - } - goto ok - } - if m < maxlen { - m++ - goto retry - } - panic(fmt.Sprintf("only able to generate %d keys out of %d keys, try increasing max len", kv.Len(), n)) - ok: - } - key := make([]byte, m) - for j := 0; j < m; j++ { - key[j] = keymap[gen[j]] - } - value := make([]byte, rrand(vminlen, vmaxlen)) - for n := copy(value, []byte(fmt.Sprintf("v%d", i))); n < len(value); n++ { - value[n] = 'x' - } - kv.Put(key, value) - } - return kv -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go deleted file mode 100644 index a0b58f0e7..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "fmt" - "math/rand" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/util" -) - -func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) { - if rnd == nil { - rnd = NewRand() - } - - if p == nil { - BeforeEach(func() { - p = setup(kv) - }) - if teardown != nil { - AfterEach(func() { - teardown(p) - }) - } - } - - It("Should find all keys with Find", func() { - if db, ok := p.(Find); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rkey, rvalue, err := db.TestFind(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rkey).Should(Equal(key), "Key") - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. - rkey, rvalue, err = db.TestFind(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key) - Expect(rkey).Should(Equal(key)) - Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key) - }) - } - }) - - It("Should return error if the key is not present", func() { - if db, ok := p.(Find); ok { - var key []byte - if kv.Len() > 0 { - key_, _ := kv.Index(kv.Len() - 1) - key = BytesAfter(key_) - } - rkey, _, err := db.TestFind(key) - Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - - It("Should only find exact key with Get", func() { - if db, ok := p.(Get); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, value := kv.IndexInexact(i) - - // Using exact key. - rvalue, err := db.TestGet(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(rvalue).Should(Equal(value), "Value for key %q", key) - - // Using inexact key. 
- if len(key_) > 0 { - _, err = db.TestGet(key_) - Expect(err).Should(HaveOccurred(), "Error for key %q", key_) - Expect(err).Should(Equal(errors.ErrNotFound)) - } - }) - } - }) - - It("Should only find present key with Has", func() { - if db, ok := p.(Has); ok { - ShuffledIndex(nil, kv.Len(), 1, func(i int) { - key_, key, _ := kv.IndexInexact(i) - - // Using exact key. - ret, err := db.TestHas(key) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key) - Expect(ret).Should(BeTrue(), "False for key %q", key) - - // Using inexact key. - if len(key_) > 0 { - ret, err = db.TestHas(key_) - Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_) - Expect(ret).ShouldNot(BeTrue(), "True for key %q", key) - } - }) - } - }) - - TestIter := func(r *util.Range, _kv KeyValue) { - if db, ok := p.(NewIterator); ok { - iter := db.TestNewIterator(r) - Expect(iter.Error()).ShouldNot(HaveOccurred()) - - t := IteratorTesting{ - KeyValue: _kv, - Iter: iter, - } - - DoIteratorTesting(&t) - iter.Release() - } - } - - It("Should iterates and seeks correctly", func(done Done) { - TestIter(nil, kv.Clone()) - done <- true - }, 3.0) - - RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) { - type slice struct { - r *util.Range - start, limit int - } - - key_, _, _ := kv.IndexInexact(i) - for _, x := range []slice{ - {&util.Range{Start: key_, Limit: nil}, i, kv.Len()}, - {&util.Range{Start: nil, Limit: key_}, 0, i}, - } { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) { - TestIter(x.r, kv.Slice(x.start, x.limit)) - done <- true - }, 3.0) - } - }) - - RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) { - It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) { - r := kv.Range(start, limit) - TestIter(&r, kv.Slice(start, limit)) - done <- true - }, 3.0) - }) -} - -func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) { - Test := func(kv *KeyValue) func() { - return func() { - var p DB - if setup != nil { - Defer("setup", func() { - p = setup(*kv) - }) - } - if teardown != nil { - Defer("teardown", func() { - teardown(p) - }) - } - if body != nil { - p = body(*kv) - } - KeyValueTesting(rnd, *kv, p, func(KeyValue) DB { - return p - }, nil) - } - } - - Describe("with no key/value (empty)", Test(&KeyValue{})) - Describe("with empty key", Test(KeyValue_EmptyKey())) - Describe("with empty value", Test(KeyValue_EmptyValue())) - Describe("with one key/value", Test(KeyValue_OneKeyValue())) - Describe("with big value", Test(KeyValue_BigValue())) - Describe("with special key", Test(KeyValue_SpecialKey())) - Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue())) - Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120))) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go deleted file mode 100644 index 59c496d54..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - storageMu sync.Mutex - storageUseFS bool = true - storageKeepFS bool = false - storageNum int -) - -type StorageMode int - -const ( - ModeOpen StorageMode = 1 << iota - ModeCreate - ModeRemove - ModeRead - ModeWrite - ModeSync - ModeClose -) - -const ( - modeOpen = iota - modeCreate - modeRemove - modeRead - modeWrite - modeSync - modeClose - - modeCount -) - -const ( - typeManifest = iota - typeJournal - typeTable - typeTemp - - typeCount -) - -const flattenCount = modeCount * typeCount - -func flattenType(m StorageMode, t storage.FileType) int { - var x int - switch m { - case ModeOpen: - x = modeOpen - case ModeCreate: - x = modeCreate - case ModeRemove: - x = modeRemove - case ModeRead: - x = modeRead - case ModeWrite: - x = modeWrite - case ModeSync: - x = modeSync - case ModeClose: - x = modeClose - default: - panic("invalid storage mode") - } - x *= typeCount - switch t { - case storage.TypeManifest: - return x + typeManifest - case storage.TypeJournal: - return x + typeJournal - case storage.TypeTable: - return x + typeTable - case storage.TypeTemp: - return x + typeTemp - default: - panic("invalid file type") - } -} - -func listFlattenType(m StorageMode, t storage.FileType) []int { - ret := make([]int, 0, flattenCount) - add := func(x int) { - x *= typeCount - switch { - case t&storage.TypeManifest != 0: - ret = append(ret, x+typeManifest) - case t&storage.TypeJournal != 0: - ret = append(ret, x+typeJournal) - case t&storage.TypeTable != 0: - ret = append(ret, x+typeTable) - case t&storage.TypeTemp != 0: - ret = append(ret, x+typeTemp) - } - } - switch { - case m&ModeOpen != 0: - add(modeOpen) - case m&ModeCreate != 0: - add(modeCreate) - case m&ModeRemove != 0: - add(modeRemove) - case m&ModeRead != 0: - add(modeRead) - case m&ModeWrite != 0: - add(modeWrite) - case m&ModeSync != 0: - add(modeSync) - case m&ModeClose != 0: - add(modeClose) - } - return ret -} - -func packFile(num uint64, t storage.FileType) uint64 { - if num>>(64-typeCount) != 0 { - panic("overflow") - } - return num<> typeCount, storage.FileType(x) & storage.TypeAll -} - -type emulatedError struct { - err error -} - -func (err emulatedError) Error() string { - return fmt.Sprintf("emulated storage error: %v", err.err) -} - -type storageLock struct { - s *Storage - r util.Releaser -} - -func (l storageLock) Release() { - l.r.Release() - l.s.logI("storage lock released") -} - -type reader struct { - f *file - storage.Reader -} - -func (r *reader) Read(p []byte) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.Read(p) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err) - } - return -} - -func (r *reader) ReadAt(p []byte, off int64) (n int, err error) { - err = r.f.s.emulateError(ModeRead, r.f.Type()) - if err == nil { - r.f.s.stall(ModeRead, r.f.Type()) - n, err = r.Reader.ReadAt(p, off) - } - r.f.s.count(ModeRead, r.f.Type(), n) - if err != nil && err != io.EOF { - r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err) - } - return -} - -func (r *reader) Close() (err error) { - return r.f.doClose(r.Reader) -} - -type writer struct { - f *file - storage.Writer -} - -func (w *writer) Write(p []byte) (n int, err error) { - err = 
w.f.s.emulateError(ModeWrite, w.f.Type()) - if err == nil { - w.f.s.stall(ModeWrite, w.f.Type()) - n, err = w.Writer.Write(p) - } - w.f.s.count(ModeWrite, w.f.Type(), n) - if err != nil && err != io.EOF { - w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err) - } - return -} - -func (w *writer) Sync() (err error) { - err = w.f.s.emulateError(ModeSync, w.f.Type()) - if err == nil { - w.f.s.stall(ModeSync, w.f.Type()) - err = w.Writer.Sync() - } - w.f.s.count(ModeSync, w.f.Type(), 0) - if err != nil { - w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err) - } - return -} - -func (w *writer) Close() (err error) { - return w.f.doClose(w.Writer) -} - -type file struct { - s *Storage - storage.File -} - -func (f *file) pack() uint64 { - return packFile(f.Num(), f.Type()) -} - -func (f *file) assertOpen() { - ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()]) -} - -func (f *file) doClose(closer io.Closer) (err error) { - err = f.s.emulateError(ModeClose, f.Type()) - if err == nil { - f.s.stall(ModeClose, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type()) - err = closer.Close() - } - f.s.countNB(ModeClose, f.Type(), 0) - writer := f.s.opens[f.pack()] - if err != nil { - f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err) - } else { - f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer) - delete(f.s.opens, f.pack()) - } - return -} - -func (f *file) Open() (r storage.Reader, err error) { - err = f.s.emulateError(ModeOpen, f.Type()) - if err == nil { - f.s.stall(ModeOpen, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeOpen, f.Type(), 0) - r, err = f.File.Open() - } - if err != nil { - f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = false - r = &reader{f, r} - } - return -} - -func (f *file) Create() (w storage.Writer, err error) { - err = f.s.emulateError(ModeCreate, f.Type()) - if err == nil { - f.s.stall(ModeCreate, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeCreate, f.Type(), 0) - w, err = f.File.Create() - } - if err != nil { - f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file created, num=%d type=%v", f.Num(), f.Type()) - f.s.opens[f.pack()] = true - w = &writer{f, w} - } - return -} - -func (f *file) Remove() (err error) { - err = f.s.emulateError(ModeRemove, f.Type()) - if err == nil { - f.s.stall(ModeRemove, f.Type()) - } - f.s.mu.Lock() - defer f.s.mu.Unlock() - if err == nil { - f.assertOpen() - f.s.countNB(ModeRemove, f.Type(), 0) - err = f.File.Remove() - } - if err != nil { - f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err) - } else { - f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type()) - } - return -} - -type Storage struct { - storage.Storage - closeFn func() error - - lmu sync.Mutex - lb bytes.Buffer - - mu sync.Mutex - // Open files, true=writer, false=reader - opens map[uint64]bool - counters [flattenCount]int - bytesCounter [flattenCount]int64 - emulatedError [flattenCount]error - stallCond sync.Cond - 
stalled [flattenCount]bool -} - -func (s *Storage) log(skip int, str string) { - s.lmu.Lock() - defer s.lmu.Unlock() - _, file, line, ok := runtime.Caller(skip + 2) - if ok { - // Truncate file name at last file name separator. - if index := strings.LastIndex(file, "/"); index >= 0 { - file = file[index+1:] - } else if index = strings.LastIndex(file, "\\"); index >= 0 { - file = file[index+1:] - } - } else { - file = "???" - line = 1 - } - fmt.Fprintf(&s.lb, "%s:%d: ", file, line) - lines := strings.Split(str, "\n") - if l := len(lines); l > 1 && lines[l-1] == "" { - lines = lines[:l-1] - } - for i, line := range lines { - if i > 0 { - s.lb.WriteString("\n\t") - } - s.lb.WriteString(line) - } - s.lb.WriteByte('\n') -} - -func (s *Storage) logISkip(skip int, format string, args ...interface{}) { - pc, _, _, ok := runtime.Caller(skip + 1) - if ok { - if f := runtime.FuncForPC(pc); f != nil { - fname := f.Name() - if index := strings.LastIndex(fname, "."); index >= 0 { - fname = fname[index+1:] - } - format = fname + ": " + format - } - } - s.log(skip+1, fmt.Sprintf(format, args...)) -} - -func (s *Storage) logI(format string, args ...interface{}) { - s.logISkip(1, format, args...) -} - -func (s *Storage) Log(str string) { - s.log(1, "Log: "+str) - s.Storage.Log(str) -} - -func (s *Storage) Lock() (r util.Releaser, err error) { - r, err = s.Storage.Lock() - if err != nil { - s.logI("storage locking failed, err=%v", err) - } else { - s.logI("storage locked") - r = storageLock{s, r} - } - return -} - -func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File { - return &file{s, s.Storage.GetFile(num, t)} -} - -func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) { - rfiles, err := s.Storage.GetFiles(t) - if err != nil { - s.logI("get files failed, err=%v", err) - return - } - files = make([]storage.File, len(rfiles)) - for i, f := range rfiles { - files[i] = &file{s, f} - } - s.logI("get files, type=0x%x count=%d", int(t), len(files)) - return -} - -func (s *Storage) GetManifest() (f storage.File, err error) { - manifest, err := s.Storage.GetManifest() - if err != nil { - if !os.IsNotExist(err) { - s.logI("get manifest failed, err=%v", err) - } - return - } - s.logI("get manifest, num=%d", manifest.Num()) - return &file{s, manifest}, nil -} - -func (s *Storage) SetManifest(f storage.File) error { - f_, ok := f.(*file) - ExpectWithOffset(1, ok).To(BeTrue()) - ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest)) - err := s.Storage.SetManifest(f_.File) - if err != nil { - s.logI("set manifest failed, err=%v", err) - } else { - s.logI("set manifest, num=%d", f_.Num()) - } - return err -} - -func (s *Storage) openFiles() string { - out := "Open files:" - for x, writer := range s.opens { - num, t := unpackFile(x) - out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer) - } - return out -} - -func (s *Storage) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles()) - err := s.Storage.Close() - if err != nil { - s.logI("storage closing failed, err=%v", err) - } else { - s.logI("storage closed") - } - if s.closeFn != nil { - if err1 := s.closeFn(); err1 != nil { - s.logI("close func error, err=%v", err1) - } - } - return err -} - -func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) { - s.counters[flattenType(m, t)]++ - s.bytesCounter[flattenType(m, t)] += int64(n) -} - -func (s *Storage) count(m StorageMode, t storage.FileType, n int) { - s.mu.Lock() - defer 
s.mu.Unlock() - s.countNB(m, t, n) -} - -func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) { - for _, x := range listFlattenType(m, t) { - s.counters[x] = 0 - s.bytesCounter[x] = 0 - } -} - -func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) { - for _, x := range listFlattenType(m, t) { - count += s.counters[x] - bytes += s.bytesCounter[x] - } - return -} - -func (s *Storage) emulateError(m StorageMode, t storage.FileType) error { - s.mu.Lock() - defer s.mu.Unlock() - err := s.emulatedError[flattenType(m, t)] - if err != nil { - return emulatedError{err} - } - return nil -} - -func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.emulatedError[x] = err - } -} - -func (s *Storage) stall(m StorageMode, t storage.FileType) { - x := flattenType(m, t) - s.mu.Lock() - defer s.mu.Unlock() - for s.stalled[x] { - s.stallCond.Wait() - } -} - -func (s *Storage) Stall(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = true - } -} - -func (s *Storage) Release(m StorageMode, t storage.FileType) { - s.mu.Lock() - defer s.mu.Unlock() - for _, x := range listFlattenType(m, t) { - s.stalled[x] = false - } - s.stallCond.Broadcast() -} - -func NewStorage() *Storage { - var stor storage.Storage - var closeFn func() error - if storageUseFS { - for { - storageMu.Lock() - num := storageNum - storageNum++ - storageMu.Unlock() - path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num)) - if _, err := os.Stat(path); os.IsNotExist(err) { - stor, err = storage.OpenFile(path) - ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path) - closeFn = func() error { - if storageKeepFS { - return nil - } - return os.RemoveAll(path) - } - break - } - } - } else { - stor = storage.NewMemStorage() - } - s := &Storage{ - Storage: stor, - closeFn: closeFn, - opens: make(map[uint64]bool), - } - s.stallCond.L = &s.mu - return s -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go deleted file mode 100644 index 97c5294b1..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package testutil - -import ( - "bytes" - "flag" - "math/rand" - "reflect" - "sync" - - "github.com/onsi/ginkgo/config" - - "github.com/syndtr/goleveldb/leveldb/comparer" -) - -var ( - runfn = make(map[string][]func()) - runmu sync.Mutex -) - -func Defer(args ...interface{}) bool { - var ( - group string - fn func() - ) - for _, arg := range args { - v := reflect.ValueOf(arg) - switch v.Kind() { - case reflect.String: - group = v.String() - case reflect.Func: - r := reflect.ValueOf(&fn).Elem() - r.Set(v) - } - } - if fn != nil { - runmu.Lock() - runfn[group] = append(runfn[group], fn) - runmu.Unlock() - } - return true -} - -func RunDefer(groups ...string) bool { - if len(groups) == 0 { - groups = append(groups, "") - } - runmu.Lock() - var runfn_ []func() - for _, group := range groups { - runfn_ = append(runfn_, runfn[group]...) 
- delete(runfn, group) - } - runmu.Unlock() - for _, fn := range runfn_ { - fn() - } - return runfn_ != nil -} - -func RandomSeed() int64 { - if !flag.Parsed() { - panic("random seed not initialized") - } - return config.GinkgoConfig.RandomSeed -} - -func NewRand() *rand.Rand { - return rand.New(rand.NewSource(RandomSeed())) -} - -var cmp = comparer.DefaultComparer - -func BytesSeparator(a, b []byte) []byte { - if bytes.Equal(a, b) { - return b - } - i, n := 0, len(a) - if n > len(b) { - n = len(b) - } - for ; i < n && (a[i] == b[i]); i++ { - } - x := append([]byte{}, a[:i]...) - if i < n { - if c := a[i] + 1; c < b[i] { - return append(x, c) - } - x = append(x, a[i]) - i++ - } - for ; i < len(a); i++ { - if c := a[i]; c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - if len(b) > i && b[i] > 0 { - return append(x, b[i]-1) - } - return append(x, 'x') -} - -func BytesAfter(b []byte) []byte { - var x []byte - for _, c := range b { - if c < 0xff { - return append(x, c+1) - } else { - x = append(x, c) - } - } - return append(x, 'x') -} - -func RandomIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - fn(rnd.Intn(n)) - } - return -} - -func ShuffledIndex(rnd *rand.Rand, n, round int, fn func(i int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - for _, i := range rnd.Perm(n) { - fn(i) - } - } - return -} - -func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) { - if rnd == nil { - rnd = NewRand() - } - for x := 0; x < round; x++ { - start := rnd.Intn(n) - length := 0 - if j := n - start; j > 0 { - length = rnd.Intn(j) - } - fn(start, start+length) - } - return -} - -func Max(x, y int) int { - if x > y { - return x - } - return y -} - -func Min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go deleted file mode 100644 index 25bf2b29f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - . 
"github.com/onsi/gomega" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/testutil" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type testingDB struct { - *DB - ro *opt.ReadOptions - wo *opt.WriteOptions - stor *testutil.Storage -} - -func (t *testingDB) TestPut(key []byte, value []byte) error { - return t.Put(key, value, t.wo) -} - -func (t *testingDB) TestDelete(key []byte) error { - return t.Delete(key, t.wo) -} - -func (t *testingDB) TestGet(key []byte) (value []byte, err error) { - return t.Get(key, t.ro) -} - -func (t *testingDB) TestHas(key []byte) (ret bool, err error) { - return t.Has(key, t.ro) -} - -func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { - return t.NewIterator(slice, t.ro) -} - -func (t *testingDB) TestClose() { - err := t.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) - err = t.stor.Close() - ExpectWithOffset(1, err).NotTo(HaveOccurred()) -} - -func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { - stor := testutil.NewStorage() - db, err := Open(stor, o) - // FIXME: This may be called from outside It, which may cause panic. - Expect(err).NotTo(HaveOccurred()) - return &testingDB{ - DB: db, - ro: ro, - wo: wo, - stor: stor, - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go deleted file mode 100644 index 1a5bf71a3..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "fmt" - "sort" - - "github.com/syndtr/goleveldb/leveldb/storage" -) - -func shorten(str string) string { - if len(str) <= 8 { - return str - } - return str[:3] + ".." + str[len(str)-3:] -} - -var bunits = [...]string{"", "Ki", "Mi", "Gi"} - -func shortenb(bytes int) string { - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%d%sB", bytes, bunits[i]) -} - -func sshortenb(bytes int) string { - if bytes == 0 { - return "~" - } - sign := "+" - if bytes < 0 { - sign = "-" - bytes *= -1 - } - i := 0 - for ; bytes > 1024 && i < 4; i++ { - bytes /= 1024 - } - return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i]) -} - -func sint(x int) string { - if x == 0 { - return "~" - } - sign := "+" - if x < 0 { - sign = "-" - x *= -1 - } - return fmt.Sprintf("%s%d", sign, x) -} - -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a > b { - return a - } - return b -} - -type files []storage.File - -func (p files) Len() int { - return len(p) -} - -func (p files) Less(i, j int) bool { - return p[i].Num() < p[j].Num() -} - -func (p files) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func (p files) sort() { - sort.Sort(p) -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go deleted file mode 100644 index 21de24255..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package util - -// This a copy of Go std bytes.Buffer with some modification -// and some features stripped. - -import ( - "bytes" - "io" -) - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. -} - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - switch { - case n < 0 || n > b.Len(): - panic("leveldb/util.Buffer: truncation out of range") - case n == 0: - // Reuse buffer space. - b.off = 0 - } - b.buf = b.buf[0 : b.off+n] -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Truncate(0) - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if b.buf == nil && n <= len(b.bootstrap) { - buf = b.bootstrap[0:] - } else if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - } - b.buf = buf - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Alloc allocs n bytes of slice from the buffer, growing the buffer as -// needed. If n is negative, Alloc will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. -func (b *Buffer) Alloc(n int) []byte { - if n < 0 { - panic("leveldb/util.Buffer.Alloc: negative count") - } - m := b.grow(n) - return b.buf[m:] -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with bytes.ErrTooLarge. 
-func (b *Buffer) Grow(n int) { - if n < 0 { - panic("leveldb/util.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with bytes.ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const MinRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with bytes.ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("leveldb/util.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// bytes.ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. 
-func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go deleted file mode 100644 index 2b8453d75..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "fmt" - "sync" - "sync/atomic" - "time" -) - -type buffer struct { - b []byte - miss int -} - -// BufferPool is a 'buffer pool'. 
-type BufferPool struct { - pool [6]chan []byte - size [5]uint32 - sizeMiss [5]uint32 - sizeHalf [5]uint32 - baseline [4]int - baseline0 int - - mu sync.RWMutex - closed bool - closeC chan struct{} - - get uint32 - put uint32 - half uint32 - less uint32 - equal uint32 - greater uint32 - miss uint32 -} - -func (p *BufferPool) poolNum(n int) int { - if n <= p.baseline0 && n > p.baseline0/2 { - return 0 - } - for i, x := range p.baseline { - if n <= x { - return i + 1 - } - } - return len(p.baseline) + 1 -} - -// Get returns buffer with length of n. -func (p *BufferPool) Get(n int) []byte { - if p == nil { - return make([]byte, n) - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return make([]byte, n) - } - - atomic.AddUint32(&p.get, 1) - - poolNum := p.poolNum(n) - pool := p.pool[poolNum] - if poolNum == 0 { - // Fast path. - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - select { - case pool <- b: - default: - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - } - default: - atomic.AddUint32(&p.miss, 1) - } - - return make([]byte, n, p.baseline0) - } else { - sizePtr := &p.size[poolNum-1] - - select { - case b := <-pool: - switch { - case cap(b) > n: - if cap(b)-n >= n { - atomic.AddUint32(&p.half, 1) - sizeHalfPtr := &p.sizeHalf[poolNum-1] - if atomic.AddUint32(sizeHalfPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(cap(b)/2)) - atomic.StoreUint32(sizeHalfPtr, 0) - } else { - select { - case pool <- b: - default: - } - } - return make([]byte, n) - } else { - atomic.AddUint32(&p.less, 1) - return b[:n] - } - case cap(b) == n: - atomic.AddUint32(&p.equal, 1) - return b[:n] - default: - atomic.AddUint32(&p.greater, 1) - if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) { - select { - case pool <- b: - default: - } - } - } - default: - atomic.AddUint32(&p.miss, 1) - } - - if size := atomic.LoadUint32(sizePtr); uint32(n) > size { - if size == 0 { - atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n)) - } else { - sizeMissPtr := &p.sizeMiss[poolNum-1] - if atomic.AddUint32(sizeMissPtr, 1) == 20 { - atomic.StoreUint32(sizePtr, uint32(n)) - atomic.StoreUint32(sizeMissPtr, 0) - } - } - return make([]byte, n) - } else { - return make([]byte, n, size) - } - } -} - -// Put adds given buffer to the pool. -func (p *BufferPool) Put(b []byte) { - if p == nil { - return - } - - p.mu.RLock() - defer p.mu.RUnlock() - - if p.closed { - return - } - - atomic.AddUint32(&p.put, 1) - - pool := p.pool[p.poolNum(cap(b))] - select { - case pool <- b: - default: - } - -} - -func (p *BufferPool) Close() { - if p == nil { - return - } - - p.mu.Lock() - if !p.closed { - p.closed = true - p.closeC <- struct{}{} - } - p.mu.Unlock() -} - -func (p *BufferPool) String() string { - if p == nil { - return "" - } - - return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}", - p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss) -} - -func (p *BufferPool) drain() { - ticker := time.NewTicker(2 * time.Second) - for { - select { - case <-ticker.C: - for _, ch := range p.pool { - select { - case <-ch: - default: - } - } - case <-p.closeC: - close(p.closeC) - for _, ch := range p.pool { - close(ch) - } - return - } - } -} - -// NewBufferPool creates a new initialized 'buffer pool'. 
-func NewBufferPool(baseline int) *BufferPool { - if baseline <= 0 { - panic("baseline can't be <= 0") - } - p := &BufferPool{ - baseline0: baseline, - baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4}, - closeC: make(chan struct{}, 1), - } - for i, cap := range []int{2, 2, 4, 4, 2, 1} { - p.pool[i] = make(chan []byte, cap) - } - go p.drain() - return p -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go deleted file mode 100644 index 87d96739c..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "io" - "math/rand" - "runtime" - "testing" -) - -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. - -func init() { - testBytes = make([]byte, N) - for i := 0; i < N; i++ { - testBytes[i] = 'a' + byte(i%26) - } - data = string(testBytes) -} - -// Verify that contents of buf match the string s. -func check(t *testing.T, testname string, buf *Buffer, s string) { - bytes := buf.Bytes() - str := buf.String() - if buf.Len() != len(bytes) { - t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) - } - - if buf.Len() != len(str) { - t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) - } - - if buf.Len() != len(s) { - t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) - } - - if string(bytes) != s { - t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) - } -} - -// Fill buf through n writes of byte slice fub. -// The initial contents of buf corresponds to the string s; -// the result is the final contents of buf returned as a string. -func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { - check(t, testname+" (fill 1)", buf, s) - for ; n > 0; n-- { - m, err := buf.Write(fub) - if m != len(fub) { - t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) - } - if err != nil { - t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) - } - s += string(fub) - check(t, testname+" (fill 4)", buf, s) - } - return s -} - -func TestNewBuffer(t *testing.T) { - buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) -} - -// Empty buf through repeated reads into fub. -// The initial contents of buf corresponds to the string s. 
-func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { - check(t, testname+" (empty 1)", buf, s) - - for { - n, err := buf.Read(fub) - if n == 0 { - break - } - if err != nil { - t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) - } - s = s[n:] - check(t, testname+" (empty 3)", buf, s) - } - - check(t, testname+" (empty 4)", buf, "") -} - -func TestBasicOperations(t *testing.T) { - var buf Buffer - - for i := 0; i < 5; i++ { - check(t, "TestBasicOperations (1)", &buf, "") - - buf.Reset() - check(t, "TestBasicOperations (2)", &buf, "") - - buf.Truncate(0) - check(t, "TestBasicOperations (3)", &buf, "") - - n, err := buf.Write([]byte(data[0:1])) - if n != 1 { - t.Errorf("wrote 1 byte, but n == %d", n) - } - if err != nil { - t.Errorf("err should always be nil, but err == %s", err) - } - check(t, "TestBasicOperations (4)", &buf, "a") - - buf.WriteByte(data[1]) - check(t, "TestBasicOperations (5)", &buf, "ab") - - n, err = buf.Write([]byte(data[2:26])) - if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) - } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) - - buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) - - buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) - - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) - empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - - buf.WriteByte(data[1]) - c, err := buf.ReadByte() - if err != nil { - t.Error("ReadByte unexpected eof") - } - if c != data[1] { - t.Errorf("ReadByte wrong value c=%v", c) - } - c, err = buf.ReadByte() - if err == nil { - t.Error("ReadByte unexpected not eof") - } - } -} - -func TestLargeByteWrites(t *testing.T) { - var buf Buffer - limit := 30 - if testing.Short() { - limit = 9 - } - for i := 3; i < limit; i += 3 { - s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) - } - check(t, "TestLargeByteWrites (3)", &buf, "") -} - -func TestLargeByteReads(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) - } - check(t, "TestLargeByteReads (3)", &buf, "") -} - -func TestMixedReadsAndWrites(t *testing.T) { - var buf Buffer - s := "" - for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) - s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) - rlen := rand.Intn(len(data)) - fub := make([]byte, rlen) - n, _ := buf.Read(fub) - s = s[n:] - } - empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) -} - -func TestNil(t *testing.T) { - var b *Buffer - if b.String() != "" { - t.Errorf("expected ; got %q", b.String()) - } -} - -func TestReadFrom(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - b.ReadFrom(&buf) - empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) - } -} - -func TestWriteTo(t *testing.T) { - var buf Buffer - for i := 3; i < 30; i += 3 { - s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - var b Buffer - buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) - } -} - -func TestNext(t *testing.T) { - b := []byte{0, 1, 2, 3, 4} - tmp := make([]byte, 5) - for i := 0; i <= 5; 
i++ { - for j := i; j <= 5; j++ { - for k := 0; k <= 6; k++ { - // 0 <= i <= j <= 5; 0 <= k <= 6 - // Check that if we start with a buffer - // of length j at offset i and ask for - // Next(k), we get the right bytes. - buf := NewBuffer(b[0:j]) - n, _ := buf.Read(tmp[0:i]) - if n != i { - t.Fatalf("Read %d returned %d", i, n) - } - bb := buf.Next(k) - want := k - if want > j-i { - want = j - i - } - if len(bb) != want { - t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) - } - for l, v := range bb { - if v != byte(l+i) { - t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) - } - } - } - } - } -} - -var readBytesTests = []struct { - buffer string - delim byte - expected []string - err error -}{ - {"", 0, []string{""}, io.EOF}, - {"a\x00", 0, []string{"a\x00"}, nil}, - {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, - {"hello\x01world", 1, []string{"hello\x01"}, nil}, - {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, - {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, - {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, -} - -func TestReadBytes(t *testing.T) { - for _, test := range readBytesTests { - buf := NewBuffer([]byte(test.buffer)) - var err error - for _, expected := range test.expected { - var bytes []byte - bytes, err = buf.ReadBytes(test.delim) - if string(bytes) != expected { - t.Errorf("expected %q, got %q", expected, bytes) - } - if err != nil { - break - } - } - if err != test.err { - t.Errorf("expected error %v, got %v", test.err, err) - } - } -} - -func TestGrow(t *testing.T) { - x := []byte{'x'} - y := []byte{'y'} - tmp := make([]byte, 72) - for _, startLen := range []int{0, 100, 1000, 10000, 100000} { - xBytes := bytes.Repeat(x, startLen) - for _, growLen := range []int{0, 100, 1000, 10000, 100000} { - buf := NewBuffer(xBytes) - // If we read, this affects buf.off, which is good to test. - readBytes, _ := buf.Read(tmp) - buf.Grow(growLen) - yBytes := bytes.Repeat(y, growLen) - // Check no allocation occurs in write, as long as we're single-threaded. - var m1, m2 runtime.MemStats - runtime.ReadMemStats(&m1) - buf.Write(yBytes) - runtime.ReadMemStats(&m2) - if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { - t.Errorf("allocation occurred during write") - } - // Check that buffer has correct data. - if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { - t.Errorf("bad initial data at %d %d", startLen, growLen) - } - if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { - t.Errorf("bad written data at %d %d", startLen, growLen) - } - } - } -} - -// Was a bug: used to give EOF reading empty slice at EOF. -func TestReadEmptyAtEOF(t *testing.T) { - b := new(Buffer) - slice := make([]byte, 0) - n, err := b.Read(slice) - if err != nil { - t.Errorf("read error: %v", err) - } - if n != 0 { - t.Errorf("wrong count; got %d want 0", n) - } -} - -// Tests that we occasionally compact. Issue 5154. -func TestBufferGrowth(t *testing.T) { - var b Buffer - buf := make([]byte, 1024) - b.Write(buf[0:1]) - var cap0 int - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - if i == 0 { - cap0 = cap(b.buf) - } - } - cap1 := cap(b.buf) - // (*Buffer).grow allows for 2x capacity slop before sliding, - // so set our error threshold at 3x. - if cap1 > cap0*3 { - t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) - } -} - -// From Issue 5154. 
-func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf[0:1]) - for i := 0; i < 5<<10; i++ { - b.Write(buf) - b.Read(buf) - } - } -} - -// Check that we don't compact too often. From Issue 5154. -func BenchmarkBufferFullSmallReads(b *testing.B) { - buf := make([]byte, 1024) - for i := 0; i < b.N; i++ { - var b Buffer - b.Write(buf) - for b.Len()+20 < cap(b.buf) { - b.Write(buf[:10]) - } - for i := 0; i < 5<<10; i++ { - b.Read(buf[:1]) - b.Write(buf[:1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go deleted file mode 100644 index 631c9d610..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/crc32.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2011 The LevelDB-Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "hash/crc32" -) - -var table = crc32.MakeTable(crc32.Castagnoli) - -// CRC is a CRC-32 checksum computed using Castagnoli's polynomial. -type CRC uint32 - -// NewCRC creates a new crc based on the given bytes. -func NewCRC(b []byte) CRC { - return CRC(0).Update(b) -} - -// Update updates the crc with the given bytes. -func (c CRC) Update(b []byte) CRC { - return CRC(crc32.Update(uint32(c), table, b)) -} - -// Value returns a masked crc. -func (c CRC) Value() uint32 { - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go deleted file mode 100644 index 54903660f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/hash.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/binary" -) - -// Hash return hash of the given data. -func Hash(data []byte, seed uint32) uint32 { - // Similar to murmur hash - var m uint32 = 0xc6a4a793 - var r uint32 = 24 - h := seed ^ (uint32(len(data)) * m) - - buf := bytes.NewBuffer(data) - for buf.Len() >= 4 { - var w uint32 - binary.Read(buf, binary.LittleEndian, &w) - h += w - h *= m - h ^= (h >> 16) - } - - rest := buf.Bytes() - switch len(rest) { - default: - panic("not reached") - case 3: - h += uint32(rest[2]) << 16 - fallthrough - case 2: - h += uint32(rest[1]) << 8 - fallthrough - case 1: - h += uint32(rest[0]) - h *= m - h ^= (h >> r) - case 0: - } - - return h -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go deleted file mode 100644 index 1f7fdd41f..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -// +build go1.3 - -package util - -import ( - "sync" -) - -type Pool struct { - sync.Pool -} - -func NewPool(cap int) *Pool { - return &Pool{} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go deleted file mode 100644 index 27b8d03be..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// +build !go1.3 - -package util - -type Pool struct { - pool chan interface{} -} - -func (p *Pool) Get() interface{} { - select { - case x := <-p.pool: - return x - default: - return nil - } -} - -func (p *Pool) Put(x interface{}) { - select { - case p.pool <- x: - default: - } -} - -func NewPool(cap int) *Pool { - return &Pool{pool: make(chan interface{}, cap)} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go deleted file mode 100644 index 85159583d..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2014, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package util - -// Range is a key range. -type Range struct { - // Start of the key range, include in the range. - Start []byte - - // Limit of the key range, not include in the range. - Limit []byte -} - -// BytesPrefix returns key range that satisfy the given prefix. -// This only applicable for the standard 'bytes comparer'. -func BytesPrefix(prefix []byte) *Range { - var limit []byte - for i := len(prefix) - 1; i >= 0; i-- { - c := prefix[i] - if c < 0xff { - limit = make([]byte, i+1) - copy(limit, prefix) - limit[i] = c + 1 - break - } - } - return &Range{prefix, limit} -} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go deleted file mode 100644 index f35976865..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2013, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Package util provides utilities used throughout leveldb. -package util - -import ( - "errors" -) - -var ( - ErrReleased = errors.New("leveldb: resource already relesed") - ErrHasReleaser = errors.New("leveldb: releaser already defined") -) - -// Releaser is the interface that wraps the basic Release method. -type Releaser interface { - // Release releases associated resources. Release should always success - // and can be called multipe times without causing error. - Release() -} - -// ReleaseSetter is the interface that wraps the basic SetReleaser method. -type ReleaseSetter interface { - // SetReleaser associates the given releaser to the resources. The - // releaser will be called once coresponding resources released. - // Calling SetReleaser with nil will clear the releaser. - // - // This will panic if a releaser already present or coresponding - // resource is already released. 
Releaser should be cleared first - // before assigned a new one. - SetReleaser(releaser Releaser) -} - -// BasicReleaser provides basic implementation of Releaser and ReleaseSetter. -type BasicReleaser struct { - releaser Releaser - released bool -} - -// Released returns whether Release method already called. -func (r *BasicReleaser) Released() bool { - return r.released -} - -// Release implements Releaser.Release. -func (r *BasicReleaser) Release() { - if !r.released { - if r.releaser != nil { - r.releaser.Release() - r.releaser = nil - } - r.released = true - } -} - -// SetReleaser implements ReleaseSetter.SetReleaser. -func (r *BasicReleaser) SetReleaser(releaser Releaser) { - if r.released { - panic(ErrReleased) - } - if r.releaser != nil && releaser != nil { - panic(ErrHasReleaser) - } - r.releaser = releaser -} - -type NoopReleaser struct{} - -func (NoopReleaser) Release() {} diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go deleted file mode 100644 index 5ab7b53d3..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (c) 2012, Suryandaru Triandana -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package leveldb - -import ( - "sync/atomic" - "unsafe" - - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -type tSet struct { - level int - table *tFile -} - -type version struct { - s *session - - tables []tFiles - - // Level that should be compacted next and its compaction score. - // Score < 1 means compaction is not strictly needed. These fields - // are initialized by computeCompaction() - cLevel int - cScore float64 - - cSeek unsafe.Pointer - - ref int - // Succeeding version. - next *version -} - -func newVersion(s *session) *version { - return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())} -} - -func (v *version) releaseNB() { - v.ref-- - if v.ref > 0 { - return - } - if v.ref < 0 { - panic("negative version ref") - } - - tables := make(map[uint64]bool) - for _, tt := range v.next.tables { - for _, t := range tt { - num := t.file.Num() - tables[num] = true - } - } - - for _, tt := range v.tables { - for _, t := range tt { - num := t.file.Num() - if _, ok := tables[num]; !ok { - v.s.tops.remove(t) - } - } - } - - v.next.releaseNB() - v.next = nil -} - -func (v *version) release() { - v.s.vmu.Lock() - v.releaseNB() - v.s.vmu.Unlock() -} - -func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) { - ukey := ikey.ukey() - - // Walk tables level-by-level. - for level, tables := range v.tables { - if len(tables) == 0 { - continue - } - - if level == 0 { - // Level-0 files may overlap each other. Find all files that - // overlap ukey. - for _, t := range tables { - if t.overlaps(v.s.icmp, ukey, ukey) { - if !f(level, t) { - return - } - } - } - } else { - if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) { - t := tables[i] - if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 { - if !f(level, t) { - return - } - } - } - } - - if lf != nil && !lf(level) { - return - } - } -} - -func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) { - ukey := ikey.ukey() - - var ( - tset *tSet - tseek bool - - // Level-0. 
- zfound bool - zseq uint64 - zkt kType - zval []byte - ) - - err = ErrNotFound - - // Since entries never hope across level, finding key/value - // in smaller level make later levels irrelevant. - v.walkOverlapping(ikey, func(level int, t *tFile) bool { - if !tseek { - if tset == nil { - tset = &tSet{level, t} - } else if tset.table.consumeSeek() <= 0 { - tseek = true - tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset)) - } - } - - var ( - fikey, fval []byte - ferr error - ) - if noValue { - fikey, ferr = v.s.tops.findKey(t, ikey, ro) - } else { - fikey, fval, ferr = v.s.tops.find(t, ikey, ro) - } - switch ferr { - case nil: - case ErrNotFound: - return true - default: - err = ferr - return false - } - - if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil { - if v.s.icmp.uCompare(ukey, fukey) == 0 { - if level == 0 { - if fseq >= zseq { - zfound = true - zseq = fseq - zkt = fkt - zval = fval - } - } else { - switch fkt { - case ktVal: - value = fval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - } - } else { - err = fkerr - return false - } - - return true - }, func(level int) bool { - if zfound { - switch zkt { - case ktVal: - value = zval - err = nil - case ktDel: - default: - panic("leveldb: invalid iKey type") - } - return false - } - - return true - }) - - return -} - -func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) { - // Merge all level zero files together since they may overlap - for _, t := range v.tables[0] { - it := v.s.tops.newIterator(t, slice, ro) - its = append(its, it) - } - - strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader) - for _, tables := range v.tables[1:] { - if len(tables) == 0 { - continue - } - - it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict) - its = append(its, it) - } - - return -} - -func (v *version) newStaging() *versionStaging { - return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())} -} - -// Spawn a new version based on this version. -func (v *version) spawn(r *sessionRecord) *version { - staging := v.newStaging() - staging.commit(r) - return staging.finish() -} - -func (v *version) fillRecord(r *sessionRecord) { - for level, ts := range v.tables { - for _, t := range ts { - r.addTableFile(level, t) - } - } -} - -func (v *version) tLen(level int) int { - return len(v.tables[level]) -} - -func (v *version) offsetOf(ikey iKey) (n uint64, err error) { - for level, tables := range v.tables { - for _, t := range tables { - if v.s.icmp.Compare(t.imax, ikey) <= 0 { - // Entire file is before "ikey", so just add the file size - n += t.size - } else if v.s.icmp.Compare(t.imin, ikey) > 0 { - // Entire file is after "ikey", so ignore - if level > 0 { - // Files other than level 0 are sorted by meta->min, so - // no further files in this level will contain data for - // "ikey". - break - } - } else { - // "ikey" falls in the range for this table. Add the - // approximate offset of "ikey" within the table. 
- var nn uint64 - nn, err = v.s.tops.offsetOf(t, ikey) - if err != nil { - return 0, err - } - n += nn - } - } - } - - return -} - -func (v *version) pickLevel(umin, umax []byte) (level int) { - if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) { - var overlaps tFiles - maxLevel := v.s.o.GetMaxMemCompationLevel() - for ; level < maxLevel; level++ { - if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) { - break - } - overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false) - if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) { - break - } - } - } - - return -} - -func (v *version) computeCompaction() { - // Precomputed best level for next compaction - var bestLevel int = -1 - var bestScore float64 = -1 - - for level, tables := range v.tables { - var score float64 - if level == 0 { - // We treat level-0 specially by bounding the number of files - // instead of number of bytes for two reasons: - // - // (1) With larger write-buffer sizes, it is nice not to do too - // many level-0 compactions. - // - // (2) The files in level-0 are merged on every read and - // therefore we wish to avoid too many files when the individual - // file size is small (perhaps because of a small write-buffer - // setting, or very high compression ratios, or lots of - // overwrites/deletions). - score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger()) - } else { - score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level)) - } - - if score > bestScore { - bestLevel = level - bestScore = score - } - } - - v.cLevel = bestLevel - v.cScore = bestScore -} - -func (v *version) needCompaction() bool { - return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil -} - -type tablesScratch struct { - added map[uint64]atRecord - deleted map[uint64]struct{} -} - -type versionStaging struct { - base *version - tables []tablesScratch -} - -func (p *versionStaging) commit(r *sessionRecord) { - // Deleted tables. - for _, r := range r.deletedTables { - tm := &(p.tables[r.level]) - - if len(p.base.tables[r.level]) > 0 { - if tm.deleted == nil { - tm.deleted = make(map[uint64]struct{}) - } - tm.deleted[r.num] = struct{}{} - } - - if tm.added != nil { - delete(tm.added, r.num) - } - } - - // New tables. - for _, r := range r.addedTables { - tm := &(p.tables[r.level]) - - if tm.added == nil { - tm.added = make(map[uint64]atRecord) - } - tm.added[r.num] = r - - if tm.deleted != nil { - delete(tm.deleted, r.num) - } - } -} - -func (p *versionStaging) finish() *version { - // Build new version. - nv := newVersion(p.base.s) - for level, tm := range p.tables { - btables := p.base.tables[level] - - n := len(btables) + len(tm.added) - len(tm.deleted) - if n < 0 { - n = 0 - } - nt := make(tFiles, 0, n) - - // Base tables. - for _, t := range btables { - if _, ok := tm.deleted[t.file.Num()]; ok { - continue - } - if _, ok := tm.added[t.file.Num()]; ok { - continue - } - nt = append(nt, t) - } - - // New tables. - for _, r := range tm.added { - nt = append(nt, p.base.s.tableFileFromRecord(r)) - } - - // Sort tables. - if level == 0 { - nt.sortByNum() - } else { - nt.sortByKey(p.base.s.icmp) - } - nv.tables[level] = nt - } - - // Compute compaction score for new version. 
- nv.computeCompaction() - - return nv -} - -type versionReleaser struct { - v *version - once bool -} - -func (vr *versionReleaser) Release() { - v := vr.v - v.s.vmu.Lock() - if !vr.once { - v.releaseNB() - vr.once = true - } - v.s.vmu.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go deleted file mode 100644 index d93c1b9db..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" -) - -// ErrCorrupt reports that the input is invalid. -var ErrCorrupt = errors.New("snappy: corrupt input") - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n == 0 { - return 0, 0, ErrCorrupt - } - if uint64(int(v)) != v { - return 0, 0, errors.New("snappy: decoded block is too large") - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s += 1 - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go deleted file mode 100644 
index b2371db11..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) ([]byte, error) { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d], nil - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. 
- if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. - if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d], nil -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go deleted file mode 100644 index 2f1b790d0..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at http://code.google.com/p/snappy/ -package snappy - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. 
The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go deleted file mode 100644 index 7ba839244..000000000 --- a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "strings" - "testing" -) - -var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") - -func roundtrip(b, ebuf, dbuf []byte) error { - e, err := Encode(ebuf, b) - if err != nil { - return fmt.Errorf("encoding error: %v", err) - } - d, err := Decode(dbuf, e) - if err != nil { - return fmt.Errorf("decoding error: %v", err) - } - if !bytes.Equal(b, d) { - return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) - } - return nil -} - -func TestEmpty(t *testing.T) { - if err := roundtrip(nil, nil, nil); err != nil { - t.Fatal(err) - } -} - -func TestSmallCopy(t *testing.T) { - for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { - for i := 0; i < 32; i++ { - s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" - if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { - t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) - } - } - } - } -} - -func TestSmallRand(t *testing.T) { - rand.Seed(27354294) - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(rand.Uint32()) - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func TestSmallRegular(t *testing.T) { - for n := 1; n < 20000; n += 23 { - b := make([]byte, n) - for i, _ := range b { - b[i] = uint8(i%10 + 'a') - } - if err := roundtrip(b, nil, nil); err != nil { - t.Fatal(err) - } - } -} - -func benchDecode(b *testing.B, src []byte) { - encoded, err := Encode(nil, src) - if err != nil { - b.Fatal(err) - } - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Decode(src, encoded) - } -} - -func benchEncode(b *testing.B, src []byte) { - // Bandwidth is in amount of uncompressed data. - b.SetBytes(int64(len(src))) - dst := make([]byte, MaxEncodedLen(len(src))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - Encode(dst, src) - } -} - -func readFile(b *testing.B, filename string) []byte { - src, err := ioutil.ReadFile(filename) - if err != nil { - b.Fatalf("failed reading %s: %s", filename, err) - } - if len(src) == 0 { - b.Fatalf("%s has zero length", filename) - } - return src -} - -// expand returns a slice of length n containing repeated copies of src. 
-func expand(src []byte, n int) []byte { - dst := make([]byte, n) - for x := dst; len(x) > 0; { - i := copy(x, src) - x = x[i:] - } - return dst -} - -func benchWords(b *testing.B, n int, decode bool) { - // Note: the file is OS-language dependent so the resulting values are not - // directly comparable for non-US-English OS installations. - data := expand(readFile(b, "/usr/share/dict/words"), n) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } -func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } -func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } -func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } -func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } -func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } -func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } -func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } - -// testFiles' values are copied directly from -// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. -// The label field is unused in snappy-go. -var testFiles = []struct { - label string - filename string -}{ - {"html", "html"}, - {"urls", "urls.10K"}, - {"jpg", "house.jpg"}, - {"pdf", "mapreduce-osdi-1.pdf"}, - {"html4", "html_x_4"}, - {"cp", "cp.html"}, - {"c", "fields.c"}, - {"lsp", "grammar.lsp"}, - {"xls", "kennedy.xls"}, - {"txt1", "alice29.txt"}, - {"txt2", "asyoulik.txt"}, - {"txt3", "lcet10.txt"}, - {"txt4", "plrabn12.txt"}, - {"bin", "ptt5"}, - {"sum", "sum"}, - {"man", "xargs.1"}, - {"pb", "geo.protodata"}, - {"gaviota", "kppkn.gtb"}, -} - -// The test data files are present at this canonical URL. -const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" - -func downloadTestdata(basename string) (errRet error) { - filename := filepath.Join("testdata", basename) - f, err := os.Create(filename) - if err != nil { - return fmt.Errorf("failed to create %s: %s", filename, err) - } - defer f.Close() - defer func() { - if errRet != nil { - os.Remove(filename) - } - }() - resp, err := http.Get(baseURL + basename) - if err != nil { - return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) - } - defer resp.Body.Close() - _, err = io.Copy(f, resp.Body) - if err != nil { - return fmt.Errorf("failed to write %s: %s", filename, err) - } - return nil -} - -func benchFile(b *testing.B, n int, decode bool) { - filename := filepath.Join("testdata", testFiles[n].filename) - if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { - if !*download { - b.Fatal("test data not found; skipping benchmark without the -download flag") - } - // Download the official snappy C++ implementation reference test data - // files for benchmarking. - if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { - b.Fatalf("failed to create testdata: %s", err) - } - for _, tf := range testFiles { - if err := downloadTestdata(tf.filename); err != nil { - b.Fatalf("failed to download testdata: %s", err) - } - } - } - data := readFile(b, filename) - if decode { - benchDecode(b, data) - } else { - benchEncode(b, data) - } -} - -// Naming convention is kept similar to what snappy's C++ implementation uses. 
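The snappy package being removed documents its block format in snappy.go above: each encoded block begins with the varint-encoded decoded length, followed by literal and copy chunks, and MaxEncodedLen bounds the worst-case encoded size. A minimal round-trip sketch of that Encode/Decode API, assuming the upstream github.com/syndtr/gosnappy/snappy import path rather than this vendored copy:

// Hypothetical standalone round trip through the removed snappy package; the
// import path is an assumption (upstream gosnappy rather than the Godeps copy).
package main

import (
	"bytes"
	"fmt"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hello snappy "), 100)

	// Encode never needs more than MaxEncodedLen(len(src)) bytes of output,
	// so sizing dst up front avoids an internal allocation.
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))
	enc, err := snappy.Encode(dst, src)
	if err != nil {
		panic(err)
	}

	// Passing a nil dst is valid; Decode allocates based on the block's
	// varint-encoded decoded length.
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(enc), "bytes; round-trip ok:", bytes.Equal(src, dec))
}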
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } -func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } -func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } -func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } -func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } -func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } -func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } -func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } -func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } -func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } -func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } -func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } -func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) } -func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) } -func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) } -func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) } -func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) } -func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) } -func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } -func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } -func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } -func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } -func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } -func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } -func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } -func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } -func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } -func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } -func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } -func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } -func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) } -func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) } -func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) } -func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) } -func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) } -func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) } diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/LICENSE b/Godeps/_workspace/src/github.com/tendermint/ed25519/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519.go deleted file mode 100644 index 48ac4a423..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// http://ed25519.cr.yp.to/. -package ed25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -import ( - "crypto/sha512" - "crypto/subtle" - "io" - - "github.com/agl/ed25519/edwards25519" -) - -const ( - PublicKeySize = 32 - PrivateKeySize = 64 - SignatureSize = 64 -) - -// GenerateKey generates a public/private key pair using randomness from rand. -func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { - privateKey = new([64]byte) - _, err = io.ReadFull(rand, privateKey[:32]) - if err != nil { - return nil, nil, err - } - - publicKey = MakePublicKey(privateKey) - return -} - -// MakePublicKey makes a publicKey from the first half of privateKey. -func MakePublicKey(privateKey *[PrivateKeySize]byte) (publicKey *[PublicKeySize]byte) { - publicKey = new([32]byte) - - h := sha512.New() - h.Write(privateKey[:32]) - digest := h.Sum(nil) - - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest) - edwards25519.GeScalarMultBase(&A, &hBytes) - A.ToBytes(publicKey) - - copy(privateKey[32:], publicKey[:]) - return -} - -// Sign signs the message with privateKey and returns a signature. 
-func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := new([64]byte) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - return signature -} - -// Verify returns true iff sig is a valid signature of message by publicKey. -func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { - if sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - if !A.FromBytes(publicKey) { - return false - } - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) - - var checkR [32]byte - R.ToBytes(&checkR) - return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519_test.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519_test.go deleted file mode 100644 index 0dc2b13b9..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/ed25519_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
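The ed25519 package removed here exposes GenerateKey, Sign, and Verify over fixed-size key and signature arrays: Sign produces a 64-byte signature (R followed by s), and Verify recomputes R from the public key and signature scalar and compares it against the signature's first half. A minimal sketch of that flow, assuming the upstream github.com/tendermint/ed25519 import path rather than this vendored copy:

// Hypothetical standalone sign/verify flow using the removed ed25519 package;
// the import path is an assumption (upstream rather than the Godeps copy).
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/tendermint/ed25519"
)

func main() {
	// GenerateKey reads 32 bytes of entropy and derives the key pair; per
	// MakePublicKey above, the second half of the private key is a copy of
	// the public key.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("test message")
	sig := ed25519.Sign(priv, msg)

	// Verify returns true iff sig is a valid signature of msg by pub.
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig))
}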
- -package ed25519 - -import ( - "bufio" - "bytes" - "compress/gzip" - "encoding/hex" - "io" - "os" - "strings" - "testing" -) - -type zeroReader struct{} - -func (zeroReader) Read(buf []byte) (int, error) { - for i := range buf { - buf[i] = 0 - } - return len(buf), nil -} - -func TestSignVerify(t *testing.T) { - var zero zeroReader - public, private, _ := GenerateKey(zero) - - message := []byte("test message") - sig := Sign(private, message) - if !Verify(public, message, sig) { - t.Errorf("valid signature rejected") - } - - wrongMessage := []byte("wrong message") - if Verify(public, wrongMessage, sig) { - t.Errorf("signature of different message accepted") - } -} - -func TestGolden(t *testing.T) { - // sign.input.gz is a selection of test cases from - // http://ed25519.cr.yp.to/python/sign.input - testDataZ, err := os.Open("testdata/sign.input.gz") - if err != nil { - t.Fatal(err) - } - defer testDataZ.Close() - testData, err := gzip.NewReader(testDataZ) - if err != nil { - t.Fatal(err) - } - defer testData.Close() - - in := bufio.NewReaderSize(testData, 1<<12) - lineNo := 0 - for { - lineNo++ - lineBytes, isPrefix, err := in.ReadLine() - if isPrefix { - t.Fatal("bufio buffer too small") - } - if err != nil { - if err == io.EOF { - break - } - t.Fatalf("error reading test data: %s", err) - } - - line := string(lineBytes) - parts := strings.Split(line, ":") - if len(parts) != 5 { - t.Fatalf("bad number of parts on line %d", lineNo) - } - - privBytes, _ := hex.DecodeString(parts[0]) - pubKeyBytes, _ := hex.DecodeString(parts[1]) - msg, _ := hex.DecodeString(parts[2]) - sig, _ := hex.DecodeString(parts[3]) - // The signatures in the test vectors also include the message - // at the end, but we just want R and S. - sig = sig[:SignatureSize] - - if l := len(pubKeyBytes); l != PublicKeySize { - t.Fatalf("bad public key length on line %d: got %d bytes", lineNo, l) - } - - var priv [PrivateKeySize]byte - copy(priv[:], privBytes) - copy(priv[32:], pubKeyBytes) - - sig2 := Sign(&priv, msg) - if !bytes.Equal(sig, sig2[:]) { - t.Errorf("different signature result on line %d: %x vs %x", lineNo, sig, sig2) - } - - var pubKey [PublicKeySize]byte - copy(pubKey[:], pubKeyBytes) - if !Verify(&pubKey, msg, sig2) { - t.Errorf("signature failed to verify on line %d", lineNo) - } - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/const.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/const.go deleted file mode 100644 index ea5b77a71..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/const.go +++ /dev/null @@ -1,1411 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 
2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, 
-15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, 
-7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 
319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - 
{ - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, 
-13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 
14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 
12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, 
- FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 
6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, 
-16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 
4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - 
FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 
6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, 
-3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, 
-8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, 
-16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - 
FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, 
-2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, 
-20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - 
FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 
254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, 
-4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, 
-3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, 
-19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, 
-1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/edwards25519.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/edwards25519.go deleted file mode 100644 index 184b4a859..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/edwards25519/edwards25519.go +++ /dev/null @@ -1,2127 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements operations in GF(2**255-19) and on an -// Edwards curve that is isomorphic to curve25519. See -// http://ed25519.cr.yp.to/. 
-package edwards25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -func FeZero(fe *FieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func FeSub(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func FeCopy(dst, src *FieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - var x FieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
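The FieldElement comment above pins down the unevenly spaced radix-2^25.5 limbs (weights 2^0, 2^26, 2^51, 2^77, ..., 2^230). A minimal sketch of evaluating that packing with math/big, using a hypothetical feToBig helper; handy for cross-checking FeFromBytes/FeToBytes round trips in a test:

package main

import (
	"fmt"
	"math/big"
)

// feToBig evaluates t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9]
// and reduces it modulo p = 2^255 - 19. The limb weights alternate
// between 26-bit and 25-bit steps, and individual limbs may be negative.
func feToBig(t [10]int32) *big.Int {
	weights := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	acc := new(big.Int)
	for i, w := range weights {
		acc.Add(acc, new(big.Int).Lsh(big.NewInt(int64(t[i])), w))
	}
	return acc.Mod(acc, p)
}

func main() {
	fmt.Println(feToBig([10]int32{1}))   // 1, i.e. FeOne
	fmt.Println(feToBig([10]int32{-19})) // p - 19
}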
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - for i := range h { - h[i] = -f[i] - } -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
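The implementation notes above mention the *19 precomputations. They come from the prime shape: p = 2^255 - 19, so 2^255 ≡ 19 (mod p), and any limb product whose weight reaches 2^255 folds back scaled by 19 (weight 2^256 folds back scaled by 38). A small illustrative check with math/big:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// The fold-back factors behind the _19/_38 suffixes in FeMul:
	// f2 (weight 2^51) * g8 (weight 2^204) lands at 2^255, hence f2g8_19;
	// f1 (weight 2^26) * g9 (weight 2^230) lands at 2^256, hence f1g9_38.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)) // 19
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 256), p)) // 38
}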
-func FeMul(h, f, g *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 /* 1.4*2^29 */ - g2_19 := 19 * g2 /* 1.4*2^30; still ok */ - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * 
int64(g4_19) - f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] 
<< 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeSquare(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - 
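FeSquare is documented as h = f*f; the specialization only reuses the symmetric schoolbook products, so it must agree with FeMul(h, f, f). A property-check sketch, assuming the vendored package is importable under the path shown in the diff header:

package main

import (
	"fmt"

	"github.com/tendermint/ed25519/edwards25519"
)

func main() {
	// Any element within the documented input bounds works here.
	f := edwards25519.FieldElement{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}

	var viaMul, viaSquare edwards25519.FieldElement
	edwards25519.FeMul(&viaMul, &f, &f)
	edwards25519.FeSquare(&viaSquare, &f)

	// Compare canonical encodings rather than raw limbs, since the limb
	// representation of a field element is not unique.
	var a, b [32]byte
	edwards25519.FeToBytes(&a, &viaMul)
	edwards25519.FeToBytes(&b, &viaSquare)
	fmt.Println(a == b) // true
}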
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.959375*2^30 - f6_19 := 19 * f6 // 1.959375*2^30 - f7_38 := 38 * f7 // 1.959375*2^30 - f8_19 := 19 * f8 // 1.959375*2^30 - f9_38 := 38 * f9 // 1.959375*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) 
* int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - 
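FeInvert's fixed square-and-multiply chain raises z to p - 2 = 2^255 - 21 (Fermat inversion), with the running comments tracking which exponent bits have been accumulated so far. A sanity-check sketch under the same vendored-import-path assumption as above:

package main

import (
	"fmt"

	"github.com/tendermint/ed25519/edwards25519"
)

func main() {
	z := edwards25519.FieldElement{9} // any nonzero element will do

	var zInv, prod edwards25519.FieldElement
	edwards25519.FeInvert(&zInv, &z)
	edwards25519.FeMul(&prod, &z, &zInv)

	// z * FeInvert(z) must encode to the canonical bytes of 1.
	var got, one [32]byte
	edwards25519.FeToBytes(&got, &prod)
	one[0] = 1
	fmt.Println(got == one) // true
}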
FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. -// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, 
v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) == (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. 
-// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. -func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
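The scalar functions below operate on 32-byte little-endian encodings modulo the group order l quoted above. A cross-check sketch against math/big, with an illustrative leToBig helper:

package main

import (
	"fmt"
	"math/big"

	"github.com/tendermint/ed25519/edwards25519"
)

// leToBig decodes a little-endian byte string into a big.Int.
func leToBig(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i := range b {
		rev[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(rev)
}

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493
	l, _ := new(big.Int).SetString(
		"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

	var a, b, c, s [32]byte
	a[0], b[0], c[0] = 3, 5, 7 // tiny scalars, for readability
	edwards25519.ScMulAdd(&s, &a, &b, &c)

	want := new(big.Int).Mod(big.NewInt(3*5+7), l)
	fmt.Println(leToBig(s[:]).Cmp(want) == 0) // true: s encodes (3*5 + 7) mod l = 22
}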
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519.go deleted file mode 100644 index 571218f55..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package extra25519 - -import ( - "crypto/sha512" - - "github.com/agl/ed25519/edwards25519" -) - -// PrivateKeyToCurve25519 converts an ed25519 private key into a corresponding -// curve25519 private key such that the resulting curve25519 public key will -// equal the result from PublicKeyToCurve25519. -func PrivateKeyToCurve25519(curve25519Private *[32]byte, privateKey *[64]byte) { - h := sha512.New() - h.Write(privateKey[:32]) - digest := h.Sum(nil) - - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - copy(curve25519Private[:], digest) -} - -func edwardsToMontgomeryX(outX, y *edwards25519.FieldElement) { - // We only need the x-coordinate of the curve25519 point, which I'll - // call u. The isomorphism is u=(y+1)/(1-y), since y=Y/Z, this gives - // u=(Y+Z)/(Z-Y). We know that Z=1, thus u=(Y+1)/(1-Y). - var oneMinusY edwards25519.FieldElement - edwards25519.FeOne(&oneMinusY) - edwards25519.FeSub(&oneMinusY, &oneMinusY, y) - edwards25519.FeInvert(&oneMinusY, &oneMinusY) - - edwards25519.FeOne(outX) - edwards25519.FeAdd(outX, outX, y) - - edwards25519.FeMul(outX, outX, &oneMinusY) -} - -// PublicKeyToCurve25519 converts an Ed25519 public key into the curve25519 -// public key that would be generated from the same private key. -func PublicKeyToCurve25519(curve25519Public *[32]byte, publicKey *[32]byte) bool { - var A edwards25519.ExtendedGroupElement - if !A.FromBytes(publicKey) { - return false - } - - // A.Z = 1 as a postcondition of FromBytes. - var x edwards25519.FieldElement - edwardsToMontgomeryX(&x, &A.Y) - edwards25519.FeToBytes(curve25519Public, &x) - return true -} - -// sqrtMinusA is sqrt(-486662) -var sqrtMinusA = edwards25519.FieldElement{ - 12222970, 8312128, 11511410, -9067497, 15300785, 241793, -25456130, -14121551, 12187136, -3972024, -} - -// sqrtMinusHalf is sqrt(-1/2) -var sqrtMinusHalf = edwards25519.FieldElement{ - -17256545, 3971863, 28865457, -1750208, 27359696, -16640980, 12573105, 1002827, -163343, 11073975, -} - -// halfQMinus1Bytes is (2^255-20)/2 expressed in little endian form. -var halfQMinus1Bytes = [32]byte{ - 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, -} - -// feBytesLess returns one if a <= b and zero otherwise. 
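edwardsToMontgomeryX above applies the birational map u = (y+1)/(1-y) to move from the Edwards y-coordinate to the Curve25519 (Montgomery) u-coordinate. A quick variable-time sanity check of that map with math/big, using the well-known fact that the Ed25519 base point has y = 4/5 and should land on the Curve25519 base point u = 9 (illustration only, not part of the vendored code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the prime field both curve25519 and ed25519 work over.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// The Ed25519 base point has y = 4/5 (mod p).
	y := new(big.Int).Mul(big.NewInt(4), new(big.Int).ModInverse(big.NewInt(5), p))
	y.Mod(y, p)

	// Apply u = (1+y)/(1-y), the map quoted in edwardsToMontgomeryX.
	num := new(big.Int).Add(big.NewInt(1), y)
	den := new(big.Int).Mod(new(big.Int).Sub(big.NewInt(1), y), p)
	u := new(big.Int).Mul(num, new(big.Int).ModInverse(den, p))
	u.Mod(u, p)

	fmt.Println("u =", u) // expected: 9, the curve25519 base point
}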
-func feBytesLE(a, b *[32]byte) int32 { - equalSoFar := int32(-1) - greater := int32(0) - - for i := uint(31); i < 32; i-- { - x := int32(a[i]) - y := int32(b[i]) - - greater = (^equalSoFar & greater) | (equalSoFar & ((x - y) >> 31)) - equalSoFar = equalSoFar & (((x ^ y) - 1) >> 31) - } - - return int32(^equalSoFar & 1 & greater) -} - -// ScalarBaseMult computes a curve25519 public key from a private key and also -// a uniform representative for that public key. Note that this function will -// fail and return false for about half of private keys. -// See http://elligator.cr.yp.to/elligator-20130828.pdf. -func ScalarBaseMult(publicKey, representative, privateKey *[32]byte) bool { - var maskedPrivateKey [32]byte - copy(maskedPrivateKey[:], privateKey[:]) - - maskedPrivateKey[0] &= 248 - maskedPrivateKey[31] &= 127 - maskedPrivateKey[31] |= 64 - - var A edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&A, &maskedPrivateKey) - - var inv1 edwards25519.FieldElement - edwards25519.FeSub(&inv1, &A.Z, &A.Y) - edwards25519.FeMul(&inv1, &inv1, &A.X) - edwards25519.FeInvert(&inv1, &inv1) - - var t0, u edwards25519.FieldElement - edwards25519.FeMul(&u, &inv1, &A.X) - edwards25519.FeAdd(&t0, &A.Y, &A.Z) - edwards25519.FeMul(&u, &u, &t0) - - var v edwards25519.FieldElement - edwards25519.FeMul(&v, &t0, &inv1) - edwards25519.FeMul(&v, &v, &A.Z) - edwards25519.FeMul(&v, &v, &sqrtMinusA) - - var b edwards25519.FieldElement - edwards25519.FeAdd(&b, &u, &edwards25519.A) - - var c, b3, b8 edwards25519.FieldElement - edwards25519.FeSquare(&b3, &b) // 2 - edwards25519.FeMul(&b3, &b3, &b) // 3 - edwards25519.FeSquare(&c, &b3) // 6 - edwards25519.FeMul(&c, &c, &b) // 7 - edwards25519.FeMul(&b8, &c, &b) // 8 - edwards25519.FeMul(&c, &c, &u) - q58(&c, &c) - - var chi edwards25519.FieldElement - edwards25519.FeSquare(&chi, &c) - edwards25519.FeSquare(&chi, &chi) - - edwards25519.FeSquare(&t0, &u) - edwards25519.FeMul(&chi, &chi, &t0) - - edwards25519.FeSquare(&t0, &b) // 2 - edwards25519.FeMul(&t0, &t0, &b) // 3 - edwards25519.FeSquare(&t0, &t0) // 6 - edwards25519.FeMul(&t0, &t0, &b) // 7 - edwards25519.FeSquare(&t0, &t0) // 14 - edwards25519.FeMul(&chi, &chi, &t0) - edwards25519.FeNeg(&chi, &chi) - - var chiBytes [32]byte - edwards25519.FeToBytes(&chiBytes, &chi) - // chi[1] is either 0 or 0xff - if chiBytes[1] == 0xff { - return false - } - - // Calculate r1 = sqrt(-u/(2*(u+A))) - var r1 edwards25519.FieldElement - edwards25519.FeMul(&r1, &c, &u) - edwards25519.FeMul(&r1, &r1, &b3) - edwards25519.FeMul(&r1, &r1, &sqrtMinusHalf) - - var maybeSqrtM1 edwards25519.FieldElement - edwards25519.FeSquare(&t0, &r1) - edwards25519.FeMul(&t0, &t0, &b) - edwards25519.FeAdd(&t0, &t0, &t0) - edwards25519.FeAdd(&t0, &t0, &u) - - edwards25519.FeOne(&maybeSqrtM1) - edwards25519.FeCMove(&maybeSqrtM1, &edwards25519.SqrtM1, edwards25519.FeIsNonZero(&t0)) - edwards25519.FeMul(&r1, &r1, &maybeSqrtM1) - - // Calculate r = sqrt(-(u+A)/(2u)) - var r edwards25519.FieldElement - edwards25519.FeSquare(&t0, &c) // 2 - edwards25519.FeMul(&t0, &t0, &c) // 3 - edwards25519.FeSquare(&t0, &t0) // 6 - edwards25519.FeMul(&r, &t0, &c) // 7 - - edwards25519.FeSquare(&t0, &u) // 2 - edwards25519.FeMul(&t0, &t0, &u) // 3 - edwards25519.FeMul(&r, &r, &t0) - - edwards25519.FeSquare(&t0, &b8) // 16 - edwards25519.FeMul(&t0, &t0, &b8) // 24 - edwards25519.FeMul(&t0, &t0, &b) // 25 - edwards25519.FeMul(&r, &r, &t0) - edwards25519.FeMul(&r, &r, &sqrtMinusHalf) - - edwards25519.FeSquare(&t0, &r) - edwards25519.FeMul(&t0, &t0, &u) - 
edwards25519.FeAdd(&t0, &t0, &t0) - edwards25519.FeAdd(&t0, &t0, &b) - edwards25519.FeOne(&maybeSqrtM1) - edwards25519.FeCMove(&maybeSqrtM1, &edwards25519.SqrtM1, edwards25519.FeIsNonZero(&t0)) - edwards25519.FeMul(&r, &r, &maybeSqrtM1) - - var vBytes [32]byte - edwards25519.FeToBytes(&vBytes, &v) - vInSquareRootImage := feBytesLE(&vBytes, &halfQMinus1Bytes) - edwards25519.FeCMove(&r, &r1, vInSquareRootImage) - - edwards25519.FeToBytes(publicKey, &u) - edwards25519.FeToBytes(representative, &r) - return true -} - -// q58 calculates out = z^((p-5)/8). -func q58(out, z *edwards25519.FieldElement) { - var t1, t2, t3 edwards25519.FieldElement - var i int - - edwards25519.FeSquare(&t1, z) // 2^1 - edwards25519.FeMul(&t1, &t1, z) // 2^1 + 2^0 - edwards25519.FeSquare(&t1, &t1) // 2^2 + 2^1 - edwards25519.FeSquare(&t2, &t1) // 2^3 + 2^2 - edwards25519.FeSquare(&t2, &t2) // 2^4 + 2^3 - edwards25519.FeMul(&t2, &t2, &t1) // 4,3,2,1 - edwards25519.FeMul(&t1, &t2, z) // 4..0 - edwards25519.FeSquare(&t2, &t1) // 5..1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - edwards25519.FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t2, &t2, &t1) // 19..0 - edwards25519.FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - edwards25519.FeSquare(&t3, &t3) - } - edwards25519.FeMul(&t2, &t3, &t2) // 39..0 - edwards25519.FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 49..0 - edwards25519.FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t2, &t2, &t1) // 99..0 - edwards25519.FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - edwards25519.FeSquare(&t3, &t3) - } - edwards25519.FeMul(&t2, &t3, &t2) // 199..0 - edwards25519.FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 249..0 - edwards25519.FeSquare(&t1, &t1) // 250..1 - edwards25519.FeSquare(&t1, &t1) // 251..2 - edwards25519.FeMul(out, &t1, z) // 251..2,0 -} - -// chi calculates out = z^((p-1)/2). The result is either 1, 0, or -1 depending -// on whether z is a non-zero square, zero, or a non-square. 
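q58 and chi above are fixed addition chains that raise a field element to (p-5)/8 and (p-1)/2 respectively over GF(2^255-19); the second exponent is Euler's criterion, which is why the comment promises 1, 0, or -1. A short math/big illustration of that criterion (variable-time, for intuition only, not a replacement for the constant-time chain):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field prime used by curve25519/ed25519.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// Euler's criterion: z^((p-1)/2) mod p is 1 for a non-zero square,
	// p-1 (i.e. -1) for a non-square, and 0 for z = 0 -- the same three
	// outcomes the chi() comment describes.
	exp := new(big.Int).Rsh(new(big.Int).Sub(p, big.NewInt(1)), 1)
	for _, z := range []int64{0, 4, 9, 2} {
		r := new(big.Int).Exp(big.NewInt(z), exp, p)
		switch {
		case r.Sign() == 0:
			fmt.Printf("chi(%d) = 0\n", z)
		case r.Cmp(big.NewInt(1)) == 0:
			fmt.Printf("chi(%d) = 1 (square)\n", z)
		default:
			fmt.Printf("chi(%d) = -1 (non-square)\n", z)
		}
	}
}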
-func chi(out, z *edwards25519.FieldElement) { - var t0, t1, t2, t3 edwards25519.FieldElement - var i int - - edwards25519.FeSquare(&t0, z) // 2^1 - edwards25519.FeMul(&t1, &t0, z) // 2^1 + 2^0 - edwards25519.FeSquare(&t0, &t1) // 2^2 + 2^1 - edwards25519.FeSquare(&t2, &t0) // 2^3 + 2^2 - edwards25519.FeSquare(&t2, &t2) // 4,3 - edwards25519.FeMul(&t2, &t2, &t0) // 4,3,2,1 - edwards25519.FeMul(&t1, &t2, z) // 4..0 - edwards25519.FeSquare(&t2, &t1) // 5..1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - edwards25519.FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t2, &t2, &t1) // 19..0 - edwards25519.FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - edwards25519.FeSquare(&t3, &t3) - } - edwards25519.FeMul(&t2, &t3, &t2) // 39..0 - edwards25519.FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 49..0 - edwards25519.FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t2, &t2, &t1) // 99..0 - edwards25519.FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - edwards25519.FeSquare(&t3, &t3) - } - edwards25519.FeMul(&t2, &t3, &t2) // 199..0 - edwards25519.FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - edwards25519.FeSquare(&t2, &t2) - } - edwards25519.FeMul(&t1, &t2, &t1) // 249..0 - edwards25519.FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 4; i++ { // 253..4 - edwards25519.FeSquare(&t1, &t1) - } - edwards25519.FeMul(out, &t1, &t0) // 253..4,2,1 -} - -// RepresentativeToPublicKey converts a uniform representative value for a -// curve25519 public key, as produced by ScalarBaseMult, to a curve25519 public -// key. -func RepresentativeToPublicKey(publicKey, representative *[32]byte) { - var rr2, v, e edwards25519.FieldElement - edwards25519.FeFromBytes(&rr2, representative) - - edwards25519.FeSquare2(&rr2, &rr2) - rr2[0]++ - edwards25519.FeInvert(&rr2, &rr2) - edwards25519.FeMul(&v, &edwards25519.A, &rr2) - edwards25519.FeNeg(&v, &v) - - var v2, v3 edwards25519.FieldElement - edwards25519.FeSquare(&v2, &v) - edwards25519.FeMul(&v3, &v, &v2) - edwards25519.FeAdd(&e, &v3, &v) - edwards25519.FeMul(&v2, &v2, &edwards25519.A) - edwards25519.FeAdd(&e, &v2, &e) - chi(&e, &e) - var eBytes [32]byte - edwards25519.FeToBytes(&eBytes, &e) - // eBytes[1] is either 0 (for e = 1) or 0xff (for e = -1) - eIsMinus1 := int32(eBytes[1]) & 1 - var negV edwards25519.FieldElement - edwards25519.FeNeg(&negV, &v) - edwards25519.FeCMove(&v, &negV, eIsMinus1) - - edwards25519.FeZero(&v2) - edwards25519.FeCMove(&v2, &edwards25519.A, eIsMinus1) - edwards25519.FeSub(&v, &v, &v2) - - edwards25519.FeToBytes(publicKey, &v) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519_test.go b/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519_test.go deleted file mode 100644 index 1e1dbdc0a..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/ed25519/extra25519/extra25519_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package extra25519 - -import ( - "bytes" - "crypto/rand" - "testing" - - "code.google.com/p/go.crypto/curve25519" - "github.com/agl/ed25519" -) - -func TestCurve25519Conversion(t *testing.T) { - public, private, _ := ed25519.GenerateKey(rand.Reader) - - var curve25519Public, curve25519Public2, curve25519Private [32]byte - PrivateKeyToCurve25519(&curve25519Private, private) - curve25519.ScalarBaseMult(&curve25519Public, &curve25519Private) - - if !PublicKeyToCurve25519(&curve25519Public2, public) { - t.Fatalf("PublicKeyToCurve25519 failed") - } - - if !bytes.Equal(curve25519Public[:], curve25519Public2[:]) { - t.Errorf("Values didn't match: curve25519 produced %x, conversion produced %x", curve25519Public[:], curve25519Public2[:]) - } -} - -func TestElligator(t *testing.T) { - var publicKey, publicKey2, publicKey3, representative, privateKey [32]byte - - for i := 0; i < 1000; i++ { - rand.Reader.Read(privateKey[:]) - - if !ScalarBaseMult(&publicKey, &representative, &privateKey) { - continue - } - RepresentativeToPublicKey(&publicKey2, &representative) - if !bytes.Equal(publicKey[:], publicKey2[:]) { - t.Fatal("The resulting public key doesn't match the initial one.") - } - - curve25519.ScalarBaseMult(&publicKey3, &privateKey) - if !bytes.Equal(publicKey[:], publicKey3[:]) { - t.Fatal("The public key doesn't match the value that curve25519 produced.") - } - } -} - -func BenchmarkKeyGeneration(b *testing.B) { - var publicKey, representative, privateKey [32]byte - - // Find the private key that results in a point that's in the image of the map. - for { - rand.Reader.Read(privateKey[:]) - if ScalarBaseMult(&publicKey, &representative, &privateKey) { - break - } - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - ScalarBaseMult(&publicKey, &representative, &privateKey); - } -} - -func BenchmarkMap(b *testing.B) { - var publicKey, representative [32]byte - rand.Reader.Read(representative[:]) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - RepresentativeToPublicKey(&publicKey, &representative); - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/ed25519/testdata/sign.input.gz b/Godeps/_workspace/src/github.com/tendermint/ed25519/testdata/sign.input.gz deleted file mode 100644 index 41030690c..000000000 Binary files a/Godeps/_workspace/src/github.com/tendermint/ed25519/testdata/sign.input.gz and /dev/null differ diff --git a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/flowcontrol.go b/Godeps/_workspace/src/github.com/tendermint/flowcontrol/flowcontrol.go deleted file mode 100644 index c94735dbd..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/flowcontrol.go +++ /dev/null @@ -1,275 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -// Package flowcontrol provides the tools for monitoring and limiting the -// transfer rate of an arbitrary data stream. -package flowcontrol - -import ( - "math" - "sync" - "time" -) - -// Monitor monitors and limits the transfer rate of a data stream. 
-type Monitor struct { - mu sync.Mutex // Mutex guarding access to all internal fields - active bool // Flag indicating an active transfer - start time.Duration // Transfer start time (clock() value) - bytes int64 // Total number of bytes transferred - samples int64 // Total number of samples taken - - rSample float64 // Most recent transfer rate sample (bytes per second) - rEMA float64 // Exponential moving average of rSample - rPeak float64 // Peak transfer rate (max of all rSamples) - rWindow float64 // rEMA window (seconds) - - sBytes int64 // Number of bytes transferred since sLast - sLast time.Duration // Most recent sample time (stop time when inactive) - sRate time.Duration // Sampling rate - - tBytes int64 // Number of bytes expected in the current transfer - tLast time.Duration // Time of the most recent transfer of at least 1 byte -} - -// New creates a new flow control monitor. Instantaneous transfer rate is -// measured and updated for each sampleRate interval. windowSize determines the -// weight of each sample in the exponential moving average (EMA) calculation. -// The exact formulas are: -// -// sampleTime = currentTime - prevSampleTime -// sampleRate = byteCount / sampleTime -// weight = 1 - exp(-sampleTime/windowSize) -// newRate = weight*sampleRate + (1-weight)*oldRate -// -// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, -// respectively. -func New(sampleRate, windowSize time.Duration) *Monitor { - if sampleRate = clockRound(sampleRate); sampleRate <= 0 { - sampleRate = 5 * clockRate - } - if windowSize <= 0 { - windowSize = 1 * time.Second - } - now := clock() - return &Monitor{ - active: true, - start: now, - rWindow: windowSize.Seconds(), - sLast: now, - sRate: sampleRate, - tLast: now, - } -} - -// Update records the transfer of n bytes and returns n. It should be called -// after each Read/Write operation, even if n is 0. -func (m *Monitor) Update(n int) int { - m.mu.Lock() - m.update(n) - m.mu.Unlock() - return n -} - -// Hack to set the current rEMA. -func (m *Monitor) SetREMA(rEMA float64) { - m.mu.Lock() - m.rEMA = rEMA - m.samples++ - m.mu.Unlock() -} - -// IO is a convenience method intended to wrap io.Reader and io.Writer method -// execution. It calls m.Update(n) and then returns (n, err) unmodified. -func (m *Monitor) IO(n int, err error) (int, error) { - return m.Update(n), err -} - -// Done marks the transfer as finished and prevents any further updates or -// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and -// Limit methods become NOOPs. It returns the total number of bytes transferred. -func (m *Monitor) Done() int64 { - m.mu.Lock() - if now := m.update(0); m.sBytes > 0 { - m.reset(now) - } - m.active = false - m.tLast = 0 - n := m.bytes - m.mu.Unlock() - return n -} - -// timeRemLimit is the maximum Status.TimeRem value. -const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second - -// Status represents the current Monitor status. All transfer rates are in bytes -// per second rounded to the nearest byte. 
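The formulas in New's comment describe a standard exponential moving average; update() later applies the algebraically equivalent form rEMA = rSample + w*(rEMA-rSample) with w = exp(-t/window). A self-contained sketch of that update with made-up numbers (not code from this package):

package main

import (
	"fmt"
	"math"
	"time"
)

// emaRate applies the update documented on New: the longer the sample period
// relative to the window, the more weight the new sample carries.
func emaRate(oldRate, sampleBytes float64, sampleTime, windowSize time.Duration) float64 {
	t := sampleTime.Seconds()
	sampleRate := sampleBytes / t
	weight := 1 - math.Exp(-t/windowSize.Seconds())
	return weight*sampleRate + (1-weight)*oldRate
}

func main() {
	// 10 KiB observed in a 100 ms sample, previous EMA of 50 KiB/s, 1 s window.
	rate := emaRate(50*1024, 10*1024, 100*time.Millisecond, time.Second)
	fmt.Printf("new EMA: %.0f bytes/s\n", rate)
}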
-type Status struct { - Active bool // Flag indicating an active transfer - Start time.Time // Transfer start time - Duration time.Duration // Time period covered by the statistics - Idle time.Duration // Time since the last transfer of at least 1 byte - Bytes int64 // Total number of bytes transferred - Samples int64 // Total number of samples taken - InstRate int64 // Instantaneous transfer rate - CurRate int64 // Current transfer rate (EMA of InstRate) - AvgRate int64 // Average transfer rate (Bytes / Duration) - PeakRate int64 // Maximum instantaneous transfer rate - BytesRem int64 // Number of bytes remaining in the transfer - TimeRem time.Duration // Estimated time to completion - Progress Percent // Overall transfer progress -} - -// Status returns current transfer status information. The returned value -// becomes static after a call to Done. -func (m *Monitor) Status() Status { - m.mu.Lock() - now := m.update(0) - s := Status{ - Active: m.active, - Start: clockToTime(m.start), - Duration: m.sLast - m.start, - Idle: now - m.tLast, - Bytes: m.bytes, - Samples: m.samples, - PeakRate: round(m.rPeak), - BytesRem: m.tBytes - m.bytes, - Progress: percentOf(float64(m.bytes), float64(m.tBytes)), - } - if s.BytesRem < 0 { - s.BytesRem = 0 - } - if s.Duration > 0 { - rAvg := float64(s.Bytes) / s.Duration.Seconds() - s.AvgRate = round(rAvg) - if s.Active { - s.InstRate = round(m.rSample) - s.CurRate = round(m.rEMA) - if s.BytesRem > 0 { - if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { - ns := float64(s.BytesRem) / tRate * 1e9 - if ns > float64(timeRemLimit) { - ns = float64(timeRemLimit) - } - s.TimeRem = clockRound(time.Duration(ns)) - } - } - } - } - m.mu.Unlock() - return s -} - -// Limit restricts the instantaneous (per-sample) data flow to rate bytes per -// second. It returns the maximum number of bytes (0 <= n <= want) that may be -// transferred immediately without exceeding the limit. If block == true, the -// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, -// or the transfer is inactive (after a call to Done). -// -// At least one byte is always allowed to be transferred in any given sampling -// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate -// is 10 bytes per second. -// -// For usage examples, see the implementation of Reader and Writer in io.go. -func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { - if want < 1 || rate < 1 { - return want - } - m.mu.Lock() - - // Determine the maximum number of bytes that can be sent in one sample - limit := round(float64(rate) * m.sRate.Seconds()) - if limit <= 0 { - limit = 1 - } - - // If block == true, wait until m.sBytes < limit - if now := m.update(0); block { - for m.sBytes >= limit && m.active { - now = m.waitNextSample(now) - } - } - - // Make limit <= want (unlimited if the transfer is no longer active) - if limit -= m.sBytes; limit > int64(want) || !m.active { - limit = int64(want) - } - m.mu.Unlock() - - if limit < 0 { - limit = 0 - } - return int(limit) -} - -// SetTransferSize specifies the total size of the data transfer, which allows -// the Monitor to calculate the overall progress and time to completion. -func (m *Monitor) SetTransferSize(bytes int64) { - if bytes < 0 { - bytes = 0 - } - m.mu.Lock() - m.tBytes = bytes - m.mu.Unlock() -} - -// update accumulates the transferred byte count for the current sample until -// clock() - m.sLast >= m.sRate. The monitor status is updated once the current -// sample is done. 
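Limit's per-sample budget is simply rate times the sample duration, rounded and clamped to at least one byte; that clamp is where the comment's "10 bytes per second at a 100 ms sampling rate" floor comes from. A small sketch of the arithmetic (constants invented for illustration; math.Round stands in for the package's own round helper):

package main

import (
	"fmt"
	"math"
	"time"
)

// perSampleBudget returns how many bytes one sample period may carry at the
// given rate, mirroring the rounding and the 1-byte floor described for Limit.
func perSampleBudget(rate int64, samplePeriod time.Duration) int64 {
	budget := int64(math.Round(float64(rate) * samplePeriod.Seconds()))
	if budget < 1 {
		budget = 1
	}
	return budget
}

func main() {
	sample := 100 * time.Millisecond
	for _, rate := range []int64{1, 5, 1000} {
		b := perSampleBudget(rate, sample)
		fmt.Printf("rate %4d B/s -> %3d bytes per %v sample (effective %d B/s)\n",
			rate, b, sample, b*int64(time.Second/sample))
	}
}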
-func (m *Monitor) update(n int) (now time.Duration) { - if !m.active { - return - } - if now = clock(); n > 0 { - m.tLast = now - } - m.sBytes += int64(n) - if sTime := now - m.sLast; sTime >= m.sRate { - t := sTime.Seconds() - if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { - m.rPeak = m.rSample - } - - // Exponential moving average using a method similar to *nix load - // average calculation. Longer sampling periods carry greater weight. - if m.samples > 0 { - w := math.Exp(-t / m.rWindow) - m.rEMA = m.rSample + w*(m.rEMA-m.rSample) - } else { - m.rEMA = m.rSample - } - m.reset(now) - } - return -} - -// reset clears the current sample state in preparation for the next sample. -func (m *Monitor) reset(sampleTime time.Duration) { - m.bytes += m.sBytes - m.samples++ - m.sBytes = 0 - m.sLast = sampleTime -} - -// waitNextSample sleeps for the remainder of the current sample. The lock is -// released and reacquired during the actual sleep period, so it's possible for -// the transfer to be inactive when this method returns. -func (m *Monitor) waitNextSample(now time.Duration) time.Duration { - const minWait = 5 * time.Millisecond - current := m.sLast - - // sleep until the last sample time changes (ideally, just one iteration) - for m.sLast == current && m.active { - d := current + m.sRate - now - m.mu.Unlock() - if d < minWait { - d = minWait - } - time.Sleep(d) - m.mu.Lock() - now = m.update(0) - } - return now -} diff --git a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io.go b/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io.go deleted file mode 100644 index 12a753ddf..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowcontrol - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowcontrol: transfer rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. -type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. -func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (r *Reader) SetLimit(new int64) (old int64) { - old, r.limit = r.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. 
A -// Read call on a non-blocking reader returns immediately if no additional bytes -// may be read at this time due to the rate limit. -func (r *Reader) SetBlocking(new bool) (old bool) { - old, r.block = r.block, new - return -} - -// Close closes the underlying reader if it implements the io.Closer interface. -func (r *Reader) Close() error { - defer r.Done() - if c, ok := r.Reader.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Writer implements io.WriteCloser with a restriction on the rate of data -// transfer. -type Writer struct { - io.Writer // Data destination - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be written due to the limit -} - -// NewWriter restricts all Write operations on w to limit bytes per second. The -// transfer rate and the default blocking behavior (true) can be changed -// directly on the returned *Writer. -func NewWriter(w io.Writer, limit int64) *Writer { - return &Writer{w, New(0, 0), limit, true} -} - -// Write writes len(p) bytes from p to the underlying data stream without -// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is -// non-blocking and no additional bytes can be written at this time. -func (w *Writer) Write(p []byte) (n int, err error) { - var c int - for len(p) > 0 && err == nil { - s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { - return n, ErrLimit - } - p = p[c:] - n += c - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (w *Writer) SetLimit(new int64) (old int64) { - old, w.limit = w.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Write call on a non-blocking writer returns as soon as no additional bytes -// may be written at this time due to the rate limit. -func (w *Writer) SetBlocking(new bool) (old bool) { - old, w.block = w.block, new - return -} - -// Close closes the underlying writer if it implements the io.Closer interface. 
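Reader and Writer wrap an io.Reader/io.Writer and route every call through Monitor.Limit, so an ordinary io.Copy gets rate-limited transparently. A minimal usage sketch against the vendored import path shown in this diff (buffer size and the 1 KiB/s limit are invented for illustration):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/tendermint/flowcontrol"
)

func main() {
	// 2 KiB of input, read through a limiter capped at roughly 1 KiB/s.
	src := bytes.NewReader(make([]byte, 2048))
	r := flowcontrol.NewReader(src, 1024)

	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	r.Done() // finish the transfer; further updates become no-ops

	s := r.Status() // Reader embeds *Monitor, so Status/Done are available directly
	fmt.Printf("copied %d bytes, average rate %d B/s, peak %d B/s\n", n, s.AvgRate, s.PeakRate)
}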
-func (w *Writer) Close() error { - defer w.Done() - if c, ok := w.Writer.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io_test.go b/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io_test.go deleted file mode 100644 index 318069366..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/io_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowcontrol - -import ( - "bytes" - "reflect" - "testing" - "time" -) - -const ( - _50ms = 50 * time.Millisecond - _100ms = 100 * time.Millisecond - _200ms = 200 * time.Millisecond - _300ms = 300 * time.Millisecond - _400ms = 400 * time.Millisecond - _500ms = 500 * time.Millisecond -) - -func nextStatus(m *Monitor) Status { - samples := m.samples - for i := 0; i < 30; i++ { - if s := m.Status(); s.Samples != samples { - return s - } - time.Sleep(5 * time.Millisecond) - } - return m.Status() -} - -func TestReader(t *testing.T) { - in := make([]byte, 100) - for i := range in { - in[i] = byte(i) - } - b := make([]byte, 100) - r := NewReader(bytes.NewReader(in), 100) - start := time.Now() - - // Make sure r implements Limiter - _ = Limiter(r) - - // 1st read of 10 bytes is performed immediately - if n, err := r.Read(b); n != 10 || err != nil { - t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - // No new Reads allowed in the current sample - r.SetBlocking(false) - if n, err := r.Read(b); n != 0 || err != nil { - t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - status := [6]Status{0: r.Status()} // No samples in the first status - - // 2nd read of 10 bytes blocks until the next sample - r.SetBlocking(true) - if n, err := r.Read(b[10:]); n != 10 || err != nil { - t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _100ms { - t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) - } - - status[1] = r.Status() // 1st sample - status[2] = nextStatus(r.Monitor) // 2nd sample - status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample - - if n := r.Done(); n != 20 { - t.Fatalf("r.Done() expected 20; got %v", n) - } - - status[4] = r.Status() - status[5] = nextStatus(r.Monitor) // Timeout - start = status[0].Start - - // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress - want := []Status{ - Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, - Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, - Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, - Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, - Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, - } - for i, s := range status { - if !reflect.DeepEqual(&s, &want[i]) { - t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s) - } - } - if !bytes.Equal(b[:20], in[:20]) { - t.Errorf("r.Read() input doesn't match output") - } -} - -func TestWriter(t *testing.T) { - b := make([]byte, 100) - for i := range b { - b[i] = byte(i) - } - w := NewWriter(&bytes.Buffer{}, 200) - start := time.Now() - - // Make sure w implements Limiter - _ = Limiter(w) - - // Non-blocking 20-byte 
write for the first sample returns ErrLimit - w.SetBlocking(false) - if n, err := w.Write(b); n != 20 || err != ErrLimit { - t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _400ms { - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress - want := []Status{ - Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, - Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, - } - for i, s := range status { - if !reflect.DeepEqual(&s, &want[i]) { - t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/util.go b/Godeps/_workspace/src/github.com/tendermint/flowcontrol/util.go deleted file mode 100644 index 91efd8815..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/flowcontrol/util.go +++ /dev/null @@ -1,67 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowcontrol - -import ( - "math" - "strconv" - "time" -) - -// clockRate is the resolution and precision of clock(). -const clockRate = 20 * time.Millisecond - -// czero is the process start time rounded down to the nearest clockRate -// increment. -var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate - -// clock returns a low resolution timestamp relative to the process start time. -func clock() time.Duration { - return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero -} - -// clockToTime converts a clock() timestamp to an absolute time.Time value. -func clockToTime(c time.Duration) time.Time { - return time.Unix(0, int64(czero+c)) -} - -// clockRound returns d rounded to the nearest clockRate increment. -func clockRound(d time.Duration) time.Duration { - return (d + clockRate>>1) / clockRate * clockRate -} - -// round returns x rounded to the nearest int64 (non-negative values only). -func round(x float64) int64 { - if _, frac := math.Modf(x); frac >= 0.5 { - return int64(math.Ceil(x)) - } - return int64(math.Floor(x)) -} - -// Percent represents a percentage in increments of 1/1000th of a percent. -type Percent uint32 - -// percentOf calculates what percent of the total is x. -func percentOf(x, total float64) Percent { - if x < 0 || total <= 0 { - return 0 - } else if p := round(x / total * 1e5); p <= math.MaxUint32 { - return Percent(p) - } - return Percent(math.MaxUint32) -} - -func (p Percent) Float() float64 { - return float64(p) * 1e-3 -} - -func (p Percent) String() string { - var buf [12]byte - b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) - n := len(b) - b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) - b[n] = '.' 
- return string(append(b, '%')) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-common/LICENSE.md deleted file mode 100644 index aaf0cf06d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Common -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. 
- -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
- -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. 
- -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
-b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
- -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
- -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. 
- -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/array.go b/Godeps/_workspace/src/github.com/tendermint/go-common/array.go deleted file mode 100644 index adedc42be..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/array.go +++ /dev/null @@ -1,5 +0,0 @@ -package common - -func Arr(items ...interface{}) []interface{} { - return items -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/async.go b/Godeps/_workspace/src/github.com/tendermint/go-common/async.go deleted file mode 100644 index 1d302c344..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/async.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -import "sync" - -func Parallel(tasks ...func()) { - var wg sync.WaitGroup - wg.Add(len(tasks)) - for _, task := range tasks { - go func(task func()) { - task() - wg.Done() - }(task) - } - wg.Wait() -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array.go b/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array.go deleted file mode 100644 index dc006f0eb..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array.go +++ /dev/null @@ -1,275 +0,0 @@ -package common - -import ( - "fmt" - "math/rand" - "strings" - "sync" -) - -type BitArray struct { - mtx sync.Mutex - Bits int `json:"bits"` // NOTE: persisted via reflect, must be exported - Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported -} - -// There is no BitArray whose Size is 0. Use nil instead. -func NewBitArray(bits int) *BitArray { - if bits == 0 { - return nil - } - return &BitArray{ - Bits: bits, - Elems: make([]uint64, (bits+63)/64), - } -} - -func (bA *BitArray) Size() int { - if bA == nil { - return 0 - } - return bA.Bits -} - -// NOTE: behavior is undefined if i >= bA.Bits -func (bA *BitArray) GetIndex(i int) bool { - if bA == nil { - return false - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.getIndex(i) -} - -func (bA *BitArray) getIndex(i int) bool { - if i >= bA.Bits { - return false - } - return bA.Elems[i/64]&(uint64(1)<<uint(i%64)) > 0 -} - -// NOTE: behavior is undefined if i >= bA.Bits -func (bA *BitArray) SetIndex(i int, v bool) bool { - if bA == nil { - return false - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.setIndex(i, v) -} - -func (bA *BitArray) setIndex(i int, v bool) bool { - if i >= bA.Bits { - return false - } - if v { - bA.Elems[i/64] |= (uint64(1) << uint(i%64)) - } else { - bA.Elems[i/64] &= ^(uint64(1) << uint(i%64)) - } - return true -} - -func (bA *BitArray) Copy() *BitArray { - if bA == nil { - return nil - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.copy() -} - -func (bA *BitArray) copy() *BitArray { - c := make([]uint64, len(bA.Elems)) - copy(c, bA.Elems) - return &BitArray{ - Bits: bA.Bits, - Elems: c, - } -} - -func (bA *BitArray) copyBits(bits int) *BitArray { - c := make([]uint64, (bits+63)/64) - copy(c, bA.Elems) - return &BitArray{ - Bits: bits, - Elems: c, - } -} - -// Returns a BitArray of larger bits size. -func (bA *BitArray) Or(o *BitArray) *BitArray { - if bA == nil { - o.Copy() - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) - for i := 0; i < len(c.Elems); i++ { - c.Elems[i] |= o.Elems[i] - } - return c -} - -// Returns a BitArray of smaller bit size.
-func (bA *BitArray) And(o *BitArray) *BitArray { - if bA == nil { - return nil - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.and(o) -} - -func (bA *BitArray) and(o *BitArray) *BitArray { - c := bA.copyBits(MinInt(bA.Bits, o.Bits)) - for i := 0; i < len(c.Elems); i++ { - c.Elems[i] &= o.Elems[i] - } - return c -} - -func (bA *BitArray) Not() *BitArray { - if bA == nil { - return nil // Degenerate - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - c := bA.copy() - for i := 0; i < len(c.Elems); i++ { - c.Elems[i] = ^c.Elems[i] - } - return c -} - -func (bA *BitArray) Sub(o *BitArray) *BitArray { - if bA == nil { - return nil - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - if bA.Bits > o.Bits { - c := bA.copy() - for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^c.Elems[i] - } - i := len(o.Elems) - 1 - if i >= 0 { - for idx := i * 64; idx < o.Bits; idx++ { - // NOTE: each individual GetIndex() call to o is safe. - c.setIndex(idx, c.getIndex(idx) && !o.GetIndex(idx)) - } - } - return c - } else { - return bA.and(o.Not()) // Note degenerate case where o == nil - } -} - -func (bA *BitArray) IsFull() bool { - if bA == nil { - return true - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - - // Check all elements except the last - for _, elem := range bA.Elems[:len(bA.Elems)-1] { - if (^elem) != 0 { - return false - } - } - - // Check that the last element has (lastElemBits) 1's - lastElemBits := (bA.Bits+63)%64 + 1 - lastElem := bA.Elems[len(bA.Elems)-1] - return (lastElem+1)&((uint64(1)< 0 { - randBitStart := rand.Intn(64) - for j := 0; j < 64; j++ { - bitIdx := ((j + randBitStart) % 64) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } - } - PanicSanity("should not happen") - } - } else { - // Special case for last elem, to ignore straggler bits - elemBits := bA.Bits % 64 - if elemBits == 0 { - elemBits = 64 - } - randBitStart := rand.Intn(elemBits) - for j := 0; j < elemBits; j++ { - bitIdx := ((j + randBitStart) % elemBits) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } - } - } - } - return 0, false -} - -func (bA *BitArray) String() string { - if bA == nil { - return "nil-BitArray" - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.stringIndented("") -} - -func (bA *BitArray) StringIndented(indent string) string { - if bA == nil { - return "nil-BitArray" - } - bA.mtx.Lock() - defer bA.mtx.Unlock() - return bA.stringIndented(indent) -} - -func (bA *BitArray) stringIndented(indent string) string { - - lines := []string{} - bits := "" - for i := 0; i < bA.Bits; i++ { - if bA.getIndex(i) { - bits += "X" - } else { - bits += "_" - } - if i%100 == 99 { - lines = append(lines, bits) - bits = "" - } - if i%10 == 9 { - bits += " " - } - if i%50 == 49 { - bits += " " - } - } - if len(bits) > 0 { - lines = append(lines, bits) - } - return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array_test.go b/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array_test.go deleted file mode 100644 index 93274aab0..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/bit_array_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package common - -import ( - "testing" -) - -func randBitArray(bits int) (*BitArray, []byte) { - src := RandBytes((bits + 7) / 8) - bA := NewBitArray(bits) - for i := 0; i < len(src); i++ { - for j := 0; j < 8; j++ { - if i*8+j >= bits { - return bA, src - } - setBit := 
src[i]&(1<<uint(j)) > 0 - bA.SetIndex(i*8+j, setBit) - } - } - return bA, src -} - -func TestAnd(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.And(bA2) - - if bA3.Bits != 31 { - t.Error("Expected min bits", bA3.Bits) - } - if len(bA3.Elems) != len(bA2.Elems) { - t.Error("Expected min elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) && bA2.GetIndex(i) - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } - } -} - -func TestOr(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.Or(bA2) - - if bA3.Bits != 51 { - t.Error("Expected max bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected max elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) || bA2.GetIndex(i) - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } - } -} - -func TestSub1(t *testing.T) { - - bA1, _ := randBitArray(31) - bA2, _ := randBitArray(51) - bA3 := bA1.Sub(bA2) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } - } -} - -func TestSub2(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.Sub(bA2) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } - } -} - -func TestPickRandom(t *testing.T) { - for idx := 0; idx < 123; idx++ { - bA1 := NewBitArray(123) - bA1.SetIndex(idx, true) - index, ok := bA1.PickRandom() - if !ok { - t.Fatal("Expected to pick element but got none") - } - if index != idx { - t.Fatalf("Expected to pick element at %v but got wrong index", idx) - } - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/byteslice.go b/Godeps/_workspace/src/github.com/tendermint/go-common/byteslice.go deleted file mode 100644 index be828f065..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/byteslice.go +++ /dev/null @@ -1,44 +0,0 @@ -package common - -import ( - "bytes" -) - -func Fingerprint(slice []byte) []byte { - fingerprint := make([]byte, 6) - copy(fingerprint, slice) - return fingerprint -} - -func IsZeros(slice []byte) bool { - for _, byt := range slice { - if byt != byte(0) { - return false - } - } - return true -} - -func RightPadBytes(slice []byte, l int) []byte { - if l < len(slice) { - return slice - } - padded := make([]byte, l) - copy(padded[0:len(slice)], slice) - return padded -} - -func LeftPadBytes(slice []byte, l int) []byte { - if l < len(slice) { - return slice - } - padded := make([]byte, l) - copy(padded[l-len(slice):], slice) - return padded -} - -func TrimmedString(b []byte) string { - trimSet := string([]byte{0}) - return string(bytes.TrimLeft(b, trimSet)) - -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/cmap.go b/Godeps/_workspace/src/github.com/tendermint/go-common/cmap.go deleted file mode
100644 index 5de6fa2fa..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/cmap.go +++ /dev/null @@ -1,62 +0,0 @@ -package common - -import "sync" - -// CMap is a goroutine-safe map -type CMap struct { - m map[string]interface{} - l sync.Mutex -} - -func NewCMap() *CMap { - return &CMap{ - m: make(map[string]interface{}, 0), - } -} - -func (cm *CMap) Set(key string, value interface{}) { - cm.l.Lock() - defer cm.l.Unlock() - cm.m[key] = value -} - -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() - defer cm.l.Unlock() - return cm.m[key] -} - -func (cm *CMap) Has(key string) bool { - cm.l.Lock() - defer cm.l.Unlock() - _, ok := cm.m[key] - return ok -} - -func (cm *CMap) Delete(key string) { - cm.l.Lock() - defer cm.l.Unlock() - delete(cm.m, key) -} - -func (cm *CMap) Size() int { - cm.l.Lock() - defer cm.l.Unlock() - return len(cm.m) -} - -func (cm *CMap) Clear() { - cm.l.Lock() - defer cm.l.Unlock() - cm.m = make(map[string]interface{}, 0) -} - -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - defer cm.l.Unlock() - items := []interface{}{} - for _, v := range cm.m { - items = append(items, v) - } - return items -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/colors.go b/Godeps/_workspace/src/github.com/tendermint/go-common/colors.go deleted file mode 100644 index 776b22e2e..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/colors.go +++ /dev/null @@ -1,84 +0,0 @@ -package common - -import ( - "fmt" - "strings" -) - -const ( - ANSIReset = "\x1b[0m" - ANSIBright = "\x1b[1m" - ANSIDim = "\x1b[2m" - ANSIUnderscore = "\x1b[4m" - ANSIBlink = "\x1b[5m" - ANSIReverse = "\x1b[7m" - ANSIHidden = "\x1b[8m" - - ANSIFgBlack = "\x1b[30m" - ANSIFgRed = "\x1b[31m" - ANSIFgGreen = "\x1b[32m" - ANSIFgYellow = "\x1b[33m" - ANSIFgBlue = "\x1b[34m" - ANSIFgMagenta = "\x1b[35m" - ANSIFgCyan = "\x1b[36m" - ANSIFgWhite = "\x1b[37m" - - ANSIBgBlack = "\x1b[40m" - ANSIBgRed = "\x1b[41m" - ANSIBgGreen = "\x1b[42m" - ANSIBgYellow = "\x1b[43m" - ANSIBgBlue = "\x1b[44m" - ANSIBgMagenta = "\x1b[45m" - ANSIBgCyan = "\x1b[46m" - ANSIBgWhite = "\x1b[47m" -) - -// color the string s with color 'color' -// unless s is already colored -func treat(s string, color string) string { - if len(s) > 2 && s[:2] == "\x1b[" { - return s - } else { - return color + s + ANSIReset - } -} - -func treatAll(color string, args ...interface{}) string { - var parts []string - for _, arg := range args { - parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) - } - return strings.Join(parts, "") -} - -func Black(args ...interface{}) string { - return treatAll(ANSIFgBlack, args...) -} - -func Red(args ...interface{}) string { - return treatAll(ANSIFgRed, args...) -} - -func Green(args ...interface{}) string { - return treatAll(ANSIFgGreen, args...) -} - -func Yellow(args ...interface{}) string { - return treatAll(ANSIFgYellow, args...) -} - -func Blue(args ...interface{}) string { - return treatAll(ANSIFgBlue, args...) -} - -func Magenta(args ...interface{}) string { - return treatAll(ANSIFgMagenta, args...) -} - -func Cyan(args ...interface{}) string { - return treatAll(ANSIFgCyan, args...) -} - -func White(args ...interface{}) string { - return treatAll(ANSIFgWhite, args...) 
-} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/errors.go b/Godeps/_workspace/src/github.com/tendermint/go-common/errors.go deleted file mode 100644 index e168a75b7..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/errors.go +++ /dev/null @@ -1,45 +0,0 @@ -package common - -import ( - "fmt" -) - -type StackError struct { - Err interface{} - Stack []byte -} - -func (se StackError) String() string { - return fmt.Sprintf("Error: %v\nStack: %s", se.Err, se.Stack) -} - -func (se StackError) Error() string { - return se.String() -} - -//-------------------------------------------------------------------------------------------------- -// panic wrappers - -// A panic resulting from a sanity check means there is a programmer error -// and some gaurantee is not satisfied. -func PanicSanity(v interface{}) { - panic(Fmt("Paniced on a Sanity Check: %v", v)) -} - -// A panic here means something has gone horribly wrong, in the form of data corruption or -// failure of the operating system. In a correct/healthy system, these should never fire. -// If they do, it's indicative of a much more serious problem. -func PanicCrisis(v interface{}) { - panic(Fmt("Paniced on a Crisis: %v", v)) -} - -// Indicates a failure of consensus. Someone was malicious or something has -// gone horribly wrong. These should really boot us into an "emergency-recover" mode -func PanicConsensus(v interface{}) { - panic(Fmt("Paniced on a Consensus Failure: %v", v)) -} - -// For those times when we're not sure if we should panic -func PanicQ(v interface{}) { - panic(Fmt("Paniced questionably: %v", v)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/heap.go b/Godeps/_workspace/src/github.com/tendermint/go-common/heap.go deleted file mode 100644 index 4a96d7aaa..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/heap.go +++ /dev/null @@ -1,103 +0,0 @@ -package common - -import ( - "container/heap" -) - -type Comparable interface { - Less(o interface{}) bool -} - -//----------------------------------------------------------------------------- - -/* -Example usage: - h := NewHeap() - - h.Push(String("msg1"), 1) - h.Push(String("msg3"), 3) - h.Push(String("msg2"), 2) - - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) -*/ - -type Heap struct { - pq priorityQueue -} - -func NewHeap() *Heap { - return &Heap{pq: make([]*pqItem, 0)} -} - -func (h *Heap) Len() int64 { - return int64(len(h.pq)) -} - -func (h *Heap) Push(value interface{}, priority Comparable) { - heap.Push(&h.pq, &pqItem{value: value, priority: priority}) -} - -func (h *Heap) Peek() interface{} { - if len(h.pq) == 0 { - return nil - } - return h.pq[0].value -} - -func (h *Heap) Update(value interface{}, priority Comparable) { - h.pq.Update(h.pq[0], value, priority) -} - -func (h *Heap) Pop() interface{} { - item := heap.Pop(&h.pq).(*pqItem) - return item.value -} - -//----------------------------------------------------------------------------- - -/////////////////////// -// From: http://golang.org/pkg/container/heap/#example__priorityQueue - -type pqItem struct { - value interface{} - priority Comparable - index int -} - -type priorityQueue []*pqItem - -func (pq priorityQueue) Len() int { return len(pq) } - -func (pq priorityQueue) Less(i, j int) bool { - return pq[i].priority.Less(pq[j].priority) -} - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *priorityQueue) Push(x interface{}) { - n := 
len(*pq) - item := x.(*pqItem) - item.index = n - *pq = append(*pq, item) -} - -func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - item.index = -1 // for safety - *pq = old[0 : n-1] - return item -} - -func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Comparable) { - item.value = value - item.priority = priority - heap.Fix(pq, item.index) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/int.go b/Godeps/_workspace/src/github.com/tendermint/go-common/int.go deleted file mode 100644 index 50e86a072..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/int.go +++ /dev/null @@ -1,55 +0,0 @@ -package common - -import ( - "encoding/binary" - "sort" -) - -// Sort for []uint64 - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Uint64Slice) Sort() { sort.Sort(p) } - -func SearchUint64s(a []uint64, x uint64) int { - return sort.Search(len(a), func(i int) bool { return a[i] >= x }) -} - -func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } - -//----------------------------------------------------------------------------- - -func PutUint64LE(dest []byte, i uint64) { - binary.LittleEndian.PutUint64(dest, i) -} - -func GetUint64LE(src []byte) uint64 { - return binary.LittleEndian.Uint64(src) -} - -func PutUint64BE(dest []byte, i uint64) { - binary.BigEndian.PutUint64(dest, i) -} - -func GetUint64BE(src []byte) uint64 { - return binary.BigEndian.Uint64(src) -} - -func PutInt64LE(dest []byte, i int64) { - binary.LittleEndian.PutUint64(dest, uint64(i)) -} - -func GetInt64LE(src []byte) int64 { - return int64(binary.LittleEndian.Uint64(src)) -} - -func PutInt64BE(dest []byte, i int64) { - binary.BigEndian.PutUint64(dest, uint64(i)) -} - -func GetInt64BE(src []byte) int64 { - return int64(binary.BigEndian.Uint64(src)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/io.go b/Godeps/_workspace/src/github.com/tendermint/go-common/io.go deleted file mode 100644 index 378c19fc6..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/io.go +++ /dev/null @@ -1,75 +0,0 @@ -package common - -import ( - "bytes" - "errors" - "io" -) - -type PrefixedReader struct { - Prefix []byte - reader io.Reader -} - -func NewPrefixedReader(prefix []byte, reader io.Reader) *PrefixedReader { - return &PrefixedReader{prefix, reader} -} - -func (pr *PrefixedReader) Read(p []byte) (n int, err error) { - if len(pr.Prefix) > 0 { - read := copy(p, pr.Prefix) - pr.Prefix = pr.Prefix[read:] - return read, nil - } else { - return pr.reader.Read(p) - } -} - -// NOTE: Not goroutine safe -type BufferCloser struct { - bytes.Buffer - Closed bool -} - -func NewBufferCloser(buf []byte) *BufferCloser { - return &BufferCloser{ - *bytes.NewBuffer(buf), - false, - } -} - -func (bc *BufferCloser) Close() error { - if bc.Closed { - return errors.New("BufferCloser already closed") - } - bc.Closed = true - return nil -} - -func (bc *BufferCloser) Write(p []byte) (n int, err error) { - if bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.Write(p) -} - -func (bc *BufferCloser) WriteByte(c byte) error { - if bc.Closed { - return errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteByte(c) -} - -func (bc *BufferCloser) WriteRune(r rune) (n int, err error) { - if 
bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteRune(r) -} - -func (bc *BufferCloser) WriteString(s string) (n int, err error) { - if bc.Closed { - return 0, errors.New("Cannot write to closed BufferCloser") - } - return bc.Buffer.WriteString(s) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/math.go b/Godeps/_workspace/src/github.com/tendermint/go-common/math.go deleted file mode 100644 index b037d1a71..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/math.go +++ /dev/null @@ -1,157 +0,0 @@ -package common - -func MaxInt8(a, b int8) int8 { - if a > b { - return a - } - return b -} - -func MaxUint8(a, b uint8) uint8 { - if a > b { - return a - } - return b -} - -func MaxInt16(a, b int16) int16 { - if a > b { - return a - } - return b -} - -func MaxUint16(a, b uint16) uint16 { - if a > b { - return a - } - return b -} - -func MaxInt32(a, b int32) int32 { - if a > b { - return a - } - return b -} - -func MaxUint32(a, b uint32) uint32 { - if a > b { - return a - } - return b -} - -func MaxInt64(a, b int64) int64 { - if a > b { - return a - } - return b -} - -func MaxUint64(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func MaxInt(a, b int) int { - if a > b { - return a - } - return b -} - -func MaxUint(a, b uint) uint { - if a > b { - return a - } - return b -} - -//----------------------------------------------------------------------------- - -func MinInt8(a, b int8) int8 { - if a < b { - return a - } - return b -} - -func MinUint8(a, b uint8) uint8 { - if a < b { - return a - } - return b -} - -func MinInt16(a, b int16) int16 { - if a < b { - return a - } - return b -} - -func MinUint16(a, b uint16) uint16 { - if a < b { - return a - } - return b -} - -func MinInt32(a, b int32) int32 { - if a < b { - return a - } - return b -} - -func MinUint32(a, b uint32) uint32 { - if a < b { - return a - } - return b -} - -func MinInt64(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func MinUint64(a, b uint64) uint64 { - if a < b { - return a - } - return b -} - -func MinInt(a, b int) int { - if a < b { - return a - } - return b -} - -func MinUint(a, b uint) uint { - if a < b { - return a - } - return b -} - -//----------------------------------------------------------------------------- - -func ExpUint64(a, b uint64) uint64 { - accum := uint64(1) - for b > 0 { - if b&1 == 1 { - accum *= a - } - a *= a - b >>= 1 - } - return accum -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/os.go b/Godeps/_workspace/src/github.com/tendermint/go-common/os.go deleted file mode 100644 index 170c6f82a..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/os.go +++ /dev/null @@ -1,225 +0,0 @@ -package common - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "os/signal" - "strings" - "sync" - "time" -) - -var ( - GoPath = os.Getenv("GOPATH") -) - -func TrapSignal(cb func()) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - signal.Notify(c, os.Kill) - go func() { - for sig := range c { - fmt.Printf("captured %v, exiting...\n", sig) - if cb != nil { - cb() - } - os.Exit(1) - } - }() - select {} -} - -func Exit(s string) { - fmt.Printf(s + "\n") - os.Exit(1) -} - -func EnsureDir(dir string) error { - if _, err := os.Stat(dir); os.IsNotExist(err) { - err := os.MkdirAll(dir, 0700) - if err != nil { - return fmt.Errorf("Could not create directory %v. 
%v", dir, err) - } - } - return nil -} - -func FileExists(filePath string) bool { - _, err := os.Stat(filePath) - return !os.IsNotExist(err) -} - -func ReadFile(filePath string) ([]byte, error) { - return ioutil.ReadFile(filePath) -} - -func MustReadFile(filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) - if err != nil { - Exit(Fmt("MustReadFile failed: %v", err)) - return nil - } - return fileBytes -} - -func WriteFile(filePath string, contents []byte) error { - err := ioutil.WriteFile(filePath, contents, 0600) - if err != nil { - return err - } - // fmt.Printf("File written to %v.\n", filePath) - return nil -} - -func MustWriteFile(filePath string, contents []byte) { - err := WriteFile(filePath, contents) - if err != nil { - Exit(Fmt("MustWriteFile failed: %v", err)) - } -} - -// Writes to newBytes to filePath. -// Guaranteed not to lose *both* oldBytes and newBytes, -// (assuming that the OS is perfect) -func WriteFileAtomic(filePath string, newBytes []byte) error { - // If a file already exists there, copy to filePath+".bak" (overwrite anything) - if _, err := os.Stat(filePath); !os.IsNotExist(err) { - fileBytes, err := ioutil.ReadFile(filePath) - if err != nil { - return fmt.Errorf("Could not read file %v. %v", filePath, err) - } - err = ioutil.WriteFile(filePath+".bak", fileBytes, 0600) - if err != nil { - return fmt.Errorf("Could not write file %v. %v", filePath+".bak", err) - } - } - // Write newBytes to filePath.new - err := ioutil.WriteFile(filePath+".new", newBytes, 0600) - if err != nil { - return fmt.Errorf("Could not write file %v. %v", filePath+".new", err) - } - // Move filePath.new to filePath - err = os.Rename(filePath+".new", filePath) - return err -} - -//-------------------------------------------------------------------------------- - -/* AutoFile usage - -// Create/Append to ./autofile_test -af, err := OpenAutoFile("autofile_test") -if err != nil { - panic(err) -} - -// Stream of writes. -// During this time, the file may be moved e.g. by logRotate. -for i := 0; i < 60; i++ { - af.Write([]byte(Fmt("LOOP(%v)", i))) - time.Sleep(time.Second) -} - -// Close the AutoFile -err = af.Close() -if err != nil { - panic(err) -} -*/ - -const autoFileOpenDuration = 1000 * time.Millisecond - -// Automatically closes and re-opens file for writing. -// This is useful for using a log file with the logrotate tool. -type AutoFile struct { - Path string - ticker *time.Ticker - mtx sync.Mutex - file *os.File -} - -func OpenAutoFile(path string) (af *AutoFile, err error) { - af = &AutoFile{ - Path: path, - ticker: time.NewTicker(autoFileOpenDuration), - } - if err = af.openFile(); err != nil { - return - } - go af.processTicks() - return -} - -func (af *AutoFile) Close() error { - af.ticker.Stop() - af.mtx.Lock() - err := af.closeFile() - af.mtx.Unlock() - return err -} - -func (af *AutoFile) processTicks() { - for { - _, ok := <-af.ticker.C - if !ok { - return // Done. 
- } - af.mtx.Lock() - af.closeFile() - af.mtx.Unlock() - } -} - -func (af *AutoFile) closeFile() (err error) { - file := af.file - if file == nil { - return nil - } - af.file = nil - return file.Close() -} - -func (af *AutoFile) Write(b []byte) (n int, err error) { - af.mtx.Lock() - defer af.mtx.Unlock() - if af.file == nil { - if err = af.openFile(); err != nil { - return - } - } - return af.file.Write(b) -} - -func (af *AutoFile) openFile() error { - file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - return err - } - af.file = file - return nil -} - -func Tempfile(prefix string) (*os.File, string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - PanicCrisis(err) - } - return file, file.Name() -} - -func Prompt(prompt string, defaultValue string) (string, error) { - fmt.Print(prompt) - reader := bufio.NewReader(os.Stdin) - line, err := reader.ReadString('\n') - if err != nil { - return defaultValue, err - } else { - line = strings.TrimSpace(line) - if line == "" { - return defaultValue, nil - } - return line, nil - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/random.go b/Godeps/_workspace/src/github.com/tendermint/go-common/random.go deleted file mode 100644 index 645601154..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/random.go +++ /dev/null @@ -1,145 +0,0 @@ -package common - -import ( - crand "crypto/rand" - "encoding/hex" - "math/rand" - "time" -) - -const ( - strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters -) - -func init() { - // Seed math/rand with "secure" int64 - b := CRandBytes(8) - var seed uint64 - for i := 0; i < 8; i++ { - seed |= uint64(b[i]) - seed <<= 8 - } - rand.Seed(int64(seed)) -} - -// Constructs an alphanumeric string of given length. 
-func RandStr(length int) string { - chars := []byte{} -MAIN_LOOP: - for { - val := rand.Int63() - for i := 0; i < 10; i++ { - v := int(val & 0x3f) // rightmost 6 bits - if v >= 62 { // only 62 characters in strChars - val >>= 6 - continue - } else { - chars = append(chars, strChars[v]) - if len(chars) == length { - break MAIN_LOOP - } - val >>= 6 - } - } - } - - return string(chars) -} - -func RandUint16() uint16 { - return uint16(rand.Uint32() & (1<<16 - 1)) -} - -func RandUint32() uint32 { - return rand.Uint32() -} - -func RandUint64() uint64 { - return uint64(rand.Uint32())<<32 + uint64(rand.Uint32()) -} - -func RandUint() uint { - return uint(rand.Int()) -} - -func RandInt16() int16 { - return int16(rand.Uint32() & (1<<16 - 1)) -} - -func RandInt32() int32 { - return int32(rand.Uint32()) -} - -func RandInt64() int64 { - return int64(rand.Uint32())<<32 + int64(rand.Uint32()) -} - -func RandInt() int { - return rand.Int() -} - -// Distributed pseudo-exponentially to test for various cases -func RandUint16Exp() uint16 { - bits := rand.Uint32() % 16 - if bits == 0 { - return 0 - } - n := uint16(1 << (bits - 1)) - n += uint16(rand.Int31()) & ((1 << (bits - 1)) - 1) - return n -} - -// Distributed pseudo-exponentially to test for various cases -func RandUint32Exp() uint32 { - bits := rand.Uint32() % 32 - if bits == 0 { - return 0 - } - n := uint32(1 << (bits - 1)) - n += uint32(rand.Int31()) & ((1 << (bits - 1)) - 1) - return n -} - -// Distributed pseudo-exponentially to test for various cases -func RandUint64Exp() uint64 { - bits := rand.Uint32() % 64 - if bits == 0 { - return 0 - } - n := uint64(1 << (bits - 1)) - n += uint64(rand.Int63()) & ((1 << (bits - 1)) - 1) - return n -} - -func RandFloat32() float32 { - return rand.Float32() -} - -func RandTime() time.Time { - return time.Unix(int64(RandUint64Exp()), 0) -} - -func RandBytes(n int) []byte { - bs := make([]byte, n) - for i := 0; i < n; i++ { - bs[i] = byte(rand.Intn(256)) - } - return bs -} - -//----------------------------------------------------------------------------- -// CRand* methods are crypto safe. - -func CRandBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, err := crand.Read(b) - if err != nil { - PanicCrisis(err) - } - return b -} - -// RandHex(24) gives 96 bits of randomness, strong enough for most purposes. -func CRandHex(numDigits int) string { - return hex.EncodeToString(CRandBytes(numDigits / 2)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/repeat_timer.go b/Godeps/_workspace/src/github.com/tendermint/go-common/repeat_timer.go deleted file mode 100644 index e2aa18ea8..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/repeat_timer.go +++ /dev/null @@ -1,72 +0,0 @@ -package common - -import "time" -import "sync" - -/* -RepeatTimer repeatedly sends a struct{}{} to .Ch after each "dur" period. -It's good for keeping connections alive. -A RepeatTimer must be Stop()'d or it will keep a goroutine alive. 
-*/ -type RepeatTimer struct { - Ch chan time.Time - - mtx sync.Mutex - name string - ticker *time.Ticker - quit chan struct{} - dur time.Duration -} - -func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - var t = &RepeatTimer{ - Ch: make(chan time.Time), - ticker: time.NewTicker(dur), - quit: make(chan struct{}), - name: name, - dur: dur, - } - go t.fireRoutine(t.ticker) - return t -} - -func (t *RepeatTimer) fireRoutine(ticker *time.Ticker) { - for { - select { - case t_ := <-ticker.C: - t.Ch <- t_ - case <-t.quit: - return - } - } -} - -// Wait the duration again before firing. -func (t *RepeatTimer) Reset() { - t.Stop() - - t.mtx.Lock() // Lock - defer t.mtx.Unlock() - - t.ticker = time.NewTicker(t.dur) - t.quit = make(chan struct{}) - go t.fireRoutine(t.ticker) -} - -// For ease of .Stop()'ing services before .Start()'ing them, -// we ignore .Stop()'s on nil RepeatTimers. -func (t *RepeatTimer) Stop() bool { - if t == nil { - return false - } - t.mtx.Lock() // Lock - defer t.mtx.Unlock() - - exists := t.ticker != nil - if exists { - t.ticker.Stop() - t.ticker = nil - close(t.quit) - } - return exists -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/service.go b/Godeps/_workspace/src/github.com/tendermint/go-common/service.go deleted file mode 100644 index ca923b1d5..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/service.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - -Classical-inheritance-style service declarations. -Services can be started, then stopped. -Users can override the OnStart/OnStop methods. -These methods are guaranteed to be called at most once. -Caller must ensure that Start() and Stop() are not called concurrently. -It is ok to call Stop() without calling Start() first. -Services cannot be re-started unless otherwise documented. - -Typical usage: - -type FooService struct { - BaseService - // private fields -} - -func NewFooService() *FooService { - fs := &FooService{ - // init - } - fs.BaseService = *NewBaseService(log, "FooService", fs) - return fs -} - -func (fs *FooService) OnStart() error { - fs.BaseService.OnStart() // Always call the overridden method. - // initialize private fields - // start subroutines, etc. -} - -func (fs *FooService) OnStop() error { - fs.BaseService.OnStop() // Always call the overridden method. - // close/destroy private fields - // stop subroutines, etc. 
-} - -*/ -package common - -import ( - "sync/atomic" - - "github.com/tendermint/log15" -) - -type Service interface { - Start() (bool, error) - OnStart() error - - Stop() bool - OnStop() - - IsRunning() bool - - String() string -} - -type BaseService struct { - log log15.Logger - name string - started uint32 // atomic - stopped uint32 // atomic - - // The "subclass" of BaseService - impl Service -} - -func NewBaseService(log log15.Logger, name string, impl Service) *BaseService { - return &BaseService{ - log: log, - name: name, - impl: impl, - } -} - -// Implements Servce -func (bs *BaseService) Start() (bool, error) { - if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { - if atomic.LoadUint32(&bs.stopped) == 1 { - if bs.log != nil { - bs.log.Warn(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) - } - return false, nil - } else { - if bs.log != nil { - bs.log.Notice(Fmt("Starting %v", bs.name), "impl", bs.impl) - } - } - err := bs.impl.OnStart() - return true, err - } else { - if bs.log != nil { - bs.log.Info(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) - } - return false, nil - } -} - -// Implements Service -func (bs *BaseService) OnStart() error { return nil } - -// Implements Service -func (bs *BaseService) Stop() bool { - if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - if bs.log != nil { - bs.log.Notice(Fmt("Stopping %v", bs.name), "impl", bs.impl) - } - bs.impl.OnStop() - return true - } else { - if bs.log != nil { - bs.log.Notice(Fmt("Not stopping %v", bs.name), "impl", bs.impl) - } - return false - } -} - -// Implements Service -func (bs *BaseService) OnStop() {} - -// Implements Service -func (bs *BaseService) IsRunning() bool { - return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 -} - -// Implements Servce -func (bs *BaseService) String() string { - return bs.name -} - -//---------------------------------------- - -type QuitService struct { - BaseService - Quit chan struct{} -} - -func NewQuitService(log log15.Logger, name string, impl Service) *QuitService { - return &QuitService{ - BaseService: *NewBaseService(log, name, impl), - Quit: nil, - } -} - -// NOTE: when overriding OnStart, must call .QuitService.OnStart(). -func (qs *QuitService) OnStart() error { - qs.Quit = make(chan struct{}) - return nil -} - -// NOTE: when overriding OnStop, must call .QuitService.OnStop(). 
-func (qs *QuitService) OnStop() { - if qs.Quit != nil { - close(qs.Quit) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/string.go b/Godeps/_workspace/src/github.com/tendermint/go-common/string.go deleted file mode 100644 index a4d221b74..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/string.go +++ /dev/null @@ -1,24 +0,0 @@ -package common - -import ( - "fmt" - "strings" -) - -var Fmt = fmt.Sprintf - -func RightPadString(s string, totalLength int) string { - remaining := totalLength - len(s) - if remaining > 0 { - s = s + strings.Repeat(" ", remaining) - } - return s -} - -func LeftPadString(s string, totalLength int) string { - remaining := totalLength - len(s) - if remaining > 0 { - s = strings.Repeat(" ", remaining) + s - } - return s -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/test/assert.go b/Godeps/_workspace/src/github.com/tendermint/go-common/test/assert.go deleted file mode 100644 index a6ffed0ce..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/test/assert.go +++ /dev/null @@ -1,14 +0,0 @@ -package test - -import ( - "testing" -) - -func AssertPanics(t *testing.T, msg string, f func()) { - defer func() { - if err := recover(); err == nil { - t.Errorf("Should have panic'd, but didn't: %v", msg) - } - }() - f() -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/test/mutate.go b/Godeps/_workspace/src/github.com/tendermint/go-common/test/mutate.go deleted file mode 100644 index 629e9f865..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/test/mutate.go +++ /dev/null @@ -1,28 +0,0 @@ -package test - -import ( - . "github.com/tendermint/go-common" -) - -// Contract: !bytes.Equal(input, output) && len(input) >= len(output) -func MutateByteSlice(bytez []byte) []byte { - // If bytez is empty, panic - if len(bytez) == 0 { - panic("Cannot mutate an empty bytez") - } - - // Copy bytez - mBytez := make([]byte, len(bytez)) - copy(mBytez, bytez) - bytez = mBytez - - // Try a random mutation - switch RandInt() % 2 { - case 0: // Mutate a single byte - bytez[RandInt()%len(bytez)] += byte(RandInt()%255 + 1) - case 1: // Remove an arbitrary byte - pos := RandInt() % len(bytez) - bytez = append(bytez[:pos], bytez[pos+1:]...) - } - return bytez -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/throttle_timer.go b/Godeps/_workspace/src/github.com/tendermint/go-common/throttle_timer.go deleted file mode 100644 index 0b40a60c2..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/throttle_timer.go +++ /dev/null @@ -1,57 +0,0 @@ -package common - -import ( - "sync/atomic" - "time" -) - -/* -ThrottleTimer fires an event at most "dur" after each .Set() call. -If a short burst of .Set() calls happens, ThrottleTimer fires once. -If a long continuous burst of .Set() calls happens, ThrottleTimer fires -at most once every "dur". 
-*/ -type ThrottleTimer struct { - Name string - Ch chan struct{} - quit chan struct{} - dur time.Duration - timer *time.Timer - isSet uint32 -} - -func NewThrottleTimer(name string, dur time.Duration) *ThrottleTimer { - var ch = make(chan struct{}) - var quit = make(chan struct{}) - var t = &ThrottleTimer{Name: name, Ch: ch, dur: dur, quit: quit} - t.timer = time.AfterFunc(dur, t.fireRoutine) - t.timer.Stop() - return t -} - -func (t *ThrottleTimer) fireRoutine() { - select { - case t.Ch <- struct{}{}: - atomic.StoreUint32(&t.isSet, 0) - case <-t.quit: - // do nothing - default: - t.timer.Reset(t.dur) - } -} - -func (t *ThrottleTimer) Set() { - if atomic.CompareAndSwapUint32(&t.isSet, 0, 1) { - t.timer.Reset(t.dur) - } -} - -// For ease of .Stop()'ing services before .Start()'ing them, -// we ignore .Stop()'s on nil ThrottleTimers -func (t *ThrottleTimer) Stop() bool { - if t == nil { - return false - } - close(t.quit) - return t.timer.Stop() -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-common/word.go b/Godeps/_workspace/src/github.com/tendermint/go-common/word.go deleted file mode 100644 index 4072482b8..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-common/word.go +++ /dev/null @@ -1,91 +0,0 @@ -package common - -import ( - "bytes" - "sort" -) - -var ( - Zero256 = Word256{0} - One256 = Word256{1} -) - -type Word256 [32]byte - -func (w Word256) String() string { return string(w[:]) } -func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } -func (w Word256) Copy() Word256 { return w } -func (w Word256) Bytes() []byte { return w[:] } // copied. -func (w Word256) Prefix(n int) []byte { return w[:n] } -func (w Word256) Postfix(n int) []byte { return w[32-n:] } -func (w Word256) IsZero() bool { - accum := byte(0) - for _, byt := range w { - accum |= byt - } - return accum == 0 -} -func (w Word256) Compare(other Word256) int { - return bytes.Compare(w[:], other[:]) -} - -func Uint64ToWord256(i uint64) Word256 { - buf := [8]byte{} - PutUint64BE(buf[:], i) - return LeftPadWord256(buf[:]) -} - -func Int64ToWord256(i int64) Word256 { - buf := [8]byte{} - PutInt64BE(buf[:], i) - return LeftPadWord256(buf[:]) -} - -func RightPadWord256(bz []byte) (word Word256) { - copy(word[:], bz) - return -} - -func LeftPadWord256(bz []byte) (word Word256) { - copy(word[32-len(bz):], bz) - return -} - -func Uint64FromWord256(word Word256) uint64 { - buf := word.Postfix(8) - return GetUint64BE(buf) -} - -func Int64FromWord256(word Word256) int64 { - buf := word.Postfix(8) - return GetInt64BE(buf) -} - -//------------------------------------- - -type Tuple256 struct { - First Word256 - Second Word256 -} - -func (tuple Tuple256) Compare(other Tuple256) int { - firstCompare := tuple.First.Compare(other.First) - if firstCompare == 0 { - return tuple.Second.Compare(other.Second) - } else { - return firstCompare - } -} - -func Tuple256Split(t Tuple256) (Word256, Word256) { - return t.First, t.Second -} - -type Tuple256Slice []Tuple256 - -func (p Tuple256Slice) Len() int { return len(p) } -func (p Tuple256Slice) Less(i, j int) bool { - return p[i].Compare(p[j]) < 0 -} -func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Tuple256Slice) Sort() { sort.Sort(p) } diff --git a/Godeps/_workspace/src/github.com/tendermint/go-config/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-config/LICENSE.md deleted file mode 100644 index 5315ab520..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-config/LICENSE.md +++ /dev/null @@ -1,206 
+0,0 @@ -Tendermint Go-Config -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>. - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable.
Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. 
A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. 
-You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
-c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
- -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
- -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-config/config.go b/Godeps/_workspace/src/github.com/tendermint/go-config/config.go deleted file mode 100644 index abb081611..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-config/config.go +++ /dev/null @@ -1,116 +0,0 @@ -package config - -import ( - "github.com/naoina/toml" - "sync" - "time" - - . "github.com/tendermint/go-common" -) - -type Config interface { - Get(key string) interface{} - GetBool(key string) bool - GetFloat64(key string) float64 - GetInt(key string) int - GetString(key string) string - GetStringMap(key string) map[string]interface{} - GetStringMapString(key string) map[string]string - GetStringSlice(key string) []string - GetTime(key string) time.Time - IsSet(key string) bool - Set(key string, value interface{}) -} - -type MapConfig struct { - required map[string]struct{} // blows up if trying to use before setting. 
- data map[string]interface{} -} - -func ReadMapConfigFromFile(filePath string) (MapConfig, error) { - var configData = make(map[string]interface{}) - fileBytes := MustReadFile(filePath) - err := toml.Unmarshal(fileBytes, configData) - if err != nil { - return MapConfig{}, err - } - return NewMapConfig(configData), nil -} - -func NewMapConfig(data map[string]interface{}) MapConfig { - if data == nil { - data = make(map[string]interface{}) - } - return MapConfig{ - required: make(map[string]struct{}), - data: data, - } -} - -func (cfg MapConfig) Get(key string) interface{} { - if _, ok := cfg.required[key]; ok { - PanicSanity(Fmt("config key %v is required but was not set.", key)) - } - return cfg.data[key] -} -func (cfg MapConfig) GetBool(key string) bool { return cfg.Get(key).(bool) } -func (cfg MapConfig) GetFloat64(key string) float64 { return cfg.Get(key).(float64) } -func (cfg MapConfig) GetInt(key string) int { return cfg.Get(key).(int) } -func (cfg MapConfig) GetString(key string) string { return cfg.Get(key).(string) } -func (cfg MapConfig) GetStringMap(key string) map[string]interface{} { - return cfg.Get(key).(map[string]interface{}) -} -func (cfg MapConfig) GetStringMapString(key string) map[string]string { - return cfg.Get(key).(map[string]string) -} -func (cfg MapConfig) GetStringSlice(key string) []string { return cfg.Get(key).([]string) } -func (cfg MapConfig) GetTime(key string) time.Time { return cfg.Get(key).(time.Time) } -func (cfg MapConfig) IsSet(key string) bool { _, ok := cfg.data[key]; return ok } -func (cfg MapConfig) Set(key string, value interface{}) { - delete(cfg.required, key) - cfg.data[key] = value -} -func (cfg MapConfig) SetDefault(key string, value interface{}) { - delete(cfg.required, key) - if cfg.IsSet(key) { - return - } - cfg.data[key] = value -} -func (cfg MapConfig) SetRequired(key string) { - if cfg.IsSet(key) { - return - } - cfg.required[key] = struct{}{} -} - -//-------------------------------------------------------------------------------- -// A little convenient hack to notify listeners upon config changes. - -type Configurable func(Config) - -var mtx sync.Mutex -var globalConfig Config -var confs []Configurable - -func OnConfig(conf func(Config)) { - mtx.Lock() - defer mtx.Unlock() - - confs = append(confs, conf) - if globalConfig != nil { - conf(globalConfig) - } -} - -func ApplyConfig(config Config) { - mtx.Lock() - globalConfig = config - confsCopy := make([]Configurable, len(confs)) - copy(confsCopy, confs) - mtx.Unlock() - - for _, conf := range confsCopy { - conf(config) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-db/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-db/LICENSE.md deleted file mode 100644 index 25c3191e9..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-db/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-DB -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see <http://www.gnu.org/licenses/>.
- -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. 
- -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. 
-You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
- -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. 
-You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. 
- -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
- -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. 
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-db/config.go b/Godeps/_workspace/src/github.com/tendermint/go-db/config.go deleted file mode 100644 index da66c2158..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-db/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package db - -import ( - cfg "github.com/tendermint/go-config" -) - -var config cfg.Config = nil - -func init() { - cfg.OnConfig(func(newConfig cfg.Config) { - config = newConfig - }) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-db/db.go b/Godeps/_workspace/src/github.com/tendermint/go-db/db.go deleted file mode 100644 index 2d9c3d2b1..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-db/db.go +++ /dev/null @@ -1,50 +0,0 @@ -package db - -import ( - "path" - - . "github.com/tendermint/go-common" -) - -type DB interface { - Get([]byte) []byte - Set([]byte, []byte) - SetSync([]byte, []byte) - Delete([]byte) - DeleteSync([]byte) - Close() - - // For debugging - Print() -} - -//----------------------------------------------------------------------------- - -// Database types -const DBBackendMemDB = "memdb" -const DBBackendLevelDB = "leveldb" - -var dbs = NewCMap() - -func GetDB(name string) DB { - db := dbs.Get(name) - if db != nil { - return db.(DB) - } - switch config.GetString("db_backend") { - case DBBackendMemDB: - db := NewMemDB() - dbs.Set(name, db) - return db - case DBBackendLevelDB: - db, err := NewLevelDB(path.Join(config.GetString("db_dir"), name+".db")) - if err != nil { - PanicCrisis(err) - } - dbs.Set(name, db) - return db - default: - PanicSanity(Fmt("Unknown DB backend: %v", config.GetString("db_backend"))) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-db/level_db.go b/Godeps/_workspace/src/github.com/tendermint/go-db/level_db.go deleted file mode 100644 index dee57a321..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-db/level_db.go +++ /dev/null @@ -1,83 +0,0 @@ -package db - -import ( - "fmt" - "path" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/opt" - - . 
"github.com/tendermint/go-common" -) - -type LevelDB struct { - db *leveldb.DB -} - -func NewLevelDB(name string) (*LevelDB, error) { - dbPath := path.Join(name) - db, err := leveldb.OpenFile(dbPath, nil) - if err != nil { - return nil, err - } - database := &LevelDB{db: db} - return database, nil -} - -func (db *LevelDB) Get(key []byte) []byte { - res, err := db.db.Get(key, nil) - if err != nil { - if err == errors.ErrNotFound { - return nil - } else { - PanicCrisis(err) - } - } - return res -} - -func (db *LevelDB) Set(key []byte, value []byte) { - err := db.db.Put(key, value, nil) - if err != nil { - PanicCrisis(err) - } -} - -func (db *LevelDB) SetSync(key []byte, value []byte) { - err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) - if err != nil { - PanicCrisis(err) - } -} - -func (db *LevelDB) Delete(key []byte) { - err := db.db.Delete(key, nil) - if err != nil { - PanicCrisis(err) - } -} - -func (db *LevelDB) DeleteSync(key []byte) { - err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) - if err != nil { - PanicCrisis(err) - } -} - -func (db *LevelDB) DB() *leveldb.DB { - return db.db -} - -func (db *LevelDB) Close() { - db.db.Close() -} - -func (db *LevelDB) Print() { - iter := db.db.NewIterator(nil, nil) - for iter.Next() { - key := iter.Key() - value := iter.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-db/mem_db.go b/Godeps/_workspace/src/github.com/tendermint/go-db/mem_db.go deleted file mode 100644 index b7d8918d4..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-db/mem_db.go +++ /dev/null @@ -1,44 +0,0 @@ -package db - -import ( - "fmt" -) - -type MemDB struct { - db map[string][]byte -} - -func NewMemDB() *MemDB { - database := &MemDB{db: make(map[string][]byte)} - return database -} - -func (db *MemDB) Get(key []byte) []byte { - return db.db[string(key)] -} - -func (db *MemDB) Set(key []byte, value []byte) { - db.db[string(key)] = value -} - -func (db *MemDB) SetSync(key []byte, value []byte) { - db.db[string(key)] = value -} - -func (db *MemDB) Delete(key []byte) { - delete(db.db, string(key)) -} - -func (db *MemDB) DeleteSync(key []byte) { - delete(db.db, string(key)) -} - -func (db *MemDB) Close() { - db = nil -} - -func (db *MemDB) Print() { - for key, value := range db.db { - fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-logger/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-logger/LICENSE.md deleted file mode 100644 index ee1d394da..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-logger/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Logger -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. 
- -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. 
- -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. 
-You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
- -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. 
-You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. 
- -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
- -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. 
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-logger/config.go b/Godeps/_workspace/src/github.com/tendermint/go-logger/config.go deleted file mode 100644 index 4083152a0..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-logger/config.go +++ /dev/null @@ -1,14 +0,0 @@ -package logger - -import ( - cfg "github.com/tendermint/go-config" -) - -var config cfg.Config = nil - -func init() { - cfg.OnConfig(func(newConfig cfg.Config) { - config = newConfig - Reset() // reset log root upon config change. - }) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-logger/log.go b/Godeps/_workspace/src/github.com/tendermint/go-logger/log.go deleted file mode 100644 index e616d0ac8..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-logger/log.go +++ /dev/null @@ -1,64 +0,0 @@ -package logger - -import ( - "os" - - . "github.com/tendermint/go-common" - "github.com/tendermint/log15" -) - -var rootHandler log15.Handler - -func init() { - Reset() -} - -// You might want to call this after resetting tendermint/go-config. -func Reset() { - - var logLevel string = "debug" - if config != nil { - logLevel = config.GetString("log_level") - } - - // stdout handler - //handlers := []log15.Handler{} - stdoutHandler := log15.LvlFilterHandler( - getLevel(logLevel), - log15.StreamHandler(os.Stdout, log15.TerminalFormat()), - ) - //handlers = append(handlers, stdoutHandler) - - // Set rootHandler. - //rootHandler = log15.MultiHandler(handlers...) - rootHandler = stdoutHandler - - // By setting handlers on the root, we handle events from all loggers. - log15.Root().SetHandler(rootHandler) -} - -// See binary/log for an example of usage. -func RootHandler() log15.Handler { - return rootHandler -} - -func New(ctx ...interface{}) log15.Logger { - return log15.Root().New(ctx...) 
-} - -func getLevel(lvlString string) log15.Lvl { - lvl, err := log15.LvlFromString(lvlString) - if err != nil { - Exit(Fmt("Invalid log level %v: %v", lvlString, err)) - } - return lvl -} - -//---------------------------------------- -// Exported from log15 - -var LvlFilterHandler = log15.LvlFilterHandler -var LvlDebug = log15.LvlDebug -var LvlInfo = log15.LvlInfo -var LvlWarn = log15.LvlWarn -var LvlError = log15.LvlError diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-merkle/LICENSE.md deleted file mode 100644 index e6e48bfd0..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Merkle -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 
- -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. 
- -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
-No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. 
-You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. 
- -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
- -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
- -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/README.md b/Godeps/_workspace/src/github.com/tendermint/go-merkle/README.md deleted file mode 100644 index f7ae879f1..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/README.md +++ /dev/null @@ -1,18 +0,0 @@ -There are two types of merkle trees in this module. 
- -* IAVL+ Tree: A snapshottable (immutable) AVL+ tree for persistent data -* A simple merkle tree for static data - -## IAVL+ Tree - -The purpose of this data structure is to provide persistent storage for key-value pairs (say to store account balances) such that a deterministic merkle root hash can be computed. The tree is balanced using a variant of the [AVL algorithm](http://en.wikipedia.org/wiki/AVL_tree) so all operations are O(log(n)). - -Nodes of this tree are immutable and indexed by their hash. Thus any node serves as an immutable snapshot which lets us stage uncommitted transactions from the mempool cheaply, and we can instantly roll back to the last committed state to process transactions of a newly committed block (which may not be the same set of transactions as those from the mempool). - -In an AVL tree, the heights of the two child subtrees of any node differ by at most one. Whenever this condition is violated upon an update, the tree is rebalanced by creating O(log(n)) new nodes that point to unmodified nodes of the old tree. In the original AVL algorithm, inner nodes can also hold key-value pairs. The AVL+ algorithm (note the plus) modifies the AVL algorithm to keep all values on leaf nodes, while only using branch-nodes to store keys. This simplifies the algorithm while keeping the merkle hash trail short. - -In Ethereum, the analog is [Patricia tries](http://en.wikipedia.org/wiki/Radix_tree). There are tradeoffs. Keys do not need to be hashed prior to insertion in IAVL+ trees, so this provides faster iteration in the key space which may benefit some applications. The logic is simpler to implement, requiring only two types of nodes -- inner nodes and leaf nodes. On the other hand, while IAVL+ trees provide a deterministic merkle root hash, the root hash depends on the order of transactions. In practice this shouldn't be a problem, since you can efficiently encode the tree structure when serializing the tree contents. - -## Simple Merkle Tree - -For smaller static data structures that don't require immutable snapshots or mutability, use the functions provided in `simple_tree.go`. The transactions and validation signatures of a block are hashed using this simple merkle tree logic. diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_node.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_node.go deleted file mode 100644 index 5b769ffb5..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_node.go +++ /dev/null @@ -1,459 +0,0 @@ -package merkle - -import ( - "bytes" - "code.google.com/p/go.crypto/ripemd160" - "io" - - . "github.com/tendermint/go-common" - "github.com/tendermint/go-wire" -) - -// Node - -type IAVLNode struct { - key interface{} - value interface{} - height int8 - size int - hash []byte - leftHash []byte - leftNode *IAVLNode - rightHash []byte - rightNode *IAVLNode - persisted bool -} - -func NewIAVLNode(key interface{}, value interface{}) *IAVLNode { - return &IAVLNode{ - key: key, - value: value, - height: 0, - size: 1, - } -} - -// NOTE: The hash is not saved or set. The caller should set the hash afterwards.
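To connect the go-merkle README above with the API in the deleted files that follow, here is a minimal usage sketch. It is an editorial illustration, not part of the original package: it assumes the vendored import paths shown in this tree, `wire.BasicCodec` for string keys and values, and go-db's in-memory database, all as exercised in `iavl_test.go` further below.

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/go-db"
	merkle "github.com/tendermint/go-merkle"
	"github.com/tendermint/go-wire"
)

func main() {
	// A persistent IAVL+ tree backed by an in-memory DB, caching up to 100 nodes.
	db := dbm.NewMemDB()
	tree := merkle.NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 100, db)

	// Set returns true only when an existing key was overwritten.
	updated := tree.Set("alice", "10")
	fmt.Println("updated:", updated) // false: "alice" was newly inserted

	_, value := tree.Get("alice")
	fmt.Println("value:", value) // "10"

	// Save persists all dirty nodes and returns the new root hash;
	// the same hash can later be passed to Load to reopen the tree.
	rootHash := tree.Save()

	tree2 := merkle.NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 100, db)
	tree2.Load(rootHash)
	fmt.Printf("reloaded root: %X\n", tree2.Hash())
}
```

Because every node is immutable, `rootHash` doubles as a cheap snapshot identifier: loading it later reproduces exactly the committed state, which is the rollback behavior the README describes.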
-// (Presumably the caller already has the hash) -func ReadIAVLNode(t *IAVLTree, r io.Reader, n *int64, err *error) *IAVLNode { - node := &IAVLNode{} - - // node header - node.height = wire.ReadInt8(r, n, err) - node.size = wire.ReadVarint(r, n, err) - node.key = decodeByteSlice(t.keyCodec, r, n, err) - - if node.height == 0 { - // value - node.value = decodeByteSlice(t.valueCodec, r, n, err) - } else { - // children - node.leftHash = wire.ReadByteSlice(r, n, err) - node.rightHash = wire.ReadByteSlice(r, n, err) - } - return node -} - -func (node *IAVLNode) _copy() *IAVLNode { - if node.height == 0 { - PanicSanity("Why are you copying a value node?") - } - return &IAVLNode{ - key: node.key, - height: node.height, - size: node.size, - hash: nil, // Going to be mutated anyways. - leftHash: node.leftHash, - leftNode: node.leftNode, - rightHash: node.rightHash, - rightNode: node.rightNode, - persisted: false, // Going to be mutated, so it can't already be persisted. - } -} - -func (node *IAVLNode) has(t *IAVLTree, key interface{}) (has bool) { - if t.keyCodec.Compare(node.key, key) == 0 { - return true - } - if node.height == 0 { - return false - } else { - if t.keyCodec.Compare(key, node.key) < 0 { - return node.getLeftNode(t).has(t, key) - } else { - return node.getRightNode(t).has(t, key) - } - } -} - -func (node *IAVLNode) get(t *IAVLTree, key interface{}) (index int, value interface{}) { - if node.height == 0 { - if t.keyCodec.Compare(node.key, key) == 0 { - return 0, node.value - } else { - return 0, nil - } - } else { - if t.keyCodec.Compare(key, node.key) < 0 { - return node.getLeftNode(t).get(t, key) - } else { - rightNode := node.getRightNode(t) - index, value = rightNode.get(t, key) - index += node.size - rightNode.size - return index, value - } - } -} - -func (node *IAVLNode) getByIndex(t *IAVLTree, index int) (key interface{}, value interface{}) { - if node.height == 0 { - if index == 0 { - return node.key, node.value - } else { - PanicSanity("getByIndex asked for invalid index") - return nil, nil - } - } else { - // TODO: could improve this by storing the - // sizes as well as left/right hash. 
- leftNode := node.getLeftNode(t) - if index < leftNode.size { - return leftNode.getByIndex(t, index) - } else { - return node.getRightNode(t).getByIndex(t, index-leftNode.size) - } - } -} - -// NOTE: sets hashes recursively -func (node *IAVLNode) hashWithCount(t *IAVLTree) ([]byte, int) { - if node.hash != nil { - return node.hash, 0 - } - - hasher := ripemd160.New() - buf := new(bytes.Buffer) - _, hashCount, err := node.writeHashBytes(t, buf) - if err != nil { - PanicCrisis(err) - } - // fmt.Printf("Wrote IAVL hash bytes: %X\n", buf.Bytes()) - hasher.Write(buf.Bytes()) - node.hash = hasher.Sum(nil) - // fmt.Printf("Write IAVL hash: %X\n", node.hash) - - return node.hash, hashCount + 1 -} - -// NOTE: sets hashes recursively -func (node *IAVLNode) writeHashBytes(t *IAVLTree, w io.Writer) (n int64, hashCount int, err error) { - // height & size - wire.WriteInt8(node.height, w, &n, &err) - wire.WriteVarint(node.size, w, &n, &err) - // key is not written for inner nodes, unlike writePersistBytes - - if node.height == 0 { - // key & value - encodeByteSlice(node.key, t.keyCodec, w, &n, &err) - encodeByteSlice(node.value, t.valueCodec, w, &n, &err) - } else { - // left - if node.leftNode != nil { - leftHash, leftCount := node.leftNode.hashWithCount(t) - node.leftHash = leftHash - hashCount += leftCount - } - if node.leftHash == nil { - PanicSanity("node.leftHash was nil in writeHashBytes") - } - wire.WriteByteSlice(node.leftHash, w, &n, &err) - // right - if node.rightNode != nil { - rightHash, rightCount := node.rightNode.hashWithCount(t) - node.rightHash = rightHash - hashCount += rightCount - } - if node.rightHash == nil { - PanicSanity("node.rightHash was nil in writeHashBytes") - } - wire.WriteByteSlice(node.rightHash, w, &n, &err) - } - return -} - -// NOTE: sets hashes recursively -// NOTE: clears leftNode/rightNode recursively -func (node *IAVLNode) save(t *IAVLTree) []byte { - if node.hash == nil { - node.hash, _ = node.hashWithCount(t) - } - if node.persisted { - return node.hash - } - - // save children - if node.leftNode != nil { - node.leftHash = node.leftNode.save(t) - node.leftNode = nil - } - if node.rightNode != nil { - node.rightHash = node.rightNode.save(t) - node.rightNode = nil - } - - // save node - t.ndb.SaveNode(t, node) - return node.hash -} - -// NOTE: sets hashes recursively -func (node *IAVLNode) writePersistBytes(t *IAVLTree, w io.Writer) (n int64, err error) { - // node header - wire.WriteInt8(node.height, w, &n, &err) - wire.WriteVarint(node.size, w, &n, &err) - // key (unlike writeHashBytes, key is written for inner nodes) - encodeByteSlice(node.key, t.keyCodec, w, &n, &err) - - if node.height == 0 { - // value - encodeByteSlice(node.value, t.valueCodec, w, &n, &err) - } else { - // left - if node.leftHash == nil { - PanicSanity("node.leftHash was nil in writePersistBytes") - } - wire.WriteByteSlice(node.leftHash, w, &n, &err) - // right - if node.rightHash == nil { - PanicSanity("node.rightHash was nil in writePersistBytes") - } - wire.WriteByteSlice(node.rightHash, w, &n, &err) - } - return -} - -func (node *IAVLNode) set(t *IAVLTree, key interface{}, value interface{}) (newSelf *IAVLNode, updated bool) { - if node.height == 0 { - cmp := t.keyCodec.Compare(key, node.key) - if cmp < 0 { - return &IAVLNode{ - key: node.key, - height: 1, - size: 2, - leftNode: NewIAVLNode(key, value), - rightNode: node, - }, false - } else if cmp == 0 { - return NewIAVLNode(key, value), true - } else { - return &IAVLNode{ - key: key, - height: 1, - size: 2, - leftNode: node, - 
rightNode: NewIAVLNode(key, value), - }, false - } - } else { - node = node._copy() - if t.keyCodec.Compare(key, node.key) < 0 { - node.leftNode, updated = node.getLeftNode(t).set(t, key, value) - node.leftHash = nil - } else { - node.rightNode, updated = node.getRightNode(t).set(t, key, value) - node.rightHash = nil - } - if updated { - return node, updated - } else { - node.calcHeightAndSize(t) - return node.balance(t), updated - } - } -} - -// newHash/newNode: The new hash or node to replace node after remove. -// newKey: new leftmost leaf key for tree after successfully removing 'key' if changed. -// value: removed value. -func (node *IAVLNode) remove(t *IAVLTree, key interface{}) ( - newHash []byte, newNode *IAVLNode, newKey interface{}, value interface{}, removed bool) { - if node.height == 0 { - if t.keyCodec.Compare(key, node.key) == 0 { - return nil, nil, nil, node.value, true - } else { - return nil, node, nil, nil, false - } - } else { - if t.keyCodec.Compare(key, node.key) < 0 { - var newLeftHash []byte - var newLeftNode *IAVLNode - newLeftHash, newLeftNode, newKey, value, removed = node.getLeftNode(t).remove(t, key) - if !removed { - return nil, node, nil, value, false - } else if newLeftHash == nil && newLeftNode == nil { // left node held value, was removed - return node.rightHash, node.rightNode, node.key, value, true - } - node = node._copy() - node.leftHash, node.leftNode = newLeftHash, newLeftNode - node.calcHeightAndSize(t) - return nil, node.balance(t), newKey, value, true - } else { - var newRightHash []byte - var newRightNode *IAVLNode - newRightHash, newRightNode, newKey, value, removed = node.getRightNode(t).remove(t, key) - if !removed { - return nil, node, nil, value, false - } else if newRightHash == nil && newRightNode == nil { // right node held value, was removed - return node.leftHash, node.leftNode, nil, value, true - } - node = node._copy() - node.rightHash, node.rightNode = newRightHash, newRightNode - if newKey != nil { - node.key = newKey - newKey = nil - } - node.calcHeightAndSize(t) - return nil, node.balance(t), newKey, value, true - } - } -} - -func (node *IAVLNode) getLeftNode(t *IAVLTree) *IAVLNode { - if node.leftNode != nil { - return node.leftNode - } else { - return t.ndb.GetNode(t, node.leftHash) - } -} - -func (node *IAVLNode) getRightNode(t *IAVLTree) *IAVLNode { - if node.rightNode != nil { - return node.rightNode - } else { - return t.ndb.GetNode(t, node.rightHash) - } -} - -func (node *IAVLNode) rotateRight(t *IAVLTree) *IAVLNode { - node = node._copy() - sl := node.getLeftNode(t)._copy() - - slrHash, slrCached := sl.rightHash, sl.rightNode - sl.rightHash, sl.rightNode = nil, node - node.leftHash, node.leftNode = slrHash, slrCached - - node.calcHeightAndSize(t) - sl.calcHeightAndSize(t) - - return sl -} - -func (node *IAVLNode) rotateLeft(t *IAVLTree) *IAVLNode { - node = node._copy() - sr := node.getRightNode(t)._copy() - - srlHash, srlCached := sr.leftHash, sr.leftNode - sr.leftHash, sr.leftNode = nil, node - node.rightHash, node.rightNode = srlHash, srlCached - - node.calcHeightAndSize(t) - sr.calcHeightAndSize(t) - - return sr -} - -// NOTE: mutates height and size -func (node *IAVLNode) calcHeightAndSize(t *IAVLTree) { - node.height = maxInt8(node.getLeftNode(t).height, node.getRightNode(t).height) + 1 - node.size = node.getLeftNode(t).size + node.getRightNode(t).size -} - -func (node *IAVLNode) calcBalance(t *IAVLTree) int { - return int(node.getLeftNode(t).height) - int(node.getRightNode(t).height) -} - -func (node *IAVLNode) 
balance(t *IAVLTree) (newSelf *IAVLNode) { - balance := node.calcBalance(t) - if balance > 1 { - if node.getLeftNode(t).calcBalance(t) >= 0 { - // Left Left Case - return node.rotateRight(t) - } else { - // Left Right Case - node = node._copy() - node.leftHash, node.leftNode = nil, node.getLeftNode(t).rotateLeft(t) - //node.calcHeightAndSize() - return node.rotateRight(t) - } - } - if balance < -1 { - if node.getRightNode(t).calcBalance(t) <= 0 { - // Right Right Case - return node.rotateLeft(t) - } else { - // Right Left Case - node = node._copy() - node.rightHash, node.rightNode = nil, node.getRightNode(t).rotateRight(t) - //node.calcHeightAndSize() - return node.rotateLeft(t) - } - } - // Nothing changed - return node -} - -func (node *IAVLNode) traverse(t *IAVLTree, cb func(*IAVLNode) bool) bool { - stop := cb(node) - if stop { - return stop - } - if node.height > 0 { - stop = node.getLeftNode(t).traverse(t, cb) - if stop { - return stop - } - stop = node.getRightNode(t).traverse(t, cb) - if stop { - return stop - } - } - return false -} - -// Only used in testing... -func (node *IAVLNode) lmd(t *IAVLTree) *IAVLNode { - if node.height == 0 { - return node - } - return node.getLeftNode(t).lmd(t) -} - -// Only used in testing... -func (node *IAVLNode) rmd(t *IAVLTree) *IAVLNode { - if node.height == 0 { - return node - } - return node.getRightNode(t).rmd(t) -} - -//-------------------------------------------------------------------------------- - -// Read a (length prefixed) byteslice then decode the object using the codec -func decodeByteSlice(codec wire.Codec, r io.Reader, n *int64, err *error) interface{} { - bytez := wire.ReadByteSlice(r, n, err) - if *err != nil { - return nil - } - n_ := new(int64) - return codec.Decode(bytes.NewBuffer(bytez), n_, err) -} - -// Encode object using codec, then write a (length prefixed) byteslice. -func encodeByteSlice(o interface{}, codec wire.Codec, w io.Writer, n *int64, err *error) { - buf, n_ := new(bytes.Buffer), new(int64) - codec.Encode(o, buf, n_, err) - if *err != nil { - return - } - wire.WriteByteSlice(buf.Bytes(), w, n, err) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_proof.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_proof.go deleted file mode 100644 index 2003f6d4d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_proof.go +++ /dev/null @@ -1,151 +0,0 @@ -package merkle - -import ( - "bytes" - - "code.google.com/p/go.crypto/ripemd160" - - . 
"github.com/tendermint/go-common" - "github.com/tendermint/go-wire" -) - -type IAVLProof struct { - LeafNode IAVLProofLeafNode - InnerNodes []IAVLProofInnerNode - RootHash []byte -} - -func (proof *IAVLProof) Verify(keyBytes, valueBytes, rootHash []byte) bool { - if !bytes.Equal(keyBytes, proof.LeafNode.KeyBytes) { - return false - } - if !bytes.Equal(valueBytes, proof.LeafNode.ValueBytes) { - return false - } - if !bytes.Equal(rootHash, proof.RootHash) { - return false - } - hash := proof.LeafNode.Hash() - // fmt.Printf("leaf hash: %X\n", hash) - for _, branch := range proof.InnerNodes { - hash = branch.Hash(hash) - // fmt.Printf("branch hash: %X\n", hash) - } - // fmt.Printf("root: %X, computed: %X\n", proof.RootHash, hash) - return bytes.Equal(proof.RootHash, hash) -} - -type IAVLProofInnerNode struct { - Height int8 - Size int - Left []byte - Right []byte -} - -func (branch IAVLProofInnerNode) Hash(childHash []byte) []byte { - hasher := ripemd160.New() - buf := new(bytes.Buffer) - n, err := int64(0), error(nil) - wire.WriteInt8(branch.Height, buf, &n, &err) - wire.WriteVarint(branch.Size, buf, &n, &err) - if len(branch.Left) == 0 { - wire.WriteByteSlice(childHash, buf, &n, &err) - wire.WriteByteSlice(branch.Right, buf, &n, &err) - } else { - wire.WriteByteSlice(branch.Left, buf, &n, &err) - wire.WriteByteSlice(childHash, buf, &n, &err) - } - if err != nil { - PanicCrisis(Fmt("Failed to hash IAVLProofInnerNode: %v", err)) - } - // fmt.Printf("InnerNode hash bytes: %X\n", buf.Bytes()) - hasher.Write(buf.Bytes()) - return hasher.Sum(nil) -} - -type IAVLProofLeafNode struct { - KeyBytes []byte - ValueBytes []byte -} - -func (leaf IAVLProofLeafNode) Hash() []byte { - hasher := ripemd160.New() - buf := new(bytes.Buffer) - n, err := int64(0), error(nil) - wire.WriteInt8(0, buf, &n, &err) - wire.WriteVarint(1, buf, &n, &err) - wire.WriteByteSlice(leaf.KeyBytes, buf, &n, &err) - wire.WriteByteSlice(leaf.ValueBytes, buf, &n, &err) - if err != nil { - PanicCrisis(Fmt("Failed to hash IAVLProofLeafNode: %v", err)) - } - // fmt.Printf("LeafNode hash bytes: %X\n", buf.Bytes()) - hasher.Write(buf.Bytes()) - return hasher.Sum(nil) -} - -func (node *IAVLNode) constructProof(t *IAVLTree, key interface{}, proof *IAVLProof) (exists bool) { - if node.height == 0 { - if t.keyCodec.Compare(node.key, key) == 0 { - keyBuf, valueBuf := new(bytes.Buffer), new(bytes.Buffer) - n, err := int64(0), error(nil) - t.keyCodec.Encode(node.key, keyBuf, &n, &err) - if err != nil { - PanicCrisis(Fmt("Failed to encode node.key: %v", err)) - } - t.valueCodec.Encode(node.value, valueBuf, &n, &err) - if err != nil { - PanicCrisis(Fmt("Failed to encode node.value: %v", err)) - } - leaf := IAVLProofLeafNode{ - KeyBytes: keyBuf.Bytes(), - ValueBytes: valueBuf.Bytes(), - } - proof.LeafNode = leaf - return true - } else { - return false - } - } else { - if t.keyCodec.Compare(key, node.key) < 0 { - exists := node.getLeftNode(t).constructProof(t, key, proof) - if !exists { - return false - } - branch := IAVLProofInnerNode{ - Height: node.height, - Size: node.size, - Left: nil, - Right: node.getRightNode(t).hash, - } - proof.InnerNodes = append(proof.InnerNodes, branch) - return true - } else { - exists := node.getRightNode(t).constructProof(t, key, proof) - if !exists { - return false - } - branch := IAVLProofInnerNode{ - Height: node.height, - Size: node.size, - Left: node.getLeftNode(t).hash, - Right: nil, - } - proof.InnerNodes = append(proof.InnerNodes, branch) - return true - } - } -} - -// Returns nil if key is not in tree. 
-func (t *IAVLTree) ConstructProof(key interface{}) *IAVLProof { - if t.root == nil { - return nil - } - t.root.hashWithCount(t) // Ensure that all hashes are calculated. - proof := &IAVLProof{ - RootHash: t.root.hash, - } - t.root.constructProof(t, key, proof) - return proof -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_test.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_test.go deleted file mode 100644 index 47afb8885..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_test.go +++ /dev/null @@ -1,334 +0,0 @@ -package merkle - -import ( - "bytes" - "fmt" - - . "github.com/tendermint/go-common" - . "github.com/tendermint/go-common/test" - "github.com/tendermint/go-wire" - - "runtime" - "testing" -) - -func randstr(length int) string { - return RandStr(length) -} - -// Convenience for a new node -func N(l, r interface{}) *IAVLNode { - var left, right *IAVLNode - if _, ok := l.(*IAVLNode); ok { - left = l.(*IAVLNode) - } else { - left = NewIAVLNode(l, "") - } - if _, ok := r.(*IAVLNode); ok { - right = r.(*IAVLNode) - } else { - right = NewIAVLNode(r, "") - } - - n := &IAVLNode{ - key: right.lmd(nil).key, - value: "", - leftNode: left, - rightNode: right, - } - n.calcHeightAndSize(nil) - return n -} - -// Setup a deep node -func T(n *IAVLNode) *IAVLTree { - t := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil) - n.hashWithCount(t) - t.root = n - return t -} - -// Convenience for simple printing of keys & tree structure -func P(n *IAVLNode) string { - if n.height == 0 { - return fmt.Sprintf("%v", n.key) - } else { - return fmt.Sprintf("(%v %v)", P(n.leftNode), P(n.rightNode)) - } -} - -func TestUnit(t *testing.T) { - - expectHash := func(tree *IAVLTree, hashCount int) { - // ensure number of new hash calculations is as expected. - hash, count := tree.HashWithCount() - if count != hashCount { - t.Fatalf("Expected %v new hashes, got %v", hashCount, count) - } - // nuke hashes and reconstruct hash, ensure it's the same. - tree.root.traverse(tree, func(node *IAVLNode) bool { - node.hash = nil - return false - }) - // ensure that the new hash after nuking is the same as the old. - newHash, _ := tree.HashWithCount() - if bytes.Compare(hash, newHash) != 0 { - t.Fatalf("Expected hash %v but got %v after nuking", hash, newHash) - } - } - - expectSet := func(tree *IAVLTree, i int, repr string, hashCount int) { - origNode := tree.root - updated := tree.Set(i, "") - // ensure node was added & structure is as expected. - if updated == true || P(tree.root) != repr { - t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v", - i, P(origNode), repr, P(tree.root), updated) - } - // ensure hash calculation requirements - expectHash(tree, hashCount) - tree.root = origNode - } - - expectRemove := func(tree *IAVLTree, i int, repr string, hashCount int) { - origNode := tree.root - value, removed := tree.Remove(i) - // ensure node was added & structure is as expected. 
- if value != "" || !removed || P(tree.root) != repr { - t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v", - i, P(origNode), repr, P(tree.root), value, removed) - } - // ensure hash calculation requirements - expectHash(tree, hashCount) - tree.root = origNode - } - - //////// Test Set cases: - - // Case 1: - t1 := T(N(4, 20)) - - expectSet(t1, 8, "((4 8) 20)", 3) - expectSet(t1, 25, "(4 (20 25))", 3) - - t2 := T(N(4, N(20, 25))) - - expectSet(t2, 8, "((4 8) (20 25))", 3) - expectSet(t2, 30, "((4 20) (25 30))", 4) - - t3 := T(N(N(1, 2), 6)) - - expectSet(t3, 4, "((1 2) (4 6))", 4) - expectSet(t3, 8, "((1 2) (6 8))", 3) - - t4 := T(N(N(1, 2), N(N(5, 6), N(7, 9)))) - - expectSet(t4, 8, "(((1 2) (5 6)) ((7 8) 9))", 5) - expectSet(t4, 10, "(((1 2) (5 6)) (7 (9 10)))", 5) - - //////// Test Remove cases: - - t10 := T(N(N(1, 2), 3)) - - expectRemove(t10, 2, "(1 3)", 1) - expectRemove(t10, 3, "(1 2)", 0) - - t11 := T(N(N(N(1, 2), 3), N(4, 5))) - - expectRemove(t11, 4, "((1 2) (3 5))", 2) - expectRemove(t11, 3, "((1 2) (4 5))", 1) - -} - -func TestIntegration(t *testing.T) { - - type record struct { - key string - value string - } - - records := make([]*record, 400) - var tree *IAVLTree = NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil) - - randomRecord := func() *record { - return &record{randstr(20), randstr(20)} - } - - for i := range records { - r := randomRecord() - records[i] = r - //t.Log("New record", r) - //PrintIAVLNode(tree.root) - updated := tree.Set(r.key, "") - if updated { - t.Error("should have not been updated") - } - updated = tree.Set(r.key, r.value) - if !updated { - t.Error("should have been updated") - } - if tree.Size() != i+1 { - t.Error("size was wrong", tree.Size(), i+1) - } - } - - for _, r := range records { - if has := tree.Has(r.key); !has { - t.Error("Missing key", r.key) - } - if has := tree.Has(randstr(12)); has { - t.Error("Table has extra key") - } - if _, val := tree.Get(r.key); val.(string) != r.value { - t.Error("wrong value") - } - } - - for i, x := range records { - if val, removed := tree.Remove(x.key); !removed { - t.Error("Wasn't removed") - } else if val != x.value { - t.Error("Wrong value") - } - for _, r := range records[i+1:] { - if has := tree.Has(r.key); !has { - t.Error("Missing key", r.key) - } - if has := tree.Has(randstr(12)); has { - t.Error("Table has extra key") - } - _, val := tree.Get(r.key) - if val != r.value { - t.Error("wrong value") - } - } - if tree.Size() != len(records)-(i+1) { - t.Error("size was wrong", tree.Size(), (len(records) - (i + 1))) - } - } -} - -func TestPersistence(t *testing.T) { - db := db.NewMemDB() - - // Create some random key value pairs - records := make(map[string]string) - for i := 0; i < 10000; i++ { - records[randstr(20)] = randstr(20) - } - - // Construct some tree and save it - t1 := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, db) - for key, value := range records { - t1.Set(key, value) - } - t1.Save() - - hash, _ := t1.HashWithCount() - - // Load a tree - t2 := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, db) - t2.Load(hash) - for key, value := range records { - _, t2value := t2.Get(key) - if t2value != value { - t.Fatalf("Invalid value. Expected %v, got %v", value, t2value) - } - } -} - -func testProof(t *testing.T, proof *IAVLProof, keyBytes, valueBytes, rootHash []byte) { - // Proof must verify. - if !proof.Verify(keyBytes, valueBytes, rootHash) { - t.Errorf("Invalid proof. Verification failed.") - return - } - // Write/Read then verify. 
- proofBytes := wire.BinaryBytes(proof) - n, err := int64(0), error(nil) - proof2 := wire.ReadBinary(&IAVLProof{}, bytes.NewBuffer(proofBytes), &n, &err).(*IAVLProof) - if err != nil { - t.Errorf("Failed to read IAVLProof from bytes: %v", err) - return - } - if !proof2.Verify(keyBytes, valueBytes, rootHash) { - // t.Log(Fmt("%X\n%X\n", proofBytes, wire.BinaryBytes(proof2))) - t.Errorf("Invalid proof after write/read. Verification failed.") - return - } - // Random mutations must not verify - for i := 0; i < 5; i++ { - badProofBytes := MutateByteSlice(proofBytes) - n, err := int64(0), error(nil) - badProof := wire.ReadBinary(&IAVLProof{}, bytes.NewBuffer(badProofBytes), &n, &err).(*IAVLProof) - if err != nil { - continue // This is fine. - } - if badProof.Verify(keyBytes, valueBytes, rootHash) { - t.Errorf("Proof was still valid after a random mutation:\n%X\n%X", proofBytes, badProofBytes) - } - } -} - -func TestIAVLProof(t *testing.T) { - - // Convenient wrapper around wire.BasicCodec. - toBytes := func(o interface{}) []byte { - buf, n, err := new(bytes.Buffer), int64(0), error(nil) - wire.BasicCodec.Encode(o, buf, &n, &err) - if err != nil { - panic(Fmt("Failed to encode thing: %v", err)) - } - return buf.Bytes() - } - - // Construct some random tree - db := db.NewMemDB() - var tree *IAVLTree = NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 100, db) - for i := 0; i < 1000; i++ { - key, value := randstr(20), randstr(20) - tree.Set(key, value) - } - - // Persist the items so far - tree.Save() - - // Add more items so it's not all persisted - for i := 0; i < 100; i++ { - key, value := randstr(20), randstr(20) - tree.Set(key, value) - } - - // Now for each item, construct a proof and verify - tree.Iterate(func(key interface{}, value interface{}) bool { - proof := tree.ConstructProof(key) - if !bytes.Equal(proof.RootHash, tree.Hash()) { - t.Errorf("Invalid proof. Expected root %X, got %X", tree.Hash(), proof.RootHash) - } - testProof(t, proof, toBytes(key), toBytes(value), tree.Hash()) - return false - }) - -} - -func BenchmarkImmutableAvlTree(b *testing.B) { - b.StopTimer() - - t := NewIAVLTree(wire.BasicCodec, wire.BasicCodec, 0, nil) - // 23000ns/op, 43000ops/s - // for i := 0; i < 10000000; i++ { - for i := 0; i < 1000000; i++ { - t.Set(RandInt64(), "") - } - - fmt.Println("ok, starting") - - runtime.GC() - - b.StartTimer() - for i := 0; i < b.N; i++ { - ri := RandInt64() - t.Set(ri, "") - t.Remove(ri) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_tree.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_tree.go deleted file mode 100644 index 94395c991..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/iavl_tree.go +++ /dev/null @@ -1,262 +0,0 @@ -package merkle - -import ( - "bytes" - "container/list" - "sync" - - . "github.com/tendermint/go-common" - dbm "github.com/tendermint/go-db" - "github.com/tendermint/go-wire" -) - -/* -Immutable AVL Tree (wraps the Node root) -This tree is not goroutine safe. -*/ -type IAVLTree struct { - keyCodec wire.Codec - valueCodec wire.Codec - root *IAVLNode - ndb *nodeDB -} - -func NewIAVLTree(keyCodec, valueCodec wire.Codec, cacheSize int, db dbm.DB) *IAVLTree { - if db == nil { - // In-memory IAVLTree - return &IAVLTree{ - keyCodec: keyCodec, - valueCodec: valueCodec, - } - } else { - // Persistent IAVLTree - return &IAVLTree{ - keyCodec: keyCodec, - valueCodec: valueCodec, - ndb: newNodeDB(cacheSize, db), - } - } -} - -// The returned tree and the original tree are goroutine independent. 
-// That is, they can each run in their own goroutine. -func (t *IAVLTree) Copy() Tree { - if t.root == nil { - return &IAVLTree{ - keyCodec: t.keyCodec, - valueCodec: t.valueCodec, - root: nil, - ndb: t.ndb, - } - } - if t.ndb != nil && !t.root.persisted { - // Saving a tree finalizes all the nodes. - // It sets all the hashes recursively, - // clears all the leftNode/rightNode values recursively, - // and all the .persisted flags get set. - PanicSanity("It is unsafe to Copy() an unpersisted tree.") - } else if t.ndb == nil && t.root.hash == nil { - // An in-memory IAVLTree is finalized when the hashes are - // calculated. - t.root.hashWithCount(t) - } - return &IAVLTree{ - keyCodec: t.keyCodec, - valueCodec: t.valueCodec, - root: t.root, - ndb: t.ndb, - } -} - -func (t *IAVLTree) Size() int { - if t.root == nil { - return 0 - } - return t.root.size -} - -func (t *IAVLTree) Height() int8 { - if t.root == nil { - return 0 - } - return t.root.height -} - -func (t *IAVLTree) Has(key interface{}) bool { - if t.root == nil { - return false - } - return t.root.has(t, key) -} - -func (t *IAVLTree) Set(key interface{}, value interface{}) (updated bool) { - if t.root == nil { - t.root = NewIAVLNode(key, value) - return false - } - t.root, updated = t.root.set(t, key, value) - return updated -} - -func (t *IAVLTree) Hash() []byte { - if t.root == nil { - return nil - } - hash, _ := t.root.hashWithCount(t) - return hash -} - -func (t *IAVLTree) HashWithCount() ([]byte, int) { - if t.root == nil { - return nil, 0 - } - return t.root.hashWithCount(t) -} - -func (t *IAVLTree) Save() []byte { - if t.root == nil { - return nil - } - return t.root.save(t) -} - -// Sets the root node by reading from db. -// If the hash is empty, then sets root to nil. -func (t *IAVLTree) Load(hash []byte) { - if len(hash) == 0 { - t.root = nil - } else { - t.root = t.ndb.GetNode(t, hash) - } -} - -func (t *IAVLTree) Get(key interface{}) (index int, value interface{}) { - if t.root == nil { - return 0, nil - } - return t.root.get(t, key) -} - -func (t *IAVLTree) GetByIndex(index int) (key interface{}, value interface{}) { - if t.root == nil { - return nil, nil - } - return t.root.getByIndex(t, index) -} - -func (t *IAVLTree) Remove(key interface{}) (value interface{}, removed bool) { - if t.root == nil { - return nil, false - } - newRootHash, newRoot, _, value, removed := t.root.remove(t, key) - if !removed { - return nil, false - } - if newRoot == nil && newRootHash != nil { - t.root = t.ndb.GetNode(t, newRootHash) - } else { - t.root = newRoot - } - return value, true -} - -func (t *IAVLTree) Iterate(fn func(key interface{}, value interface{}) bool) (stopped bool) { - if t.root == nil { - return false - } - return t.root.traverse(t, func(node *IAVLNode) bool { - if node.height == 0 { - return fn(node.key, node.value) - } else { - return false - } - }) -} - -//----------------------------------------------------------------------------- - -type nodeElement struct { - node *IAVLNode - elem *list.Element -} - -type nodeDB struct { - mtx sync.Mutex - cache map[string]nodeElement - cacheSize int - cacheQueue *list.List - db dbm.DB -} - -func newNodeDB(cacheSize int, db dbm.DB) *nodeDB { - return &nodeDB{ - cache: make(map[string]nodeElement), - cacheSize: cacheSize, - cacheQueue: list.New(), - db: db, - } -} - -func (ndb *nodeDB) GetNode(t *IAVLTree, hash []byte) *IAVLNode { - ndb.mtx.Lock() - defer ndb.mtx.Unlock() - // Check the cache. - nodeElem, ok := ndb.cache[string(hash)] - if ok { - // Already exists. 
Move to back of cacheQueue. - ndb.cacheQueue.MoveToBack(nodeElem.elem) - return nodeElem.node - } else { - // Doesn't exist, load. - buf := ndb.db.Get(hash) - if len(buf) == 0 { - ndb.db.Print() - PanicSanity(Fmt("Value missing for key %X", hash)) - } - r := bytes.NewReader(buf) - var n int64 - var err error - node := ReadIAVLNode(t, r, &n, &err) - if err != nil { - PanicCrisis(Fmt("Error reading IAVLNode. bytes: %X error: %v", buf, err)) - } - node.hash = hash - node.persisted = true - ndb.cacheNode(node) - return node - } -} - -func (ndb *nodeDB) SaveNode(t *IAVLTree, node *IAVLNode) { - ndb.mtx.Lock() - defer ndb.mtx.Unlock() - if node.hash == nil { - PanicSanity("Expected to find node.hash, but none found.") - } - if node.persisted { - PanicSanity("Shouldn't be calling save on an already persisted node.") - } - /*if _, ok := ndb.cache[string(node.hash)]; ok { - panic("Shouldn't be calling save on an already cached node.") - }*/ - // Save node bytes to db - buf := bytes.NewBuffer(nil) - _, err := node.writePersistBytes(t, buf) - if err != nil { - PanicCrisis(err) - } - ndb.db.Set(node.hash, buf.Bytes()) - node.persisted = true - ndb.cacheNode(node) -} - -func (ndb *nodeDB) cacheNode(node *IAVLNode) { - // Create entry in cache and append to cacheQueue. - elem := ndb.cacheQueue.PushBack(node.hash) - ndb.cache[string(node.hash)] = nodeElement{node, elem} - // Maybe expire an item. - if ndb.cacheQueue.Len() > ndb.cacheSize { - hash := ndb.cacheQueue.Remove(ndb.cacheQueue.Front()).([]byte) - delete(ndb.cache, string(hash)) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree.go deleted file mode 100644 index 8d195f5a0..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree.go +++ /dev/null @@ -1,298 +0,0 @@ -/* -Computes a deterministic minimal height merkle tree hash. -If the number of items is not a power of two, some leaves -will be at different levels. Tries to keep both sides of -the tree the same size, but the left may be one greater. - -Use this for short deterministic trees, such as the validator list. -For larger datasets, use IAVLTree. - - * - / \ - / \ - / \ - / \ - * * - / \ / \ - / \ / \ - / \ / \ - * * * h6 - / \ / \ / \ - h0 h1 h2 h3 h4 h5 - -*/ - -package merkle - -import ( - "bytes" - "fmt" - "sort" - - "code.google.com/p/go.crypto/ripemd160" - - . "github.com/tendermint/go-common" - "github.com/tendermint/go-wire" -) - -func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { - var n int64 - var err error - var hasher = ripemd160.New() - wire.WriteByteSlice(left, hasher, &n, &err) - wire.WriteByteSlice(right, hasher, &n, &err) - if err != nil { - PanicCrisis(err) - } - return hasher.Sum(nil) -} - -func SimpleHashFromHashes(hashes [][]byte) []byte { - // Recursive impl. - switch len(hashes) { - case 0: - return nil - case 1: - return hashes[0] - default: - left := SimpleHashFromHashes(hashes[:(len(hashes)+1)/2]) - right := SimpleHashFromHashes(hashes[(len(hashes)+1)/2:]) - return SimpleHashFromTwoHashes(left, right) - } -} - -// Convenience for SimpleHashFromHashes. 
-func SimpleHashFromBinaries(items []interface{}) []byte { - hashes := [][]byte{} - for _, item := range items { - hashes = append(hashes, SimpleHashFromBinary(item)) - } - return SimpleHashFromHashes(hashes) -} - -// General Convenience -func SimpleHashFromBinary(item interface{}) []byte { - hasher, n, err := ripemd160.New(), new(int64), new(error) - wire.WriteBinary(item, hasher, n, err) - if *err != nil { - PanicCrisis(err) - } - return hasher.Sum(nil) -} - -// Convenience for SimpleHashFromHashes. -func SimpleHashFromHashables(items []Hashable) []byte { - hashes := [][]byte{} - for _, item := range items { - hash := item.Hash() - hashes = append(hashes, hash) - } - return SimpleHashFromHashes(hashes) -} - -// Convenience for SimpleHashFromHashes. -func SimpleHashFromMap(m map[string]interface{}) []byte { - kpPairsH := MakeSortedKVPairs(m) - return SimpleHashFromHashables(kpPairsH) -} - -//-------------------------------------------------------------------------------- - -/* Convenience struct for key-value pairs. -A list of KVPairs is hashed via `SimpleHashFromHashables`. -NOTE: Each `Value` is encoded for hashing without extra type information, -so the user is presumed to be aware of the Value types. -*/ -type KVPair struct { - Key string - Value interface{} -} - -func (kv KVPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int64), new(error) - wire.WriteString(kv.Key, hasher, n, err) - if kvH, ok := kv.Value.(Hashable); ok { - wire.WriteByteSlice(kvH.Hash(), hasher, n, err) - } else { - wire.WriteBinary(kv.Value, hasher, n, err) - } - if *err != nil { - PanicSanity(*err) - } - return hasher.Sum(nil) -} - -type KVPairs []KVPair - -func (kvps KVPairs) Len() int { return len(kvps) } -func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } -func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } -func (kvps KVPairs) Sort() { sort.Sort(kvps) } - -func MakeSortedKVPairs(m map[string]interface{}) []Hashable { - kvPairs := []KVPair{} - for k, v := range m { - kvPairs = append(kvPairs, KVPair{k, v}) - } - KVPairs(kvPairs).Sort() - kvPairsH := []Hashable{} - for _, kvp := range kvPairs { - kvPairsH = append(kvPairsH, kvp) - } - return kvPairsH -} - -//-------------------------------------------------------------------------------- - -type SimpleProof struct { - Index int `json:"index"` - Total int `json:"total"` - LeafHash []byte `json:"leaf_hash"` - InnerHashes [][]byte `json:"inner_hashes"` // Hashes from leaf's sibling to a root's child. - RootHash []byte `json:"root_hash"` -} - -// proofs[0] is the proof for items[0]. -func SimpleProofsFromHashables(items []Hashable) (proofs []*SimpleProof) { - trails, root := trailsFromHashables(items) - proofs = make([]*SimpleProof, len(items)) - for i, trail := range trails { - proofs[i] = &SimpleProof{ - Index: i, - Total: len(items), - LeafHash: trail.Hash, - InnerHashes: trail.FlattenInnerHashes(), - RootHash: root.Hash, - } - } - return -} - -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. 
-func (sp *SimpleProof) Verify(leafHash []byte, rootHash []byte) bool { - if !bytes.Equal(leafHash, sp.LeafHash) { - return false - } - if !bytes.Equal(rootHash, sp.RootHash) { - return false - } - computedHash := computeHashFromInnerHashes(sp.Index, sp.Total, sp.LeafHash, sp.InnerHashes) - if computedHash == nil { - return false - } - if !bytes.Equal(computedHash, rootHash) { - return false - } - return true -} - -func (sp *SimpleProof) String() string { - return sp.StringIndented("") -} - -func (sp *SimpleProof) StringIndented(indent string) string { - return fmt.Sprintf(`SimpleProof{ -%s Index: %v -%s Total: %v -%s LeafHash: %X -%s InnerHashes: %X -%s RootHash: %X -%s}`, - indent, sp.Index, - indent, sp.Total, - indent, sp.LeafHash, - indent, sp.InnerHashes, - indent, sp.RootHash, - indent) -} - -// Use the leafHash and innerHashes to get the root merkle hash. -// If the length of the innerHashes slice isn't exactly correct, the result is nil. -func computeHashFromInnerHashes(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - // Recursive impl. - if index >= total { - return nil - } - switch total { - case 0: - PanicSanity("Cannot call computeHashFromInnerHashes() with 0 total") - return nil - case 1: - if len(innerHashes) != 0 { - return nil - } - return leafHash - default: - if len(innerHashes) == 0 { - return nil - } - numLeft := (total + 1) / 2 - if index < numLeft { - leftHash := computeHashFromInnerHashes(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil - } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } else { - rightHash := computeHashFromInnerHashes(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) - } - } -} - -// Helper structure to construct merkle proof. -// The node and the tree is thrown away afterwards. -// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. -// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. -type SimpleProofNode struct { - Hash []byte - Parent *SimpleProofNode - Left *SimpleProofNode // Left sibling (only one of Left,Right is set) - Right *SimpleProofNode // Right sibling (only one of Left,Right is set) -} - -// Starting from a leaf SimpleProofNode, FlattenInnerHashes() will return -// the inner hashes for the item corresponding to the leaf. -func (spn *SimpleProofNode) FlattenInnerHashes() [][]byte { - // Nonrecursive impl. - innerHashes := [][]byte{} - for spn != nil { - if spn.Left != nil { - innerHashes = append(innerHashes, spn.Left.Hash) - } else if spn.Right != nil { - innerHashes = append(innerHashes, spn.Right.Hash) - } else { - break - } - spn = spn.Parent - } - return innerHashes -} - -// trails[0].Hash is the leaf hash for items[0]. -// trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { - // Recursive impl. 
- switch len(items) { - case 0: - return nil, nil - case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} - return []*SimpleProofNode{trail}, trail - default: - lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) - root := &SimpleProofNode{rootHash, nil, nil, nil} - leftRoot.Parent = root - leftRoot.Right = rightRoot - rightRoot.Parent = root - rightRoot.Left = leftRoot - return append(lefts, rights...), root - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree_test.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree_test.go deleted file mode 100644 index 016d179da..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/simple_tree_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package merkle - -import ( - "bytes" - - . "github.com/tendermint/go-common" - . "github.com/tendermint/go-common/test" - - "fmt" - "testing" -) - -type testItem []byte - -func (tI testItem) Hash() []byte { - return []byte(tI) -} - -func TestSimpleProof(t *testing.T) { - - numItems := 100 - - items := make([]Hashable, numItems) - for i := 0; i < numItems; i++ { - items[i] = testItem(RandBytes(32)) - } - - rootHash := SimpleHashFromHashables(items) - - proofs := SimpleProofsFromHashables(items) - - // For each item, check the trail. - for i, item := range items { - itemHash := item.Hash() - proof := proofs[i] - - // Verify success - ok := proof.Verify(itemHash, rootHash) - if !ok { - t.Errorf("Verification failed for index %v.", i) - } - - // Wrong item index should make it fail - proof.Index += 1 - { - ok = proof.Verify(itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong index %v.", i) - } - } - proof.Index -= 1 - - // Trail too long should make it fail - origInnerHashes := proof.InnerHashes - proof.InnerHashes = append(proof.InnerHashes, RandBytes(32)) - { - ok = proof.Verify(itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } - proof.InnerHashes = origInnerHashes - - // Trail too short should make it fail - proof.InnerHashes = proof.InnerHashes[0 : len(proof.InnerHashes)-1] - { - ok = proof.Verify(itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } - proof.InnerHashes = origInnerHashes - - // Mutating the itemHash should make it fail. - ok = proof.Verify(MutateByteSlice(itemHash), rootHash) - if ok { - t.Errorf("Expected verification to fail for mutated leaf hash") - } - - // Mutating the rootHash should make it fail. - ok = proof.Verify(itemHash, MutateByteSlice(rootHash)) - if ok { - t.Errorf("Expected verification to fail for mutated root hash") - } - } -} - -func TestKVPairs(t *testing.T) { - // NOTE: in alphabetical order for convenience. 
- m := map[string]interface{}{} - m["bytez"] = []byte("hizz") // 0 - m["light"] = "shadow" // 1 - m["one"] = 1 // 2 - m["one_u64"] = uint64(1) // 3 - m["struct"] = struct { // 4 - A int - B int - }{0, 1} - - kvPairsH := MakeSortedKVPairs(m) - // rootHash := SimpleHashFromHashables(kvPairsH) - proofs := SimpleProofsFromHashables(kvPairsH) - - // Some manual tests - if !bytes.Equal(proofs[1].LeafHash, KVPair{"light", "shadow"}.Hash()) { - t.Errorf("\"light\": proof failed") - fmt.Printf("%v\n%X", proofs[0], KVPair{"light", "shadow"}.Hash()) - } - if !bytes.Equal(proofs[2].LeafHash, KVPair{"one", 1}.Hash()) { - t.Errorf("\"one\": proof failed") - } - if !bytes.Equal(proofs[4].LeafHash, KVPair{"struct", struct { - A int - B int - }{0, 1}}.Hash()) { - t.Errorf("\"struct\": proof failed") - } - -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/types.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/types.go deleted file mode 100644 index 87f716c78..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/types.go +++ /dev/null @@ -1,21 +0,0 @@ -package merkle - -type Tree interface { - Size() (size int) - Height() (height int8) - Has(key interface{}) (has bool) - Get(key interface{}) (index int, value interface{}) - GetByIndex(index int) (key interface{}, value interface{}) - Set(key interface{}, value interface{}) (updated bool) - Remove(key interface{}) (value interface{}, removed bool) - HashWithCount() (hash []byte, count int) - Hash() (hash []byte) - Save() (hash []byte) - Load(hash []byte) - Copy() Tree - Iterate(func(key interface{}, value interface{}) (stop bool)) (stopped bool) -} - -type Hashable interface { - Hash() []byte -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-merkle/util.go b/Godeps/_workspace/src/github.com/tendermint/go-merkle/util.go deleted file mode 100644 index 89fd2741a..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-merkle/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package merkle - -import ( - "fmt" -) - -// Prints the in-memory children recursively. -func PrintIAVLNode(node *IAVLNode) { - fmt.Println("==== NODE") - if node != nil { - printIAVLNode(node, 0) - } - fmt.Println("==== END") -} - -func printIAVLNode(node *IAVLNode, indent int) { - indentPrefix := "" - for i := 0; i < indent; i++ { - indentPrefix += " " - } - - if node.rightNode != nil { - printIAVLNode(node.rightNode, indent+1) - } else if node.rightHash != nil { - fmt.Printf("%s %X\n", indentPrefix, node.rightHash) - } - - fmt.Printf("%s%v:%v\n", indentPrefix, node.key, node.height) - - if node.leftNode != nil { - printIAVLNode(node.leftNode, indent+1) - } else if node.leftHash != nil { - fmt.Printf("%s %X\n", indentPrefix, node.leftHash) - } - -} - -func maxInt8(a, b int8) int8 { - if a > b { - return a - } - return b -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-p2p/LICENSE.md deleted file mode 100644 index ec02173eb..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint P2P -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. 
States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
- -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. 
-You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
-c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
- -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
- -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/README.md b/Godeps/_workspace/src/github.com/tendermint/go-p2p/README.md deleted file mode 100644 index 6149d9c0f..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# `tendermint/p2p` - -`tendermint/p2p` provides an abstraction around peer-to-peer communication.
- -## Peer/MConnection/Channel - -Each peer has one `MConnection` (multiplex connection) instance. - -__multiplex__ *noun* a system or signal involving simultaneous transmission of -several messages along a single channel of communication. - -Each `MConnection` handles message transmission on multiple abstract communication -`Channel`s. Each channel has a globally unique byte id. -The byte id and the relative priorities of each `Channel` are configured upon -initialization of the connection. - -There are two methods for sending messages: -```go -func (m MConnection) Send(chID byte, msg interface{}) bool {} -func (m MConnection) TrySend(chID byte, msg interface{}) bool {} -``` - -`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued -for the channel with the given id byte `chID`. The message `msg` is serialized -using the `tendermint/wire` submodule's `WriteBinary()` reflection routine. - -`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's -queue is full. - -`Send()` and `TrySend()` are also exposed for each `Peer`. - -## Switch/Reactor - -The `Switch` handles peer connections and exposes an API to receive incoming messages -on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -or more `Channels`. So while sending outgoing messages is typically performed on the peer, -incoming messages are received on the reactor. - -```go -// Declare a MyReactor reactor that handles messages on MyChannelID. -type MyReactor struct{} - -func (reactor MyReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}} -} - -func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) { - r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error) - msgString := ReadString(r, n, err) - fmt.Println(msgString) -} - -// Other Reactor methods omitted for brevity -... - -switch := NewSwitch([]Reactor{MyReactor{}}) - -... - -// Send a random message to all outbound connections -for _, peer := range switch.Peers().List() { - if peer.IsOutbound() { - peer.Send(MyChannelID, "Here's a random message") - } -} -``` - -### PexReactor/AddrBook - -A `PEXReactor` reactor implementation is provided to automate peer discovery. - -```go -book := p2p.NewAddrBook(config.App.GetString("AddrBookFile")) -pexReactor := p2p.NewPEXReactor(book) -... -switch := NewSwitch([]Reactor{pexReactor, myReactor, ...}) -``` diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook.go deleted file mode 100644 index 367ced8d9..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook.go +++ /dev/null @@ -1,813 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "encoding/binary" - "encoding/json" - "math" - "math/rand" - "net" - "os" - "sync" - "time" - - . "github.com/tendermint/go-common" -) - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. 
- newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which an source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 30 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. - minGetSelection = 32 - - // max addresses returned by GetSelection - maxGetSelection = 2500 - - // current version of the on-disk format. - serializationVersion = 1 -) - -/* AddrBook - concurrency safe peer address manager */ -type AddrBook struct { - QuitService - - mtx sync.Mutex - filePath string - rand *rand.Rand - key string - ourAddrs map[string]*NetAddress - addrLookup map[string]*knownAddress // new & old - addrNew []map[string]*knownAddress - addrOld []map[string]*knownAddress - wg sync.WaitGroup - nOld int - nNew int -} - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string) *AddrBook { - am := &AddrBook{ - rand: rand.New(rand.NewSource(time.Now().UnixNano())), - ourAddrs: make(map[string]*NetAddress), - addrLookup: make(map[string]*knownAddress), - filePath: filePath, - } - am.init() - am.QuitService = *NewQuitService(log, "AddrBook", am) - return am -} - -// When modifying this, don't forget to update loadFromFile() -func (a *AddrBook) init() { - a.key = CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.addrNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.addrNew { - a.addrNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.addrOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.addrOld { - a.addrOld[i] = make(map[string]*knownAddress) - } -} - -func (a *AddrBook) OnStart() error { - a.QuitService.OnStart() - a.loadFromFile(a.filePath) - a.wg.Add(1) - go a.saveRoutine() - return nil -} - -func (a *AddrBook) OnStop() { - a.QuitService.OnStop() - a.wg.Wait() -} - -func (a *AddrBook) AddOurAddress(addr *NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - log.Info("Add our address to book", "addr", addr) - a.ourAddrs[addr.String()] = addr -} - -func (a *AddrBook) OurAddresses() []*NetAddress { - addrs := []*NetAddress{} - for _, addr := range a.ourAddrs { - addrs = append(addrs, addr) - } - return addrs -} - -func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - log.Info("Add address to book", "addr", addr, "src", src) - a.addAddress(addr, src) -} - -func (a *AddrBook) NeedMoreAddrs() bool { - return a.Size() < needAddressThreshold -} - -func (a *AddrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - return a.size() -} - -func (a *AddrBook) size() int { - return a.nNew + a.nOld -} - -// Pick an address to connect to with new/old bias. 
-func (a *AddrBook) PickAddress(newBias int) *NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.size() == 0 { - return nil - } - if newBias > 100 { - newBias = 100 - } - if newBias < 0 { - newBias = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias) - - if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation { - // pick random Old bucket. - var bucket map[string]*knownAddress = nil - for len(bucket) == 0 { - bucket = a.addrOld[a.rand.Intn(len(a.addrOld))] - } - // pick a random ka from bucket. - randIndex := a.rand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - PanicSanity("Should not happen") - } else { - // pick random New bucket. - var bucket map[string]*knownAddress = nil - for len(bucket) == 0 { - bucket = a.addrNew[a.rand.Intn(len(a.addrNew))] - } - // pick a random ka from bucket. - randIndex := a.rand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - PanicSanity("Should not happen") - } - return nil -} - -func (a *AddrBook) MarkGood(addr *NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - ka := a.addrLookup[addr.String()] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - a.moveToOld(ka) - } -} - -func (a *AddrBook) MarkAttempt(addr *NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - ka := a.addrLookup[addr.String()] - if ka == nil { - return - } - ka.markAttempt() -} - -func (a *AddrBook) MarkBad(addr *NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - ka := a.addrLookup[addr.String()] - if ka == nil { - return - } - // We currently just eject the address. - // In the future, consider blacklisting. - a.removeFromAllBuckets(ka) -} - -/* Peer exchange */ - -// GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -func (a *AddrBook) GetSelection() []*NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.size() == 0 { - return nil - } - - allAddr := make([]*NetAddress, a.size()) - i := 0 - for _, v := range a.addrLookup { - allAddr[i] = v.Addr - i++ - } - - numAddresses := MaxInt( - MinInt(minGetSelection, len(allAddr)), - len(allAddr)*getSelectionPercent/100) - numAddresses = MinInt(maxGetSelection, numAddresses) - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - j := rand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string - Addrs []*knownAddress -} - -func (a *AddrBook) saveToFile(filePath string) { - // Compile Addrs - addrs := []*knownAddress{} - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - log.Error("Failed to save AddrBook to file", "err", err) - return - } - err = WriteFileAtomic(filePath, jsonBytes) - if err != nil { - log.Error("Failed to save AddrBook to file", "file", filePath, "error", err) - } -} - -// Returns false if file does not exist. -// Panics if file is corrupt. 
-func (a *AddrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - PanicCrisis(Fmt("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - PanicCrisis(Fmt("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .addrNew & .addrOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.Addr.String()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} - -/* Private methods */ - -func (a *AddrBook) saveRoutine() { - dumpAddressTicker := time.NewTicker(dumpAddressInterval) -out: - for { - select { - case <-dumpAddressTicker.C: - log.Info("Saving AddrBook to file", "size", a.Size()) - a.saveToFile(a.filePath) - case <-a.Quit: - break out - } - } - dumpAddressTicker.Stop() - a.saveToFile(a.filePath) - a.wg.Done() - log.Notice("Address handler done") -} - -func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { - switch bucketType { - case bucketTypeNew: - return a.addrNew[bucketIdx] - case bucketTypeOld: - return a.addrOld[bucketIdx] - default: - PanicSanity("Should not happen") - return nil - } -} - -// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full. -// NOTE: currently it always returns true. -func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool { - // Sanity check - if ka.isOld() { - log.Warn(Fmt("Cannot add address already in old bucket to a new bucket: %v", ka)) - return false - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return true - } - - // Enforce max addresses. - if len(bucket) > newBucketSize { - log.Notice("new bucket is full, expiring old ") - a.expireNew(bucketIdx) - } - - // Add to bucket. - bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nNew++ - } - - // Ensure in addrLookup - a.addrLookup[addrStr] = ka - - return true -} - -// Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full. -func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { - // Sanity check - if ka.isNew() { - log.Warn(Fmt("Cannot add new address to old bucket: %v", ka)) - return false - } - if len(ka.Buckets) != 0 { - log.Warn(Fmt("Cannot add already old address to another old bucket: %v", ka)) - return false - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return true - } - - // Enforce max addresses. - if len(bucket) > oldBucketSize { - return false - } - - // Add to bucket. 
- bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[addrStr] = ka - - return true -} - -func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - log.Warn(Fmt("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.Addr.String()) - } -} - -func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.Addr.String()) -} - -func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -func (a *AddrBook) addAddress(addr, src *NetAddress) { - if !addr.Routable() { - log.Warn(Fmt("Cannot add non-routable address %v", addr)) - return - } - if _, ok := a.ourAddrs[addr.String()]; ok { - // Ignore our own listener address. - return - } - - ka := a.addrLookup[addr.String()] - - if ka != nil { - // Already old. - if ka.isOld() { - return - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return - } - // The more entries we have, the less likely we are to add more. - factor := int32(2 * len(ka.Buckets)) - if a.rand.Int31n(factor) != 0 { - return - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket := a.calcNewBucket(addr, src) - a.addToNewBucket(ka, bucket) - - log.Notice("Added new address", "address", addr, "total", a.size()) -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *AddrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.addrNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - log.Notice(Fmt("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. -// TODO: Move to old probabilistically. -// The better a node is, the less likely it should be evicted from an old bucket. -func (a *AddrBook) moveToOld(ka *knownAddress) { - // Sanity check - if ka.isOld() { - log.Warn(Fmt("Cannot promote address that is already old %v", ka)) - return - } - if len(ka.Buckets) == 0 { - log.Warn(Fmt("Cannot promote address that isn't in any new buckets %v", ka)) - return - } - - // Remember one of the buckets in which ka is in. - freedBucket := ka.Buckets[0] - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. 
- oldBucketIdx := a.calcOldBucket(ka.Addr) - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room, must evict something - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - // Find new bucket to put oldest in - newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src) - added := a.addToNewBucket(oldest, newBucketIdx) - // No space in newBucket either, just put it in freedBucket from above. - if !added { - added := a.addToNewBucket(oldest, freedBucket) - if !added { - log.Warn(Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket)) - } - } - // Finally, add to bucket again. - added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - log.Warn(Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } -} - -// doublesha256( key + sourcegroup + -// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets -func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(groupKey(addr))...) - data1 = append(data1, []byte(groupKey(src))...) - hash1 := doubleSha256(data1) - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % newBucketCount) -} - -// doublesha256( key + group + -// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets -func (a *AddrBook) calcOldBucket(addr *NetAddress) int { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1 := doubleSha256(data1) - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % oldBucketCount) -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable for an unroutable -// address. -func groupKey(na *NetAddress) string { - if na.Local() { - return "local" - } - if !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String() - } - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := net.IP(na.IP[12:16]) - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - } - - if na.RFC3964() { - ip := net.IP(na.IP[2:7]) - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - - } - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. 
- bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), - Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String() -} - -//----------------------------------------------------------------------------- - -/* - knownAddress - - tracks information about a known network address that is used - to determine how viable an address is. -*/ -type knownAddress struct { - Addr *NetAddress - Src *NetAddress - Attempts int32 - LastAttempt time.Time - LastSuccess time.Time - BucketType byte - Buckets []int -} - -func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts += 1 -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question has not been tried in the last - minute and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a month - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. -*/ -func (ka *knownAddress) isBad() bool { - // Has been attempted in the last minute --> good - if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) { - return false - } - - // Over a month old? - if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? 
- if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook_test.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook_test.go deleted file mode 100644 index 50986452e..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/addrbook_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package p2p - -import ( - "fmt" - "io/ioutil" - "math/rand" - "testing" -) - -func createTempFileName(prefix string) string { - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - fname := f.Name() - err = f.Close() - if err != nil { - panic(err) - } - return fname -} - -func TestEmpty(t *testing.T) { - fname := createTempFileName("addrbook_test") - // t.Logf("New tempfile name: %v", fname) - - // Save an empty book & load it - book := NewAddrBook(fname) - book.saveToFile(fname) - - book = NewAddrBook(fname) - book.loadFromFile(fname) - - if book.Size() != 0 { - t.Errorf("Expected 0 addresses, found %v", book.Size()) - } -} - -func randIPv4Address() *NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - rand.Intn(254)+1, - rand.Intn(255), - rand.Intn(255), - rand.Intn(255), - ) - port := rand.Intn(65535-1) + 1 - addr := NewNetAddressString(fmt.Sprintf("%v:%v", ip, port)) - if addr.Routable() { - return addr - } - } -} - -func TestSaveAddresses(t *testing.T) { - fname := createTempFileName("addrbook_test") - //t.Logf("New tempfile name: %v", fname) - - // Create some random addresses - randAddrs := []struct { - addr *NetAddress - src *NetAddress - }{} - for i := 0; i < 100; i++ { - addr := randIPv4Address() - src := randIPv4Address() - randAddrs = append(randAddrs, struct { - addr *NetAddress - src *NetAddress - }{ - addr: addr, - src: src, - }) - } - - // Create the book & populate & save - book := NewAddrBook(fname) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - if book.Size() != 100 { - t.Errorf("Expected 100 addresses, found %v", book.Size()) - } - book.saveToFile(fname) - - // Reload the book - book = NewAddrBook(fname) - book.loadFromFile(fname) - - // Test ... - - if book.Size() != 100 { - t.Errorf("Expected 100 addresses, found %v", book.Size()) - } - - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - ka := book.addrLookup[addr.String()] - if ka == nil { - t.Fatalf("Expected to find KnownAddress %v but wasn't there.", addr) - } - if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) { - t.Fatalf("KnownAddress doesn't match addr & src") - } - } -} - -func TestPromoteToOld(t *testing.T) { - fname := createTempFileName("addrbook_test") - t.Logf("New tempfile name: %v", fname) - - // Create some random addresses - randAddrs := []struct { - addr *NetAddress - src *NetAddress - }{} - for i := 0; i < 100; i++ { - addr := randIPv4Address() - src := randIPv4Address() - randAddrs = append(randAddrs, struct { - addr *NetAddress - src *NetAddress - }{ - addr: addr, - src: src, - }) - } - - // Create the book & populate & save - book := NewAddrBook(fname) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - // Attempt all addresses. 
- for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr) - } - } - book.saveToFile(fname) - - // Reload the book - book = NewAddrBook(fname) - book.loadFromFile(fname) - - // Test ... - - if book.Size() != 100 { - t.Errorf("Expected 100 addresses, found %v", book.Size()) - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/config.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/config.go deleted file mode 100644 index 2d6e4bcd7..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/config.go +++ /dev/null @@ -1,14 +0,0 @@ - -package p2p - -import ( - cfg "github.com/tendermint/go-config" -) - -var config cfg.Config = nil - -func init() { - cfg.OnConfig(func(newConfig cfg.Config) { - config = newConfig - }) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/connection.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/connection.go deleted file mode 100644 index baa0dc1b4..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/connection.go +++ /dev/null @@ -1,640 +0,0 @@ -package p2p - -import ( - "bufio" - "fmt" - "io" - "math" - "net" - "runtime/debug" - "sync/atomic" - "time" - - flow "github.com/tendermint/flowcontrol" - . "github.com/tendermint/go-common" - "github.com/tendermint/go-wire" //"github.com/tendermint/log15" -) - -const ( - numBatchMsgPackets = 10 - minReadBufferSize = 1024 - minWriteBufferSize = 1024 - idleTimeoutMinutes = 5 - updateStatsSeconds = 2 - pingTimeoutSeconds = 40 - defaultSendRate = 51200 // 50Kb/s - defaultRecvRate = 51200 // 50Kb/s - flushThrottleMS = 100 - defaultSendQueueCapacity = 1 - defaultRecvBufferCapacity = 4096 - defaultSendTimeoutSeconds = 10 -) - -type receiveCbFunc func(chID byte, msgBytes []byte) -type errorCbFunc func(interface{}) - -/* -Each peer has one `MConnection` (multiplex connection) instance. - -__multiplex__ *noun* a system or signal involving simultaneous transmission of -several messages along a single channel of communication. - -Each `MConnection` handles message transmission on multiple abstract communication -`Channel`s. Each channel has a globally unique byte id. -The byte id and the relative priorities of each `Channel` are configured upon -initialization of the connection. - -There are two methods for sending messages: - func (m MConnection) Send(chID byte, msg interface{}) bool {} - func (m MConnection) TrySend(chID byte, msg interface{}) bool {} - -`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued -for the channel with the given id byte `chID`, or until the request times out. -The message `msg` is serialized using the `tendermint/wire` submodule's -`WriteBinary()` reflection routine. - -`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's -queue is full. - -Inbound message bytes are handled with an onReceive callback function. 
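Before the implementation below, a hedged usage sketch of the Send/TrySend contract just described. It assumes the vendored import path github.com/tendermint/go-p2p, a hypothetical channel ID 0x20 that both ends have configured, and a plain string as the message (anything go-wire can encode should do); real callers normally go through Peer and Switch rather than driving an MConnection directly:

package main

import (
	"net"

	p2p "github.com/tendermint/go-p2p"
)

func main() {
	conn, err := net.Dial("tcp", "203.0.113.10:46656") // hypothetical remote peer
	if err != nil {
		panic(err)
	}

	chDescs := []*p2p.ChannelDescriptor{
		{ID: 0x20, Priority: 5, SendQueueCapacity: 100}, // hypothetical channel
	}
	onReceive := func(chID byte, msgBytes []byte) {
		// called with each fully reassembled message for chID
	}
	onError := func(r interface{}) {
		// remote disconnects and panics in the send/recv routines land here
	}

	mconn := p2p.NewMConnection(conn, chDescs, onReceive, onError)
	mconn.Start() // spawns sendRoutine and recvRoutine

	// Send blocks until the message is queued on channel 0x20 (or times out);
	// TrySend gives up immediately if that channel's queue is full.
	if !mconn.Send(0x20, "hello") {
		mconn.TrySend(0x20, "hello again")
	}
	mconn.Stop()
}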
-*/ -type MConnection struct { - BaseService - - conn net.Conn - bufReader *bufio.Reader - bufWriter *bufio.Writer - sendMonitor *flow.Monitor - recvMonitor *flow.Monitor - sendRate int64 - recvRate int64 - send chan struct{} - pong chan struct{} - channels []*Channel - channelsIdx map[byte]*Channel - onReceive receiveCbFunc - onError errorCbFunc - errored uint32 - - quit chan struct{} - flushTimer *ThrottleTimer // flush writes as necessary but throttled. - pingTimer *RepeatTimer // send pings periodically - chStatsTimer *RepeatTimer // update channel stats periodically - - LocalAddress *NetAddress - RemoteAddress *NetAddress -} - -func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection { - - mconn := &MConnection{ - conn: conn, - bufReader: bufio.NewReaderSize(conn, minReadBufferSize), - bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), - sendRate: defaultSendRate, - recvRate: defaultRecvRate, - send: make(chan struct{}, 1), - pong: make(chan struct{}), - onReceive: onReceive, - onError: onError, - - // Initialized in Start() - quit: nil, - flushTimer: nil, - pingTimer: nil, - chStatsTimer: nil, - - LocalAddress: NewNetAddress(conn.LocalAddr()), - RemoteAddress: NewNetAddress(conn.RemoteAddr()), - } - - // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} - - for _, desc := range chDescs { - channel := newChannel(mconn, desc) - channelsIdx[channel.id] = channel - channels = append(channels, channel) - } - mconn.channels = channels - mconn.channelsIdx = channelsIdx - - mconn.BaseService = *NewBaseService(log, "MConnection", mconn) - - return mconn -} - -func (c *MConnection) OnStart() error { - c.BaseService.OnStart() - c.quit = make(chan struct{}) - c.flushTimer = NewThrottleTimer("flush", flushThrottleMS*time.Millisecond) - c.pingTimer = NewRepeatTimer("ping", pingTimeoutSeconds*time.Second) - c.chStatsTimer = NewRepeatTimer("chStats", updateStatsSeconds*time.Second) - go c.sendRoutine() - go c.recvRoutine() - return nil -} - -func (c *MConnection) OnStop() { - c.BaseService.OnStop() - c.flushTimer.Stop() - c.pingTimer.Stop() - c.chStatsTimer.Stop() - if c.quit != nil { - close(c.quit) - } - c.conn.Close() - // We can't close pong safely here because - // recvRoutine may write to it after we've stopped. - // Though it doesn't need to get closed at all, - // we close it @ recvRoutine. - // close(c.pong) -} - -func (c *MConnection) String() string { - return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr()) -} - -func (c *MConnection) flush() { - log.Debug("Flush", "conn", c) - err := c.bufWriter.Flush() - if err != nil { - log.Warn("MConnection flush failed", "error", err) - } -} - -// Catch panics, usually caused by remote disconnects. -func (c *MConnection) _recover() { - if r := recover(); r != nil { - stack := debug.Stack() - err := StackError{r, stack} - c.stopForError(err) - } -} - -func (c *MConnection) stopForError(r interface{}) { - c.Stop() - if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { - if c.onError != nil { - c.onError(r) - } - } -} - -// Queues a message to be sent to channel. -func (c *MConnection) Send(chID byte, msg interface{}) bool { - if !c.IsRunning() { - return false - } - - log.Info("Send", "channel", chID, "conn", c, "msg", msg) //, "bytes", wire.BinaryBytes(msg)) - - // Send message to channel. 
- channel, ok := c.channelsIdx[chID] - if !ok { - log.Error(Fmt("Cannot send bytes, unknown channel %X", chID)) - return false - } - - success := channel.sendBytes(wire.BinaryBytes(msg)) - if success { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } else { - log.Warn("Send failed", "channel", chID, "conn", c, "msg", msg) - } - return success -} - -// Queues a message to be sent to channel. -// Nonblocking, returns true if successful. -func (c *MConnection) TrySend(chID byte, msg interface{}) bool { - if !c.IsRunning() { - return false - } - - log.Info("TrySend", "channel", chID, "conn", c, "msg", msg) - - // Send message to channel. - channel, ok := c.channelsIdx[chID] - if !ok { - log.Error(Fmt("Cannot send bytes, unknown channel %X", chID)) - return false - } - - ok = channel.trySendBytes(wire.BinaryBytes(msg)) - if ok { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } - - return ok -} - -func (c *MConnection) CanSend(chID byte) bool { - if !c.IsRunning() { - return false - } - - channel, ok := c.channelsIdx[chID] - if !ok { - log.Error(Fmt("Unknown channel %X", chID)) - return false - } - return channel.canSend() -} - -// sendRoutine polls for packets to send from channels. -func (c *MConnection) sendRoutine() { - defer c._recover() - -FOR_LOOP: - for { - var n int64 - var err error - select { - case <-c.flushTimer.Ch: - // NOTE: flushTimer.Set() must be called every time - // something is written to .bufWriter. - c.flush() - case <-c.chStatsTimer.Ch: - for _, channel := range c.channels { - channel.updateStats() - } - case <-c.pingTimer.Ch: - log.Info("Send Ping") - wire.WriteByte(packetTypePing, c.bufWriter, &n, &err) - c.sendMonitor.Update(int(n)) - c.flush() - case <-c.pong: - log.Info("Send Pong") - wire.WriteByte(packetTypePong, c.bufWriter, &n, &err) - c.sendMonitor.Update(int(n)) - c.flush() - case <-c.quit: - break FOR_LOOP - case <-c.send: - // Send some msgPackets - eof := c.sendSomeMsgPackets() - if !eof { - // Keep sendRoutine awake. - select { - case c.send <- struct{}{}: - default: - } - } - } - - if !c.IsRunning() { - break FOR_LOOP - } - if err != nil { - log.Warn("Connection failed @ sendRoutine", "conn", c, "error", err) - c.stopForError(err) - break FOR_LOOP - } - } - - // Cleanup -} - -// Returns true if messages from channels were exhausted. -// Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomeMsgPackets() bool { - // Block until .sendMonitor says we can write. - // Once we're ready we send more than we asked for, - // but amortized it should even out. - c.sendMonitor.Limit(maxMsgPacketSize, atomic.LoadInt64(&c.sendRate), true) - - // Now send some msgPackets. - for i := 0; i < numBatchMsgPackets; i++ { - if c.sendMsgPacket() { - return true - } - } - return false -} - -// Returns true if messages from channels were exhausted. -func (c *MConnection) sendMsgPacket() bool { - // Choose a channel to create a msgPacket from. - // The chosen channel will be the one whose recentlySent/priority is the least. - var leastRatio float32 = math.MaxFloat32 - var leastChannel *Channel - for _, channel := range c.channels { - // If nothing to send, skip this channel - if !channel.isSendPending() { - continue - } - // Get ratio, and keep track of lowest ratio. - ratio := float32(channel.recentlySent) / float32(channel.priority) - if ratio < leastRatio { - leastRatio = ratio - leastChannel = channel - } - } - - // Nothing to send? 
- if leastChannel == nil { - return true - } else { - // log.Info("Found a msgPacket to send") - } - - // Make & send a msgPacket from this channel - n, err := leastChannel.writeMsgPacketTo(c.bufWriter) - if err != nil { - log.Warn("Failed to write msgPacket", "error", err) - c.stopForError(err) - return true - } - c.sendMonitor.Update(int(n)) - c.flushTimer.Set() - return false -} - -// recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer. -// After a whole message has been assembled, it's pushed to onReceive(). -// Blocks depending on how the connection is throttled. -func (c *MConnection) recvRoutine() { - defer c._recover() - -FOR_LOOP: - for { - // Block until .recvMonitor says we can read. - c.recvMonitor.Limit(maxMsgPacketSize, atomic.LoadInt64(&c.recvRate), true) - - /* - // Peek into bufReader for debugging - if numBytes := c.bufReader.Buffered(); numBytes > 0 { - log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte { - bytes, err := c.bufReader.Peek(MinInt(numBytes, 100)) - if err == nil { - return bytes - } else { - log.Warn("Error peeking connection buffer", "error", err) - return nil - } - }}) - } - */ - - // Read packet type - var n int64 - var err error - pktType := wire.ReadByte(c.bufReader, &n, &err) - c.recvMonitor.Update(int(n)) - if err != nil { - if c.IsRunning() { - log.Warn("Connection failed @ recvRoutine (reading byte)", "conn", c, "error", err) - c.stopForError(err) - } - break FOR_LOOP - } - - // Read more depending on packet type. - switch pktType { - case packetTypePing: - // TODO: prevent abuse, as they cause flush()'s. - log.Info("Receive Ping") - c.pong <- struct{}{} - case packetTypePong: - // do nothing - log.Info("Receive Pong") - case packetTypeMsg: - pkt, n, err := msgPacket{}, int64(0), error(nil) - wire.ReadBinaryPtr(&pkt, c.bufReader, &n, &err) - c.recvMonitor.Update(int(n)) - if err != nil { - if c.IsRunning() { - log.Warn("Connection failed @ recvRoutine", "conn", c, "error", err) - c.stopForError(err) - } - break FOR_LOOP - } - channel, ok := c.channelsIdx[pkt.ChannelID] - if !ok || channel == nil { - PanicQ(Fmt("Unknown channel %X", pkt.ChannelID)) - } - msgBytes, err := channel.recvMsgPacket(pkt) - if err != nil { - if c.IsRunning() { - log.Warn("Connection failed @ recvRoutine", "conn", c, "error", err) - c.stopForError(err) - } - break FOR_LOOP - } - if msgBytes != nil { - log.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", msgBytes) - c.onReceive(pkt.ChannelID, msgBytes) - } - default: - PanicSanity(Fmt("Unknown message type %X", pktType)) - } - - // TODO: shouldn't this go in the sendRoutine? - // Better to send a ping packet when *we* haven't sent anything for a while. - c.pingTimer.Reset() - } - - // Cleanup - close(c.pong) - for _ = range c.pong { - // Drain - } -} - -//----------------------------------------------------------------------------- - -type ChannelDescriptor struct { - ID byte - Priority int - SendQueueCapacity int - RecvBufferCapacity int -} - -func (chDesc *ChannelDescriptor) FillDefaults() { - if chDesc.SendQueueCapacity == 0 { - chDesc.SendQueueCapacity = defaultSendQueueCapacity - } - if chDesc.RecvBufferCapacity == 0 { - chDesc.RecvBufferCapacity = defaultRecvBufferCapacity - } -} - -// TODO: lowercase. -// NOTE: not goroutine-safe. -type Channel struct { - conn *MConnection - desc *ChannelDescriptor - id byte - sendQueue chan []byte - sendQueueSize int32 // atomic. 
- recving []byte - sending []byte - priority int - recentlySent int64 // exponential moving average -} - -func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel { - desc.FillDefaults() - if desc.Priority <= 0 { - PanicSanity("Channel default priority must be a postive integer") - } - return &Channel{ - conn: conn, - desc: desc, - id: desc.ID, - sendQueue: make(chan []byte, desc.SendQueueCapacity), - recving: make([]byte, 0, desc.RecvBufferCapacity), - priority: desc.Priority, - } -} - -// Queues message to send to this channel. -// Goroutine-safe -// Times out (and returns false) after defaultSendTimeoutSeconds -func (ch *Channel) sendBytes(bytes []byte) bool { - timeout := time.NewTimer(defaultSendTimeoutSeconds * time.Second) - select { - case <-timeout.C: - // timeout - return false - case ch.sendQueue <- bytes: - atomic.AddInt32(&ch.sendQueueSize, 1) - return true - } -} - -// Queues message to send to this channel. -// Nonblocking, returns true if successful. -// Goroutine-safe -func (ch *Channel) trySendBytes(bytes []byte) bool { - select { - case ch.sendQueue <- bytes: - atomic.AddInt32(&ch.sendQueueSize, 1) - return true - default: - return false - } -} - -// Goroutine-safe -func (ch *Channel) loadSendQueueSize() (size int) { - return int(atomic.LoadInt32(&ch.sendQueueSize)) -} - -// Goroutine-safe -// Use only as a heuristic. -func (ch *Channel) canSend() bool { - return ch.loadSendQueueSize() < defaultSendQueueCapacity -} - -// Returns true if any msgPackets are pending to be sent. -// Call before calling nextMsgPacket() -// Goroutine-safe -func (ch *Channel) isSendPending() bool { - if len(ch.sending) == 0 { - if len(ch.sendQueue) == 0 { - return false - } - ch.sending = <-ch.sendQueue - } - return true -} - -// Creates a new msgPacket to send. -// Not goroutine-safe -func (ch *Channel) nextMsgPacket() msgPacket { - packet := msgPacket{} - packet.ChannelID = byte(ch.id) - packet.Bytes = ch.sending[:MinInt(maxMsgPacketSize, len(ch.sending))] - if len(ch.sending) <= maxMsgPacketSize { - packet.EOF = byte(0x01) - ch.sending = nil - atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize - } else { - packet.EOF = byte(0x00) - ch.sending = ch.sending[MinInt(maxMsgPacketSize, len(ch.sending)):] - } - return packet -} - -// Writes next msgPacket to w. -// Not goroutine-safe -func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int64, err error) { - packet := ch.nextMsgPacket() - log.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet) - wire.WriteByte(packetTypeMsg, w, &n, &err) - wire.WriteBinary(packet, w, &n, &err) - if err != nil { - ch.recentlySent += n - } - return -} - -// Handles incoming msgPackets. Returns a msg bytes if msg is complete. -// Not goroutine-safe -func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) { - // log.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet) - if wire.MaxBinaryReadSize < len(ch.recving)+len(packet.Bytes) { - return nil, wire.ErrBinaryReadSizeOverflow - } - ch.recving = append(ch.recving, packet.Bytes...) - if packet.EOF == byte(0x01) { - msgBytes := ch.recving - ch.recving = make([]byte, 0, defaultRecvBufferCapacity) - return msgBytes, nil - } - return nil, nil -} - -// Call this periodically to update stats for throttling purposes. -// Not goroutine-safe -func (ch *Channel) updateStats() { - // Exponential decay of stats. - // TODO: optimize. 
- ch.recentlySent = int64(float64(ch.recentlySent) * 0.5) -} - -//----------------------------------------------------------------------------- - -const ( - maxMsgPacketSize = 1024 - packetTypePing = byte(0x01) - packetTypePong = byte(0x02) - packetTypeMsg = byte(0x03) -) - -// Messages in channels are chopped into smaller msgPackets for multiplexing. -type msgPacket struct { - ChannelID byte - EOF byte // 1 means message ends here. - Bytes []byte -} - -func (p msgPacket) String() string { - return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF) -} - -//----------------------------------------------------------------------------- - -// Convenience struct for writing typed messages. -// Reading requires a custom decoder that switches on the first type byte of a byteslice. -type TypedMessage struct { - Type byte - Msg interface{} -} - -func (tm TypedMessage) String() string { - return fmt.Sprintf("TMsg{%X:%v}", tm.Type, tm.Msg) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/listener.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/listener.go deleted file mode 100644 index b9312a551..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/listener.go +++ /dev/null @@ -1,212 +0,0 @@ -package p2p - -import ( - "fmt" - "net" - "strconv" - "time" - - . "github.com/tendermint/go-common" - "github.com/tendermint/go-p2p/upnp" -) - -type Listener interface { - Connections() <-chan net.Conn - InternalAddress() *NetAddress - ExternalAddress() *NetAddress - String() string - Stop() bool -} - -// Implements Listener -type DefaultListener struct { - BaseService - - listener net.Listener - intAddr *NetAddress - extAddr *NetAddress - connections chan net.Conn -} - -const ( - numBufferedConnections = 10 - defaultExternalPort = 8770 - tryListenSeconds = 5 -) - -func splitHostPort(addr string) (host string, port int) { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - PanicSanity(err) - } - port, err = strconv.Atoi(portStr) - if err != nil { - PanicSanity(err) - } - return host, port -} - -func NewDefaultListener(protocol string, lAddr string) Listener { - // Local listen IP & port - lAddrIP, lAddrPort := splitHostPort(lAddr) - - // Create listener - var listener net.Listener - var err error - for i := 0; i < tryListenSeconds; i++ { - listener, err = net.Listen(protocol, lAddr) - if err == nil { - break - } else if i < tryListenSeconds-1 { - time.Sleep(time.Second * 1) - } - } - if err != nil { - PanicCrisis(err) - } - // Actual listener local IP & port - listenerIP, listenerPort := splitHostPort(listener.Addr().String()) - log.Info("Local listener", "ip", listenerIP, "port", listenerPort) - - // Determine internal address... - var intAddr *NetAddress = NewNetAddressString(lAddr) - - // Determine external address... - var extAddr *NetAddress - if !config.GetBool("skip_upnp") { - // If the lAddrIP is INADDR_ANY, try UPnP - if lAddrIP == "" || lAddrIP == "0.0.0.0" { - extAddr = getUPNPExternalAddress(lAddrPort, listenerPort) - } - } - // Otherwise just use the local address... 
- if extAddr == nil { - extAddr = getNaiveExternalAddress(listenerPort) - } - if extAddr == nil { - PanicCrisis("Could not determine external address!") - } - - dl := &DefaultListener{ - listener: listener, - intAddr: intAddr, - extAddr: extAddr, - connections: make(chan net.Conn, numBufferedConnections), - } - dl.BaseService = *NewBaseService(log, "DefaultListener", dl) - dl.Start() // Started upon construction - return dl -} - -func (l *DefaultListener) OnStart() error { - l.BaseService.OnStart() - go l.listenRoutine() - return nil -} - -func (l *DefaultListener) OnStop() { - l.BaseService.OnStop() - l.listener.Close() -} - -// Accept connections and pass on the channel -func (l *DefaultListener) listenRoutine() { - for { - conn, err := l.listener.Accept() - - if !l.IsRunning() { - break // Go to cleanup - } - - // listener wasn't stopped, - // yet we encountered an error. - if err != nil { - PanicCrisis(err) - } - - l.connections <- conn - } - - // Cleanup - close(l.connections) - for _ = range l.connections { - // Drain - } -} - -// A channel of inbound connections. -// It gets closed when the listener closes. -func (l *DefaultListener) Connections() <-chan net.Conn { - return l.connections -} - -func (l *DefaultListener) InternalAddress() *NetAddress { - return l.intAddr -} - -func (l *DefaultListener) ExternalAddress() *NetAddress { - return l.extAddr -} - -// NOTE: The returned listener is already Accept()'ing. -// So it's not suitable to pass into http.Serve(). -func (l *DefaultListener) NetListener() net.Listener { - return l.listener -} - -func (l *DefaultListener) String() string { - return fmt.Sprintf("Listener(@%v)", l.extAddr) -} - -/* external address helpers */ - -// UPNP external address discovery & port mapping -func getUPNPExternalAddress(externalPort, internalPort int) *NetAddress { - log.Info("Getting UPNP external address") - nat, err := upnp.Discover() - if err != nil { - log.Info("Could not perform UPNP discover", "error", err) - return nil - } - - ext, err := nat.GetExternalAddress() - if err != nil { - log.Info("Could not get UPNP external address", "error", err) - return nil - } - - // UPnP can't seem to get the external port, so let's just be explicit. 
- if externalPort == 0 { - externalPort = defaultExternalPort - } - - externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0) - if err != nil { - log.Info("Could not add UPNP port mapping", "error", err) - return nil - } - - log.Info("Got UPNP external address", "address", ext) - return NewNetAddressIPPort(ext, uint16(externalPort)) -} - -// TODO: use syscalls: http://pastebin.com/9exZG4rh -func getNaiveExternalAddress(port int) *NetAddress { - addrs, err := net.InterfaceAddrs() - if err != nil { - PanicCrisis(Fmt("Could not fetch interface addresses: %v", err)) - } - - for _, a := range addrs { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { - continue - } // loopback - return NewNetAddressIPPort(ipnet.IP, uint16(port)) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/log.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/log.go deleted file mode 100644 index ac1ff22a5..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/log.go +++ /dev/null @@ -1,7 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/go-logger" -) - -var log = logger.New("module", "p2p") diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/netaddress.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/netaddress.go deleted file mode 100644 index 79ee0f258..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/netaddress.go +++ /dev/null @@ -1,217 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "fmt" - "net" - "strconv" - "time" - - . "github.com/tendermint/go-common" -) - -type NetAddress struct { - IP net.IP - Port uint16 - str string -} - -// TODO: socks proxies? -func NewNetAddress(addr net.Addr) *NetAddress { - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) - } - ip := tcpAddr.IP - port := uint16(tcpAddr.Port) - return NewNetAddressIPPort(ip, port) -} - -// Also resolves the host if host is not an IP. 
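The constructors follow. A minimal sketch of using them; the documentation address 203.0.113.10 and port 46656 are placeholders, and the example sticks to methods that do not require the package config (Routable, for instance, consults config.GetBool and needs a config to be registered first):

package main

import (
	"fmt"
	"net"

	p2p "github.com/tendermint/go-p2p"
)

func main() {
	// NewNetAddressString (shown next) resolves the host part if needed and
	// panics via PanicSanity on malformed input.
	na := p2p.NewNetAddressString("203.0.113.10:46656")
	fmt.Println(na.String()) // "203.0.113.10:46656"

	// The IP/port constructor skips parsing entirely.
	nb := p2p.NewNetAddressIPPort(net.ParseIP("10.0.0.1"), 46656)
	fmt.Println(nb.RFC1918(), na.Equals(nb)) // "true false" (10.0.0.0/8 is private)
}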
-func NewNetAddressString(addr string) *NetAddress { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - PanicSanity(err) - } - ip := net.ParseIP(host) - if ip == nil { - if len(host) > 0 { - ips, err := net.LookupIP(host) - if err != nil { - PanicSanity(err) - } - ip = ips[0] - } - } - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - PanicSanity(err) - } - na := NewNetAddressIPPort(ip, uint16(port)) - return na -} - -func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { - na := &NetAddress{ - IP: ip, - Port: port, - str: net.JoinHostPort( - ip.String(), - strconv.FormatUint(uint64(port), 10), - ), - } - return na -} - -func (na *NetAddress) Equals(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { - return na.String() == o.String() - } else { - return false - } -} - -func (na *NetAddress) Less(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { - return na.String() < o.String() - } else { - PanicSanity("Cannot compare unequal types") - return false - } -} - -func (na *NetAddress) String() string { - if na.str == "" { - na.str = net.JoinHostPort( - na.IP.String(), - strconv.FormatUint(uint64(na.Port), 10), - ) - } - return na.str -} - -func (na *NetAddress) Dial() (net.Conn, error) { - conn, err := net.Dial("tcp", na.String()) - if err != nil { - return nil, err - } - return conn, nil -} - -func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) { - conn, err := net.DialTimeout("tcp", na.String(), timeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func (na *NetAddress) Routable() bool { - if config.GetBool("local_routing") { - return na.Valid() - } - - // TODO(oga) bitcoind doesn't include RFC3849 here, but should we? - return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() || - na.RFC4193() || na.RFC4843() || na.Local()) -} - -// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero -// address or one that matches the RFC3849 documentation address format. -func (na *NetAddress) Valid() bool { - return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() || - na.IP.Equal(net.IPv4bcast)) -} - -func (na *NetAddress) Local() bool { - return na.IP.IsLoopback() || zero4.Contains(na.IP) -} - -func (na *NetAddress) ReachabilityTo(o *NetAddress) int { - const ( - Unreachable = 0 - Default = iota - Teredo - Ipv6_weak - Ipv4 - Ipv6_strong - Private - ) - if !na.Routable() { - return Unreachable - } else if na.RFC4380() { - if !o.Routable() { - return Default - } else if o.RFC4380() { - return Teredo - } else if o.IP.To4() != nil { - return Ipv4 - } else { // ipv6 - return Ipv6_weak - } - } else if na.IP.To4() != nil { - if o.Routable() && o.IP.To4() != nil { - return Ipv4 - } - return Default - } else /* ipv6 */ { - var tunnelled bool - // Is our v6 is tunnelled? - if o.RFC3964() || o.RFC6052() || o.RFC6145() { - tunnelled = true - } - if !o.Routable() { - return Default - } else if o.RFC4380() { - return Teredo - } else if o.IP.To4() != nil { - return Ipv4 - } else if tunnelled { - // only prioritise ipv6 if we aren't tunnelling it. 
- return Ipv6_weak - } - return Ipv6_strong - } -} - -// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) -// RFC3849: IPv6 Documentation address (2001:0DB8::/32) -// RFC3927: IPv4 Autoconfig (169.254.0.0/16) -// RFC3964: IPv6 6to4 (2002::/16) -// RFC4193: IPv6 unique local (FC00::/7) -// RFC4380: IPv6 Teredo tunneling (2001::/32) -// RFC4843: IPv6 ORCHID: (2001:10::/28) -// RFC4862: IPv6 Autoconfig (FE80::/64) -// RFC6052: IPv6 well known prefix (64:FF9B::/96) -// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96 -var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)} -var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)} -var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)} -var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)} -var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)} -var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)} -var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)} -var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)} -var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)} -var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)} -var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)} - -func (na *NetAddress) RFC1918() bool { - return rfc1918_10.Contains(na.IP) || - rfc1918_192.Contains(na.IP) || - rfc1918_172.Contains(na.IP) -} -func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } -func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } -func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } -func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } -func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } -func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } -func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } -func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } -func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer.go deleted file mode 100644 index 1a463b966..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer.go +++ /dev/null @@ -1,134 +0,0 @@ -package p2p - -import ( - "fmt" - "io" - "net" - - . "github.com/tendermint/go-common" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/go-wire" -) - -type Peer struct { - BaseService - - outbound bool - mconn *MConnection - - *types.NodeInfo - Key string - Data *CMap // User data. -} - -// NOTE: blocking -// Before creating a peer with newPeer(), perform a handshake on connection. 
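peerHandshake itself follows. Since it and newPeer are unexported, the sketch below is written as if it lived inside package p2p, roughly the way the Switch (not shown in this file) wires them together on the dial side; the helper name, the 3-second timeout and the error handling are illustrative assumptions:

package p2p

import (
	"time"

	"github.com/tendermint/tendermint/types"
)

// dialAndAddPeer is a hypothetical helper sketching the dial-side flow the
// comments above describe: dial, handshake (blocking), then wrap the conn in
// a Peer and start it.
func dialAndAddPeer(addr *NetAddress, ourNodeInfo *types.NodeInfo,
	reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
	onPeerError func(*Peer, interface{})) (*Peer, error) {

	conn, err := addr.DialTimeout(3 * time.Second) // timeout chosen arbitrarily here
	if err != nil {
		return nil, err
	}
	// Exchange NodeInfo before constructing the Peer.
	peerNodeInfo, err := peerHandshake(conn, ourNodeInfo)
	if err != nil {
		conn.Close()
		return nil, err
	}
	p := newPeer(conn, peerNodeInfo, true, reactorsByCh, chDescs, onPeerError)
	if _, err := p.Start(); err != nil { // starts the underlying MConnection
		return nil, err
	}
	return p, nil
}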
-func peerHandshake(conn net.Conn, ourNodeInfo *types.NodeInfo) (*types.NodeInfo, error) { - var peerNodeInfo = new(types.NodeInfo) - var err1 error - var err2 error - Parallel( - func() { - var n int64 - wire.WriteBinary(ourNodeInfo, conn, &n, &err1) - }, - func() { - var n int64 - wire.ReadBinary(peerNodeInfo, conn, &n, &err2) - log.Notice("Peer handshake", "peerNodeInfo", peerNodeInfo) - }) - if err1 != nil { - return nil, err1 - } - if err2 != nil { - return nil, err2 - } - return peerNodeInfo, nil -} - -// NOTE: call peerHandshake on conn before calling newPeer(). -func newPeer(conn net.Conn, peerNodeInfo *types.NodeInfo, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{})) *Peer { - var p *Peer - onReceive := func(chID byte, msgBytes []byte) { - reactor := reactorsByCh[chID] - if reactor == nil { - PanicSanity(Fmt("Unknown channel %X", chID)) - } - reactor.Receive(chID, p, msgBytes) - } - onError := func(r interface{}) { - p.Stop() - onPeerError(p, r) - } - mconn := NewMConnection(conn, chDescs, onReceive, onError) - p = &Peer{ - outbound: outbound, - mconn: mconn, - NodeInfo: peerNodeInfo, - Key: peerNodeInfo.PubKey.KeyString(), - Data: NewCMap(), - } - p.BaseService = *NewBaseService(log, "Peer", p) - return p -} - -func (p *Peer) OnStart() error { - p.BaseService.OnStart() - _, err := p.mconn.Start() - return err -} - -func (p *Peer) OnStop() { - p.BaseService.OnStop() - p.mconn.Stop() -} - -func (p *Peer) Connection() *MConnection { - return p.mconn -} - -func (p *Peer) IsOutbound() bool { - return p.outbound -} - -func (p *Peer) Send(chID byte, msg interface{}) bool { - if !p.IsRunning() { - return false - } - return p.mconn.Send(chID, msg) -} - -func (p *Peer) TrySend(chID byte, msg interface{}) bool { - if !p.IsRunning() { - return false - } - return p.mconn.TrySend(chID, msg) -} - -func (p *Peer) CanSend(chID byte) bool { - if !p.IsRunning() { - return false - } - return p.mconn.CanSend(chID) -} - -func (p *Peer) WriteTo(w io.Writer) (n int64, err error) { - wire.WriteString(p.Key, w, &n, &err) - return -} - -func (p *Peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.Key[:12]) - } else { - return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.Key[:12]) - } -} - -func (p *Peer) Equals(other *Peer) bool { - return p.Key == other.Key -} - -func (p *Peer) Get(key string) interface{} { - return p.Data.Get(key) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set.go deleted file mode 100644 index 8a5f8e6d4..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set.go +++ /dev/null @@ -1,227 +0,0 @@ -package p2p - -import ( - "net" - "strings" - "sync" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key string) bool - Get(key string) *Peer - List() []*Peer - Size() int -} - -//----------------------------------------------------------------------------- - -var ( - maxPeersPerIPRange = [4]int{11, 7, 5, 3} // ... -) - -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. 
-// We also track how many peers per IP range and avoid too many -type PeerSet struct { - mtx sync.Mutex - lookup map[string]*peerSetItem - list []*Peer - connectedIPs *nestedCounter -} - -type peerSetItem struct { - peer *Peer - index int -} - -func NewPeerSet() *PeerSet { - return &PeerSet{ - lookup: make(map[string]*peerSetItem), - list: make([]*Peer, 0, 256), - connectedIPs: NewNestedCounter(), - } -} - -// Returns false if peer with key (PubKeyEd25519) is already in set -// or if we have too many peers from the peer's IP range -func (ps *PeerSet) Add(peer *Peer) error { - ps.mtx.Lock() - defer ps.mtx.Unlock() - if ps.lookup[peer.Key] != nil { - return ErrSwitchDuplicatePeer - } - - // ensure we havent maxed out connections for the peer's IP range yet - // and update the IP range counters - if !ps.incrIPRangeCounts(peer.Host) { - return ErrSwitchMaxPeersPerIPRange - } - - index := len(ps.list) - // Appending is safe even with other goroutines - // iterating over the ps.list slice. - ps.list = append(ps.list, peer) - ps.lookup[peer.Key] = &peerSetItem{peer, index} - return nil -} - -func (ps *PeerSet) Has(peerKey string) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - _, ok := ps.lookup[peerKey] - return ok -} - -func (ps *PeerSet) Get(peerKey string) *Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - item, ok := ps.lookup[peerKey] - if ok { - return item.peer - } else { - return nil - } -} - -func (ps *PeerSet) Remove(peer *Peer) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - item := ps.lookup[peer.Key] - if item == nil { - return - } - - // update the IP range counters - ps.decrIPRangeCounts(peer.Host) - - index := item.index - // Copy the list but without the last element. - // (we must copy because we're mutating the list) - newList := make([]*Peer, len(ps.list)-1) - copy(newList, ps.list) - // If it's the last peer, that's an easy special case. - if index == len(ps.list)-1 { - ps.list = newList - delete(ps.lookup, peer.Key) - return - } - - // Move the last item from ps.list to "index" in list. - lastPeer := ps.list[len(ps.list)-1] - lastPeerKey := lastPeer.Key - lastPeerItem := ps.lookup[lastPeerKey] - newList[index] = lastPeer - lastPeerItem.index = index - ps.list = newList - delete(ps.lookup, peer.Key) - -} - -func (ps *PeerSet) Size() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return len(ps.list) -} - -// threadsafe list of peers. 
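List follows. Mirroring the tests further below, a minimal usage sketch of PeerSet from outside the package; the key string and host are placeholders:

package main

import (
	"fmt"

	p2p "github.com/tendermint/go-p2p"
	"github.com/tendermint/tendermint/types"
)

func main() {
	ps := p2p.NewPeerSet()
	peer := &p2p.Peer{
		Key:      "deadbeefdead",                       // normally the peer's pubkey key-string
		NodeInfo: &types.NodeInfo{Host: "203.0.113.7"}, // hypothetical host
	}
	// Add rejects duplicates by Key and enforces the per-IP-range caps in
	// maxPeersPerIPRange (ErrSwitchDuplicatePeer / ErrSwitchMaxPeersPerIPRange).
	if err := ps.Add(peer); err != nil {
		panic(err)
	}
	fmt.Println(ps.Size(), ps.Has(peer.Key)) // "1 true"
	ps.Remove(peer)
}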
-func (ps *PeerSet) List() []*Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.list -} - -//----------------------------------------------------------------------------- -// track the number of IPs we're connected to for each IP address range - -// forms an IP address hierarchy tree with counts -// the struct itself is not thread safe and should always only be accessed with the ps.mtx locked -type nestedCounter struct { - count int - children map[string]*nestedCounter -} - -func NewNestedCounter() *nestedCounter { - nc := new(nestedCounter) - nc.children = make(map[string]*nestedCounter) - return nc -} - -// Check if we have too many IPs in the IP range of the incoming connection -// Thread safe -func (ps *PeerSet) HasMaxForIPRange(conn net.Conn) (ok bool) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) - ipBytes := strings.Split(ip, ".") - - c := ps.connectedIPs - for i, ipByte := range ipBytes { - if c, ok = c.children[ipByte]; !ok { - return false - } - if maxPeersPerIPRange[i] <= c.count { - return true - } - } - return false -} - -// Increments counts for this address' IP range -// Returns false if we already have enough connections -// Not thread safe (only called by ps.Add()) -func (ps *PeerSet) incrIPRangeCounts(address string) bool { - addrParts := strings.Split(address, ".") - - c := ps.connectedIPs - return incrNestedCounters(c, addrParts, 0) -} - -// Recursively descend the IP hierarchy, checking if we have -// max peers for each range and incrementing if not. -// Returns false if incr failed because max peers reached for some range counter. -func incrNestedCounters(c *nestedCounter, ipBytes []string, index int) bool { - ipByte := ipBytes[index] - child := c.children[ipByte] - if child == nil { - child = NewNestedCounter() - c.children[ipByte] = child - } - if index+1 < len(ipBytes) { - if !incrNestedCounters(child, ipBytes, index+1) { - return false - } - } - if maxPeersPerIPRange[index] <= child.count { - return false - } else { - child.count += 1 - return true - } -} - -// Decrement counts for this address' IP range -func (ps *PeerSet) decrIPRangeCounts(address string) { - addrParts := strings.Split(address, ".") - - c := ps.connectedIPs - decrNestedCounters(c, addrParts, 0) -} - -// Recursively descend the IP hierarchy, decrementing by one. -// If the counter is zero, deletes the child. -func decrNestedCounters(c *nestedCounter, ipBytes []string, index int) { - ipByte := ipBytes[index] - child := c.children[ipByte] - if child == nil { - log.Error("p2p/peer_set decrNestedCounters encountered a missing child counter") - return - } - if index+1 < len(ipBytes) { - decrNestedCounters(child, ipBytes, index+1) - } - child.count -= 1 - if child.count <= 0 { - delete(c.children, ipByte) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set_test.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set_test.go deleted file mode 100644 index bac24f8a8..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/peer_set_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package p2p - -import ( - "math/rand" - "strings" - "testing" - - . 
"github.com/tendermint/go-common" - "github.com/tendermint/tendermint/types" -) - -// Returns an empty dummy peer -func randPeer() *Peer { - return &Peer{ - Key: RandStr(12), - NodeInfo: &types.NodeInfo{ - Host: Fmt("%v.%v.%v.%v", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256), - }, - } -} - -func TestAddRemoveOne(t *testing.T) { - peerSet := NewPeerSet() - - peer := randPeer() - err := peerSet.Add(peer) - if err != nil { - t.Errorf("Failed to add new peer") - } - if peerSet.Size() != 1 { - t.Errorf("Failed to add new peer and increment size") - } - - peerSet.Remove(peer) - if peerSet.Has(peer.Key) { - t.Errorf("Failed to remove peer") - } - if peerSet.Size() != 0 { - t.Errorf("Failed to remove peer and decrement size") - } -} - -func TestAddRemoveMany(t *testing.T) { - peerSet := NewPeerSet() - - peers := []*Peer{} - N := 100 - maxPeersPerIPRange = [4]int{N, N, N, N} - for i := 0; i < N; i++ { - peer := randPeer() - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("Failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - peerSet.Remove(peer) - if peerSet.Has(peer.Key) { - t.Errorf("Failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("Failed to remove peer and decrement size") - } - } -} - -func newPeerInIPRange(ipBytes ...string) *Peer { - ips := make([]string, 4) - for i, ipByte := range ipBytes { - ips[i] = ipByte - } - for i := len(ipBytes); i < 4; i++ { - ips[i] = Fmt("%v", rand.Int()%256) - } - ipS := strings.Join(ips, ".") - return &Peer{ - Key: RandStr(12), - NodeInfo: &types.NodeInfo{ - Host: ipS, - }, - } -} - -func TestIPRanges(t *testing.T) { - peerSet := NewPeerSet() - - // test /8 - maxPeersPerIPRange = [4]int{2, 2, 2, 2} - peer := newPeerInIPRange("54", "1") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "2") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "3") - if err := peerSet.Add(peer); err == nil { - t.Errorf("Added peer when we shouldn't have") - } - peer = newPeerInIPRange("55", "1") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - - // test /16 - peerSet = NewPeerSet() - maxPeersPerIPRange = [4]int{3, 2, 1, 1} - peer = newPeerInIPRange("54", "112", "1") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "2") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "3") - if err := peerSet.Add(peer); err == nil { - t.Errorf("Added peer when we shouldn't have") - } - peer = newPeerInIPRange("54", "113", "1") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - - // test /24 - peerSet = NewPeerSet() - maxPeersPerIPRange = [4]int{5, 3, 2, 1} - peer = newPeerInIPRange("54", "112", "11", "1") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "11", "2") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "11", "3") - if err := peerSet.Add(peer); err == nil { - t.Errorf("Added peer when we shouldn't have") - } - peer = newPeerInIPRange("54", "112", "12", "1") - if err := peerSet.Add(peer); err != nil { - 
t.Errorf("Failed to add new peer") - } - - // test /32 - peerSet = NewPeerSet() - maxPeersPerIPRange = [4]int{11, 7, 5, 2} - peer = newPeerInIPRange("54", "112", "11", "10") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "11", "10") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - peer = newPeerInIPRange("54", "112", "11", "10") - if err := peerSet.Add(peer); err == nil { - t.Errorf("Added peer when we shouldn't have") - } - peer = newPeerInIPRange("54", "112", "11", "11") - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/pex_reactor.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/pex_reactor.go deleted file mode 100644 index 49b2a314e..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/pex_reactor.go +++ /dev/null @@ -1,262 +0,0 @@ -package p2p - -import ( - "bytes" - "errors" - "fmt" - "math/rand" - "reflect" - "time" - - . "github.com/tendermint/go-common" - "github.com/tendermint/tendermint/events" - "github.com/tendermint/go-wire" -) - -var pexErrInvalidMessage = errors.New("Invalid PEX message") - -const ( - PexChannel = byte(0x00) - ensurePeersPeriodSeconds = 30 - minNumOutboundPeers = 10 -) - -/* -PEXReactor handles PEX (peer exchange) and ensures that an -adequate number of peers are connected to the switch. -*/ -type PEXReactor struct { - BaseReactor - - sw *Switch - book *AddrBook - evsw events.Fireable -} - -func NewPEXReactor(book *AddrBook) *PEXReactor { - pexR := &PEXReactor{ - book: book, - } - pexR.BaseReactor = *NewBaseReactor(log, "PEXReactor", pexR) - return pexR -} - -func (pexR *PEXReactor) OnStart() error { - pexR.BaseReactor.OnStart() - go pexR.ensurePeersRoutine() - return nil -} - -func (pexR *PEXReactor) OnStop() { - pexR.BaseReactor.OnStop() -} - -// Implements Reactor -func (pexR *PEXReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{ - &ChannelDescriptor{ - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - }, - } -} - -// Implements Reactor -func (pexR *PEXReactor) AddPeer(peer *Peer) { - // Add the peer to the address book - netAddr := NewNetAddressString(fmt.Sprintf("%s:%d", peer.Host, peer.P2PPort)) - if peer.IsOutbound() { - if pexR.book.NeedMoreAddrs() { - pexR.RequestPEX(peer) - } - } else { - // For inbound connections, the peer is its own source - // (For outbound peers, the address is already in the books) - pexR.book.AddAddress(netAddr, netAddr) - } -} - -// Implements Reactor -func (pexR *PEXReactor) RemovePeer(peer *Peer, reason interface{}) { - // TODO -} - -// Implements Reactor -// Handles incoming PEX messages. -func (pexR *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) { - - // decode message - _, msg, err := DecodeMessage(msgBytes) - if err != nil { - log.Warn("Error decoding message", "error", err) - return - } - log.Notice("Received message", "msg", msg) - - switch msg := msg.(type) { - case *pexRequestMessage: - // src requested some peers. - // TODO: prevent abuse. - pexR.SendAddrs(src, pexR.book.GetSelection()) - case *pexAddrsMessage: - // We received some peer addresses from src. - // TODO: prevent abuse. 
- // (We don't want to get spammed with bad peers) - srcAddr := src.Connection().RemoteAddress - for _, addr := range msg.Addrs { - pexR.book.AddAddress(addr, srcAddr) - } - default: - log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } - -} - -// Asks peer for more addresses. -func (pexR *PEXReactor) RequestPEX(peer *Peer) { - peer.Send(PexChannel, &pexRequestMessage{}) -} - -func (pexR *PEXReactor) SendAddrs(peer *Peer, addrs []*NetAddress) { - peer.Send(PexChannel, &pexAddrsMessage{Addrs: addrs}) -} - -// Ensures that sufficient peers are connected. (continuous) -func (pexR *PEXReactor) ensurePeersRoutine() { - // Randomize when routine starts - time.Sleep(time.Duration(rand.Int63n(500*ensurePeersPeriodSeconds)) * time.Millisecond) - - // fire once immediately. - pexR.ensurePeers() - // fire periodically - timer := NewRepeatTimer("pex", ensurePeersPeriodSeconds*time.Second) -FOR_LOOP: - for { - select { - case <-timer.Ch: - pexR.ensurePeers() - case <-pexR.Quit: - break FOR_LOOP - } - } - - // Cleanup - timer.Stop() -} - -// Ensures that sufficient peers are connected. (once) -func (pexR *PEXReactor) ensurePeers() { - numOutPeers, _, numDialing := pexR.Switch.NumPeers() - numToDial := minNumOutboundPeers - (numOutPeers + numDialing) - log.Info("Ensure peers", "numOutPeers", numOutPeers, "numDialing", numDialing, "numToDial", numToDial) - if numToDial <= 0 { - return - } - toDial := NewCMap() - - // Try to pick numToDial addresses to dial. - // TODO: improve logic. - for i := 0; i < numToDial; i++ { - newBias := MinInt(numOutPeers, 8)*10 + 10 - var picked *NetAddress - // Try to fetch a new peer 3 times. - // This caps the maximum number of tries to 3 * numToDial. - for j := 0; j < 3; j++ { - try := pexR.book.PickAddress(newBias) - if try == nil { - break - } - alreadySelected := toDial.Has(try.IP.String()) - alreadyDialing := pexR.Switch.IsDialing(try) - alreadyConnected := pexR.Switch.Peers().Has(try.IP.String()) - if alreadySelected || alreadyDialing || alreadyConnected { - /* - log.Info("Cannot dial address", "addr", try, - "alreadySelected", alreadySelected, - "alreadyDialing", alreadyDialing, - "alreadyConnected", alreadyConnected) - */ - continue - } else { - log.Info("Will dial address", "addr", try) - picked = try - break - } - } - if picked == nil { - continue - } - toDial.Set(picked.IP.String(), picked) - } - - // Dial picked addresses - for _, item := range toDial.Values() { - go func(picked *NetAddress) { - _, err := pexR.Switch.DialPeerWithAddress(picked) - if err != nil { - pexR.book.MarkAttempt(picked) - } - }(item.(*NetAddress)) - } - - // If we need more addresses, pick a random peer and ask for more. - if pexR.book.NeedMoreAddrs() { - if peers := pexR.Switch.Peers().List(); len(peers) > 0 { - i := rand.Int() % len(peers) - peer := peers[i] - log.Info("No addresses to dial. 
Sending pexRequest to random peer", "peer", peer) - pexR.RequestPEX(peer) - } - } -} - -// implements events.Eventable -func (pexR *PEXReactor) SetFireable(evsw events.Fireable) { - pexR.evsw = evsw -} - -//----------------------------------------------------------------------------- -// Messages - -const ( - msgTypeRequest = byte(0x01) - msgTypeAddrs = byte(0x02) -) - -type PexMessage interface{} - -var _ = wire.RegisterInterface( - struct{ PexMessage }{}, - wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest}, - wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs}, -) - -func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) { - msgType = bz[0] - n := new(int64) - r := bytes.NewReader(bz) - msg = wire.ReadBinary(struct{ PexMessage }{}, r, n, &err).(struct{ PexMessage }).PexMessage - return -} - -/* -A pexRequestMessage requests additional peer addresses. -*/ -type pexRequestMessage struct { -} - -func (m *pexRequestMessage) String() string { - return "[pexRequest]" -} - -/* -A message with announced peer addresses. -*/ -type pexAddrsMessage struct { - Addrs []*NetAddress -} - -func (m *pexAddrsMessage) String() string { - return fmt.Sprintf("[pexAddrs %v]", m.Addrs) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection.go deleted file mode 100644 index 1cadd62b2..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection.go +++ /dev/null @@ -1,346 +0,0 @@ -// Uses nacl's secret_box to encrypt a net.Conn. -// It is (meant to be) an implementation of the STS protocol. -// Note we do not (yet) assume that a remote peer's pubkey -// is known ahead of time, and thus we are technically -// still vulnerable to MITM. (TODO!) -// See docs/sts-final.pdf for more info -package p2p - -import ( - "bytes" - crand "crypto/rand" - "crypto/sha256" - "encoding/binary" - "errors" - "io" - "net" - "time" - - "golang.org/x/crypto/nacl/box" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/ripemd160" - - "github.com/tendermint/go-crypto" - . "github.com/tendermint/go-common" - "github.com/tendermint/go-wire" -) - -// 2 + 1024 == 1026 total frame size -const dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize -const dataMaxSize = 1024 -const totalFrameSize = dataMaxSize + dataLenSize -const sealedFrameSize = totalFrameSize + secretbox.Overhead -const authSigMsgSize = (32 + 1) + (64 + 1) // fixed size (length prefixed) byte arrays - -// Implements net.Conn -type SecretConnection struct { - conn io.ReadWriteCloser - recvBuffer []byte - recvNonce *[24]byte - sendNonce *[24]byte - remPubKey crypto.PubKeyEd25519 - shrSecret *[32]byte // shared secret -} - -// Performs handshake and returns a new authenticated SecretConnection. -// Returns nil if error in handshake. -// Caller should call conn.Close() -// See docs/sts-final.pdf for more information. -func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKeyEd25519) (*SecretConnection, error) { - - locPubKey := locPrivKey.PubKey().(crypto.PubKeyEd25519) - - // Generate ephemeral keys for perfect forward secrecy. - locEphPub, locEphPriv := genEphKeys() - - // Write local ephemeral pubkey and receive one too. 
- // NOTE: every 32-byte string is accepted as a Curve25519 public key - // (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf) - remEphPub, err := shareEphPubKey(conn, locEphPub) - if err != nil { - return nil, err - } - - // Compute common shared secret. - shrSecret := computeSharedSecret(remEphPub, locEphPriv) - - // Sort by lexical order. - loEphPub, hiEphPub := sort32(locEphPub, remEphPub) - - // Generate nonces to use for secretbox. - recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locEphPub == loEphPub) - - // Generate common challenge to sign. - challenge := genChallenge(loEphPub, hiEphPub) - - // Construct SecretConnection. - sc := &SecretConnection{ - conn: conn, - recvBuffer: nil, - recvNonce: recvNonce, - sendNonce: sendNonce, - shrSecret: shrSecret, - } - - // Sign the challenge bytes for authentication. - locSignature := signChallenge(challenge, locPrivKey) - - // Share (in secret) each other's pubkey & challenge signature - authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature) - if err != nil { - return nil, err - } - remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig - if !remPubKey.VerifyBytes(challenge[:], remSignature) { - return nil, errors.New("Challenge verification failed") - } - - // We've authorized. - sc.remPubKey = remPubKey - return sc, nil -} - -// Returns authenticated remote pubkey -func (sc *SecretConnection) RemotePubKey() crypto.PubKeyEd25519 { - return sc.remPubKey -} - -// Writes encrypted frames of `sealedFrameSize` -// CONTRACT: data smaller than dataMaxSize is read atomically. -func (sc *SecretConnection) Write(data []byte) (n int, err error) { - for 0 < len(data) { - var frame []byte = make([]byte, totalFrameSize) - var chunk []byte - if dataMaxSize < len(data) { - chunk = data[:dataMaxSize] - data = data[dataMaxSize:] - } else { - chunk = data - data = nil - } - chunkLength := len(chunk) - binary.BigEndian.PutUint16(frame, uint16(chunkLength)) - copy(frame[dataLenSize:], chunk) - - // encrypt the frame - var sealedFrame = make([]byte, sealedFrameSize) - secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret) - // fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret) - incr2Nonce(sc.sendNonce) - // end encryption - - _, err := sc.conn.Write(sealedFrame) - if err != nil { - return n, err - } else { - n += len(chunk) - } - } - return -} - -// CONTRACT: data smaller than dataMaxSize is read atomically. 
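Write above never sends variable-length records: each chunk is copied into a fixed 1026-byte frame (a 2-byte big-endian length prefix plus up to 1024 data bytes, zero-padded), sealed with NaCl secretbox (adding 16 bytes of MAC overhead), and the send nonce advances by two per frame. What follows is a minimal, self-contained sketch of that framing in isolation; the constants are copied from this file, while the zero key and nonce are placeholders for the values the handshake above would actually negotiate.

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// Constants copied from secret_connection.go above.
const (
	dataLenSize    = 2
	dataMaxSize    = 1024
	totalFrameSize = dataMaxSize + dataLenSize
)

// sealFrame builds one frame the way SecretConnection.Write does:
// length prefix, zero-padded payload, then secretbox.Seal.
func sealFrame(chunk []byte, nonce *[24]byte, key *[32]byte) []byte {
	frame := make([]byte, totalFrameSize)
	binary.BigEndian.PutUint16(frame, uint16(len(chunk))) // 2-byte length prefix
	copy(frame[dataLenSize:], chunk)                      // payload, rest stays zero

	sealed := make([]byte, 0, totalFrameSize+secretbox.Overhead)
	return secretbox.Seal(sealed, frame, nonce, key)
}

func main() {
	var key [32]byte   // placeholder for the shared secret
	var nonce [24]byte // placeholder for the send nonce
	out := sealFrame([]byte("hello"), &nonce, &key)
	fmt.Println(len(out)) // 1026 + 16 = 1042 bytes on the wire per frame
}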
-func (sc *SecretConnection) Read(data []byte) (n int, err error) { - if 0 < len(sc.recvBuffer) { - n_ := copy(data, sc.recvBuffer) - sc.recvBuffer = sc.recvBuffer[n_:] - return - } - - sealedFrame := make([]byte, sealedFrameSize) - _, err = io.ReadFull(sc.conn, sealedFrame) - if err != nil { - return - } - - // decrypt the frame - var frame = make([]byte, totalFrameSize) - // fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret) - _, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret) - if !ok { - return n, errors.New("Failed to decrypt SecretConnection") - } - incr2Nonce(sc.recvNonce) - // end decryption - - var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes - if chunkLength > dataMaxSize { - return 0, errors.New("chunkLength is greater than dataMaxSize") - } - var chunk = frame[dataLenSize : dataLenSize+chunkLength] - - n = copy(data, chunk) - sc.recvBuffer = chunk[n:] - return -} - -// Implements net.Conn -func (sc *SecretConnection) Close() error { return sc.conn.Close() } -func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() } -func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() } -func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) } -func (sc *SecretConnection) SetReadDeadline(t time.Time) error { - return sc.conn.(net.Conn).SetReadDeadline(t) -} -func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { - return sc.conn.(net.Conn).SetWriteDeadline(t) -} - -func genEphKeys() (ephPub, ephPriv *[32]byte) { - var err error - ephPub, ephPriv, err = box.GenerateKey(crand.Reader) - if err != nil { - PanicCrisis("Could not generate ephemeral keypairs") - } - return -} - -func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { - var err1, err2 error - - Parallel( - func() { - _, err1 = conn.Write(locEphPub[:]) - }, - func() { - remEphPub = new([32]byte) - _, err2 = io.ReadFull(conn, remEphPub[:]) - }, - ) - - if err1 != nil { - return nil, err1 - } - if err2 != nil { - return nil, err2 - } - - return remEphPub, nil -} - -func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) { - shrSecret = new([32]byte) - box.Precompute(shrSecret, remPubKey, locPrivKey) - return -} - -func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { - if bytes.Compare(foo[:], bar[:]) < 0 { - lo = foo - hi = bar - } else { - lo = bar - hi = foo - } - return -} - -func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) { - nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...)) - nonce2 := new([24]byte) - copy(nonce2[:], nonce1[:]) - nonce2[len(nonce2)-1] ^= 0x01 - if locIsLo { - recvNonce = nonce1 - sendNonce = nonce2 - } else { - recvNonce = nonce2 - sendNonce = nonce1 - } - return -} - -func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) { - return hash32(append(loPubKey[:], hiPubKey[:]...)) -} - -func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKeyEd25519) (signature crypto.SignatureEd25519) { - signature = locPrivKey.Sign(challenge[:]).(crypto.SignatureEd25519) - return -} - -type authSigMessage struct { - Key crypto.PubKeyEd25519 - Sig crypto.SignatureEd25519 -} - -func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signature crypto.SignatureEd25519) (*authSigMessage, error) { - var recvMsg authSigMessage - var err1, err2 error - 
- Parallel( - func() { - msgBytes := wire.BinaryBytes(authSigMessage{pubKey, signature}) - _, err1 = sc.Write(msgBytes) - }, - func() { - readBuffer := make([]byte, authSigMsgSize) - _, err2 = io.ReadFull(sc, readBuffer) - if err2 != nil { - return - } - n := int64(0) // not used. - recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), &n, &err2).(authSigMessage) - }) - - if err1 != nil { - return nil, err1 - } - if err2 != nil { - return nil, err2 - } - - return &recvMsg, nil -} - -func verifyChallengeSignature(challenge *[32]byte, remPubKey crypto.PubKeyEd25519, remSignature crypto.SignatureEd25519) bool { - return remPubKey.VerifyBytes(challenge[:], remSignature) -} - -//-------------------------------------------------------------------------------- - -// sha256 -func hash32(input []byte) (res *[32]byte) { - hasher := sha256.New() - hasher.Write(input) // does not error - resSlice := hasher.Sum(nil) - res = new([32]byte) - copy(res[:], resSlice) - return -} - -// We only fill in the first 20 bytes with ripemd160 -func hash24(input []byte) (res *[24]byte) { - hasher := ripemd160.New() - hasher.Write(input) // does not error - resSlice := hasher.Sum(nil) - res = new([24]byte) - copy(res[:], resSlice) - return -} - -// ripemd160 -func hash20(input []byte) (res *[20]byte) { - hasher := ripemd160.New() - hasher.Write(input) // does not error - resSlice := hasher.Sum(nil) - res = new([20]byte) - copy(res[:], resSlice) - return -} - -// increment nonce big-endian by 2 with wraparound. -func incr2Nonce(nonce *[24]byte) { - incrNonce(nonce) - incrNonce(nonce) -} - -// increment nonce big-endian by 1 with wraparound. -func incrNonce(nonce *[24]byte) { - for i := 23; 0 <= i; i-- { - nonce[i] += 1 - if nonce[i] != 0 { - return - } - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection_test.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection_test.go deleted file mode 100644 index 876af4e3d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/secret_connection_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package p2p - -import ( - "bytes" - "io" - "testing" - - "github.com/tendermint/go-crypto" - . "github.com/tendermint/go-common" -) - -type dummyConn struct { - *io.PipeReader - *io.PipeWriter -} - -func (drw dummyConn) Close() (err error) { - err2 := drw.PipeWriter.CloseWithError(io.EOF) - err1 := drw.PipeReader.Close() - if err2 != nil { - return err - } - return err1 -} - -// Each returned ReadWriteCloser is akin to a net.Connection -func makeDummyConnPair() (fooConn, barConn dummyConn) { - barReader, fooWriter := io.Pipe() - fooReader, barWriter := io.Pipe() - return dummyConn{fooReader, fooWriter}, dummyConn{barReader, barWriter} -} - -func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { - fooConn, barConn := makeDummyConnPair() - fooPrvKey := crypto.GenPrivKeyEd25519() - fooPubKey := fooPrvKey.PubKey().(crypto.PubKeyEd25519) - barPrvKey := crypto.GenPrivKeyEd25519() - barPubKey := barPrvKey.PubKey().(crypto.PubKeyEd25519) - - Parallel( - func() { - var err error - fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) - if err != nil { - tb.Errorf("Failed to establish SecretConnection for foo: %v", err) - return - } - remotePubBytes := fooSecConn.RemotePubKey() - if !bytes.Equal(remotePubBytes[:], barPubKey[:]) { - tb.Errorf("Unexpected fooSecConn.RemotePubKey. 
Expected %v, got %v", - barPubKey, fooSecConn.RemotePubKey()) - } - }, - func() { - var err error - barSecConn, err = MakeSecretConnection(barConn, barPrvKey) - if barSecConn == nil { - tb.Errorf("Failed to establish SecretConnection for bar: %v", err) - return - } - remotePubBytes := barSecConn.RemotePubKey() - if !bytes.Equal(remotePubBytes[:], fooPubKey[:]) { - tb.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v", - fooPubKey, barSecConn.RemotePubKey()) - } - }) - - return -} - -func TestSecretConnectionHandshake(t *testing.T) { - fooSecConn, barSecConn := makeSecretConnPair(t) - fooSecConn.Close() - barSecConn.Close() -} - -func TestSecretConnectionReadWrite(t *testing.T) { - fooConn, barConn := makeDummyConnPair() - fooWrites, barWrites := []string{}, []string{} - fooReads, barReads := []string{}, []string{} - - // Pre-generate the things to write (for foo & bar) - for i := 0; i < 100; i++ { - fooWrites = append(fooWrites, RandStr((RandInt()%(dataMaxSize*5))+1)) - barWrites = append(barWrites, RandStr((RandInt()%(dataMaxSize*5))+1)) - } - - // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa - genNodeRunner := func(nodeConn dummyConn, nodeWrites []string, nodeReads *[]string) func() { - return func() { - // Node handskae - nodePrvKey := crypto.GenPrivKeyEd25519() - nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) - if err != nil { - t.Errorf("Failed to establish SecretConnection for node: %v", err) - return - } - // In parallel, handle reads and writes - Parallel( - func() { - // Node writes - for _, nodeWrite := range nodeWrites { - n, err := nodeSecretConn.Write([]byte(nodeWrite)) - if err != nil { - t.Errorf("Failed to write to nodeSecretConn: %v", err) - return - } - if n != len(nodeWrite) { - t.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n) - return - } - } - nodeConn.PipeWriter.Close() - }, - func() { - // Node reads - readBuffer := make([]byte, dataMaxSize) - for { - n, err := nodeSecretConn.Read(readBuffer) - if err == io.EOF { - return - } else if err != nil { - t.Errorf("Failed to read from nodeSecretConn: %v", err) - return - } - *nodeReads = append(*nodeReads, string(readBuffer[:n])) - } - nodeConn.PipeReader.Close() - }) - } - } - - // Run foo & bar in parallel - Parallel( - genNodeRunner(fooConn, fooWrites, &fooReads), - genNodeRunner(barConn, barWrites, &barReads), - ) - - // A helper to ensure that the writes and reads match. - // Additionally, small writes (<= dataMaxSize) must be atomically read. 
- compareWritesReads := func(writes []string, reads []string) { - for { - // Pop next write & corresponding reads - var read, write string = "", writes[0] - var readCount = 0 - for _, readChunk := range reads { - read += readChunk - readCount += 1 - if len(write) <= len(read) { - break - } - if len(write) <= dataMaxSize { - break // atomicity of small writes - } - } - // Compare - if write != read { - t.Errorf("Expected to read %X, got %X", write, read) - } - // Iterate - writes = writes[1:] - reads = reads[readCount:] - if len(writes) == 0 { - break - } - } - } - - compareWritesReads(fooWrites, barReads) - compareWritesReads(barWrites, fooReads) - -} - -func BenchmarkSecretConnection(b *testing.B) { - b.StopTimer() - fooSecConn, barSecConn := makeSecretConnPair(b) - fooWriteText := RandStr(dataMaxSize) - // Consume reads from bar's reader - go func() { - readBuffer := make([]byte, dataMaxSize) - for { - _, err := barSecConn.Read(readBuffer) - if err == io.EOF { - return - } else if err != nil { - b.Fatalf("Failed to read from barSecConn: %v", err) - } - } - }() - - b.StartTimer() - for i := 0; i < b.N; i++ { - _, err := fooSecConn.Write([]byte(fooWriteText)) - if err != nil { - b.Fatalf("Failed to write to fooSecConn: %v", err) - } - } - b.StopTimer() - - fooSecConn.Close() - //barSecConn.Close() race condition -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch.go deleted file mode 100644 index a3ee16c60..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch.go +++ /dev/null @@ -1,391 +0,0 @@ -package p2p - -import ( - "errors" - "fmt" - "net" - "strconv" - "time" - - "github.com/tendermint/log15" - "github.com/tendermint/go-crypto" - . "github.com/tendermint/go-common" - "github.com/tendermint/tendermint/types" -) - -type Reactor interface { - Service // Start, Stop - - SetSwitch(*Switch) - GetChannels() []*ChannelDescriptor - AddPeer(peer *Peer) - RemovePeer(peer *Peer, reason interface{}) - Receive(chID byte, peer *Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - QuitService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(log log15.Logger, name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - QuitService: *NewQuitService(log, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil } -func (_ *BaseReactor) AddPeer(peer *Peer) {} -func (_ *BaseReactor) RemovePeer(peer *Peer, reason interface{}) {} -func (_ *BaseReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {} - -//----------------------------------------------------------------------------- - -/* -The `Switch` handles peer connections and exposes an API to receive incoming messages -on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -or more `Channels`. So while sending outgoing messages is typically performed on the peer, -incoming messages are received on the reactor. 
-*/ -type Switch struct { - BaseService - - listeners []Listener - reactors map[string]Reactor - chDescs []*ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *CMap - nodeInfo *types.NodeInfo // our node info - nodePrivKey crypto.PrivKeyEd25519 // our node privkey -} - -var ( - ErrSwitchDuplicatePeer = errors.New("Duplicate peer") - ErrSwitchMaxPeersPerIPRange = errors.New("IP range has too many peers") -) - -const ( - peerDialTimeoutSeconds = 3 // TODO make this configurable - handshakeTimeoutSeconds = 20 // TODO make this configurable - maxNumPeers = 50 // TODO make this configurable -) - -func NewSwitch() *Switch { - sw := &Switch{ - reactors: make(map[string]Reactor), - chDescs: make([]*ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: NewCMap(), - nodeInfo: nil, - } - sw.BaseService = *NewBaseService(log, "P2P Switch", sw) - return sw -} - -// Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - // Validate the reactor. - // No two reactors can share the same channel. - reactorChannels := reactor.GetChannels() - for _, chDesc := range reactorChannels { - chID := chDesc.ID - if sw.reactorsByCh[chID] != nil { - PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// Not goroutine safe. -func (sw *Switch) AddListener(l Listener) { - sw.listeners = append(sw.listeners, l) -} - -// Not goroutine safe. -func (sw *Switch) Listeners() []Listener { - return sw.listeners -} - -// Not goroutine safe. -func (sw *Switch) IsListening() bool { - return len(sw.listeners) > 0 -} - -// Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo *types.NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// Not goroutine safe. -func (sw *Switch) NodeInfo() *types.NodeInfo { - return sw.nodeInfo -} - -// Not goroutine safe. -// NOTE: Overwrites sw.nodeInfo.PubKey -func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) { - sw.nodePrivKey = nodePrivKey - if sw.nodeInfo != nil { - sw.nodeInfo.PubKey = nodePrivKey.PubKey().(crypto.PubKeyEd25519) - } -} - -// Switch.Start() starts all the reactors, peers, and listeners. -func (sw *Switch) OnStart() error { - sw.BaseService.OnStart() - // Start reactors - for _, reactor := range sw.reactors { - _, err := reactor.Start() - if err != nil { - return err - } - } - // Start peers - for _, peer := range sw.peers.List() { - sw.startInitPeer(peer) - } - // Start listeners - for _, listener := range sw.listeners { - go sw.listenerRoutine(listener) - } - return nil -} - -func (sw *Switch) OnStop() { - sw.BaseService.OnStop() - // Stop listeners - for _, listener := range sw.listeners { - listener.Stop() - } - sw.listeners = nil - // Stop peers - for _, peer := range sw.peers.List() { - peer.Stop() - } - sw.peers = NewPeerSet() - // Stop reactors - for _, reactor := range sw.reactors { - reactor.Stop() - } -} - -// NOTE: This performs a blocking handshake before the peer is added. -// CONTRACT: Iff error is returned, peer is nil, and conn is immediately closed. 
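Concretely, the wiring the Switch doc comment describes mirrors what makeSwitchPair does in switch_test.go further down: construct the switch, register reactors (each claiming its channel IDs), set the node info and node private key, add a listener, and call Start. A hedged sketch follows, assuming the vendored import paths github.com/tendermint/go-p2p, github.com/tendermint/go-crypto, and github.com/tendermint/tendermint/types; the package name, moniker, and helper function are illustrative only.

package p2pexample // hypothetical wrapper package, for illustration only

import (
	"github.com/tendermint/go-crypto"
	p2p "github.com/tendermint/go-p2p"
	"github.com/tendermint/tendermint/types"
)

// startNode wires one reactor into a Switch, in the same order the tests below use.
func startNode(book *p2p.AddrBook) *p2p.Switch {
	privKey := crypto.GenPrivKeyEd25519()

	sw := p2p.NewSwitch()
	sw.AddReactor("PEX", p2p.NewPEXReactor(book)) // PEXReactor claims channel 0x00
	sw.SetNodeInfo(&types.NodeInfo{
		PubKey:  privKey.PubKey().(crypto.PubKeyEd25519),
		Moniker: "node0",
		ChainID: "testing",
		Version: types.Versions{Tendermint: "123.123.123"},
	})
	sw.SetNodePrivKey(privKey) // after SetNodeInfo: this overwrites nodeInfo.PubKey
	sw.AddListener(p2p.NewDefaultListener("tcp", ":8001"))

	sw.Start() // starts reactors, then existing peers, then listeners (see OnStart above)
	return sw
}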
-func (sw *Switch) AddPeerWithConnection(conn net.Conn, outbound bool) (*Peer, error) { - // Set deadline for handshake so we don't block forever on conn.ReadFull - conn.SetDeadline(time.Now().Add(handshakeTimeoutSeconds * time.Second)) - - // First, encrypt the connection. - sconn, err := MakeSecretConnection(conn, sw.nodePrivKey) - if err != nil { - conn.Close() - return nil, err - } - // Then, perform node handshake - peerNodeInfo, err := peerHandshake(sconn, sw.nodeInfo) - if err != nil { - sconn.Close() - return nil, err - } - // Check that the professed PubKey matches the sconn's. - if !peerNodeInfo.PubKey.Equals(sconn.RemotePubKey()) { - sconn.Close() - return nil, fmt.Errorf("Ignoring connection with unmatching pubkey: %v vs %v", - peerNodeInfo.PubKey, sconn.RemotePubKey()) - } - // Avoid self - if peerNodeInfo.PubKey.Equals(sw.nodeInfo.PubKey) { - sconn.Close() - return nil, fmt.Errorf("Ignoring connection from self") - } - // Check version, chain id - if err := sw.nodeInfo.CompatibleWith(peerNodeInfo); err != nil { - sconn.Close() - return nil, err - } - - // The peerNodeInfo is not verified, so overwrite - // the IP, and the port too if we dialed out - // Everything else we just have to trust - ip, port, _ := net.SplitHostPort(sconn.RemoteAddr().String()) - peerNodeInfo.Host = ip - if outbound { - porti, _ := strconv.Atoi(port) - peerNodeInfo.P2PPort = uint16(porti) - } - peer := newPeer(sconn, peerNodeInfo, outbound, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError) - - // Add the peer to .peers - // ignore if duplicate or if we already have too many for that IP range - if err := sw.peers.Add(peer); err != nil { - log.Notice("Ignoring peer", "error", err, "peer", peer) - peer.Stop() - return nil, err - } - - // remove deadline and start peer - conn.SetDeadline(time.Time{}) - if sw.IsRunning() { - sw.startInitPeer(peer) - } - - log.Notice("Added peer", "peer", peer) - return peer, nil -} - -func (sw *Switch) startInitPeer(peer *Peer) { - peer.Start() // spawn send/recv routines - sw.addPeerToReactors(peer) // run AddPeer on each reactor -} - -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) (*Peer, error) { - log.Info("Dialing address", "address", addr) - sw.dialing.Set(addr.IP.String(), addr) - conn, err := addr.DialTimeout(peerDialTimeoutSeconds * time.Second) - sw.dialing.Delete(addr.IP.String()) - if err != nil { - log.Info("Failed dialing address", "address", addr, "error", err) - return nil, err - } - peer, err := sw.AddPeerWithConnection(conn, true) - if err != nil { - log.Info("Failed adding peer", "address", addr, "conn", conn, "error", err) - return nil, err - } - log.Notice("Dialed and added peer", "address", addr, "peer", peer) - return peer, nil -} - -func (sw *Switch) IsDialing(addr *NetAddress) bool { - return sw.dialing.Has(addr.IP.String()) -} - -// Broadcast runs a go routine for each attempted send, which will block -// trying to send for defaultSendTimeoutSeconds. Returns a channel -// which receives success values for each attempted send (false if times out) -func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool { - successChan := make(chan bool, len(sw.peers.List())) - log.Info("Broadcast", "channel", chID, "msg", msg) - for _, peer := range sw.peers.List() { - go func(peer *Peer) { - success := peer.Send(chID, msg) - successChan <- success - }(peer) - } - return successChan -} - -// Returns the count of outbound/inbound and outbound-dialing peers. 
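Broadcast above is fire-and-forget per peer: it snapshots the current peer list, spawns one send goroutine per peer, and returns a bool channel buffered to that many results. The channel is never closed, so a caller should receive exactly as many values as its capacity rather than ranging until close. A small sketch of that pattern, written as if it lived in this package; the helper name is illustrative.

// broadcastAndCount drains the success channel returned by Switch.Broadcast.
// cap(successChan) equals the number of peers at the time of the call, and each
// per-peer goroutine sends exactly one value, so this loop always terminates.
func broadcastAndCount(sw *Switch, chID byte, msg interface{}) (ok, failed int) {
	successChan := sw.Broadcast(chID, msg)
	for i := 0; i < cap(successChan); i++ {
		if <-successChan {
			ok++
		} else {
			failed++
		}
	}
	return
}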
-func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.outbound { - outbound++ - } else { - inbound++ - } - } - dialing = sw.dialing.Size() - return -} - -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// Disconnect from a peer due to external error. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) { - log.Notice("Stopping peer for error", "peer", peer, "error", reason) - sw.peers.Remove(peer) - peer.Stop() - sw.removePeerFromReactors(peer, reason) -} - -// Disconnect from a peer gracefully. -// TODO: handle graceful disconnects. -func (sw *Switch) StopPeerGracefully(peer *Peer) { - log.Notice("Stopping peer gracefully") - sw.peers.Remove(peer) - peer.Stop() - sw.removePeerFromReactors(peer, nil) -} - -func (sw *Switch) addPeerToReactors(peer *Peer) { - for _, reactor := range sw.reactors { - reactor.AddPeer(peer) - } -} - -func (sw *Switch) removePeerFromReactors(peer *Peer, reason interface{}) { - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } -} - -func (sw *Switch) listenerRoutine(l Listener) { - for { - inConn, ok := <-l.Connections() - if !ok { - break - } - - // ignore connection if we already have enough - if maxNumPeers <= sw.peers.Size() { - log.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxNumPeers) - continue - } - - // Ignore connections from IP ranges for which we have too many - if sw.peers.HasMaxForIPRange(inConn) { - log.Info("Ignoring inbound connection: already have enough peers for that IP range", "address", inConn.RemoteAddr().String()) - continue - } - - // New inbound connection! - _, err := sw.AddPeerWithConnection(inConn, false) - if err != nil { - log.Notice("Ignoring inbound connection: error on AddPeerWithConnection", "address", inConn.RemoteAddr().String(), "error", err) - continue - } - - // NOTE: We don't yet have the listening port of the - // remote (if they have a listener at all). - // The peerHandshake will handle that - } - - // cleanup -} - -//----------------------------------------------------------------------------- - -type SwitchEventNewPeer struct { - Peer *Peer -} - -type SwitchEventDonePeer struct { - Peer *Peer - Error interface{} -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch_test.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch_test.go deleted file mode 100644 index ee08b6f96..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/switch_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package p2p - -import ( - "bytes" - "sync" - "testing" - "time" - - "github.com/tendermint/go-crypto" - . 
"github.com/tendermint/go-common" - _ "github.com/tendermint/go-config/tendermint_test" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/go-wire" -) - -type PeerMessage struct { - PeerKey string - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx sync.Mutex - channels []*ChannelDescriptor - peersAdded []*Peer - peersRemoved []*Peer - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor(log, "TestReactor", tr) - return tr -} - -func (tr *TestReactor) GetChannels() []*ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer *Peer) { - tr.mtx.Lock() - defer tr.mtx.Unlock() - tr.peersAdded = append(tr.peersAdded, peer) -} - -func (tr *TestReactor) RemovePeer(peer *Peer, reason interface{}) { - tr.mtx.Lock() - defer tr.mtx.Unlock() - tr.peersRemoved = append(tr.peersRemoved, peer) -} - -func (tr *TestReactor) Receive(chID byte, peer *Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - //fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key, msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches connected to each other. -func makeSwitchPair(t testing.TB, initSwitch func(*Switch) *Switch) (*Switch, *Switch) { - - s1PrivKey := crypto.GenPrivKeyEd25519() - s2PrivKey := crypto.GenPrivKeyEd25519() - - // Create two switches that will be interconnected. - s1 := initSwitch(NewSwitch()) - s1.SetNodeInfo(&types.NodeInfo{ - PubKey: s1PrivKey.PubKey().(crypto.PubKeyEd25519), - Moniker: "switch1", - ChainID: "testing", - Version: types.Versions{Tendermint: "123.123.123"}, - }) - s1.SetNodePrivKey(s1PrivKey) - s2 := initSwitch(NewSwitch()) - s2.SetNodeInfo(&types.NodeInfo{ - PubKey: s2PrivKey.PubKey().(crypto.PubKeyEd25519), - Moniker: "switch2", - ChainID: "testing", - Version: types.Versions{Tendermint: "123.123.123"}, - }) - s2.SetNodePrivKey(s2PrivKey) - - // Start switches and reactors - s1.Start() - s2.Start() - - // Create a listener for s1 - l := NewDefaultListener("tcp", ":8001") - - // Dial the listener & add the connection to s2. - lAddr := l.ExternalAddress() - connOut, err := lAddr.Dial() - if err != nil { - t.Fatalf("Could not connect to listener address %v", lAddr) - } else { - t.Logf("Created a connection to listener address %v", lAddr) - } - connIn, ok := <-l.Connections() - if !ok { - t.Fatalf("Could not get inbound connection from listener") - } - - go s1.AddPeerWithConnection(connIn, false) // AddPeer is blocking, requires handshake. - s2.AddPeerWithConnection(connOut, true) - - // Wait for things to happen, peers to get added... - time.Sleep(100 * time.Millisecond) - - // Close the server, no longer needed. 
- l.Stop() - - return s1, s2 -} - -func TestSwitches(t *testing.T) { - s1, s2 := makeSwitchPair(t, func(sw *Switch) *Switch { - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x00), Priority: 10}, - &ChannelDescriptor{ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x02), Priority: 10}, - &ChannelDescriptor{ID: byte(0x03), Priority: 10}, - }, true)) - return sw - }) - defer s1.Stop() - defer s2.Stop() - - // Lets send a message from s1 to s2. - if s1.Peers().Size() != 1 { - t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - ch0Msg := "channel zero" - ch1Msg := "channel foo" - ch2Msg := "channel bar" - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - // Wait for things to settle... - time.Sleep(5000 * time.Millisecond) - - // Check message on ch0 - ch0Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x00)] - if len(ch0Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch0") - } - if !bytes.Equal(ch0Msgs[0].Bytes, wire.BinaryBytes(ch0Msg)) { - t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch0Msg), ch0Msgs[0].Bytes) - } - - // Check message on ch1 - ch1Msgs := s2.Reactor("foo").(*TestReactor).msgsReceived[byte(0x01)] - if len(ch1Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch1") - } - if !bytes.Equal(ch1Msgs[0].Bytes, wire.BinaryBytes(ch1Msg)) { - t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch1Msg), ch1Msgs[0].Bytes) - } - - // Check message on ch2 - ch2Msgs := s2.Reactor("bar").(*TestReactor).msgsReceived[byte(0x02)] - if len(ch2Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch2") - } - if !bytes.Equal(ch2Msgs[0].Bytes, wire.BinaryBytes(ch2Msg)) { - t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch2Msg), ch2Msgs[0].Bytes) - } - -} - -func BenchmarkSwitches(b *testing.B) { - - b.StopTimer() - - s1, s2 := makeSwitchPair(b, func(sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x00), Priority: 10}, - &ChannelDescriptor{ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x02), Priority: 10}, - &ChannelDescriptor{ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - defer s1.Stop() - defer s2.Stop() - - // Allow time for goroutines to boot up - time.Sleep(1000 * time.Millisecond) - b.StartTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, "test data") - for s := range successChan { - if s { - numSuccess += 1 - } else { - numFailure += 1 - } - } - } - - log.Warn(Fmt("success: %v, failure: %v", numSuccess, numFailure)) - - // Allow everything to flush before stopping switches & closing connections. 
- b.StopTimer() - time.Sleep(1000 * time.Millisecond) - -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/README.md b/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/README.md deleted file mode 100644 index 557d05bdc..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `tendermint/p2p/upnp` - -## Resources - -* http://www.upnp-hacks.org/upnp.html diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/log.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/log.go deleted file mode 100644 index edc5b4980..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/log.go +++ /dev/null @@ -1,7 +0,0 @@ -package upnp - -import ( - "github.com/tendermint/go-logger" -) - -var log = logger.New("module", "upnp") diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/probe.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/probe.go deleted file mode 100644 index 5ba9b2370..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/probe.go +++ /dev/null @@ -1,111 +0,0 @@ -package upnp - -import ( - "errors" - "fmt" - "net" - "time" - - . "github.com/tendermint/go-common" -) - -type UPNPCapabilities struct { - PortMapping bool - Hairpin bool -} - -func makeUPNPListener(intPort int, extPort int) (NAT, net.Listener, net.IP, error) { - nat, err := Discover() - if err != nil { - return nil, nil, nil, errors.New(fmt.Sprintf("NAT upnp could not be discovered: %v", err)) - } - log.Info(Fmt("ourIP: %v", nat.(*upnpNAT).ourIP)) - - ext, err := nat.GetExternalAddress() - if err != nil { - return nat, nil, nil, errors.New(fmt.Sprintf("External address error: %v", err)) - } - log.Info(Fmt("External address: %v", ext)) - - port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) - if err != nil { - return nat, nil, ext, errors.New(fmt.Sprintf("Port mapping error: %v", err)) - } - log.Info(Fmt("Port mapping mapped: %v", port)) - - // also run the listener, open for all remote addresses. 
- listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) - if err != nil { - return nat, nil, ext, errors.New(fmt.Sprintf("Error establishing listener: %v", err)) - } - return nat, listener, ext, nil -} - -func testHairpin(listener net.Listener, extAddr string) (supportsHairpin bool) { - // Listener - go func() { - inConn, err := listener.Accept() - if err != nil { - log.Notice(Fmt("Listener.Accept() error: %v", err)) - return - } - log.Info(Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) - buf := make([]byte, 1024) - n, err := inConn.Read(buf) - if err != nil { - log.Notice(Fmt("Incoming connection read error: %v", err)) - return - } - log.Info(Fmt("Incoming connection read %v bytes: %X", n, buf)) - if string(buf) == "test data" { - supportsHairpin = true - return - } - }() - - // Establish outgoing - outConn, err := net.Dial("tcp", extAddr) - if err != nil { - log.Notice(Fmt("Outgoing connection dial error: %v", err)) - return - } - - n, err := outConn.Write([]byte("test data")) - if err != nil { - log.Notice(Fmt("Outgoing connection write error: %v", err)) - return - } - log.Info(Fmt("Outgoing connection wrote %v bytes", n)) - - // Wait for data receipt - time.Sleep(1 * time.Second) - return -} - -func Probe() (caps UPNPCapabilities, err error) { - log.Info("Probing for UPnP!") - - intPort, extPort := 8001, 8001 - - nat, listener, ext, err := makeUPNPListener(intPort, extPort) - if err != nil { - return - } - caps.PortMapping = true - - // Deferred cleanup - defer func() { - err = nat.DeletePortMapping("tcp", intPort, extPort) - if err != nil { - log.Warn(Fmt("Port mapping delete error: %v", err)) - } - listener.Close() - }() - - supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort)) - if supportsHairpin { - caps.Hairpin = true - } - - return -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/upnp.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/upnp.go deleted file mode 100644 index 3d6c55035..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/upnp/upnp.go +++ /dev/null @@ -1,380 +0,0 @@ -/* -Taken from taipei-torrent - -Just enough UPnP to be able to forward ports -*/ -package upnp - -// BUG(jae): TODO: use syscalls to get actual ourIP. 
http://pastebin.com/9exZG4rh - -import ( - "bytes" - "encoding/xml" - "errors" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - err = socket.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return - } - - st := "InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - n, _, err = socket.ReadFromUDP(answerBytes) - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if strings.Index(answer, st) < 0 { - continue - } - // HTTP header field names are case-insensitive. - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation:" - answer = strings.ToLower(answer) - locIndex := strings.Index(answer, locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := strings.TrimSpace(loc[0:endIndex]) - var serviceURL, urnDomain string - serviceURL, urnDomain, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP net.IP - ourIP, err = localIPv4() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} - return - } - } - err = errors.New("UPnP port discovery failed.") - return -} - -type Envelope struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` - Soap *SoapBody -} -type SoapBody struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` - ExternalIP *ExternalIPAddressResponse -} - -type ExternalIPAddressResponse struct { - XMLName xml.Name `xml:"GetExternalIPAddressResponse"` - IPAddress string `xml:"NewExternalIPAddress"` -} - -type ExternalIPAddress struct { - XMLName xml.Name `xml:"NewExternalIPAddress"` - IP string -} - -type UPNPService struct { - ServiceType string `xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -type DeviceList struct { - Device []Device `xml:"device"` -} - -type ServiceList struct { - Service []UPNPService `xml:"service"` -} - -type Device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList DeviceList `xml:"deviceList"` - ServiceList ServiceList `xml:"serviceList"` -} - -type Root struct { - Device Device -} - -func getChildDevice(d *Device, deviceType string) *Device { - dl := d.DeviceList.Device - for i := 0; i < len(dl); i++ { - if strings.Index(dl[i].DeviceType, deviceType) >= 0 { - return &dl[i] - } - } - return nil -} - -func 
getChildService(d *Device, serviceType string) *UPNPService { - sl := d.ServiceList.Service - for i := 0; i < len(sl); i++ { - if strings.Index(sl[i].ServiceType, serviceType) >= 0 { - return &sl[i] - } - } - return nil -} - -func localIPv4() (net.IP, error) { - tt, err := net.Interfaces() - if err != nil { - return nil, err - } - for _, t := range tt { - aa, err := t.Addrs() - if err != nil { - return nil, err - } - for _, a := range aa { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { // loopback address - continue - } - return v4, nil - } - } - return nil, errors.New("cannot find local IP address") -} - -func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) - if err != nil { - return - } - defer r.Body.Close() - if r.StatusCode >= 400 { - err = errors.New(string(r.StatusCode)) - return - } - var root Root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if strings.Index(a.DeviceType, "InternetGatewayDevice:1") < 0 { - err = errors.New("No InternetGatewayDevice") - return - } - b := getChildDevice(a, "WANDevice:1") - if b == nil { - err = errors.New("No WANDevice") - return - } - c := getChildDevice(b, "WANConnectionDevice:1") - if c == nil { - err = errors.New("No WANConnectionDevice") - return - } - d := getChildService(c, "WANIPConnection:1") - if d == nil { - // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice, - // instead of under WanConnectionDevice - d = getChildService(b, "WANIPConnection:1") - - if d == nil { - err = errors.New("No WANIPConnection") - return - } - } - // Extract the domain name, which isn't always 'schemas-upnp-org' - urnDomain = strings.Split(d.ServiceType, ":")[1] - url = combineURL(rootURL, d.ControlURL) - return -} - -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -func soapRequest(url, function, message, domain string) (r *http.Response, err error) { - fullMessage := "" + - "\r\n" + - "" + message + "" - - req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") - req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - //req.Header.Set("Transfer-Encoding", "chunked") - req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") - req.Header.Set("Connection", "Close") - req.Header.Set("Cache-Control", "no-cache") - req.Header.Set("Pragma", "no-cache") - - // log.Stderr("soapRequest ", req) - - r, err = http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - /*if r.Body != nil { - defer r.Body.Close() - }*/ - - if r.StatusCode >= 400 { - // log.Stderr(function, r.StatusCode) - err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function) - r = nil - return - } - return -} - -type statusInfo struct { - externalIpAddress string -} - -func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - - message := "\r\n" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - 
var envelope Envelope - data, err := ioutil.ReadAll(response.Body) - reader := bytes.NewReader(data) - xml.NewDecoder(reader).Decode(&envelope) - - info = statusInfo{envelope.Soap.ExternalIP.IPAddress} - - if err != nil { - return - } - - return -} - -func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { - info, err := n.getExternalIPAddress() - if err != nil { - return - } - addr = net.ParseIP(info.externalIpAddress) - return -} - -func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) { - // A single concatenation would break ARM compilation. - message := "\r\n" + - "" + strconv.Itoa(externalPort) - message += "" + protocol + "" - message += "" + strconv.Itoa(internalPort) + "" + - "" + n.ourIP + "" + - "1" - message += description + - "" + strconv.Itoa(timeout) + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := ioutil.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "\r\n" + - "" + strconv.Itoa(externalPort) + - "" + protocol + "" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/util.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/util.go deleted file mode 100644 index 2be320263..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package p2p - -import ( - "crypto/sha256" -) - -// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. -func doubleSha256(b []byte) []byte { - hasher := sha256.New() - hasher.Write(b) - sum := hasher.Sum(nil) - hasher.Reset() - hasher.Write(sum) - return hasher.Sum(nil) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-p2p/version.go b/Godeps/_workspace/src/github.com/tendermint/go-p2p/version.go deleted file mode 100644 index 7e51463dc..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-p2p/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package p2p - -const Version = "0.3.0" diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/LICENSE.md b/Godeps/_workspace/src/github.com/tendermint/go-wire/LICENSE.md deleted file mode 100644 index a2c8ecdfb..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/LICENSE.md +++ /dev/null @@ -1,206 +0,0 @@ -Tendermint Go-Wire -Copyright (C) 2015 Tendermint - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. 
States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
- -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. 
-You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
-c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
- -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. - -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
- -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/README.md b/Godeps/_workspace/src/github.com/tendermint/go-wire/README.md deleted file mode 100644 index b9c2c319a..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# `tendermint/go-wire` - -The `binary` submodule encodes primary types and structs into bytes. - -## Primary types - -uint\*, int\*, string, time, byteslice and byteslice-slice types can be -encoded and decoded with the following methods: - -The following writes `o uint64` to `w io.Writer`, and increments `n` and/or sets `err` -```go -WriteUint64(o uint64, w io.Writer, n *int64, err *error) - -// Typical usage: -buf, n, err := new(bytes.Buffer), new(int64), new(error) -WriteUint64(uint64(x), buf, n, err) -if *err != nil { - panic(err) -} - -``` - -The following reads a `uint64` from `r io.Reader`, and increments `n` and/or sets `err` -```go -var o = ReadUint64(r io.Reader, n *int64, err *error) -``` - -Similar methods for `uint32`, `uint16`, `uint8`, `int64`, `int32`, `int16`, `int8` exist. -Protobuf variable length encoding is done with `uint` and `int` types: -```go -WriteUvarint(o uint, w io.Writer, n *int64, err *error) -var o = ReadUvarint(r io.Reader, n *int64, err *error) -``` - -Byteslices can be written with: -```go -WriteByteSlice(bz []byte, w io.Writer, n *int64, err *error) -``` - -Byteslices (and all slices such as byteslice-slices) are prepended with -`uvarint` encoded length, so `ReadByteSlice()` knows how many bytes to read. - -Note that there is no type information encoded -- the caller is assumed to know what types -to decode. - -## Struct Types - -Struct types can be automatically encoded with reflection. Unlike json-encoding, no field -name or type information is encoded. Field values are simply encoded in order. 
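
Before the struct example below, a minimal end-to-end sketch of the length-prefix behaviour described under Primary types. It assumes the vendored `github.com/tendermint/go-wire` import path from this tree, and the hex output shown in the comments follows the `WriteVarint` scheme in `int.go` later in this diff.

```go
package main

import (
	"bytes"
	"fmt"

	wire "github.com/tendermint/go-wire"
)

func main() {
	buf, n, err := new(bytes.Buffer), new(int64), new(error)

	// Length-prefixed write: a varint length, then the raw bytes.
	wire.WriteByteSlice([]byte("hi"), buf, n, err)
	if *err != nil {
		panic(*err)
	}
	// Per the varint scheme in int.go this should print 01026869:
	// 01 = one length byte follows, 02 = length 2, 6869 = "hi".
	fmt.Printf("%X\n", buf.Bytes())

	// ReadByteSlice consumes the same prefix to know how many bytes to read.
	out := wire.ReadByteSlice(bytes.NewReader(buf.Bytes()), new(int64), new(error))
	fmt.Println(string(out)) // hi
}
```

The same prefix scheme nests for `WriteByteSlices`: a varint count of slices, each followed by its own length-prefixed bytes.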
- -```go -type Foo struct { - MyString string - MyUint32 uint32 - myPrivateBytes []byte -} - -foo := Foo{"my string", math.MaxUint32, []byte("my private bytes")} - -buf, n, err := new(bytes.Buffer), new(int64), new(error) -WriteBinary(foo, buf, n, err) - -// fmt.Printf("%X", buf.Bytes()) gives: -// 096D7920737472696E67FFFFFFFF -// 09: uvarint encoded length of string "my string" -// 6D7920737472696E67: bytes of string "my string" -// FFFFFFFF: bytes for MaxUint32 -// Note that the unexported "myPrivateBytes" isn't encoded. - -foo2 := ReadBinary(Foo{}, buf, n, err).(Foo) - -// Or, to decode onto a pointer: -foo2 := ReadBinaryPtr(&Foo{}, buf, n, err).(*Foo) -``` - -WriteBinary and ReadBinary can encode/decode structs recursively. However, interface field -values are a bit more complicated. - -```go -type Greeter interface { - Greet() string -} - -type Dog struct{} -func (d Dog) Greet() string { return "Woof!" } - -type Cat struct{} -func (c Cat) Greet() string { return "Meow!" } - -type Foo struct { - Greeter -} - -foo := Foo{Dog{}} - -buf, n, err := new(bytes.Buffer), new(int64), new(error) -WriteBinary(foo, buf, n, err) - -// This errors because we don't know whether to read a Dog or Cat. -foo2 := ReadBinary(Foo{}, buf, n, err) -``` - -In the above example, `ReadBinary()` fails because the `Greeter` field for `Foo{}` -is ambiguous -- it could be either a `Dog{}` or a `Cat{}`, like a union structure. -The solution is to declare the concrete implementation types for interfaces: - -```go -type Dog struct{} -func (d Dog) TypeByte() byte { return GreeterTypeDog } -func (d Dog) Greet() string { return "Woof!" } - -type Cat struct{} -func (c Cat) TypeByte() byte { return GreeterTypeCat } -func (c Cat) Greet() string { return "Meow!" } - -var _ = RegisterInterface( - struct{Greeter}{}, - ConcreteType{Dog{}}, - ConcreteType{Cat{}}, -}) -``` - -NOTE: The TypeByte() is written and expected to be read even when the struct -is encoded or decoded directly: - -```go -WriteBinary(Dog{}, buf, n, err) // Writes GreeterTypeDog byte -dog_ := ReadBinary(Dog{}, buf, n, err) // Expects to read GreeterTypeDog byte -dog := dog_.(Dog) // ok if *err != nil, otherwise dog_ == nil. -``` - -### Revisions - -This documentation is out of date. Here are some changes that still need documentation: - -* 0x00 is reserved as a nil byte for RegisterInterface -* moved TypeByte() into RegisterInterface/ConcreteType -* Pointers that don't have a declared TypeByte() are - encoded with a leading 0x00 (nil) or 0x01. - diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/byteslice.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/byteslice.go deleted file mode 100644 index c9ed62613..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/byteslice.go +++ /dev/null @@ -1,68 +0,0 @@ -package wire - -import ( - "io" - - . 
"github.com/tendermint/go-common" -) - -func WriteByteSlice(bz []byte, w io.Writer, n *int64, err *error) { - WriteVarint(len(bz), w, n, err) - WriteTo(bz, w, n, err) -} - -func ReadByteSlice(r io.Reader, n *int64, err *error) []byte { - length := ReadVarint(r, n, err) - if *err != nil { - return nil - } - if length < 0 { - *err = ErrBinaryReadSizeUnderflow - return nil - } - if MaxBinaryReadSize < MaxInt64(int64(length), *n+int64(length)) { - *err = ErrBinaryReadSizeOverflow - return nil - } - - buf := make([]byte, length) - ReadFull(buf, r, n, err) - return buf -} - -//----------------------------------------------------------------------------- - -func WriteByteSlices(bzz [][]byte, w io.Writer, n *int64, err *error) { - WriteVarint(len(bzz), w, n, err) - for _, bz := range bzz { - WriteByteSlice(bz, w, n, err) - if *err != nil { - return - } - } -} - -func ReadByteSlices(r io.Reader, n *int64, err *error) [][]byte { - length := ReadVarint(r, n, err) - if *err != nil { - return nil - } - if length < 0 { - *err = ErrBinaryReadSizeUnderflow - return nil - } - if MaxBinaryReadSize < MaxInt64(int64(length), *n+int64(length)) { - *err = ErrBinaryReadSizeOverflow - return nil - } - - bzz := make([][]byte, length) - for i := 0; i < length; i++ { - bz := ReadByteSlice(r, n, err) - if *err != nil { - return nil - } - bzz[i] = bz - } - return bzz -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/codec.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/codec.go deleted file mode 100644 index 61a3f4d9d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/codec.go +++ /dev/null @@ -1,171 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "fmt" - . "github.com/tendermint/go-common" - "io" - "reflect" - "time" -) - -type Encoder func(o interface{}, w io.Writer, n *int64, err *error) -type Decoder func(r io.Reader, n *int64, err *error) interface{} -type Comparator func(o1 interface{}, o2 interface{}) int - -type Codec struct { - Encode Encoder - Decode Decoder - Compare Comparator -} - -const ( - typeByte = byte(0x01) - typeInt8 = byte(0x02) - // typeUint8 = byte(0x03) - typeInt16 = byte(0x04) - typeUint16 = byte(0x05) - typeInt32 = byte(0x06) - typeUint32 = byte(0x07) - typeInt64 = byte(0x08) - typeUint64 = byte(0x09) - typeVarint = byte(0x0A) - typeUvarint = byte(0x0B) - typeString = byte(0x10) - typeByteSlice = byte(0x11) - typeTime = byte(0x20) -) - -func BasicCodecEncoder(o interface{}, w io.Writer, n *int64, err *error) { - switch o := o.(type) { - case nil: - PanicSanity("nil type unsupported") - case byte: - WriteByte(typeByte, w, n, err) - WriteByte(o, w, n, err) - case int8: - WriteByte(typeInt8, w, n, err) - WriteInt8(o, w, n, err) - //case uint8: - // WriteByte( typeUint8, w, n, err) - // WriteUint8( o, w, n, err) - case int16: - WriteByte(typeInt16, w, n, err) - WriteInt16(o, w, n, err) - case uint16: - WriteByte(typeUint16, w, n, err) - WriteUint16(o, w, n, err) - case int32: - WriteByte(typeInt32, w, n, err) - WriteInt32(o, w, n, err) - case uint32: - WriteByte(typeUint32, w, n, err) - WriteUint32(o, w, n, err) - case int64: - WriteByte(typeInt64, w, n, err) - WriteInt64(o, w, n, err) - case uint64: - WriteByte(typeUint64, w, n, err) - WriteUint64(o, w, n, err) - case int: - WriteByte(typeVarint, w, n, err) - WriteVarint(o, w, n, err) - case uint: - WriteByte(typeUvarint, w, n, err) - WriteUvarint(o, w, n, err) - case string: - WriteByte(typeString, w, n, err) - WriteString(o, w, n, err) - case []byte: - WriteByte(typeByteSlice, w, n, err) - 
WriteByteSlice(o, w, n, err) - case time.Time: - WriteByte(typeTime, w, n, err) - WriteTime(o, w, n, err) - default: - PanicSanity(fmt.Sprintf("Unsupported type: %v", reflect.TypeOf(o))) - } -} - -func BasicCodecDecoder(r io.Reader, n *int64, err *error) (o interface{}) { - type_ := ReadByte(r, n, err) - if *err != nil { - return - } - switch type_ { - case typeByte: - o = ReadByte(r, n, err) - case typeInt8: - o = ReadInt8(r, n, err) - //case typeUint8: - // o = ReadUint8(r, n, err) - case typeInt16: - o = ReadInt16(r, n, err) - case typeUint16: - o = ReadUint16(r, n, err) - case typeInt32: - o = ReadInt32(r, n, err) - case typeUint32: - o = ReadUint32(r, n, err) - case typeInt64: - o = ReadInt64(r, n, err) - case typeUint64: - o = ReadUint64(r, n, err) - case typeVarint: - o = ReadVarint(r, n, err) - case typeUvarint: - o = ReadUvarint(r, n, err) - case typeString: - o = ReadString(r, n, err) - case typeByteSlice: - o = ReadByteSlice(r, n, err) - case typeTime: - o = ReadTime(r, n, err) - default: - *err = errors.New(Fmt("Unsupported type byte: %X", type_)) - } - return -} - -// Contract: Caller must ensure that types match. -func BasicCodecComparator(o1 interface{}, o2 interface{}) int { - switch o1.(type) { - case byte: - return int(o1.(byte) - o2.(byte)) - case int8: - return int(o1.(int8) - o2.(int8)) - //case uint8: - case int16: - return int(o1.(int16) - o2.(int16)) - case uint16: - return int(o1.(uint16) - o2.(uint16)) - case int32: - return int(o1.(int32) - o2.(int32)) - case uint32: - return int(o1.(uint32) - o2.(uint32)) - case int64: - return int(o1.(int64) - o2.(int64)) - case uint64: - return int(o1.(uint64) - o2.(uint64)) - case int: - return o1.(int) - o2.(int) - case uint: - return int(o1.(uint)) - int(o2.(uint)) - case string: - return bytes.Compare([]byte(o1.(string)), []byte(o2.(string))) - case []byte: - return bytes.Compare(o1.([]byte), o2.([]byte)) - case time.Time: - return int(o1.(time.Time).UnixNano() - o2.(time.Time).UnixNano()) - default: - PanicSanity(Fmt("Unsupported type: %v", reflect.TypeOf(o1))) - } - return 0 -} - -var BasicCodec = Codec{ - Encode: BasicCodecEncoder, - Decode: BasicCodecDecoder, - Compare: BasicCodecComparator, -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/int.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/int.go deleted file mode 100644 index c63666faf..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/int.go +++ /dev/null @@ -1,270 +0,0 @@ -package wire - -import ( - "encoding/binary" - "errors" - "io" -) - -// Byte - -func WriteByte(b byte, w io.Writer, n *int64, err *error) { - WriteTo([]byte{b}, w, n, err) -} - -func ReadByte(r io.Reader, n *int64, err *error) byte { - buf := make([]byte, 1) - ReadFull(buf, r, n, err) - return buf[0] -} - -// Int8 - -func WriteInt8(i int8, w io.Writer, n *int64, err *error) { - WriteByte(byte(i), w, n, err) -} - -func ReadInt8(r io.Reader, n *int64, err *error) int8 { - return int8(ReadByte(r, n, err)) -} - -// Uint8 - -func WriteUint8(i uint8, w io.Writer, n *int64, err *error) { - WriteByte(byte(i), w, n, err) -} - -func ReadUint8(r io.Reader, n *int64, err *error) uint8 { - return uint8(ReadByte(r, n, err)) -} - -// Int16 - -func WriteInt16(i int16, w io.Writer, n *int64, err *error) { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, uint16(i)) - *n += 2 - WriteTo(buf, w, n, err) -} - -func ReadInt16(r io.Reader, n *int64, err *error) int16 { - buf := make([]byte, 2) - ReadFull(buf, r, n, err) - return int16(binary.BigEndian.Uint16(buf)) 
-} - -// Uint16 - -func WriteUint16(i uint16, w io.Writer, n *int64, err *error) { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, uint16(i)) - *n += 2 - WriteTo(buf, w, n, err) -} - -func ReadUint16(r io.Reader, n *int64, err *error) uint16 { - buf := make([]byte, 2) - ReadFull(buf, r, n, err) - return uint16(binary.BigEndian.Uint16(buf)) -} - -// []Uint16 - -func WriteUint16s(iz []uint16, w io.Writer, n *int64, err *error) { - WriteUint32(uint32(len(iz)), w, n, err) - for _, i := range iz { - WriteUint16(i, w, n, err) - if *err != nil { - return - } - } -} - -func ReadUint16s(r io.Reader, n *int64, err *error) []uint16 { - length := ReadUint32(r, n, err) - if *err != nil { - return nil - } - iz := make([]uint16, length) - for j := uint32(0); j < length; j++ { - ii := ReadUint16(r, n, err) - if *err != nil { - return nil - } - iz[j] = ii - } - return iz -} - -// Int32 - -func WriteInt32(i int32, w io.Writer, n *int64, err *error) { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(i)) - *n += 4 - WriteTo(buf, w, n, err) -} - -func ReadInt32(r io.Reader, n *int64, err *error) int32 { - buf := make([]byte, 4) - ReadFull(buf, r, n, err) - return int32(binary.BigEndian.Uint32(buf)) -} - -// Uint32 - -func WriteUint32(i uint32, w io.Writer, n *int64, err *error) { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(i)) - *n += 4 - WriteTo(buf, w, n, err) -} - -func ReadUint32(r io.Reader, n *int64, err *error) uint32 { - buf := make([]byte, 4) - ReadFull(buf, r, n, err) - return uint32(binary.BigEndian.Uint32(buf)) -} - -// Int64 - -func WriteInt64(i int64, w io.Writer, n *int64, err *error) { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - *n += 8 - WriteTo(buf, w, n, err) -} - -func ReadInt64(r io.Reader, n *int64, err *error) int64 { - buf := make([]byte, 8) - ReadFull(buf, r, n, err) - return int64(binary.BigEndian.Uint64(buf)) -} - -// Uint64 - -func WriteUint64(i uint64, w io.Writer, n *int64, err *error) { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - *n += 8 - WriteTo(buf, w, n, err) -} - -func ReadUint64(r io.Reader, n *int64, err *error) uint64 { - buf := make([]byte, 8) - ReadFull(buf, r, n, err) - return uint64(binary.BigEndian.Uint64(buf)) -} - -// Varint - -func uvarintSize(i uint64) int { - if i == 0 { - return 0 - } - if i < 1<<8 { - return 1 - } - if i < 1<<16 { - return 2 - } - if i < 1<<24 { - return 3 - } - if i < 1<<32 { - return 4 - } - if i < 1<<40 { - return 5 - } - if i < 1<<48 { - return 6 - } - if i < 1<<56 { - return 7 - } - return 8 -} - -func WriteVarint(i int, w io.Writer, n *int64, err *error) { - var negate = false - if i < 0 { - negate = true - i = -i - } - var size = uvarintSize(uint64(i)) - if negate { - // e.g. 
0xF1 for a single negative byte - WriteUint8(uint8(size+0xF0), w, n, err) - } else { - WriteUint8(uint8(size), w, n, err) - } - if size > 0 { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - WriteTo(buf[(8-size):], w, n, err) - } - *n += int64(1 + size) -} - -func ReadVarint(r io.Reader, n *int64, err *error) int { - var size = ReadUint8(r, n, err) - var negate = false - if (size >> 4) == 0xF { - negate = true - size = size & 0x0F - } - if size > 8 { - setFirstErr(err, errors.New("Varint overflow")) - return 0 - } - if size == 0 { - if negate { - setFirstErr(err, errors.New("Varint does not allow negative zero")) - } - return 0 - } - buf := make([]byte, 8) - ReadFull(buf[(8-size):], r, n, err) - *n += int64(1 + size) - var i = int(binary.BigEndian.Uint64(buf)) - if negate { - return -i - } else { - return i - } -} - -// Uvarint - -func WriteUvarint(i uint, w io.Writer, n *int64, err *error) { - var size = uvarintSize(uint64(i)) - WriteUint8(uint8(size), w, n, err) - if size > 0 { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - WriteTo(buf[(8-size):], w, n, err) - } - *n += int64(1 + size) -} - -func ReadUvarint(r io.Reader, n *int64, err *error) uint { - var size = ReadUint8(r, n, err) - if size > 8 { - setFirstErr(err, errors.New("Uvarint overflow")) - return 0 - } - if size == 0 { - return 0 - } - buf := make([]byte, 8) - ReadFull(buf[(8-size):], r, n, err) - *n += int64(1 + size) - return uint(binary.BigEndian.Uint64(buf)) -} - -func setFirstErr(err *error, newErr error) { - if *err == nil && newErr != nil { - *err = newErr - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/int_test.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/int_test.go deleted file mode 100644 index 3baceaa4d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/int_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package wire - -import ( - "bytes" - "fmt" - "testing" -) - -func TestVarint(t *testing.T) { - - check := func(i int, s string) { - buf := new(bytes.Buffer) - n, err := new(int64), new(error) - WriteVarint(i, buf, n, err) - bufBytes := buf.Bytes() // Read before consuming below. - i_ := ReadVarint(buf, n, err) - if i != i_ { - fmt.Println(bufBytes) - t.Fatalf("Encoded %v and got %v", i, i_) - } - if s != "" { - if bufHex := fmt.Sprintf("%X", bufBytes); bufHex != s { - t.Fatalf("Encoded %v, expected %v", bufHex, s) - } - } - } - - // 123457 is some prime. - for i := -(2 << 33); i < (2 << 33); i += 123457 { - check(i, "") - } - - // Near zero - check(-1, "F101") - check(0, "00") - check(1, "0101") - // Positives - check(1<<32-1, "04FFFFFFFF") - check(1<<32+0, "050100000000") - check(1<<32+1, "050100000001") - check(1<<53-1, "071FFFFFFFFFFFFF") - // Negatives - check(-1<<32+1, "F4FFFFFFFF") - check(-1<<32-0, "F50100000000") - check(-1<<32-1, "F50100000001") - check(-1<<53+1, "F71FFFFFFFFFFFFF") -} - -func TestUvarint(t *testing.T) { - - check := func(i uint, s string) { - buf := new(bytes.Buffer) - n, err := new(int64), new(error) - WriteUvarint(i, buf, n, err) - bufBytes := buf.Bytes() - i_ := ReadUvarint(buf, n, err) - if i != i_ { - fmt.Println(buf.Bytes()) - t.Fatalf("Encoded %v and got %v", i, i_) - } - if s != "" { - if bufHex := fmt.Sprintf("%X", bufBytes); bufHex != s { - t.Fatalf("Encoded %v, expected %v", bufHex, s) - } - } - } - - // 123457 is some prime. 
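	// For reference, the wire format these checks exercise, per WriteVarint and
	// WriteUvarint above: the first byte gives the number of value bytes that
	// follow, and the value is written big-endian with leading zero bytes
	// dropped; for the signed Varint, a negative value additionally sets the
	// high nibble of the size byte to 0xF. Hence 0 encodes as "00", 1 as "0101",
	// -1 as "F101", and 1<<32 as "050100000000", matching the expected strings
	// in these tests.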
- for i := 0; i < (2 << 33); i += 123457 { - check(uint(i), "") - } - - check(1, "0101") - check(1<<32-1, "04FFFFFFFF") - check(1<<32+0, "050100000000") - check(1<<32+1, "050100000001") - check(1<<53-1, "071FFFFFFFFFFFFF") - -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/log.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/log.go deleted file mode 100644 index 5aeca07a7..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/log.go +++ /dev/null @@ -1,17 +0,0 @@ -package wire - -import ( - "github.com/tendermint/go-logger" -) - -var log = logger.New("module", "binary") - -func init() { - log.SetHandler( - logger.LvlFilterHandler( - logger.LvlWarn, - //logger.LvlDebug, - logger.RootHandler(), - ), - ) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect.go deleted file mode 100644 index d08c23d68..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect.go +++ /dev/null @@ -1,954 +0,0 @@ -package wire - -import ( - "encoding/hex" - "encoding/json" - "errors" - "io" - "reflect" - "sync" - "time" - - . "github.com/tendermint/go-common" -) - -const ( - ReflectSliceChunk = 1024 -) - -type TypeInfo struct { - Type reflect.Type // The type - - // If Type is kind reflect.Interface, is registered - IsRegisteredInterface bool - ByteToType map[byte]reflect.Type - TypeToByte map[reflect.Type]byte - - // If Type is concrete - Byte byte - - // If Type is kind reflect.Struct - Fields []StructFieldInfo -} - -type Options struct { - JSONName string // (JSON) Corresponding JSON field name. (override with `json=""`) - Varint bool // (Binary) Use length-prefixed encoding for (u)int* -} - -func getOptionsFromField(field reflect.StructField) (skip bool, opts Options) { - jsonName := field.Tag.Get("json") - if jsonName == "-" { - skip = true - return - } else if jsonName == "" { - jsonName = field.Name - } - varint := false - binTag := field.Tag.Get("binary") - if binTag == "varint" { // TODO: extend - varint = true - } - opts = Options{ - JSONName: jsonName, - Varint: varint, - } - return -} - -type StructFieldInfo struct { - Index int // Struct field index - Type reflect.Type // Struct field type - Options // Encoding options -} - -func (info StructFieldInfo) unpack() (int, reflect.Type, Options) { - return info.Index, info.Type, info.Options -} - -// e.g. If o is struct{Foo}{}, return is the Foo reflection type. -func GetTypeFromStructDeclaration(o interface{}) reflect.Type { - rt := reflect.TypeOf(o) - if rt.NumField() != 1 { - PanicSanity("Unexpected number of fields in struct-wrapped declaration of type") - } - return rt.Field(0).Type -} - -func SetByteForType(typeByte byte, rt reflect.Type) { - typeInfo := GetTypeInfo(rt) - if typeInfo.Byte != 0x00 && typeInfo.Byte != typeByte { - PanicSanity(Fmt("Type %v already registered with type byte %X", rt, typeByte)) - } - typeInfo.Byte = typeByte - // If pointer, we need to set it for the concrete type as well. 
- if rt.Kind() == reflect.Ptr { - SetByteForType(typeByte, rt.Elem()) - } -} - -// Predeclaration of common types -var ( - timeType = GetTypeFromStructDeclaration(struct{ time.Time }{}) -) - -const ( - iso8601 = "2006-01-02T15:04:05.000Z" // forced microseconds -) - -// NOTE: do not access typeInfos directly, but call GetTypeInfo() -var typeInfosMtx sync.Mutex -var typeInfos = map[reflect.Type]*TypeInfo{} - -func GetTypeInfo(rt reflect.Type) *TypeInfo { - typeInfosMtx.Lock() - defer typeInfosMtx.Unlock() - info := typeInfos[rt] - if info == nil { - info = MakeTypeInfo(rt) - typeInfos[rt] = info - } - return info -} - -// For use with the RegisterInterface declaration -type ConcreteType struct { - O interface{} - Byte byte -} - -// Must use this to register an interface to properly decode the -// underlying concrete type. -func RegisterInterface(o interface{}, ctypes ...ConcreteType) *TypeInfo { - it := GetTypeFromStructDeclaration(o) - if it.Kind() != reflect.Interface { - PanicSanity("RegisterInterface expects an interface") - } - toType := make(map[byte]reflect.Type, 0) - toByte := make(map[reflect.Type]byte, 0) - for _, ctype := range ctypes { - crt := reflect.TypeOf(ctype.O) - typeByte := ctype.Byte - SetByteForType(typeByte, crt) - if typeByte == 0x00 { - PanicSanity(Fmt("Byte of 0x00 is reserved for nil (%v)", ctype)) - } - if toType[typeByte] != nil { - PanicSanity(Fmt("Duplicate Byte for type %v and %v", ctype, toType[typeByte])) - } - toType[typeByte] = crt - toByte[crt] = typeByte - } - typeInfo := &TypeInfo{ - Type: it, - IsRegisteredInterface: true, - ByteToType: toType, - TypeToByte: toByte, - } - typeInfos[it] = typeInfo - return typeInfo -} - -func MakeTypeInfo(rt reflect.Type) *TypeInfo { - info := &TypeInfo{Type: rt} - - // If struct, register field name options - if rt.Kind() == reflect.Struct { - numFields := rt.NumField() - structFields := []StructFieldInfo{} - for i := 0; i < numFields; i++ { - field := rt.Field(i) - if field.PkgPath != "" { - continue - } - skip, opts := getOptionsFromField(field) - if skip { - continue - } - structFields = append(structFields, StructFieldInfo{ - Index: i, - Type: field.Type, - Options: opts, - }) - } - info.Fields = structFields - } - - return info -} - -// Contract: Caller must ensure that rt is supported -// (e.g. is recursively composed of supported native types, and structs and slices.) -func readReflectBinary(rv reflect.Value, rt reflect.Type, opts Options, r io.Reader, n *int64, err *error) { - - // Get typeInfo - typeInfo := GetTypeInfo(rt) - - if rt.Kind() == reflect.Interface { - if !typeInfo.IsRegisteredInterface { - // There's no way we can read such a thing. - *err = errors.New(Fmt("Cannot read unregistered interface type %v", rt)) - return - } - typeByte := ReadByte(r, n, err) - if *err != nil { - return - } - if typeByte == 0x00 { - return // nil - } - crt, ok := typeInfo.ByteToType[typeByte] - if !ok { - *err = errors.New(Fmt("Unexpected type byte %X for type %v", typeByte, rt)) - return - } - crv := reflect.New(crt).Elem() - r = NewPrefixedReader([]byte{typeByte}, r) - readReflectBinary(crv, crt, opts, r, n, err) - rv.Set(crv) // NOTE: orig rv is ignored. - return - } - - if rt.Kind() == reflect.Ptr { - typeByte := ReadByte(r, n, err) - if *err != nil { - return - } - if typeByte == 0x00 { - return // nil - } - // Create new if rv is nil. 
- if rv.IsNil() { - newRv := reflect.New(rt.Elem()) - rv.Set(newRv) - rv = newRv - } - // Dereference pointer - rv, rt = rv.Elem(), rt.Elem() - typeInfo = GetTypeInfo(rt) - if typeInfo.Byte != 0x00 { - r = NewPrefixedReader([]byte{typeByte}, r) - } else if typeByte != 0x01 { - *err = errors.New(Fmt("Unexpected type byte %X for ptr of untyped thing", typeByte)) - return - } - // continue... - } - - // Read Byte prefix - if typeInfo.Byte != 0x00 { - typeByte := ReadByte(r, n, err) - if typeByte != typeInfo.Byte { - *err = errors.New(Fmt("Expected Byte of %X but got %X", typeInfo.Byte, typeByte)) - return - } - } - - switch rt.Kind() { - case reflect.Array: - elemRt := rt.Elem() - length := rt.Len() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Bytearrays - buf := make([]byte, length) - ReadFull(buf, r, n, err) - if *err != nil { - return - } - log.Info("Read bytearray", "bytes", buf) - reflect.Copy(rv, reflect.ValueOf(buf)) - } else { - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - readReflectBinary(elemRv, elemRt, opts, r, n, err) - if *err != nil { - return - } - if MaxBinaryReadSize < *n { - *err = ErrBinaryReadSizeOverflow - return - } - } - log.Info(Fmt("Read %v-array", elemRt), "length", length) - } - - case reflect.Slice: - elemRt := rt.Elem() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Byteslices - byteslice := ReadByteSlice(r, n, err) - log.Info("Read byteslice", "bytes", byteslice) - rv.Set(reflect.ValueOf(byteslice)) - } else { - var sliceRv reflect.Value - // Read length - length := ReadVarint(r, n, err) - log.Info(Fmt("Read length: %v", length)) - sliceRv = reflect.MakeSlice(rt, 0, 0) - // read one ReflectSliceChunk at a time and append - for i := 0; i*ReflectSliceChunk < length; i++ { - l := MinInt(ReflectSliceChunk, length-i*ReflectSliceChunk) - tmpSliceRv := reflect.MakeSlice(rt, l, l) - for j := 0; j < l; j++ { - elemRv := tmpSliceRv.Index(j) - readReflectBinary(elemRv, elemRt, opts, r, n, err) - if *err != nil { - return - } - if MaxBinaryReadSize < *n { - *err = ErrBinaryReadSizeOverflow - return - } - } - sliceRv = reflect.AppendSlice(sliceRv, tmpSliceRv) - } - - rv.Set(sliceRv) - } - - case reflect.Struct: - if rt == timeType { - // Special case: time.Time - t := ReadTime(r, n, err) - log.Info(Fmt("Read time: %v", t)) - rv.Set(reflect.ValueOf(t)) - } else { - for _, fieldInfo := range typeInfo.Fields { - i, fieldType, opts := fieldInfo.unpack() - fieldRv := rv.Field(i) - readReflectBinary(fieldRv, fieldType, opts, r, n, err) - } - } - - case reflect.String: - str := ReadString(r, n, err) - log.Info(Fmt("Read string: %v", str)) - rv.SetString(str) - - case reflect.Int64: - if opts.Varint { - num := ReadVarint(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - } else { - num := ReadInt64(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - } - - case reflect.Int32: - num := ReadUint32(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - - case reflect.Int16: - num := ReadUint16(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - - case reflect.Int8: - num := ReadUint8(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - - case reflect.Int: - num := ReadVarint(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - - case reflect.Uint64: - if opts.Varint { - num := ReadVarint(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - } else { - num := ReadUint64(r, n, err) - log.Info(Fmt("Read num: 
%v", num)) - rv.SetUint(uint64(num)) - } - - case reflect.Uint32: - num := ReadUint32(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - - case reflect.Uint16: - num := ReadUint16(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - - case reflect.Uint8: - num := ReadUint8(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - - case reflect.Uint: - num := ReadVarint(r, n, err) - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - - case reflect.Bool: - num := ReadUint8(r, n, err) - log.Info(Fmt("Read bool: %v", num)) - rv.SetBool(num > 0) - - default: - PanicSanity(Fmt("Unknown field type %v", rt.Kind())) - } -} - -// rv: the reflection value of the thing to write -// rt: the type of rv as declared in the container, not necessarily rv.Type(). -func writeReflectBinary(rv reflect.Value, rt reflect.Type, opts Options, w io.Writer, n *int64, err *error) { - - // Get typeInfo - typeInfo := GetTypeInfo(rt) - - if rt.Kind() == reflect.Interface { - if rv.IsNil() { - // XXX ensure that typeByte 0 is reserved. - WriteByte(0x00, w, n, err) - return - } - crv := rv.Elem() // concrete reflection value - crt := crv.Type() // concrete reflection type - if typeInfo.IsRegisteredInterface { - // See if the crt is registered. - // If so, we're more restrictive. - _, ok := typeInfo.TypeToByte[crt] - if !ok { - switch crt.Kind() { - case reflect.Ptr: - *err = errors.New(Fmt("Unexpected pointer type %v for registered interface %v. "+ - "Was it registered as a value receiver rather than as a pointer receiver?", crt, rt.Name())) - case reflect.Struct: - *err = errors.New(Fmt("Unexpected struct type %v for registered interface %v. "+ - "Was it registered as a pointer receiver rather than as a value receiver?", crt, rt.Name())) - default: - *err = errors.New(Fmt("Unexpected type %v for registered interface %v. "+ - "If this is intentional, please register it.", crt, rt.Name())) - } - return - } - } else { - // We support writing unsafely for convenience. - } - // We don't have to write the typeByte here, - // the writeReflectBinary() call below will write it. - writeReflectBinary(crv, crt, opts, w, n, err) - return - } - - if rt.Kind() == reflect.Ptr { - // Dereference pointer - rv, rt = rv.Elem(), rt.Elem() - typeInfo = GetTypeInfo(rt) - if !rv.IsValid() { - // For better compatibility with other languages, - // as far as tendermint/wire is concerned, - // pointers to nil values are the same as nil. - WriteByte(0x00, w, n, err) - return - } - if typeInfo.Byte == 0x00 { - WriteByte(0x01, w, n, err) - // continue... - } else { - // continue... 
- } - } - - // Write type byte - if typeInfo.Byte != 0x00 { - WriteByte(typeInfo.Byte, w, n, err) - } - - // All other types - switch rt.Kind() { - case reflect.Array: - elemRt := rt.Elem() - length := rt.Len() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Bytearrays - if rv.CanAddr() { - byteslice := rv.Slice(0, length).Bytes() - WriteTo(byteslice, w, n, err) - } else { - buf := make([]byte, length) - reflect.Copy(reflect.ValueOf(buf), rv) - WriteTo(buf, w, n, err) - } - } else { - // Write elems - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - writeReflectBinary(elemRv, elemRt, opts, w, n, err) - } - } - - case reflect.Slice: - elemRt := rt.Elem() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Byteslices - byteslice := rv.Bytes() - WriteByteSlice(byteslice, w, n, err) - } else { - // Write length - length := rv.Len() - WriteVarint(length, w, n, err) - // Write elems - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - writeReflectBinary(elemRv, elemRt, opts, w, n, err) - } - } - - case reflect.Struct: - if rt == timeType { - // Special case: time.Time - WriteTime(rv.Interface().(time.Time), w, n, err) - } else { - for _, fieldInfo := range typeInfo.Fields { - i, fieldType, opts := fieldInfo.unpack() - fieldRv := rv.Field(i) - writeReflectBinary(fieldRv, fieldType, opts, w, n, err) - } - } - - case reflect.String: - WriteString(rv.String(), w, n, err) - - case reflect.Int64: - if opts.Varint { - WriteVarint(int(rv.Int()), w, n, err) - } else { - WriteInt64(rv.Int(), w, n, err) - } - - case reflect.Int32: - WriteInt32(int32(rv.Int()), w, n, err) - - case reflect.Int16: - WriteInt16(int16(rv.Int()), w, n, err) - - case reflect.Int8: - WriteInt8(int8(rv.Int()), w, n, err) - - case reflect.Int: - WriteVarint(int(rv.Int()), w, n, err) - - case reflect.Uint64: - if opts.Varint { - WriteUvarint(uint(rv.Uint()), w, n, err) - } else { - WriteUint64(rv.Uint(), w, n, err) - } - - case reflect.Uint32: - WriteUint32(uint32(rv.Uint()), w, n, err) - - case reflect.Uint16: - WriteUint16(uint16(rv.Uint()), w, n, err) - - case reflect.Uint8: - WriteUint8(uint8(rv.Uint()), w, n, err) - - case reflect.Uint: - WriteUvarint(uint(rv.Uint()), w, n, err) - - case reflect.Bool: - if rv.Bool() { - WriteUint8(uint8(1), w, n, err) - } else { - WriteUint8(uint8(0), w, n, err) - } - - default: - PanicSanity(Fmt("Unknown field type %v", rt.Kind())) - } -} - -//----------------------------------------------------------------------------- - -func readByteJSON(o interface{}) (typeByte byte, rest interface{}, err error) { - oSlice, ok := o.([]interface{}) - if !ok { - err = errors.New(Fmt("Expected type [Byte,?] but got type %v", reflect.TypeOf(o))) - return - } - if len(oSlice) != 2 { - err = errors.New(Fmt("Expected [Byte,?] len 2 but got len %v", len(oSlice))) - return - } - typeByte_, ok := oSlice[0].(float64) - typeByte = byte(typeByte_) - rest = oSlice[1] - return -} - -// Contract: Caller must ensure that rt is supported -// (e.g. is recursively composed of supported native types, and structs and slices.) -// rv and rt refer to the object we're unmarhsaling into, whereas o is the result of naiive json unmarshal (map[string]interface{}) -func readReflectJSON(rv reflect.Value, rt reflect.Type, o interface{}, err *error) { - - // Get typeInfo - typeInfo := GetTypeInfo(rt) - - if rt.Kind() == reflect.Interface { - if !typeInfo.IsRegisteredInterface { - // There's no way we can read such a thing. 
- *err = errors.New(Fmt("Cannot read unregistered interface type %v", rt)) - return - } - if o == nil { - return // nil - } - typeByte, _, err_ := readByteJSON(o) - if err_ != nil { - *err = err_ - return - } - crt, ok := typeInfo.ByteToType[typeByte] - if !ok { - *err = errors.New(Fmt("Byte %X not registered for interface %v", typeByte, rt)) - return - } - crv := reflect.New(crt).Elem() - readReflectJSON(crv, crt, o, err) - rv.Set(crv) // NOTE: orig rv is ignored. - return - } - - if rt.Kind() == reflect.Ptr { - if o == nil { - return // nil - } - // Create new struct if rv is nil. - if rv.IsNil() { - newRv := reflect.New(rt.Elem()) - rv.Set(newRv) - rv = newRv - } - // Dereference pointer - rv, rt = rv.Elem(), rt.Elem() - typeInfo = GetTypeInfo(rt) - // continue... - } - - // Read Byte prefix - if typeInfo.Byte != 0x00 { - typeByte, rest, err_ := readByteJSON(o) - if err_ != nil { - *err = err_ - return - } - if typeByte != typeInfo.Byte { - *err = errors.New(Fmt("Expected Byte of %X but got %X", typeInfo.Byte, byte(typeByte))) - return - } - o = rest - } - - switch rt.Kind() { - case reflect.Array: - elemRt := rt.Elem() - length := rt.Len() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Bytearrays - oString, ok := o.(string) - if !ok { - *err = errors.New(Fmt("Expected string but got type %v", reflect.TypeOf(o))) - return - } - buf, err_ := hex.DecodeString(oString) - if err_ != nil { - *err = err_ - return - } - if len(buf) != length { - *err = errors.New(Fmt("Expected bytearray of length %v but got %v", length, len(buf))) - return - } - log.Info("Read bytearray", "bytes", buf) - reflect.Copy(rv, reflect.ValueOf(buf)) - } else { - oSlice, ok := o.([]interface{}) - if !ok { - *err = errors.New(Fmt("Expected array of %v but got type %v", rt, reflect.TypeOf(o))) - return - } - if len(oSlice) != length { - *err = errors.New(Fmt("Expected array of length %v but got %v", length, len(oSlice))) - return - } - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - readReflectJSON(elemRv, elemRt, oSlice[i], err) - } - log.Info(Fmt("Read %v-array", elemRt), "length", length) - } - - case reflect.Slice: - elemRt := rt.Elem() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Byteslices - oString, ok := o.(string) - if !ok { - *err = errors.New(Fmt("Expected string but got type %v", reflect.TypeOf(o))) - return - } - byteslice, err_ := hex.DecodeString(oString) - if err_ != nil { - *err = err_ - return - } - log.Info("Read byteslice", "bytes", byteslice) - rv.Set(reflect.ValueOf(byteslice)) - } else { - // Read length - oSlice, ok := o.([]interface{}) - if !ok { - *err = errors.New(Fmt("Expected array of %v but got type %v", rt, reflect.TypeOf(o))) - return - } - length := len(oSlice) - log.Info(Fmt("Read length: %v", length)) - sliceRv := reflect.MakeSlice(rt, length, length) - // Read elems - for i := 0; i < length; i++ { - elemRv := sliceRv.Index(i) - readReflectJSON(elemRv, elemRt, oSlice[i], err) - } - rv.Set(sliceRv) - } - - case reflect.Struct: - if rt == timeType { - // Special case: time.Time - str, ok := o.(string) - if !ok { - *err = errors.New(Fmt("Expected string but got type %v", reflect.TypeOf(o))) - return - } - log.Info(Fmt("Read time: %v", str)) - t, err_ := time.Parse(iso8601, str) - if err_ != nil { - *err = err_ - return - } - rv.Set(reflect.ValueOf(t)) - } else { - oMap, ok := o.(map[string]interface{}) - if !ok { - *err = errors.New(Fmt("Expected map but got type %v", reflect.TypeOf(o))) - return - } - // TODO: ensure that all fields are set? 
- // TODO: disallow unknown oMap fields? - for _, fieldInfo := range typeInfo.Fields { - i, fieldType, opts := fieldInfo.unpack() - value, ok := oMap[opts.JSONName] - if !ok { - continue // Skip missing fields. - } - fieldRv := rv.Field(i) - readReflectJSON(fieldRv, fieldType, value, err) - } - } - - case reflect.String: - str, ok := o.(string) - if !ok { - *err = errors.New(Fmt("Expected string but got type %v", reflect.TypeOf(o))) - return - } - log.Info(Fmt("Read string: %v", str)) - rv.SetString(str) - - case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: - num, ok := o.(float64) - if !ok { - *err = errors.New(Fmt("Expected numeric but got type %v", reflect.TypeOf(o))) - return - } - log.Info(Fmt("Read num: %v", num)) - rv.SetInt(int64(num)) - - case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: - num, ok := o.(float64) - if !ok { - *err = errors.New(Fmt("Expected numeric but got type %v", reflect.TypeOf(o))) - return - } - if num < 0 { - *err = errors.New(Fmt("Expected unsigned numeric but got %v", num)) - return - } - log.Info(Fmt("Read num: %v", num)) - rv.SetUint(uint64(num)) - - case reflect.Bool: - bl, ok := o.(bool) - if !ok { - *err = errors.New(Fmt("Expected boolean but got type %v", reflect.TypeOf(o))) - return - } - log.Info(Fmt("Read boolean: %v", bl)) - rv.SetBool(bl) - - default: - PanicSanity(Fmt("Unknown field type %v", rt.Kind())) - } -} - -func writeReflectJSON(rv reflect.Value, rt reflect.Type, w io.Writer, n *int64, err *error) { - log.Info(Fmt("writeReflectJSON(%v, %v, %v, %v, %v)", rv, rt, w, n, err)) - - // Get typeInfo - typeInfo := GetTypeInfo(rt) - - if rt.Kind() == reflect.Interface { - if rv.IsNil() { - // XXX ensure that typeByte 0 is reserved. - WriteTo([]byte("null"), w, n, err) - return - } - crv := rv.Elem() // concrete reflection value - crt := crv.Type() // concrete reflection type - if typeInfo.IsRegisteredInterface { - // See if the crt is registered. - // If so, we're more restrictive. - _, ok := typeInfo.TypeToByte[crt] - if !ok { - switch crt.Kind() { - case reflect.Ptr: - *err = errors.New(Fmt("Unexpected pointer type %v for registered interface %v. "+ - "Was it registered as a value receiver rather than as a pointer receiver?", crt, rt.Name())) - case reflect.Struct: - *err = errors.New(Fmt("Unexpected struct type %v for registered interface %v. "+ - "Was it registered as a pointer receiver rather than as a value receiver?", crt, rt.Name())) - default: - *err = errors.New(Fmt("Unexpected type %v for registered interface %v. "+ - "If this is intentional, please register it.", crt, rt.Name())) - } - return - } - } else { - // We support writing unsafely for convenience. - } - // We don't have to write the typeByte here, - // the writeReflectJSON() call below will write it. - writeReflectJSON(crv, crt, w, n, err) - return - } - - if rt.Kind() == reflect.Ptr { - // Dereference pointer - rv, rt = rv.Elem(), rt.Elem() - typeInfo = GetTypeInfo(rt) - if !rv.IsValid() { - // For better compatibility with other languages, - // as far as tendermint/wire is concerned, - // pointers to nil values are the same as nil. - WriteTo([]byte("null"), w, n, err) - return - } - // continue... 
- } - - // Write Byte - if typeInfo.Byte != 0x00 { - WriteTo([]byte(Fmt("[%v,", typeInfo.Byte)), w, n, err) - defer WriteTo([]byte("]"), w, n, err) - } - - // All other types - switch rt.Kind() { - case reflect.Array: - elemRt := rt.Elem() - length := rt.Len() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Bytearray - bytearray := reflect.ValueOf(make([]byte, length)) - reflect.Copy(bytearray, rv) - WriteTo([]byte(Fmt("\"%X\"", bytearray.Interface())), w, n, err) - } else { - WriteTo([]byte("["), w, n, err) - // Write elems - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - writeReflectJSON(elemRv, elemRt, w, n, err) - if i < length-1 { - WriteTo([]byte(","), w, n, err) - } - } - WriteTo([]byte("]"), w, n, err) - } - - case reflect.Slice: - elemRt := rt.Elem() - if elemRt.Kind() == reflect.Uint8 { - // Special case: Byteslices - byteslice := rv.Bytes() - WriteTo([]byte(Fmt("\"%X\"", byteslice)), w, n, err) - } else { - WriteTo([]byte("["), w, n, err) - // Write elems - length := rv.Len() - for i := 0; i < length; i++ { - elemRv := rv.Index(i) - writeReflectJSON(elemRv, elemRt, w, n, err) - if i < length-1 { - WriteTo([]byte(","), w, n, err) - } - } - WriteTo([]byte("]"), w, n, err) - } - - case reflect.Struct: - if rt == timeType { - // Special case: time.Time - t := rv.Interface().(time.Time).UTC() - str := t.Format(iso8601) - jsonBytes, err_ := json.Marshal(str) - if err_ != nil { - *err = err_ - return - } - WriteTo(jsonBytes, w, n, err) - } else { - WriteTo([]byte("{"), w, n, err) - wroteField := false - for _, fieldInfo := range typeInfo.Fields { - i, fieldType, opts := fieldInfo.unpack() - fieldRv := rv.Field(i) - if wroteField { - WriteTo([]byte(","), w, n, err) - } else { - wroteField = true - } - WriteTo([]byte(Fmt("\"%v\":", opts.JSONName)), w, n, err) - writeReflectJSON(fieldRv, fieldType, w, n, err) - } - WriteTo([]byte("}"), w, n, err) - } - - case reflect.String: - fallthrough - case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: - fallthrough - case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: - fallthrough - case reflect.Bool: - jsonBytes, err_ := json.Marshal(rv.Interface()) - if err_ != nil { - *err = err_ - return - } - WriteTo(jsonBytes, w, n, err) - - default: - PanicSanity(Fmt("Unknown field type %v", rt.Kind())) - } - -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect_test.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect_test.go deleted file mode 100644 index 509859124..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/reflect_test.go +++ /dev/null @@ -1,508 +0,0 @@ -package wire - -import ( - "bytes" - "fmt" - "reflect" - "testing" - "time" - - . "github.com/tendermint/go-common" -) - -type SimpleStruct struct { - String string - Bytes []byte - Time time.Time -} - -type Animal interface{} - -const ( - AnimalTypeCat = byte(0x01) - AnimalTypeDog = byte(0x02) - AnimalTypeSnake = byte(0x03) - AnimalTypeViper = byte(0x04) -) - -// Implements Animal -type Cat struct { - SimpleStruct -} - -// Implements Animal -type Dog struct { - SimpleStruct -} - -// Implements Animal -type Snake []byte - -// Implements Animal -type Viper struct { - Bytes []byte -} - -var _ = RegisterInterface( - struct{ Animal }{}, - ConcreteType{Cat{}, AnimalTypeCat}, - ConcreteType{Dog{}, AnimalTypeDog}, - ConcreteType{Snake{}, AnimalTypeSnake}, - ConcreteType{&Viper{}, AnimalTypeViper}, -) - -// TODO: add assertions here ... 
-func TestAnimalInterface(t *testing.T) { - var foo Animal - - // Type of pointer to Animal - rt := reflect.TypeOf(&foo) - fmt.Printf("rt: %v\n", rt) - - // Type of Animal itself. - // NOTE: normally this is acquired through other means - // like introspecting on method signatures, or struct fields. - rte := rt.Elem() - fmt.Printf("rte: %v\n", rte) - - // Get a new pointer to the interface - // NOTE: calling .Interface() is to get the actual value, - // instead of reflection values. - ptr := reflect.New(rte).Interface() - fmt.Printf("ptr: %v", ptr) - - // Make a binary byteslice that represents a *snake. - foo = Snake([]byte("snake")) - snakeBytes := BinaryBytes(foo) - snakeReader := bytes.NewReader(snakeBytes) - - // Now you can read it. - n, err := new(int64), new(error) - it := ReadBinary(foo, snakeReader, n, err).(Animal) - fmt.Println(it, reflect.TypeOf(it)) -} - -//------------------------------------- - -type Constructor func() interface{} -type Instantiator func() (o interface{}, ptr interface{}) -type Validator func(o interface{}, t *testing.T) - -type TestCase struct { - Constructor - Instantiator - Validator -} - -//------------------------------------- - -func constructBasic() interface{} { - cat := Cat{ - SimpleStruct{ - String: "String", - Bytes: []byte("Bytes"), - Time: time.Unix(123, 456789999), - }, - } - return cat -} - -func instantiateBasic() (interface{}, interface{}) { - return Cat{}, &Cat{} -} - -func validateBasic(o interface{}, t *testing.T) { - cat := o.(Cat) - if cat.String != "String" { - t.Errorf("Expected cat.String == 'String', got %v", cat.String) - } - if string(cat.Bytes) != "Bytes" { - t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) - } - if cat.Time.UnixNano() != 123456000000 { // Only milliseconds - t.Errorf("Expected cat.Time.UnixNano() == 123456000000, got %v", cat.Time.UnixNano()) - } -} - -//------------------------------------- - -type NilTestStruct struct { - IntPtr *int - CatPtr *Cat - Animal Animal -} - -func constructNilTestStruct() interface{} { - return NilTestStruct{} -} - -func instantiateNilTestStruct() (interface{}, interface{}) { - return NilTestStruct{}, &NilTestStruct{} -} - -func validateNilTestStruct(o interface{}, t *testing.T) { - nts := o.(NilTestStruct) - if nts.IntPtr != nil { - t.Errorf("Expected nts.IntPtr to be nil, got %v", nts.IntPtr) - } - if nts.CatPtr != nil { - t.Errorf("Expected nts.CatPtr to be nil, got %v", nts.CatPtr) - } - if nts.Animal != nil { - t.Errorf("Expected nts.Animal to be nil, got %v", nts.Animal) - } -} - -//------------------------------------- - -type ComplexStruct struct { - Name string - Animal Animal -} - -func constructComplex() interface{} { - c := ComplexStruct{ - Name: "Complex", - Animal: constructBasic(), - } - return c -} - -func instantiateComplex() (interface{}, interface{}) { - return ComplexStruct{}, &ComplexStruct{} -} - -func validateComplex(o interface{}, t *testing.T) { - c2 := o.(ComplexStruct) - if cat, ok := c2.Animal.(Cat); ok { - validateBasic(cat, t) - } else { - t.Errorf("Expected c2.Animal to be of type cat, got %v", reflect.ValueOf(c2.Animal).Elem().Type()) - } -} - -//------------------------------------- - -type ComplexStruct2 struct { - Cat Cat - Dog *Dog - Snake Snake - Snake2 *Snake - Viper Viper - Viper2 *Viper -} - -func constructComplex2() interface{} { - snake_ := Snake([]byte("hiss")) - snakePtr_ := &snake_ - - c := ComplexStruct2{ - Cat: Cat{ - SimpleStruct{ - String: "String", - Bytes: []byte("Bytes"), - }, - }, - Dog: &Dog{ - SimpleStruct{ - String: 
"Woof", - Bytes: []byte("Bark"), - }, - }, - Snake: Snake([]byte("hiss")), - Snake2: snakePtr_, - Viper: Viper{Bytes: []byte("hizz")}, - Viper2: &Viper{Bytes: []byte("hizz")}, - } - return c -} - -func instantiateComplex2() (interface{}, interface{}) { - return ComplexStruct2{}, &ComplexStruct2{} -} - -func validateComplex2(o interface{}, t *testing.T) { - c2 := o.(ComplexStruct2) - cat := c2.Cat - if cat.String != "String" { - t.Errorf("Expected cat.String == 'String', got %v", cat.String) - } - if string(cat.Bytes) != "Bytes" { - t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) - } - - dog := c2.Dog - if dog.String != "Woof" { - t.Errorf("Expected dog.String == 'Woof', got %v", dog.String) - } - if string(dog.Bytes) != "Bark" { - t.Errorf("Expected dog.Bytes == 'Bark', got %X", dog.Bytes) - } - - snake := c2.Snake - if string(snake) != "hiss" { - t.Errorf("Expected string(snake) == 'hiss', got %v", string(snake)) - } - - snake2 := c2.Snake2 - if string(*snake2) != "hiss" { - t.Errorf("Expected string(snake2) == 'hiss', got %v", string(*snake2)) - } - - viper := c2.Viper - if string(viper.Bytes) != "hizz" { - t.Errorf("Expected string(viper.Bytes) == 'hizz', got %v", string(viper.Bytes)) - } - - viper2 := c2.Viper2 - if string(viper2.Bytes) != "hizz" { - t.Errorf("Expected string(viper2.Bytes) == 'hizz', got %v", string(viper2.Bytes)) - } -} - -//------------------------------------- - -type ComplexStructArray struct { - Animals []Animal - Bytes [5]byte - Ints [5]int - Array SimpleArray -} - -func constructComplexArray() interface{} { - c := ComplexStructArray{ - Animals: []Animal{ - Cat{ - SimpleStruct{ - String: "String", - Bytes: []byte("Bytes"), - }, - }, - Dog{ - SimpleStruct{ - String: "Woof", - Bytes: []byte("Bark"), - }, - }, - Snake([]byte("hiss")), - &Viper{ - Bytes: []byte("hizz"), - }, - }, - Bytes: [5]byte{1, 10, 50, 100, 200}, - Ints: [5]int{1, 2, 3, 4, 5}, - Array: SimpleArray([5]byte{1, 10, 50, 100, 200}), - } - return c -} - -func instantiateComplexArray() (interface{}, interface{}) { - return ComplexStructArray{}, &ComplexStructArray{} -} - -func validateComplexArray(o interface{}, t *testing.T) { - c2 := o.(ComplexStructArray) - if cat, ok := c2.Animals[0].(Cat); ok { - if cat.String != "String" { - t.Errorf("Expected cat.String == 'String', got %v", cat.String) - } - if string(cat.Bytes) != "Bytes" { - t.Errorf("Expected cat.Bytes == 'Bytes', got %X", cat.Bytes) - } - } else { - t.Errorf("Expected c2.Animals[0] to be of type cat, got %v", reflect.ValueOf(c2.Animals[0]).Elem().Type()) - } - - if dog, ok := c2.Animals[1].(Dog); ok { - if dog.String != "Woof" { - t.Errorf("Expected dog.String == 'Woof', got %v", dog.String) - } - if string(dog.Bytes) != "Bark" { - t.Errorf("Expected dog.Bytes == 'Bark', got %X", dog.Bytes) - } - } else { - t.Errorf("Expected c2.Animals[1] to be of type dog, got %v", reflect.ValueOf(c2.Animals[1]).Elem().Type()) - } - - if snake, ok := c2.Animals[2].(Snake); ok { - if string(snake) != "hiss" { - t.Errorf("Expected string(snake) == 'hiss', got %v", string(snake)) - } - } else { - t.Errorf("Expected c2.Animals[2] to be of type Snake, got %v", reflect.ValueOf(c2.Animals[2]).Elem().Type()) - } - - if viper, ok := c2.Animals[3].(*Viper); ok { - if string(viper.Bytes) != "hizz" { - t.Errorf("Expected string(viper.Bytes) == 'hizz', got %v", string(viper.Bytes)) - } - } else { - t.Errorf("Expected c2.Animals[3] to be of type *Viper, got %v", reflect.ValueOf(c2.Animals[3]).Elem().Type()) - } -} - 
-//----------------------------------------------------------------------------- - -var testCases = []TestCase{} - -func init() { - testCases = append(testCases, TestCase{constructBasic, instantiateBasic, validateBasic}) - testCases = append(testCases, TestCase{constructComplex, instantiateComplex, validateComplex}) - testCases = append(testCases, TestCase{constructComplex2, instantiateComplex2, validateComplex2}) - testCases = append(testCases, TestCase{constructComplexArray, instantiateComplexArray, validateComplexArray}) - testCases = append(testCases, TestCase{constructNilTestStruct, instantiateNilTestStruct, validateNilTestStruct}) -} - -func TestBinary(t *testing.T) { - - for i, testCase := range testCases { - - log.Notice(fmt.Sprintf("Running test case %v", i)) - - // Construct an object - o := testCase.Constructor() - - // Write the object - data := BinaryBytes(o) - t.Logf("Binary: %X", data) - - instance, instancePtr := testCase.Instantiator() - - // Read onto a struct - n, err := new(int64), new(error) - res := ReadBinary(instance, bytes.NewReader(data), n, err) - if *err != nil { - t.Fatalf("Failed to read into instance: %v", *err) - } - - // Validate object - testCase.Validator(res, t) - - // Read onto a pointer - n, err = new(int64), new(error) - res = ReadBinaryPtr(instancePtr, bytes.NewReader(data), n, err) - if *err != nil { - t.Fatalf("Failed to read into instance: %v", *err) - } - - if res != instancePtr { - t.Errorf("Expected pointer to pass through") - } - - // Validate object - testCase.Validator(reflect.ValueOf(res).Elem().Interface(), t) - } - -} - -func TestJSON(t *testing.T) { - - for i, testCase := range testCases { - - log.Notice(fmt.Sprintf("Running test case %v", i)) - - // Construct an object - o := testCase.Constructor() - - // Write the object - data := JSONBytes(o) - t.Logf("JSON: %v", string(data)) - - instance, instancePtr := testCase.Instantiator() - - // Read onto a struct - err := new(error) - res := ReadJSON(instance, data, err) - if *err != nil { - t.Fatalf("Failed to read cat: %v", *err) - } - - // Validate object - testCase.Validator(res, t) - - // Read onto a pointer - res = ReadJSON(instancePtr, data, err) - if *err != nil { - t.Fatalf("Failed to read cat: %v", *err) - } - - if res != instancePtr { - t.Errorf("Expected pointer to pass through") - } - - // Validate object - testCase.Validator(reflect.ValueOf(res).Elem().Interface(), t) - } - -} - -//------------------------------------------------------------------------------ - -type Foo struct { - FieldA string `json:"fieldA"` // json field name is "fieldA" - FieldB string // json field name is "FieldB" - fieldC string // not exported, not serialized. -} - -func TestJSONFieldNames(t *testing.T) { - for i := 0; i < 20; i++ { // Try to ensure deterministic success. 
- foo := Foo{"a", "b", "c"} - stringified := string(JSONBytes(foo)) - expected := `{"fieldA":"a","FieldB":"b"}` - if stringified != expected { - t.Fatalf("JSONFieldNames error: expected %v, got %v", - expected, stringified) - } - } -} - -//------------------------------------------------------------------------------ - -func TestBadAlloc(t *testing.T) { - n, err := new(int64), new(error) - instance := new([]byte) - data := RandBytes(100 * 1024) - b := new(bytes.Buffer) - // this slice of data claims to be much bigger than it really is - WriteUvarint(uint(10000000000000000), b, n, err) - b.Write(data) - res := ReadBinary(instance, b, n, err) - fmt.Println(res, *err) -} - -//------------------------------------------------------------------------------ - -type SimpleArray [5]byte - -func TestSimpleArray(t *testing.T) { - var foo SimpleArray - - // Type of pointer to array - rt := reflect.TypeOf(&foo) - fmt.Printf("rt: %v\n", rt) // *binary.SimpleArray - - // Type of array itself. - // NOTE: normally this is acquired through other means - // like introspecting on method signatures, or struct fields. - rte := rt.Elem() - fmt.Printf("rte: %v\n", rte) // binary.SimpleArray - - // Get a new pointer to the array - // NOTE: calling .Interface() is to get the actual value, - // instead of reflection values. - ptr := reflect.New(rte).Interface() - fmt.Printf("ptr: %v\n", ptr) // &[0 0 0 0 0] - - // Make a simple int aray - fooArray := SimpleArray([5]byte{1, 10, 50, 100, 200}) - fooBytes := BinaryBytes(fooArray) - fooReader := bytes.NewReader(fooBytes) - - // Now you can read it. - n, err := new(int64), new(error) - it := ReadBinary(foo, fooReader, n, err).(SimpleArray) - - if !bytes.Equal(it[:], fooArray[:]) { - t.Errorf("Expected %v but got %v", fooArray, it) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/string.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/string.go deleted file mode 100644 index 5572a45bd..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/string.go +++ /dev/null @@ -1,33 +0,0 @@ -package wire - -import ( - "io" - - . "github.com/tendermint/go-common" -) - -// String - -func WriteString(s string, w io.Writer, n *int64, err *error) { - WriteVarint(len(s), w, n, err) - WriteTo([]byte(s), w, n, err) -} - -func ReadString(r io.Reader, n *int64, err *error) string { - length := ReadVarint(r, n, err) - if *err != nil { - return "" - } - if length < 0 { - *err = ErrBinaryReadSizeUnderflow - return "" - } - if MaxBinaryReadSize < MaxInt64(int64(length), *n+int64(length)) { - *err = ErrBinaryReadSizeOverflow - return "" - } - - buf := make([]byte, length) - ReadFull(buf, r, n, err) - return string(buf) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/time.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/time.go deleted file mode 100644 index e76c335ee..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/time.go +++ /dev/null @@ -1,27 +0,0 @@ -package wire - -import ( - "io" - "time" - - . "github.com/tendermint/go-common" -) - -/* -Writes nanoseconds since epoch but with millisecond precision. -This is to ease compatibility with Javascript etc. 
-*/ - -func WriteTime(t time.Time, w io.Writer, n *int64, err *error) { - nanosecs := t.UnixNano() - millisecs := nanosecs / 1000000 - WriteInt64(millisecs*1000000, w, n, err) -} - -func ReadTime(r io.Reader, n *int64, err *error) time.Time { - t := ReadInt64(r, n, err) - if t%1000000 != 0 { - PanicSanity("Time cannot have sub-millisecond precision") - } - return time.Unix(0, t) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/util.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/util.go deleted file mode 100644 index 6064b341e..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/util.go +++ /dev/null @@ -1,78 +0,0 @@ -package wire - -import ( - "bytes" - "crypto/sha256" - "encoding/json" - - "code.google.com/p/go.crypto/ripemd160" - - . "github.com/tendermint/go-common" -) - -func BinaryBytes(o interface{}) []byte { - w, n, err := new(bytes.Buffer), new(int64), new(error) - WriteBinary(o, w, n, err) - if *err != nil { - PanicSanity(*err) - } - return w.Bytes() -} - -func JSONBytes(o interface{}) []byte { - w, n, err := new(bytes.Buffer), new(int64), new(error) - WriteJSON(o, w, n, err) - if *err != nil { - PanicSanity(*err) - } - return w.Bytes() -} - -// NOTE: inefficient -func JSONBytesPretty(o interface{}) []byte { - jsonBytes := JSONBytes(o) - var object interface{} - err := json.Unmarshal(jsonBytes, &object) - if err != nil { - PanicSanity(err) - } - jsonBytes, err = json.MarshalIndent(object, "", "\t") - if err != nil { - PanicSanity(err) - } - return jsonBytes -} - -// NOTE: does not care about the type, only the binary representation. -func BinaryEqual(a, b interface{}) bool { - aBytes := BinaryBytes(a) - bBytes := BinaryBytes(b) - return bytes.Equal(aBytes, bBytes) -} - -// NOTE: does not care about the type, only the binary representation. -func BinaryCompare(a, b interface{}) int { - aBytes := BinaryBytes(a) - bBytes := BinaryBytes(b) - return bytes.Compare(aBytes, bBytes) -} - -// NOTE: only use this if you need 32 bytes. -func BinarySha256(o interface{}) []byte { - hasher, n, err := sha256.New(), new(int64), new(error) - WriteBinary(o, hasher, n, err) - if *err != nil { - PanicSanity(*err) - } - return hasher.Sum(nil) -} - -// NOTE: The default hash function is Ripemd160. -func BinaryRipemd160(o interface{}) []byte { - hasher, n, err := ripemd160.New(), new(int64), new(error) - WriteBinary(o, hasher, n, err) - if *err != nil { - PanicSanity(*err) - } - return hasher.Sum(nil) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/version.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/version.go deleted file mode 100644 index 58953255f..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package wire - -const Version = "0.5.0" diff --git a/Godeps/_workspace/src/github.com/tendermint/go-wire/wire.go b/Godeps/_workspace/src/github.com/tendermint/go-wire/wire.go deleted file mode 100644 index 258a68e23..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/go-wire/wire.go +++ /dev/null @@ -1,134 +0,0 @@ -package wire - -import ( - "encoding/json" - "errors" - "io" - "reflect" - - . "github.com/tendermint/go-common" -) - -// TODO document and maybe make it configurable. 
-const MaxBinaryReadSize = 21 * 1024 * 1024 - -var ErrBinaryReadSizeOverflow = errors.New("Error: binary read size overflow") -var ErrBinaryReadSizeUnderflow = errors.New("Error: binary read size underflow") - -func ReadBinary(o interface{}, r io.Reader, n *int64, err *error) interface{} { - rv, rt := reflect.ValueOf(o), reflect.TypeOf(o) - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - // This allows ReadBinaryObject() to return a nil pointer, - // if the value read is nil. - rvPtr := reflect.New(rt) - ReadBinaryPtr(rvPtr.Interface(), r, n, err) - return rvPtr.Elem().Interface() - } else { - readReflectBinary(rv, rt, Options{}, r, n, err) - return o - } - } else { - ptrRv := reflect.New(rt) - readReflectBinary(ptrRv.Elem(), rt, Options{}, r, n, err) - return ptrRv.Elem().Interface() - } -} - -func ReadBinaryPtr(o interface{}, r io.Reader, n *int64, err *error) interface{} { - rv, rt := reflect.ValueOf(o), reflect.TypeOf(o) - if rv.Kind() == reflect.Ptr { - readReflectBinary(rv.Elem(), rt.Elem(), Options{}, r, n, err) - } else { - PanicSanity("ReadBinaryPtr expects o to be a pointer") - } - return o -} - -func WriteBinary(o interface{}, w io.Writer, n *int64, err *error) { - rv := reflect.ValueOf(o) - rt := reflect.TypeOf(o) - writeReflectBinary(rv, rt, Options{}, w, n, err) -} - -func ReadJSON(o interface{}, bytes []byte, err *error) interface{} { - var object interface{} - *err = json.Unmarshal(bytes, &object) - if *err != nil { - return o - } - - return ReadJSONObject(o, object, err) -} - -func ReadJSONPtr(o interface{}, bytes []byte, err *error) interface{} { - var object interface{} - *err = json.Unmarshal(bytes, &object) - if *err != nil { - return o - } - - return ReadJSONObjectPtr(o, object, err) -} - -// o is the ultimate destination, object is the result of json unmarshal -func ReadJSONObject(o interface{}, object interface{}, err *error) interface{} { - rv, rt := reflect.ValueOf(o), reflect.TypeOf(o) - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - // This allows ReadJSONObject() to return a nil pointer - // if the value read is nil. - rvPtr := reflect.New(rt) - ReadJSONObjectPtr(rvPtr.Interface(), object, err) - return rvPtr.Elem().Interface() - } else { - readReflectJSON(rv, rt, object, err) - return o - } - } else { - ptrRv := reflect.New(rt) - readReflectJSON(ptrRv.Elem(), rt, object, err) - return ptrRv.Elem().Interface() - } -} - -func ReadJSONObjectPtr(o interface{}, object interface{}, err *error) interface{} { - rv, rt := reflect.ValueOf(o), reflect.TypeOf(o) - if rv.Kind() == reflect.Ptr { - readReflectJSON(rv.Elem(), rt.Elem(), object, err) - } else { - PanicSanity("ReadJSON(Object)Ptr expects o to be a pointer") - } - return o -} - -func WriteJSON(o interface{}, w io.Writer, n *int64, err *error) { - rv := reflect.ValueOf(o) - rt := reflect.TypeOf(o) - if rv.Kind() == reflect.Ptr { - rv, rt = rv.Elem(), rt.Elem() - } - writeReflectJSON(rv, rt, w, n, err) -} - -// Write all of bz to w -// Increment n and set err accordingly. -func WriteTo(bz []byte, w io.Writer, n *int64, err *error) { - if *err != nil { - return - } - n_, err_ := w.Write(bz) - *n += int64(n_) - *err = err_ -} - -// Read len(buf) from r -// Increment n and set err accordingly. 
-func ReadFull(buf []byte, r io.Reader, n *int64, err *error) { - if *err != nil { - return - } - n_, err_ := io.ReadFull(r, buf) - *n += int64(n_) - *err = err_ -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/.travis.yml b/Godeps/_workspace/src/github.com/tendermint/log15/.travis.yml deleted file mode 100644 index a66c49b9c..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - release - - tip diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/CONTRIBUTORS b/Godeps/_workspace/src/github.com/tendermint/log15/CONTRIBUTORS deleted file mode 100644 index a0866713b..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/CONTRIBUTORS +++ /dev/null @@ -1,11 +0,0 @@ -Contributors to log15: - -- Aaron L -- Alan Shreve -- Chris Hines -- Ciaran Downey -- Dmitry Chestnykh -- Evan Shaw -- Péter Szilágyi -- Trevor Gattis -- Vincent Vanackere diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/LICENSE b/Godeps/_workspace/src/github.com/tendermint/log15/LICENSE deleted file mode 100644 index 5f0d1fb6a..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/README.md b/Godeps/_workspace/src/github.com/tendermint/log15/README.md deleted file mode 100644 index 49313fffa..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/README.md +++ /dev/null @@ -1,60 +0,0 @@ -![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png) - -# log15 [![godoc reference](https://godoc.org/gopkg.in/inconshreveable/log15.v2?status.png)](https://godoc.org/gopkg.in/inconshreveable/log15.v2) - -Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. - -## Features -- A simple, easy-to-understand API -- Promotes structured logging by encouraging use of key/value pairs -- Child loggers which inherit and add their own private context -- Lazy evaluation of expensive operations -- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API. -- Color terminal support -- Built-in support for logging to files, streams, syslog, and the network -- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more - -## Versioning -The API of the master branch of log15 should always be considered unstable. Using a stable version -of the log15 package is supported by gopkg.in. 
Include your dependency like so: - -```go -import log "gopkg.in/inconshreveable/log15.v2" -``` - -## Examples - -```go -// all loggers can have key/value context -srvlog := log.New("module", "app/server") - -// all log messages can have key/value context -srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate) - -// child loggers with inherited context -connlog := srvlog.New("raddr", c.RemoteAddr()) -connlog.Info("connection open") - -// lazy evaluation -connlog.Debug("ping remote", "latency", log.Lazy(pingRemote)) - -// flexible configuration -srvlog.SetHandler(log.MultiHandler( - log.StreamHandler(os.Stderr, log.LogfmtFormat()), - log.LvlFilterHandler( - log.LvlError, - log.Must.FileHandler("errors.json", log.JsonHandler()))) -``` - -## FAQ - -### The varargs style is brittle and error prone! Can I have type safety please? -Yes. Use `log.Ctx`: - -```go -srvlog := log.New(log.Ctx{"module": "app/server"}) -srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate}) -``` - -## License -Apache diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/RELEASING.md b/Godeps/_workspace/src/github.com/tendermint/log15/RELEASING.md deleted file mode 100644 index 589a4dcc6..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/RELEASING.md +++ /dev/null @@ -1,19 +0,0 @@ -# log15's release strategy - -log15 uses gopkg.in to manage versioning releases so that consumers who don't vendor dependencies can rely upon a stable API. - -## Master - -Master is considered to have no API stability guarantee, so merging new code that passes tests into master is always okay. - -## Releasing a new API-compatible version - -The process to release a new API-compatible version is described below. For the purposes of this example, we'll assume you're trying to release a new version of v2 - -1. `git checkout v2` -1. `git merge master` -1. Audit the code for any imports of sub-packages. Modify any import references from `github.com/inconshrevealbe/log15/` -> `gopkg.in/inconshreveable/log15.v2/` -1. `git commit` -1. `git tag`, find the latest tag of the style v2.X. -1. `git tag v2.X+1` If the last version was v2.6, you would run `git tag v2.7` -1. 
`git push --tags git@github.com:inconshreveable/log15.git v2` diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/bench_test.go b/Godeps/_workspace/src/github.com/tendermint/log15/bench_test.go deleted file mode 100644 index e692e6193..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/bench_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package log15 - -import ( - "bytes" - "testing" - "time" -) - -func BenchmarkStreamNoCtx(b *testing.B) { - lg := New() - - buf := bytes.Buffer{} - lg.SetHandler(StreamHandler(&buf, LogfmtFormat())) - - for i := 0; i < b.N; i++ { - lg.Info("test message") - buf.Reset() - } -} - -func BenchmarkDiscard(b *testing.B) { - lg := New() - lg.SetHandler(DiscardHandler()) - - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkCallerFileHandler(b *testing.B) { - lg := New() - lg.SetHandler(CallerFileHandler(DiscardHandler())) - - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkCallerFuncHandler(b *testing.B) { - lg := New() - lg.SetHandler(CallerFuncHandler(DiscardHandler())) - - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkLogfmtNoCtx(b *testing.B) { - r := Record{ - Time: time.Now(), - Lvl: LvlInfo, - Msg: "test message", - Ctx: []interface{}{}, - } - - logfmt := LogfmtFormat() - for i := 0; i < b.N; i++ { - logfmt.Format(&r) - } -} - -func BenchmarkJsonNoCtx(b *testing.B) { - r := Record{ - Time: time.Now(), - Lvl: LvlInfo, - Msg: "test message", - Ctx: []interface{}{}, - } - - jsonfmt := JsonFormat() - for i := 0; i < b.N; i++ { - jsonfmt.Format(&r) - } -} - -func BenchmarkMultiLevelFilter(b *testing.B) { - handler := MultiHandler( - LvlFilterHandler(LvlDebug, DiscardHandler()), - LvlFilterHandler(LvlError, DiscardHandler()), - ) - - lg := New() - lg.SetHandler(handler) - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkDescendant1(b *testing.B) { - lg := New() - lg.SetHandler(DiscardHandler()) - lg = lg.New() - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkDescendant2(b *testing.B) { - lg := New() - lg.SetHandler(DiscardHandler()) - for i := 0; i < 2; i++ { - lg = lg.New() - } - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkDescendant4(b *testing.B) { - lg := New() - lg.SetHandler(DiscardHandler()) - for i := 0; i < 4; i++ { - lg = lg.New() - } - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} - -func BenchmarkDescendant8(b *testing.B) { - lg := New() - lg.SetHandler(DiscardHandler()) - for i := 0; i < 8; i++ { - lg = lg.New() - } - for i := 0; i < b.N; i++ { - lg.Info("test message") - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/doc.go b/Godeps/_workspace/src/github.com/tendermint/log15/doc.go deleted file mode 100644 index e60af69be..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/doc.go +++ /dev/null @@ -1,302 +0,0 @@ -/* -Package log15 provides an opinionated, simple toolkit for best-practice logging that is -both human and machine readable. It is modeled after the standard library's io and net/http -packages. - -This package enforces you to only log key/value pairs. Keys must be strings. Values may be -any type that you like. The default output format is logfmt, but you may also choose to use -JSON instead if that suits you. 
Here's how you log: - - log.Info("page accessed", "path", r.URL.Path, "user_id", user.id) - -This will output a line that looks like: - - lvl=info t=2014-05-02T16:07:23-0700 msg="page access" path=/org/71/profile user_id=9 - -Getting Started - -To get started, you'll want to import the library: - - import log "gopkg.in/inconshreveable/log15.v2" - - -Now you're ready to start logging: - - func main() { - log.Info("Program starting", "args", os.Args()) - } - - -Convention - -Because recording a human-meaningful message is common and good practice, the first argument to every -logging method is the value to the *implicit* key 'msg'. - -Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so -will the current timestamp with key 't'. - -You may supply any additional context as a set of key/value pairs to the logging function. log15 allows -you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for -logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate -in the variadic argument list: - - log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val) - -If you really do favor your type-safety, you may choose to pass a log.Ctx instead: - - log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val}) - - -Context loggers - -Frequently, you want to add context to a logger so that you can track actions associated with it. An http -request is a good example. You can easily create new loggers that have context that is automatically included -with each log line: - - requestlogger := log.New("path", r.URL.Path) - - // later - requestlogger.Debug("db txn commit", "duration", txnTimer.Finish()) - -This will output a log line that includes the path context that is attached to the logger: - - lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12 - - -Handlers - -The Handler interface defines where log lines are printed to and how they are formated. Handler is a -single interface that is inspired by net/http's handler interface: - - type Handler interface { - Log(r *Record) - } - - -Handlers can filter records, format them, or dispatch to multiple other Handlers. -This package implements a number of Handlers for common logging patterns that are -can be easily composed to create flexible, custom logging structures. - -Here's an example handler that prints logfmt output to Stdout: - - handler := log.StreamHandler(os.Stdout, log.LogfmtFormat()) - -Here's an example handler that defers to two other handlers. One handler only prints records -from the rpc package in logfmt to standard out. The other prints records at Error level -or above in JSON formatted output to the file /var/log/service.json - - handler := log.MultiHandler( - log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())), - log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler()) - ) - -Custom Handlers - -The Handler interface is so simple that it's also trivial to write your own. Let's create an -example handler which tries to write to one handler, but if that fails it falls back to -writing to another handler and includes the error that it encountered when trying to write -to the primary. This might be useful when trying to log over a network socket, but if that -fails you want to log those records to a file on disk. 
- - type BackupHandler struct { - Primary Handler - Secondary Handler - } - - func (h *BackupHandler) Log (r *Record) error { - err := h.Primary.Log(r) - if err != nil { - r.Ctx = append(ctx, "primary_err", err) - return h.Secondary.Log(r) - } - return nil - } - -This pattern is so useful that a generic version that handles an arbitrary number of Handlers -is included as part of this library called FailoverHandler. - -Logging Expensive Operations - -Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay -the price of computing them if you haven't turned up your logging level to a high level of detail. - -This package provides a simple type to annotate a logging operation that you want to be evaluated -lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler -filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example: - - func factorRSAKey() (factors []int) { - // return the factors of a very large number - } - - log.Debug("factors", log.Lazy{factorRSAKey}) - -If this message is not logged for any reason (like logging at the Error level), then -factorRSAKey is never evaluated. - -Dynamic context values - -The same log.Lazy mechanism can be used to attach context to a logger which you want to be -evaluated when the message is logged, but not when the logger is created. For example, let's imagine -a game where you have Player objects: - - type Player struct { - name string - alive bool - log.Logger - } - -You always want to log a player's name and whether they're alive or dead, so when you create the player -object, you might do: - - p := &Player{name: name, alive: true} - p.Logger = log.New("name", p.name, "alive", p.alive) - -Only now, even after a player has died, the logger will still report they are alive because the logging -context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation -of whether the player is alive or not to each log message, so that the log records will reflect the player's -current state no matter when the log message is written: - - p := &Player{name: name, alive: true} - isAlive := func() bool { return p.alive } - player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive}) - -Terminal Format - -If log15 detects that stdout is a terminal, it will configure the default -handler for it (which is log.StdoutHandler) to use TerminalFormat. This format -logs records nicely for your terminal, including color-coded output based -on log level. - -Error Handling - -Becasuse log15 allows you to step around the type system, there are a few ways you can specify -invalid arguments to the logging functions. You could, for example, wrap something that is not -a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries -are typically the mechanism by which errors are reported, it would be onerous for the logging functions -to return errors. Instead, log15 handles errors by making these guarantees to you: - -- Any log record containing an error will still be printed with the error explained to you as part of the log record. - -- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily -(and if you like, automatically) detect if any of your logging calls are passing bad values. - -Understanding this, you might wonder why the Handler interface can return an error value in its Log method. 
Handlers -are encouraged to return errors only if they fail to write their log records out to an external source like if the -syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures -like the FailoverHandler. - -Library Use - -log15 is intended to be useful for library authors as a way to provide configurable logging to -users of their library. Best practice for use in a library is to always disable all output for your logger -by default and to provide a public Logger instance that consumers of your library can configure. Like so: - - package yourlib - - import "gopkg.in/inconshreveable/log15.v2" - - var Log = log.New() - - func init() { - Log.SetHandler(log.DiscardHandler()) - } - -Users of your library may then enable it if they like: - - import "gopkg.in/inconshreveable/log15.v2" - import "example.com/yourlib" - - func main() { - handler := // custom handler setup - yourlib.Log.SetHandler(handler) - } - -Best practices attaching logger context - -The ability to attach context to a logger is a powerful one. Where should you do it and why? -I favor embedding a Logger directly into any persistent object in my application and adding -unique, tracing context keys to it. For instance, imagine I am writing a web browser: - - type Tab struct { - url string - render *RenderingContext - // ... - - Logger - } - - func NewTab(url string) *Tab { - return &Tab { - // ... - url: url, - - Logger: log.New("url", url), - } - } - -When a new tab is created, I assign a logger to it with the url of -the tab as context so it can easily be traced through the logs. -Now, whenever we perform any operation with the tab, we'll log with its -embedded logger and it will include the tab title automatically: - - tab.Debug("moved position", "idx", tab.idx) - -There's only one problem. What if the tab url changes? We could -use log.Lazy to make sure the current url is always written, but that -would mean that we couldn't trace a tab's full lifetime through our -logs after the user navigate to a new URL. - -Instead, think about what values to attach to your loggers the -same way you think about what to use as a key in a SQL database schema. -If it's possible to use a natural key that is unique for the lifetime of the -object, do so. But otherwise, log15's ext package has a handy RandId -function to let you generate what you might call "surrogate keys" -They're just random hex identifiers to use for tracing. Back to our -Tab example, we would prefer to set up our Logger like so: - - import logext "gopkg.in/inconshreveable/log15.v2/ext" - - t := &Tab { - // ... - url: url, - } - - t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl}) - return t - -Now we'll have a unique traceable identifier even across loading new urls, but -we'll still be able to see the tab's current url in the log messages. - -Must - -For all Handler functions which can return an error, there is a version of that -function which will return no error but panics on failure. They are all available -on the Must object. 
For example: - - log.Must.FileHandler("/path", log.JsonFormat) - log.Must.NetHandler("tcp", ":1234", log.JsonFormat) - -Inspiration and Credit - -All of the following excellent projects inspired the design of this library: - -code.google.com/p/log4go - -github.com/op/go-logging - -github.com/technoweenie/grohl - -github.com/Sirupsen/logrus - -github.com/kr/logfmt - -github.com/spacemonkeygo/spacelog - -golang's stdlib, notably io and net/http - -The Name - -https://xkcd.com/927/ - -*/ -package log15 diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/ext/ext_test.go b/Godeps/_workspace/src/github.com/tendermint/log15/ext/ext_test.go deleted file mode 100644 index be276b32f..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/ext/ext_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package ext - -import ( - "errors" - log "github.com/inconshreveable/log15" - "math" - "testing" -) - -func testHandler() (log.Handler, *log.Record) { - rec := new(log.Record) - return log.FuncHandler(func(r *log.Record) error { - *rec = *r - return nil - }), rec -} - -func TestHotSwapHandler(t *testing.T) { - t.Parallel() - - h1, r1 := testHandler() - - l := log.New() - h := HotSwapHandler(h1) - l.SetHandler(h) - - l.Info("to h1") - if r1.Msg != "to h1" { - t.Fatalf("didn't get expected message to h1") - } - - h2, r2 := testHandler() - h.Swap(h2) - l.Info("to h2") - if r2.Msg != "to h2" { - t.Fatalf("didn't get expected message to h2") - } -} - -func TestSpeculativeHandler(t *testing.T) { - t.Parallel() - - // test with an even multiple of the buffer size, less than full buffer size - // and not a multiple of the buffer size - for _, count := range []int{10000, 50, 432} { - recs := make(chan *log.Record) - done := make(chan int) - spec := SpeculativeHandler(100, log.ChannelHandler(recs)) - - go func() { - defer close(done) - expectedCount := int(math.Min(float64(count), float64(100))) - expectedIdx := count - expectedCount - for r := range recs { - if r.Ctx[1] != expectedIdx { - t.Errorf("Bad ctx 'i', got %d expected %d", r.Ctx[1], expectedIdx) - return - } - expectedIdx++ - expectedCount-- - - if expectedCount == 0 { - // got everything we expected - break - } - } - - select { - case <-recs: - t.Errorf("got an extra record we shouldn't have!") - default: - } - }() - - lg := log.New() - lg.SetHandler(spec) - for i := 0; i < count; i++ { - lg.Debug("test speculative", "i", i) - } - - go spec.Flush() - - // wait for the go routine to finish - <-done - } -} - -func TestErrorHandler(t *testing.T) { - t.Parallel() - - h, r := testHandler() - lg := log.New() - lg.SetHandler(EscalateErrHandler( - log.LvlFilterHandler(log.LvlError, h))) - - lg.Debug("some function result", "err", nil) - if r.Msg != "" { - t.Fatalf("Expected debug level message to be filtered") - } - - lg.Debug("some function result", "err", errors.New("failed operation")) - if r.Msg != "some function result" { - t.Fatalf("Expected debug level message to be escalated and pass lvlfilter") - } - - if r.Lvl != log.LvlError { - t.Fatalf("Expected debug level message to be escalated to LvlError") - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/ext/handler.go b/Godeps/_workspace/src/github.com/tendermint/log15/ext/handler.go deleted file mode 100644 index e0c5a9ed1..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/ext/handler.go +++ /dev/null @@ -1,116 +0,0 @@ -package ext - -import ( - "sync" - "sync/atomic" - "unsafe" - - log "github.com/inconshreveable/log15" -) - -// EscalateErrHandler wraps another 
handler and passes all records through -// unchanged except if the logged context contains a non-nil error -// value in its context. In that case, the record's level is raised -// to LvlError unless it was already more serious (LvlCrit). -// -// This allows you to log the result of all functions for debugging -// and still capture error conditions when in production with a single -// log line. As an example, the following the log record will be written -// out only if there was an error writing a value to redis: -// -// logger := logext.EscalateErrHandler( -// log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler)) -// -// reply, err := redisConn.Do("SET", "foo", "bar") -// logger.Debug("Wrote value to redis", "reply", reply, "err", err) -// if err != nil { -// return err -// } -// -func EscalateErrHandler(h log.Handler) log.Handler { - return log.FuncHandler(func(r *log.Record) error { - if r.Lvl > log.LvlError { - for i := 1; i < len(r.Ctx); i++ { - if v, ok := r.Ctx[i].(error); ok && v != nil { - r.Lvl = log.LvlError - break - } - } - } - return h.Log(r) - }) -} - -// SpeculativeHandler is a handler for speculative logging. It -// keeps a ring buffer of the given size full of the last events -// logged into it. When Flush is called, all buffered log records -// are written to the wrapped handler. This is extremely for -// continuosly capturing debug level output, but only flushing those -// log records if an exceptional condition is encountered. -func SpeculativeHandler(size int, h log.Handler) *Speculative { - return &Speculative{ - handler: h, - recs: make([]*log.Record, size), - } -} - -type Speculative struct { - mu sync.Mutex - idx int - recs []*log.Record - handler log.Handler - full bool -} - -func (h *Speculative) Log(r *log.Record) error { - h.mu.Lock() - defer h.mu.Unlock() - h.recs[h.idx] = r - h.idx = (h.idx + 1) % len(h.recs) - h.full = h.full || h.idx == 0 - return nil -} - -func (h *Speculative) Flush() { - recs := make([]*log.Record, 0) - func() { - h.mu.Lock() - defer h.mu.Unlock() - if h.full { - recs = append(recs, h.recs[h.idx:]...) - } - recs = append(recs, h.recs[:h.idx]...) - - // reset state - h.full = false - h.idx = 0 - }() - - // don't hold the lock while we flush to the wrapped handler - for _, r := range recs { - h.handler.Log(r) - } -} - -// HotSwapHandler wraps another handler that may swapped out -// dynamically at runtime in a thread-safe fashion. -// HotSwapHandler is the same functionality -// used to implement the SetHandler method for the default -// implementation of Logger. -func HotSwapHandler(h log.Handler) *HotSwap { - hs := new(HotSwap) - hs.Swap(h) - return hs -} - -type HotSwap struct { - handler unsafe.Pointer -} - -func (h *HotSwap) Log(r *log.Record) error { - return (*(*log.Handler)(atomic.LoadPointer(&h.handler))).Log(r) -} - -func (h *HotSwap) Swap(newHandler log.Handler) { - atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/ext/id.go b/Godeps/_workspace/src/github.com/tendermint/log15/ext/id.go deleted file mode 100644 index 0bfb1551f..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/ext/id.go +++ /dev/null @@ -1,47 +0,0 @@ -package ext - -import ( - "fmt" - "math/rand" - "sync" - "time" -) - -var r = rand.New(&lockedSource{src: rand.NewSource(time.Now().Unix())}) - -// RandId creates a random identifier of the requested length. 
-// Useful for assigning mostly-unique identifiers for logging -// and identification that are unlikely to collide because of -// short lifespan or low set cardinality -func RandId(idlen int) string { - b := make([]byte, idlen) - var randVal uint32 - for i := 0; i < idlen; i++ { - byteIdx := i % 4 - if byteIdx == 0 { - randVal = r.Uint32() - } - b[i] = byte((randVal >> (8 * uint(byteIdx))) & 0xFF) - } - return fmt.Sprintf("%x", b) -} - -// lockedSource is a wrapper to allow a rand.Source to be used -// concurrently (same type as the one used internally in math/rand). -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/format.go b/Godeps/_workspace/src/github.com/tendermint/log15/format.go deleted file mode 100644 index 740433b3c..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/format.go +++ /dev/null @@ -1,252 +0,0 @@ -package log15 - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" -) - -const ( - timeFormat = "2006-01-02T15:04:05-0700" - termTimeFormat = "01-02|15:04:05" - floatFormat = 'f' - termMsgJust = 40 -) - -type Format interface { - Format(r *Record) []byte -} - -// FormatFunc returns a new Format object which uses -// the given function to perform record formatting. -func FormatFunc(f func(*Record) []byte) Format { - return formatFunc(f) -} - -type formatFunc func(*Record) []byte - -func (f formatFunc) Format(r *Record) []byte { - return f(r) -} - -// TerminalFormat formats log records optimized for human readability on -// a terminal with color-coded level output and terser human friendly timestamp. -// This format should only be used for interactive programs or while developing. -// -// [TIME] [LEVEL] MESAGE key=value key=value ... -// -// Example: -// -// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002 -// -func TerminalFormat() Format { - return FormatFunc(func(r *Record) []byte { - var color = 0 - switch r.Lvl { - case LvlCrit: - color = 35 - case LvlError: - color = 31 - case LvlWarn: - color = 33 - case LvlNotice: - color = 32 - case LvlInfo: - color = 34 - case LvlDebug: - color = 36 - } - - b := &bytes.Buffer{} - lvl := strings.ToUpper(r.Lvl.String()) - if color > 0 { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg) - } else { - fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg) - } - - // try to justify the log output for short messages - if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust { - b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg))) - } - - // print the keys logfmt style - logfmt(b, r.Ctx, color) - return b.Bytes() - }) -} - -// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable -// format for key/value pairs. 
-// -// For more details see: http://godoc.org/github.com/kr/logfmt -// -func LogfmtFormat() Format { - return FormatFunc(func(r *Record) []byte { - common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg} - buf := &bytes.Buffer{} - logfmt(buf, append(common, r.Ctx...), 0) - return buf.Bytes() - }) -} - -func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) { - for i := 0; i < len(ctx); i += 2 { - if i != 0 { - buf.WriteByte(' ') - } - - k, ok := ctx[i].(string) - v := formatLogfmtValue(ctx[i+1]) - if !ok { - k, v = errorKey, formatLogfmtValue(k) - } - - // XXX: we should probably check that all of your key bytes aren't invalid - if color > 0 { - fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v) - } else { - fmt.Fprintf(buf, "%s=%s", k, v) - } - } - - buf.WriteByte('\n') -} - -// JsonFormat formats log records as JSON objects separated by newlines. -// It is the equivalent of JsonFormatEx(false, true). -func JsonFormat() Format { - return JsonFormatEx(false, true) -} - -// JsonFormatEx formats log records as JSON objects. If pretty is true, -// records will be pretty-printed. If lineSeparated is true, records -// will be logged with a new line between each record. -func JsonFormatEx(pretty, lineSeparated bool) Format { - jsonMarshal := json.Marshal - if pretty { - jsonMarshal = func(v interface{}) ([]byte, error) { - return json.MarshalIndent(v, "", " ") - } - } - - return FormatFunc(func(r *Record) []byte { - props := make(map[string]interface{}) - - props[r.KeyNames.Time] = r.Time - props[r.KeyNames.Lvl] = r.Lvl - props[r.KeyNames.Msg] = r.Msg - - for i := 0; i < len(r.Ctx); i += 2 { - k, ok := r.Ctx[i].(string) - if !ok { - props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i]) - } - props[k] = formatJsonValue(r.Ctx[i+1]) - } - - b, err := jsonMarshal(props) - if err != nil { - b, _ = jsonMarshal(map[string]string{ - errorKey: err.Error(), - }) - return b - } - - if lineSeparated { - b = append(b, '\n') - } - - return b - }) -} - -func formatShared(value interface{}) interface{} { - switch v := value.(type) { - case time.Time: - return v.Format(timeFormat) - - case error: - return v.Error() - - case fmt.Stringer: - return v.String() - - default: - return v - } -} - -func formatJsonValue(value interface{}) interface{} { - value = formatShared(value) - switch value.(type) { - case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string: - return value - default: - return fmt.Sprintf("%+v", value) - } -} - -// formatValue formats a value for serialization -func formatLogfmtValue(value interface{}) string { - if value == nil { - return "nil" - } - - value = formatShared(value) - switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case float32: - return strconv.FormatFloat(float64(v), floatFormat, 3, 64) - case float64: - return strconv.FormatFloat(v, floatFormat, 3, 64) - case int, int8, int16, int32, int64, uint, uint16, uint32, uint64: - return fmt.Sprintf("%d", value) - case string: - return escapeString(v) - case uint8: - return fmt.Sprintf("%X", value) - case []byte: - return fmt.Sprintf("%X", value) - default: - return escapeString(fmt.Sprintf("%+v", value)) - } -} - -func escapeString(s string) string { - needQuotes := false - e := bytes.Buffer{} - e.WriteByte('"') - for _, r := range s { - if r <= ' ' || r == '=' || r == '"' { - needQuotes = true - } - - switch r { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(byte(r)) - case '\n': - e.WriteByte('\\') - 
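For reference, a small sketch of how the Format constructors above are typically paired with StreamHandler. Illustrative only; the import path is assumed to be the upstream github.com/inconshreveable/log15.

    package main

    import (
        "os"

        log "github.com/inconshreveable/log15"
    )

    func main() {
        l := log.New("module", "demo")

        // Machine-parseable but human-readable key=value pairs.
        l.SetHandler(log.StreamHandler(os.Stdout, log.LogfmtFormat()))
        l.Info("using logfmt", "x", 1) // t=... lvl=info msg="using logfmt" module=demo x=1

        // One pretty-printed JSON object per record, newline separated.
        l.SetHandler(log.StreamHandler(os.Stdout, log.JsonFormatEx(true, true)))
        l.Info("using json", "x", 1)
    }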
e.WriteByte('n') - case '\r': - e.WriteByte('\\') - e.WriteByte('r') - case '\t': - e.WriteByte('\\') - e.WriteByte('t') - default: - e.WriteRune(r) - } - } - e.WriteByte('"') - start, stop := 0, e.Len() - if !needQuotes { - start, stop = 1, stop-1 - } - return string(e.Bytes()[start:stop]) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/handler.go b/Godeps/_workspace/src/github.com/tendermint/log15/handler.go deleted file mode 100644 index 4c771b4ba..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/handler.go +++ /dev/null @@ -1,387 +0,0 @@ -package log15 - -import ( - "bytes" - "fmt" - "io" - "net" - "os" - "reflect" - "sync" - "sync/atomic" - "unsafe" - - "github.com/inconshreveable/log15/stack" -) - -// A Logger prints its log records by writing to a Handler. -// The Handler interface defines where and how log records are written. -// Handlers are composable, providing you great flexibility in combining -// them to achieve the logging structure that suits your applications. -type Handler interface { - Log(r *Record) error -} - -// FuncHandler returns a Handler that logs records with the given -// function. -func FuncHandler(fn func(r *Record) error) Handler { - return funcHandler(fn) -} - -type funcHandler func(r *Record) error - -func (h funcHandler) Log(r *Record) error { - return h(r) -} - -// StreamHandler writes log records to an io.Writer -// with the given format. StreamHandler can be used -// to easily begin writing log records to other -// outputs. -// -// StreamHandler wraps itself with LazyHandler and SyncHandler -// to evaluate Lazy objects and perform safe concurrent writes. -func StreamHandler(wr io.Writer, fmtr Format) Handler { - h := FuncHandler(func(r *Record) error { - _, err := wr.Write(fmtr.Format(r)) - return err - }) - return LazyHandler(SyncHandler(h)) -} - -// SyncHandler can be wrapped around a handler to guarantee that -// only a single Log operation can proceed at a time. It's necessary -// for thread-safe concurrent writes. -func SyncHandler(h Handler) Handler { - var mu sync.Mutex - return FuncHandler(func(r *Record) error { - defer mu.Unlock() - mu.Lock() - return h.Log(r) - }) -} - -// FileHandler returns a handler which writes log records to the give file -// using the given format. If the path -// already exists, FileHandler will append to the given file. If it does not, -// FileHandler will create the file with mode 0644. -func FileHandler(path string, fmtr Format) (Handler, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - return closingHandler{f, StreamHandler(f, fmtr)}, nil -} - -// NetHandler opens a socket to the given address and writes records -// over the connection. -func NetHandler(network, addr string, fmtr Format) (Handler, error) { - conn, err := net.Dial(network, addr) - if err != nil { - return nil, err - } - - return closingHandler{conn, StreamHandler(conn, fmtr)}, nil -} - -// XXX: closingHandler is essentially unused at the moment -// it's meant for a future time when the Handler interface supports -// a possible Close() operation -type closingHandler struct { - io.WriteCloser - Handler -} - -func (h *closingHandler) Close() error { - return h.WriteCloser.Close() -} - -// CallerFileHandler returns a Handler that adds the line number and file of -// the calling function to the context with key "caller". 
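A short, hypothetical sketch of FileHandler as documented above; the path /tmp/app.log is made up and the import path assumed. FileHandler appends to an existing file or creates it with mode 0644, and because it is built on StreamHandler it already performs lazy evaluation and synchronized writes.

    package main

    import log "github.com/inconshreveable/log15"

    func main() {
        h, err := log.FileHandler("/tmp/app.log", log.LogfmtFormat())
        if err != nil {
            log.Crit("cannot open log file", "err", err)
            return
        }

        l := log.New("service", "demo")
        l.SetHandler(h)
        l.Info("started") // appended to /tmp/app.log in logfmt
    }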
-func CallerFileHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - call := stack.Call(r.CallPC[0]) - r.Ctx = append(r.Ctx, "caller", fmt.Sprint(call)) - return h.Log(r) - }) -} - -// CallerFuncHandler returns a Handler that adds the calling function name to -// the context with key "fn". -func CallerFuncHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - call := stack.Call(r.CallPC[0]) - r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", call)) - return h.Log(r) - }) -} - -// CallerStackHandler returns a Handler that adds a stack trace to the context -// with key "stack". The stack trace is formated as a space separated list of -// call sites inside matching []'s. The most recent call site is listed first. -// Each call site is formatted according to format. See the documentation of -// log15/stack.Call.Format for the list of supported formats. -func CallerStackHandler(format string, h Handler) Handler { - return FuncHandler(func(r *Record) error { - s := stack.Callers(). - TrimBelow(stack.Call(r.CallPC[0])). - TrimRuntime() - if len(s) > 0 { - buf := &bytes.Buffer{} - buf.WriteByte('[') - for i, pc := range s { - if i > 0 { - buf.WriteByte(' ') - } - fmt.Fprintf(buf, format, pc) - } - buf.WriteByte(']') - r.Ctx = append(r.Ctx, "stack", buf.String()) - } - return h.Log(r) - }) -} - -// FilterHandler returns a Handler that only writes records to the -// wrapped Handler if the given function evaluates true. For example, -// to only log records where the 'err' key is not nil: -// -// logger.SetHandler(FilterHandler(func(r *Record) bool { -// for i := 0; i < len(r.Ctx); i += 2 { -// if r.Ctx[i] == "err" { -// return r.Ctx[i+1] != nil -// } -// } -// return false -// }, h)) -// -func FilterHandler(fn func(r *Record) bool, h Handler) Handler { - return FuncHandler(func(r *Record) error { - if fn(r) { - return h.Log(r) - } - return nil - }) -} - -// MatchFilterHandler returns a Handler that only writes records -// to the wrapped Handler if the given key in the logged -// context matches the value. For example, to only log records -// from your ui package: -// -// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler) -// -func MatchFilterHandler(key string, value interface{}, h Handler) Handler { - return FilterHandler(func(r *Record) (pass bool) { - switch key { - case r.KeyNames.Lvl: - return r.Lvl == value - case r.KeyNames.Time: - return r.Time == value - case r.KeyNames.Msg: - return r.Msg == value - } - - for i := 0; i < len(r.Ctx); i += 2 { - if r.Ctx[i] == key { - return r.Ctx[i+1] == value - } - } - return false - }, h) -} - -// LvlFilterHandler returns a Handler that only writes -// records which are less than the given verbosity -// level to the wrapped Handler. For example, to only -// log Error/Crit records: -// -// log.LvlFilterHandler(log.Error, log.StdoutHandler) -// -func LvlFilterHandler(maxLvl Lvl, h Handler) Handler { - return FilterHandler(func(r *Record) (pass bool) { - return r.Lvl <= maxLvl - }, h) -} - -// A MultiHandler dispatches any write to each of its handlers. -// This is useful for writing different types of log information -// to different locations. For example, to log to a file and -// standard error: -// -// log.MultiHandler( -// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), -// log.StderrHandler) -// -func MultiHandler(hs ...Handler) Handler { - return FuncHandler(func(r *Record) error { - for _, h := range hs { - // what to do about failures? 
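To illustrate how the caller and filter handlers above compose, a sketch that is not part of the removed file (upstream import path assumed; the line number in the comment will vary):

    package main

    import (
        "os"

        log "github.com/inconshreveable/log15"
    )

    func main() {
        l := log.New()
        // Drop records less severe than Info (Debug), and annotate the
        // survivors with their call site under the "caller" key.
        l.SetHandler(log.LvlFilterHandler(log.LvlInfo,
            log.CallerFileHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))))

        l.Debug("filtered out")
        l.Info("kept") // ... msg=kept caller=main.go:17
    }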
- h.Log(r) - } - return nil - }) -} - -// A FailoverHandler writes all log records to the first handler -// specified, but will failover and write to the second handler if -// the first handler has failed, and so on for all handlers specified. -// For example you might want to log to a network socket, but failover -// to writing to a file if the network fails, and then to -// standard out if the file write fails: -// -// log.FailoverHandler( -// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()), -// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), -// log.StdoutHandler) -// -// All writes that do not go to the first handler will add context with keys of -// the form "failover_err_{idx}" which explain the error encountered while -// trying to write to the handlers before them in the list. -func FailoverHandler(hs ...Handler) Handler { - return FuncHandler(func(r *Record) error { - var err error - for i, h := range hs { - err = h.Log(r) - if err == nil { - return nil - } else { - r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err) - } - } - - return err - }) -} - -// ChannelHandler writes all records to the given channel. -// It blocks if the channel is full. Useful for async processing -// of log messages, it's used by BufferedHandler. -func ChannelHandler(recs chan<- *Record) Handler { - return FuncHandler(func(r *Record) error { - recs <- r - return nil - }) -} - -// BufferedHandler writes all records to a buffered -// channel of the given size which flushes into the wrapped -// handler whenever it is available for writing. Since these -// writes happen asynchronously, all writes to a BufferedHandler -// never return an error and any errors from the wrapped handler are ignored. -func BufferedHandler(bufSize int, h Handler) Handler { - recs := make(chan *Record, bufSize) - go func() { - for m := range recs { - _ = h.Log(m) - } - }() - return ChannelHandler(recs) -} - -// swapHandler wraps another handler that may be swapped out -// dynamically at runtime in a thread-safe fashion. -type swapHandler struct { - handler unsafe.Pointer -} - -func (h *swapHandler) Log(r *Record) error { - return (*(*Handler)(atomic.LoadPointer(&h.handler))).Log(r) -} - -func (h *swapHandler) Swap(newHandler Handler) { - atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) -} - -// LazyHandler writes all values to the wrapped handler after evaluating -// any lazy functions in the record's context. It is already wrapped -// around StreamHandler and SyslogHandler in this library, you'll only need -// it if you write your own Handler. -func LazyHandler(h Handler) Handler { - return FuncHandler(func(r *Record) error { - // go through the values (odd indices) and reassign - // the values of any lazy fn to the result of its execution - hadErr := false - for i := 1; i < len(r.Ctx); i += 2 { - lz, ok := r.Ctx[i].(Lazy) - if ok { - v, err := evaluateLazy(lz) - if err != nil { - hadErr = true - r.Ctx[i] = err - } else { - if cs, ok := v.(stack.Trace); ok { - v = cs.TrimBelow(stack.Call(r.CallPC[0])). 
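A rough sketch of BufferedHandler from above: records are pushed onto a channel of the given size and drained by a background goroutine, so logging calls do not block while there is room in the buffer, and write errors are silently dropped. The sleep is only a crude way to let the drain goroutine finish in a toy program.

    package main

    import (
        "time"

        log "github.com/inconshreveable/log15"
    )

    func main() {
        l := log.New()
        l.SetHandler(log.BufferedHandler(1024, log.StdoutHandler))

        for i := 0; i < 10; i++ {
            l.Info("async record", "i", i)
        }

        time.Sleep(100 * time.Millisecond) // give the background writer time to drain
    }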
- TrimRuntime() - } - r.Ctx[i] = v - } - } - } - - if hadErr { - r.Ctx = append(r.Ctx, errorKey, "bad lazy") - } - - return h.Log(r) - }) -} - -func evaluateLazy(lz Lazy) (interface{}, error) { - t := reflect.TypeOf(lz.Fn) - - if t.Kind() != reflect.Func { - return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn) - } - - if t.NumIn() > 0 { - return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn) - } - - if t.NumOut() == 0 { - return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn) - } - - value := reflect.ValueOf(lz.Fn) - results := value.Call([]reflect.Value{}) - if len(results) == 1 { - return results[0].Interface(), nil - } else { - values := make([]interface{}, len(results)) - for i, v := range results { - values[i] = v.Interface() - } - return values, nil - } -} - -// DiscardHandler reports success for all writes but does nothing. -// It is useful for dynamically disabling logging at runtime via -// a Logger's SetHandler method. -func DiscardHandler() Handler { - return FuncHandler(func(r *Record) error { - return nil - }) -} - -// The Must object provides the following Handler creation functions -// which instead of returning an error parameter only return a Handler -// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler -var Must muster - -func must(h Handler, err error) Handler { - if err != nil { - panic(err) - } - return h -} - -type muster struct{} - -func (m muster) FileHandler(path string, fmtr Format) Handler { - return must(FileHandler(path, fmtr)) -} - -func (m muster) NetHandler(network, addr string, fmtr Format) Handler { - return must(NetHandler(network, addr, fmtr)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/log15_test.go b/Godeps/_workspace/src/github.com/tendermint/log15/log15_test.go deleted file mode 100644 index 516f7a4bc..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/log15_test.go +++ /dev/null @@ -1,556 +0,0 @@ -package log15 - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "net" - "regexp" - "runtime" - "sync" - "testing" - "time" -) - -func testHandler() (Handler, *Record) { - rec := new(Record) - return FuncHandler(func(r *Record) error { - *rec = *r - return nil - }), rec -} - -func testLogger() (Logger, Handler, *Record) { - l := New() - h, r := testHandler() - l.SetHandler(LazyHandler(h)) - return l, h, r -} - -func TestLazy(t *testing.T) { - t.Parallel() - - x := 1 - lazy := func() int { - return x - } - - l, _, r := testLogger() - l.Info("", "x", Lazy{lazy}) - if r.Ctx[1] != 1 { - t.Fatalf("Lazy function not evaluated, got %v, expected %d", r.Ctx[1], 1) - } - - x = 2 - l.Info("", "x", Lazy{lazy}) - if r.Ctx[1] != 2 { - t.Fatalf("Lazy function not evaluated, got %v, expected %d", r.Ctx[1], 1) - } -} - -func TestInvalidLazy(t *testing.T) { - t.Parallel() - - l, _, r := testLogger() - validate := func() { - if len(r.Ctx) < 4 { - t.Fatalf("Invalid lazy, got %d args, expecting at least 4", len(r.Ctx)) - } - - if r.Ctx[2] != errorKey { - t.Fatalf("Invalid lazy, got key %s expecting %s", r.Ctx[2], errorKey) - } - } - - l.Info("", "x", Lazy{1}) - validate() - - l.Info("", "x", Lazy{func(x int) int { return x }}) - validate() - - l.Info("", "x", Lazy{func() {}}) - validate() -} - -func TestCtx(t *testing.T) { - t.Parallel() - - l, _, r := testLogger() - l.Info("", Ctx{"x": 1, "y": "foo", "tester": t}) - if len(r.Ctx) != 6 { - t.Fatalf("Expecting Ctx tansformed into %d ctx args, got %d: %v", 6, len(r.Ctx), r.Ctx) - } -} - -func 
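LazyHandler and the Lazy type it evaluates (defined in logger.go later in this diff) are easiest to see with a sketch. With the composition below, the level filter sits outside the LazyHandler that StdoutHandler already contains, so the expensive function is never called for the filtered Debug record. Illustrative only; upstream import path assumed.

    package main

    import log "github.com/inconshreveable/log15"

    func expensiveState() string {
        // Imagine walking a large data structure here.
        return "42 items"
    }

    func main() {
        l := log.New()
        l.SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler))

        l.Debug("cache state", "state", log.Lazy{expensiveState}) // filtered: never evaluated
        l.Info("cache state", "state", log.Lazy{expensiveState})  // evaluated and printed
    }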
testFormatter(f Format) (Logger, *bytes.Buffer) { - l := New() - var buf bytes.Buffer - l.SetHandler(StreamHandler(&buf, f)) - return l, &buf -} - -func TestJson(t *testing.T) { - t.Parallel() - - l, buf := testFormatter(JsonFormat()) - l.Error("some message", "x", 1, "y", 3.2) - - var v map[string]interface{} - decoder := json.NewDecoder(buf) - if err := decoder.Decode(&v); err != nil { - t.Fatalf("Error decoding JSON: %v", v) - } - - validate := func(key string, expected interface{}) { - if v[key] != expected { - t.Fatalf("Got %v expected %v for %v", v[key], expected, key) - } - } - - validate("msg", "some message") - validate("x", float64(1)) // all numbers are floats in JSON land - validate("y", 3.2) -} - -func TestLogfmt(t *testing.T) { - t.Parallel() - - l, buf := testFormatter(LogfmtFormat()) - l.Error("some message", "x", 1, "y", 3.2, "equals", "=", "quote", "\"") - - // skip timestamp in comparison - got := buf.Bytes()[27:buf.Len()] - expected := []byte(`lvl=eror msg="some message" x=1 y=3.200 equals="=" quote="\""` + "\n") - if !bytes.Equal(got, expected) { - t.Fatalf("Got %s, expected %s", got, expected) - } -} - -func TestMultiHandler(t *testing.T) { - t.Parallel() - - h1, r1 := testHandler() - h2, r2 := testHandler() - l := New() - l.SetHandler(MultiHandler(h1, h2)) - l.Debug("clone") - - if r1.Msg != "clone" { - t.Fatalf("wrong value for h1.Msg. Got %s expected %s", r1.Msg, "clone") - } - - if r2.Msg != "clone" { - t.Fatalf("wrong value for h2.Msg. Got %s expected %s", r2.Msg, "clone") - } - -} - -type waitHandler struct { - ch chan Record -} - -func (h *waitHandler) Log(r *Record) error { - h.ch <- *r - return nil -} - -func TestBufferedHandler(t *testing.T) { - t.Parallel() - - ch := make(chan Record) - l := New() - l.SetHandler(BufferedHandler(0, &waitHandler{ch})) - - l.Debug("buffer") - if r := <-ch; r.Msg != "buffer" { - t.Fatalf("wrong value for r.Msg. Got %s expected %s", r.Msg, "") - } -} - -func TestLogContext(t *testing.T) { - t.Parallel() - - l, _, r := testLogger() - l = l.New("foo", "bar") - l.Crit("baz") - - if len(r.Ctx) != 2 { - t.Fatalf("Expected logger context in record context. 
Got length %d, expected %d", len(r.Ctx), 2) - } - - if r.Ctx[0] != "foo" { - t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], "foo") - } - - if r.Ctx[1] != "bar" { - t.Fatalf("Wrong context value, got %s expected %s", r.Ctx[1], "bar") - } -} - -func TestMapCtx(t *testing.T) { - t.Parallel() - - l, _, r := testLogger() - l.Crit("test", Ctx{"foo": "bar"}) - - if len(r.Ctx) != 2 { - t.Fatalf("Wrong context length, got %d, expected %d", len(r.Ctx), 2) - } - - if r.Ctx[0] != "foo" { - t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], "foo") - } - - if r.Ctx[1] != "bar" { - t.Fatalf("Wrong context value, got %s expected %s", r.Ctx[1], "bar") - } -} - -func TestLvlFilterHandler(t *testing.T) { - t.Parallel() - - l := New() - h, r := testHandler() - l.SetHandler(LvlFilterHandler(LvlWarn, h)) - l.Info("info'd") - - if r.Msg != "" { - t.Fatalf("Expected zero record, but got record with msg: %v", r.Msg) - } - - l.Warn("warned") - if r.Msg != "warned" { - t.Fatalf("Got record msg %s expected %s", r.Msg, "warned") - } - - l.Warn("error'd") - if r.Msg != "error'd" { - t.Fatalf("Got record msg %s expected %s", r.Msg, "error'd") - } -} - -func TestNetHandler(t *testing.T) { - t.Parallel() - - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Failed to listen: %v", l) - } - - errs := make(chan error) - go func() { - c, err := l.Accept() - if err != nil { - t.Errorf("Failed to accept conneciton: %v", err) - return - } - - rd := bufio.NewReader(c) - s, err := rd.ReadString('\n') - if err != nil { - t.Errorf("Failed to read string: %v", err) - } - - got := s[27:] - expected := "lvl=info msg=test x=1\n" - if got != expected { - t.Errorf("Got log line %s, expected %s", got, expected) - } - - errs <- nil - }() - - lg := New() - h, err := NetHandler("tcp", l.Addr().String(), LogfmtFormat()) - if err != nil { - t.Fatal(err) - } - lg.SetHandler(h) - lg.Info("test", "x", 1) - - select { - case <-time.After(time.Second): - t.Fatalf("Test timed out!") - case <-errs: - // ok - } -} - -func TestMatchFilterHandler(t *testing.T) { - t.Parallel() - - l, h, r := testLogger() - l.SetHandler(MatchFilterHandler("err", nil, h)) - - l.Crit("test", "foo", "bar") - if r.Msg != "" { - t.Fatalf("expected filter handler to discard msg") - } - - l.Crit("test2", "err", "bad fd") - if r.Msg != "" { - t.Fatalf("expected filter handler to discard msg") - } - - l.Crit("test3", "err", nil) - if r.Msg != "test3" { - t.Fatalf("expected filter handler to allow msg") - } -} - -func TestMatchFilterBuiltin(t *testing.T) { - t.Parallel() - - l, h, r := testLogger() - l.SetHandler(MatchFilterHandler("lvl", LvlError, h)) - l.Info("does not pass") - - if r.Msg != "" { - t.Fatalf("got info level record that should not have matched") - } - - l.Error("error!") - if r.Msg != "error!" 
{ - t.Fatalf("did not get error level record that should have matched") - } - - r.Msg = "" - l.SetHandler(MatchFilterHandler("msg", "matching message", h)) - l.Info("doesn't match") - if r.Msg != "" { - t.Fatalf("got record with wrong message matched") - } - - l.Debug("matching message") - if r.Msg != "matching message" { - t.Fatalf("did not get record which matches") - } -} - -type failingWriter struct { - fail bool -} - -func (w *failingWriter) Write(buf []byte) (int, error) { - if w.fail { - return 0, errors.New("fail") - } else { - return len(buf), nil - } -} - -func TestFailoverHandler(t *testing.T) { - t.Parallel() - - l := New() - h, r := testHandler() - w := &failingWriter{false} - - l.SetHandler(FailoverHandler( - StreamHandler(w, JsonFormat()), - h)) - - l.Debug("test ok") - if r.Msg != "" { - t.Fatalf("expected no failover") - } - - w.fail = true - l.Debug("test failover", "x", 1) - if r.Msg != "test failover" { - t.Fatalf("expected failover") - } - - if len(r.Ctx) != 4 { - t.Fatalf("expected additional failover ctx") - } - - got := r.Ctx[2] - expected := "failover_err_0" - if got != expected { - t.Fatalf("expected failover ctx. got: %s, expected %s", got, expected) - } -} - -// https://github.com/inconshreveable/log15/issues/16 -func TestIndependentSetHandler(t *testing.T) { - t.Parallel() - - parent, _, r := testLogger() - child := parent.New() - child.SetHandler(DiscardHandler()) - parent.Info("test") - if r.Msg != "test" { - t.Fatalf("parent handler affected by child") - } -} - -// https://github.com/inconshreveable/log15/issues/16 -func TestInheritHandler(t *testing.T) { - t.Parallel() - - parent, _, r := testLogger() - child := parent.New() - parent.SetHandler(DiscardHandler()) - child.Info("test") - if r.Msg == "test" { - t.Fatalf("child handler affected not affected by parent") - } -} - -func TestCallerFileHandler(t *testing.T) { - t.Parallel() - - l := New() - h, r := testHandler() - l.SetHandler(CallerFileHandler(h)) - - l.Info("baz") - _, _, line, _ := runtime.Caller(0) - - if len(r.Ctx) != 2 { - t.Fatalf("Expected caller in record context. Got length %d, expected %d", len(r.Ctx), 2) - } - - const key = "caller" - - if r.Ctx[0] != key { - t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) - } - - s, ok := r.Ctx[1].(string) - if !ok { - t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) - } - - exp := fmt.Sprint("log15_test.go:", line-1) - if s != exp { - t.Fatalf("Wrong context value, got %s expected string matching %s", s, exp) - } -} - -func TestCallerFuncHandler(t *testing.T) { - t.Parallel() - - l := New() - h, r := testHandler() - l.SetHandler(CallerFuncHandler(h)) - - l.Info("baz") - - if len(r.Ctx) != 2 { - t.Fatalf("Expected caller in record context. 
Got length %d, expected %d", len(r.Ctx), 2) - } - - const key = "fn" - - if r.Ctx[0] != key { - t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) - } - - const regex = ".*\\.TestCallerFuncHandler" - - s, ok := r.Ctx[1].(string) - if !ok { - t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) - } - - match, err := regexp.MatchString(regex, s) - if err != nil { - t.Fatalf("Error matching %s to regex %s: %v", s, regex, err) - } - - if !match { - t.Fatalf("Wrong context value, got %s expected string matching %s", s, regex) - } -} - -// https://github.com/inconshreveable/log15/issues/27 -func TestCallerStackHandler(t *testing.T) { - t.Parallel() - - l := New() - h, r := testHandler() - l.SetHandler(CallerStackHandler("%#v", h)) - - lines := []int{} - - func() { - l.Info("baz") - _, _, line, _ := runtime.Caller(0) - lines = append(lines, line-1) - }() - _, file, line, _ := runtime.Caller(0) - lines = append(lines, line-1) - - if len(r.Ctx) != 2 { - t.Fatalf("Expected stack in record context. Got length %d, expected %d", len(r.Ctx), 2) - } - - const key = "stack" - - if r.Ctx[0] != key { - t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) - } - - s, ok := r.Ctx[1].(string) - if !ok { - t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) - } - - exp := "[" - for i, line := range lines { - if i > 0 { - exp += " " - } - exp += fmt.Sprint(file, ":", line) - } - exp += "]" - - if s != exp { - t.Fatalf("Wrong context value, got %s expected string matching %s", s, exp) - } -} - -// tests that when logging concurrently to the same logger -// from multiple goroutines that the calls are handled independently -// this test tries to trigger a previous bug where concurrent calls could -// corrupt each other's context values -// -// this test runs N concurrent goroutines each logging a fixed number of -// records and a handler that buckets them based on the index passed in the context. -// if the logger is not concurrent-safe then the values in the buckets will not all be the same -// -// https://github.com/inconshreveable/log15/pull/30 -func TestConcurrent(t *testing.T) { - root := New() - // this was the first value that triggered - // go to allocate extra capacity in the logger's context slice which - // was necessary to trigger the bug - const ctxLen = 34 - l := root.New(make([]interface{}, ctxLen)...) 
- const goroutines = 8 - var res [goroutines]int - l.SetHandler(SyncHandler(FuncHandler(func(r *Record) error { - res[r.Ctx[ctxLen+1].(int)]++ - return nil - }))) - var wg sync.WaitGroup - wg.Add(goroutines) - for i := 0; i < goroutines; i++ { - go func(idx int) { - defer wg.Done() - for j := 0; j < 10000; j++ { - l.Info("test message", "goroutine_idx", idx) - } - }(i) - } - wg.Wait() - for _, val := range res[:] { - if val != 10000 { - t.Fatalf("Wrong number of messages for context: %+v", res) - } - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/logger.go b/Godeps/_workspace/src/github.com/tendermint/log15/logger.go deleted file mode 100644 index 8ddf3ffd0..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/logger.go +++ /dev/null @@ -1,211 +0,0 @@ -package log15 - -import ( - "fmt" - "runtime" - "time" -) - -const timeKey = "t" -const lvlKey = "lvl" -const msgKey = "msg" -const errorKey = "LOG15_ERROR" - -type Lvl int - -const ( - LvlCrit Lvl = iota - LvlError - LvlWarn - LvlNotice - LvlInfo - LvlDebug -) - -// Returns the name of a Lvl -func (l Lvl) String() string { - switch l { - case LvlDebug: - return "dbug" - case LvlInfo: - return "info" - case LvlNotice: - return "note" - case LvlWarn: - return "warn" - case LvlError: - return "eror" - case LvlCrit: - return "crit" - default: - panic("bad level") - } -} - -// Returns the appropriate Lvl from a string name. -// Useful for parsing command line args and configuration files. -func LvlFromString(lvlString string) (Lvl, error) { - switch lvlString { - case "debug", "dbug": - return LvlDebug, nil - case "info": - return LvlInfo, nil - case "note", "notice": - return LvlNotice, nil - case "warn": - return LvlWarn, nil - case "error", "eror": - return LvlError, nil - case "crit": - return LvlCrit, nil - default: - return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString) - } -} - -// A Record is what a Logger asks its handler to write -type Record struct { - Time time.Time - Lvl Lvl - Msg string - Ctx []interface{} - CallPC [1]uintptr - KeyNames RecordKeyNames -} - -type RecordKeyNames struct { - Time string - Msg string - Lvl string -} - -// A Logger writes key/value pairs to a Handler -type Logger interface { - // New returns a new Logger that has this logger's context plus the given context - New(ctx ...interface{}) Logger - - // SetHandler updates the logger to write records to the specified handler. 
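LvlFromString above is meant for turning flag or config strings into a Lvl; on an unknown string it returns LvlDebug together with an error. A sketch, with the upstream import path assumed:

    package main

    import log "github.com/inconshreveable/log15"

    func main() {
        lvl, err := log.LvlFromString("warn") // e.g. from a -loglevel flag
        if err != nil {
            log.Error("unknown log level, falling back to debug", "err", err)
        }

        log.Root().SetHandler(log.LvlFilterHandler(lvl, log.StdoutHandler))
        log.Info("suppressed at warn")
        log.Warn("written")
    }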
- SetHandler(h Handler) - - // Log a message at the given level with context key/value pairs - Debug(msg string, ctx ...interface{}) - Info(msg string, ctx ...interface{}) - Notice(msg string, ctx ...interface{}) - Warn(msg string, ctx ...interface{}) - Error(msg string, ctx ...interface{}) - Crit(msg string, ctx ...interface{}) -} - -type logger struct { - ctx []interface{} - h *swapHandler -} - -func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) { - r := Record{ - Time: time.Now(), - Lvl: lvl, - Msg: msg, - Ctx: newContext(l.ctx, ctx), - KeyNames: RecordKeyNames{ - Time: timeKey, - Msg: msgKey, - Lvl: lvlKey, - }, - } - runtime.Callers(3, r.CallPC[:]) - l.h.Log(&r) -} - -func (l *logger) New(ctx ...interface{}) Logger { - child := &logger{newContext(l.ctx, ctx), new(swapHandler)} - child.SetHandler(l.h) - return child -} - -func newContext(prefix []interface{}, suffix []interface{}) []interface{} { - normalizedSuffix := normalize(suffix) - newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix)) - n := copy(newCtx, prefix) - copy(newCtx[n:], normalizedSuffix) - return newCtx -} - -func (l *logger) Debug(msg string, ctx ...interface{}) { - l.write(msg, LvlDebug, ctx) -} - -func (l *logger) Info(msg string, ctx ...interface{}) { - l.write(msg, LvlInfo, ctx) -} - -func (l *logger) Notice(msg string, ctx ...interface{}) { - l.write(msg, LvlNotice, ctx) -} - -func (l *logger) Warn(msg string, ctx ...interface{}) { - l.write(msg, LvlWarn, ctx) -} - -func (l *logger) Error(msg string, ctx ...interface{}) { - l.write(msg, LvlError, ctx) -} - -func (l *logger) Crit(msg string, ctx ...interface{}) { - l.write(msg, LvlCrit, ctx) -} - -func (l *logger) SetHandler(h Handler) { - l.h.Swap(h) -} - -func normalize(ctx []interface{}) []interface{} { - // if the caller passed a Ctx object, then expand it - if len(ctx) == 1 { - if ctxMap, ok := ctx[0].(Ctx); ok { - ctx = ctxMap.toArray() - } - } - - // ctx needs to be even because it's a series of key/value pairs - // no one wants to check for errors on logging functions, - // so instead of erroring on bad input, we'll just make sure - // that things are the right length and users can fix bugs - // when they see the output looks wrong - if len(ctx)%2 != 0 { - ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil") - } - - return ctx -} - -// Lazy allows you to defer calculation of a logged value that is expensive -// to compute until it is certain that it must be evaluated with the given filters. -// -// Lazy may also be used in conjunction with a Logger's New() function -// to generate a child logger which always reports the current value of changing -// state. -// -// You may wrap any function which takes no arguments to Lazy. It may return any -// number of values of any type. -type Lazy struct { - Fn interface{} -} - -// Ctx is a map of key/value pairs to pass as context to a log function -// Use this only if you really need greater safety around the arguments you pass -// to the logging functions. 
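The context handling above (New copies the parent's pairs and appends the child's; normalize pads odd argument lists) can be summarized with a small sketch. The address is hypothetical and the import path assumed.

    package main

    import log "github.com/inconshreveable/log15"

    func main() {
        srv := log.New("service", "gateway")
        conn := srv.New("remote_addr", "10.0.0.7:5432") // hypothetical peer

        // Every record from conn carries service=gateway remote_addr=...
        // plus whatever per-call pairs are passed here.
        conn.Info("accepted connection")
        conn.Warn("slow read", "elapsed_ms", 250)
    }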
-type Ctx map[string]interface{} - -func (c Ctx) toArray() []interface{} { - arr := make([]interface{}, len(c)*2) - - i := 0 - for k, v := range c { - arr[i] = k - arr[i+1] = v - i += 2 - } - - return arr -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/root.go b/Godeps/_workspace/src/github.com/tendermint/log15/root.go deleted file mode 100644 index 981031730..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/root.go +++ /dev/null @@ -1,72 +0,0 @@ -package log15 - -import ( - "os" - - "github.com/inconshreveable/log15/term" - "github.com/mattn/go-colorable" -) - -var ( - root *logger - StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat()) - StderrHandler = StreamHandler(os.Stderr, LogfmtFormat()) -) - -func init() { - if term.IsTty(os.Stdout.Fd()) { - StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat()) - } - - if term.IsTty(os.Stderr.Fd()) { - StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat()) - } - - root = &logger{[]interface{}{}, new(swapHandler)} - root.SetHandler(StdoutHandler) -} - -// New returns a new logger with the given context. -// New is a convenient alias for Root().New -func New(ctx ...interface{}) Logger { - return root.New(ctx...) -} - -// Root returns the root logger -func Root() Logger { - return root -} - -// The following functions bypass the exported logger methods (logger.Debug, -// etc.) to keep the call depth the same for all paths to logger.write so -// runtime.Caller(2) always refers to the call site in client code. - -// Debug is a convenient alias for Root().Debug -func Debug(msg string, ctx ...interface{}) { - root.write(msg, LvlDebug, ctx) -} - -// Info is a convenient alias for Root().Info -func Info(msg string, ctx ...interface{}) { - root.write(msg, LvlInfo, ctx) -} - -// Notice is a convenient alias for Root().Notice -func Notice(msg string, ctx ...interface{}) { - root.write(msg, LvlNotice, ctx) -} - -// Warn is a convenient alias for Root().Warn -func Warn(msg string, ctx ...interface{}) { - root.write(msg, LvlWarn, ctx) -} - -// Error is a convenient alias for Root().Error -func Error(msg string, ctx ...interface{}) { - root.write(msg, LvlError, ctx) -} - -// Crit is a convenient alias for Root().Crit -func Crit(msg string, ctx ...interface{}) { - root.write(msg, LvlCrit, ctx) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack.go b/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack.go deleted file mode 100644 index ae3021cce..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack.go +++ /dev/null @@ -1,225 +0,0 @@ -// Package stack implements utilities to capture, manipulate, and format call -// stacks. -package stack - -import ( - "fmt" - "path/filepath" - "runtime" - "strings" -) - -// Call records a single function invocation from a goroutine stack. It is a -// wrapper for the program counter values returned by runtime.Caller and -// runtime.Callers and consumed by runtime.FuncForPC. -type Call uintptr - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. 
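The root logger in root.go above backs the package-level helpers, so swapping its handler redirects every caller of log.Info and friends. A sketch, with the upstream import path assumed:

    package main

    import (
        "os"

        log "github.com/inconshreveable/log15"
    )

    func main() {
        log.Info("goes to the default stdout handler")

        log.Root().SetHandler(log.StreamHandler(os.Stderr, log.JsonFormat()))
        log.Info("now emitted as JSON on stderr")
    }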
-// -// %+s path of source file relative to the compile time GOPATH -// %#s full path of source file -// %+n import path qualified function name -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (pc Call) Format(s fmt.State, c rune) { - // BUG(ChrisHines): Subtracting one from pc is a work around for - // https://code.google.com/p/go/issues/detail?id=7690. The idea for this - // work around comes from rsc's initial patch at - // https://codereview.appspot.com/84100043/#ps20001, but as noted in the - // issue discussion, it is not a complete fix since it doesn't handle some - // cases involving signals. Just the same, it handles all of the other - // cases I have tested. - pcFix := uintptr(pc) - 1 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - fmt.Fprintf(s, "%%!%c(NOFUNC)", c) - return - } - - switch c { - case 's', 'v': - file, line := fn.FileLine(pcFix) - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - // Here we want to get the source file path relative to the - // compile time GOPATH. As of Go 1.3.x there is no direct way to - // know the compiled GOPATH at runtime, but we can infer the - // number of path segments in the GOPATH. We note that fn.Name() - // returns the function name qualified by the import path, which - // does not include the GOPATH. Thus we can trim segments from the - // beginning of the file path until the number of path separators - // remaining is one more than the number of path separators in the - // function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path - // separator than our desired output. - const sep = "/" - impCnt := strings.Count(fn.Name(), sep) + 1 - pathCnt := strings.Count(file, sep) - for pathCnt > impCnt { - i := strings.Index(file, sep) - if i == -1 { - break - } - file = file[i+len(sep):] - pathCnt-- - } - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - fmt.Fprint(s, file) - if c == 'v' { - fmt.Fprint(s, ":", line) - } - - case 'd': - _, line := fn.FileLine(pcFix) - fmt.Fprint(s, line) - - case 'n': - name := fn.Name() - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - fmt.Fprint(s, name) - } -} - -// Callers returns a Trace for the current goroutine with element 0 -// identifying the calling function. -func Callers() Trace { - pcs := poolBuf() - pcs = pcs[:cap(pcs)] - n := runtime.Callers(2, pcs) - cs := make([]Call, n) - for i, pc := range pcs[:n] { - cs[i] = Call(pc) - } - putPoolBuf(pcs) - return cs -} - -// name returns the import path qualified name of the function containing the -// call. -func (pc Call) name() string { - pcFix := uintptr(pc) - 1 // work around for go issue #7690 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - return "???" - } - return fn.Name() -} - -func (pc Call) file() string { - pcFix := uintptr(pc) - 1 // work around for go issue #7690 - fn := runtime.FuncForPC(pcFix) - if fn == nil { - return "???" - } - file, _ := fn.FileLine(pcFix) - return file -} - -// Trace records a sequence of function invocations from a goroutine stack. 
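The verbs that Call.Format supports, as documented above, are easiest to compare side by side. A sketch that assumes the upstream import path github.com/inconshreveable/log15/stack; the file names and line numbers in the comments are illustrative.

    package main

    import (
        "fmt"
        "runtime"

        "github.com/inconshreveable/log15/stack"
    )

    func main() {
        pc, _, _, _ := runtime.Caller(0)
        call := stack.Call(pc)

        fmt.Printf("%v\n", call)  // main.go:13  (base file name and line)
        fmt.Printf("%+v\n", call) // path relative to the compile-time GOPATH, plus :13
        fmt.Printf("%n\n", call)  // main        (bare function name)
        fmt.Printf("%+n\n", call) // main.main   (import-path qualified name)
    }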
-type Trace []Call - -// Format implements fmt.Formatter by printing the Trace as square brackes ([, -// ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (pcs Trace) Format(s fmt.State, c rune) { - s.Write([]byte("[")) - for i, pc := range pcs { - if i > 0 { - s.Write([]byte(" ")) - } - pc.Format(s, c) - } - s.Write([]byte("]")) -} - -// TrimBelow returns a slice of the Trace with all entries below pc removed. -func (pcs Trace) TrimBelow(pc Call) Trace { - for len(pcs) > 0 && pcs[0] != pc { - pcs = pcs[1:] - } - return pcs -} - -// TrimAbove returns a slice of the Trace with all entries above pc removed. -func (pcs Trace) TrimAbove(pc Call) Trace { - for len(pcs) > 0 && pcs[len(pcs)-1] != pc { - pcs = pcs[:len(pcs)-1] - } - return pcs -} - -// TrimBelowName returns a slice of the Trace with all entries below the -// lowest with function name name removed. -func (pcs Trace) TrimBelowName(name string) Trace { - for len(pcs) > 0 && pcs[0].name() != name { - pcs = pcs[1:] - } - return pcs -} - -// TrimAboveName returns a slice of the Trace with all entries above the -// highest with function name name removed. -func (pcs Trace) TrimAboveName(name string) Trace { - for len(pcs) > 0 && pcs[len(pcs)-1].name() != name { - pcs = pcs[:len(pcs)-1] - } - return pcs -} - -var goroot string - -func init() { - goroot = filepath.ToSlash(runtime.GOROOT()) - if runtime.GOOS == "windows" { - goroot = strings.ToLower(goroot) - } -} - -func inGoroot(path string) bool { - if runtime.GOOS == "windows" { - path = strings.ToLower(path) - } - return strings.HasPrefix(path, goroot) -} - -// TrimRuntime returns a slice of the Trace with the topmost entries from the -// go runtime removed. It considers any calls originating from files under -// GOROOT as part of the runtime. 
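TrimRuntime and the other trimming helpers above slice a captured Trace down to the interesting frames. A minimal sketch; upstream import path assumed and the printed call sites will vary.

    package main

    import (
        "fmt"

        "github.com/inconshreveable/log15/stack"
    )

    func main() {
        // Capture the current stack, drop the frames that live under GOROOT
        // (runtime.main, runtime.goexit, ...), and print what is left.
        trace := stack.Callers().TrimRuntime()
        fmt.Printf("%v\n", trace) // e.g. [main.go:14]
    }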
-func (pcs Trace) TrimRuntime() Trace { - for len(pcs) > 0 && inGoroot(pcs[len(pcs)-1].file()) { - pcs = pcs[:len(pcs)-1] - } - return pcs -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool.go b/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool.go deleted file mode 100644 index 34f2ca970..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.3 - -package stack - -import ( - "sync" -) - -var pcStackPool = sync.Pool{ - New: func() interface{} { return make([]uintptr, 1000) }, -} - -func poolBuf() []uintptr { - return pcStackPool.Get().([]uintptr) -} - -func putPoolBuf(p []uintptr) { - pcStackPool.Put(p) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool_chan.go b/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool_chan.go deleted file mode 100644 index aa449edf5..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_pool_chan.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !go1.3 - -package stack - -const ( - stackPoolSize = 64 -) - -var ( - pcStackPool = make(chan []uintptr, stackPoolSize) -) - -func poolBuf() []uintptr { - select { - case p := <-pcStackPool: - return p - default: - return make([]uintptr, 1000) - } -} - -func putPoolBuf(p []uintptr) { - select { - case pcStackPool <- p: - default: - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_test.go b/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_test.go deleted file mode 100644 index 52371b1e4..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/stack/stack_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package stack_test - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "testing" - - "github.com/inconshreveable/log15/stack" -) - -type testType struct{} - -func (tt testType) testMethod() (pc uintptr, file string, line int, ok bool) { - return runtime.Caller(0) -} - -func TestCallFormat(t *testing.T) { - t.Parallel() - - pc, file, line, ok := runtime.Caller(0) - if !ok { - t.Fatal("runtime.Caller(0) failed") - } - - gopathSrc := filepath.Join(os.Getenv("GOPATH"), "src") - relFile, err := filepath.Rel(gopathSrc, file) - if err != nil { - t.Fatalf("failed to determine path relative to GOPATH: %v", err) - } - relFile = filepath.ToSlash(relFile) - - pc2, file2, line2, ok2 := testType{}.testMethod() - if !ok2 { - t.Fatal("runtime.Caller(0) failed") - } - relFile2, err := filepath.Rel(gopathSrc, file) - if err != nil { - t.Fatalf("failed to determine path relative to GOPATH: %v", err) - } - relFile2 = filepath.ToSlash(relFile2) - - data := []struct { - pc uintptr - desc string - fmt string - out string - }{ - {0, "error", "%s", "%!s(NOFUNC)"}, - - {pc, "func", "%s", path.Base(file)}, - {pc, "func", "%+s", relFile}, - {pc, "func", "%#s", file}, - {pc, "func", "%d", fmt.Sprint(line)}, - {pc, "func", "%n", "TestCallFormat"}, - {pc, "func", "%+n", runtime.FuncForPC(pc).Name()}, - {pc, "func", "%v", fmt.Sprint(path.Base(file), ":", line)}, - {pc, "func", "%+v", fmt.Sprint(relFile, ":", line)}, - {pc, "func", "%#v", fmt.Sprint(file, ":", line)}, - {pc, "func", "%v|%[1]n()", fmt.Sprint(path.Base(file), ":", line, "|", "TestCallFormat()")}, - - {pc2, "meth", "%s", path.Base(file2)}, - {pc2, "meth", "%+s", relFile2}, - {pc2, "meth", "%#s", file2}, - {pc2, "meth", "%d", fmt.Sprint(line2)}, - {pc2, "meth", "%n", "testType.testMethod"}, - {pc2, "meth", "%+n", 
runtime.FuncForPC(pc2).Name()}, - {pc2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)}, - {pc2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)}, - {pc2, "meth", "%#v", fmt.Sprint(file2, ":", line2)}, - {pc2, "meth", "%v|%[1]n()", fmt.Sprint(path.Base(file2), ":", line2, "|", "testType.testMethod()")}, - } - - for _, d := range data { - got := fmt.Sprintf(d.fmt, stack.Call(d.pc)) - if got != d.out { - t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, d.desc, got, d.out) - } - } -} - -func BenchmarkCallVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprint(ioutil.Discard, stack.Call(pc)) - } -} - -func BenchmarkCallPlusVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+v", stack.Call(pc)) - } -} - -func BenchmarkCallSharpVFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#v", stack.Call(pc)) - } -} - -func BenchmarkCallSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%s", stack.Call(pc)) - } -} - -func BenchmarkCallPlusSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+s", stack.Call(pc)) - } -} - -func BenchmarkCallSharpSFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%#s", stack.Call(pc)) - } -} - -func BenchmarkCallDFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%d", stack.Call(pc)) - } -} - -func BenchmarkCallNFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%n", stack.Call(pc)) - } -} - -func BenchmarkCallPlusNFmt(b *testing.B) { - pc, _, _, ok := runtime.Caller(0) - if !ok { - b.Fatal("runtime.Caller(0) failed") - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - fmt.Fprintf(ioutil.Discard, "%+n", stack.Call(pc)) - } -} - -func BenchmarkCallers(b *testing.B) { - for i := 0; i < b.N; i++ { - stack.Callers() - } -} - -func deepStack(depth int, b *testing.B) stack.Trace { - if depth > 0 { - return deepStack(depth-1, b) - } - b.StartTimer() - s := stack.Callers() - b.StopTimer() - return s -} - -func BenchmarkCallers10(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(10, b) - } -} - -func BenchmarkCallers50(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(50, b) - } -} - -func BenchmarkCallers100(b *testing.B) { - b.StopTimer() - - for i := 0; i < b.N; i++ { - deepStack(100, b) - } -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/syslog.go b/Godeps/_workspace/src/github.com/tendermint/log15/syslog.go deleted file mode 100644 index 4b16e2b27..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/syslog.go +++ 
/dev/null @@ -1,57 +0,0 @@ -// +build !windows,!plan9 - -package log15 - -import ( - "log/syslog" - "strings" -) - -// SyslogHandler opens a connection to the system syslog daemon by calling -// syslog.New and writes all records to it. -func SyslogHandler(tag string, fmtr Format) (Handler, error) { - wr, err := syslog.New(syslog.LOG_INFO, tag) - return sharedSyslog(fmtr, wr, err) -} - -// SyslogHandler opens a connection to a log daemon over the network and writes -// all log records to it. -func SyslogNetHandler(net, addr string, tag string, fmtr Format) (Handler, error) { - wr, err := syslog.Dial(net, addr, syslog.LOG_INFO, tag) - return sharedSyslog(fmtr, wr, err) -} - -func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) { - if err != nil { - return nil, err - } - h := FuncHandler(func(r *Record) error { - var syslogFn = sysWr.Info - switch r.Lvl { - case LvlCrit: - syslogFn = sysWr.Crit - case LvlError: - syslogFn = sysWr.Err - case LvlWarn: - syslogFn = sysWr.Warning - case LvlNotice: - syslogFn = sysWr.Notice - case LvlInfo: - syslogFn = sysWr.Info - case LvlDebug: - syslogFn = sysWr.Debug - } - - s := strings.TrimSpace(string(fmtr.Format(r))) - return syslogFn(s) - }) - return LazyHandler(&closingHandler{sysWr, h}), nil -} - -func (m muster) SyslogHandler(tag string, fmtr Format) Handler { - return must(SyslogHandler(tag, fmtr)) -} - -func (m muster) SyslogNetHandler(net, addr string, tag string, fmtr Format) Handler { - return must(SyslogNetHandler(net, addr, tag, fmtr)) -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/LICENSE b/Godeps/_workspace/src/github.com/tendermint/log15/term/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_darwin.go b/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_darwin.go deleted file mode 100644 index b05de4cb8..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
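SyslogHandler above maps record levels onto the corresponding syslog severities (Crit, Err, Warning, Notice, Info, Debug) and, like the other handlers, is wrapped in LazyHandler. A Unix-only sketch matching the build tag; the tag "myapp" is made up and the import path assumed.

    package main

    import log "github.com/inconshreveable/log15"

    func main() {
        h, err := log.SyslogHandler("myapp", log.LogfmtFormat())
        if err != nil {
            log.Crit("cannot connect to syslog", "err", err)
            return
        }

        log.Root().SetHandler(h)
        log.Notice("service started") // delivered via the syslog Notice severity
    }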
- -package term - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_freebsd.go b/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_freebsd.go deleted file mode 100644 index cfaceab33..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_freebsd.go +++ /dev/null @@ -1,18 +0,0 @@ -package term - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_linux.go b/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_linux.go deleted file mode 100644 index 5e2419c6d..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package term - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_notwindows.go b/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_notwindows.go deleted file mode 100644 index c0b201a53..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_notwindows.go +++ /dev/null @@ -1,20 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux,!appengine darwin freebsd - -package term - -import ( - "syscall" - "unsafe" -) - -// IsTty returns true if the given file descriptor is a terminal. -func IsTty(fd uintptr) bool { - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_windows.go b/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_windows.go deleted file mode 100644 index df3c30c15..000000000 --- a/Godeps/_workspace/src/github.com/tendermint/log15/term/terminal_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package term - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTty returns true if the given file descriptor is a terminal. 
-func IsTty(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/const_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/const_amd64.s deleted file mode 100644 index 797f9b051..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/const_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF -GLOBL ·REDMASK51(SB), 8, $8 - -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/cswap_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/cswap_amd64.s deleted file mode 100644 index 45484d1b5..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/cswap_amd64.s +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func cswap(inout *[5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - CMPQ SI,$1 - MOVQ 0(DI),SI - MOVQ 80(DI),DX - MOVQ 8(DI),CX - MOVQ 88(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,0(DI) - MOVQ DX,80(DI) - MOVQ CX,8(DI) - MOVQ R8,88(DI) - MOVQ 16(DI),SI - MOVQ 96(DI),DX - MOVQ 24(DI),CX - MOVQ 104(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,16(DI) - MOVQ DX,96(DI) - MOVQ CX,24(DI) - MOVQ R8,104(DI) - MOVQ 32(DI),SI - MOVQ 112(DI),DX - MOVQ 40(DI),CX - MOVQ 120(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,32(DI) - MOVQ DX,112(DI) - MOVQ CX,40(DI) - MOVQ R8,120(DI) - MOVQ 48(DI),SI - MOVQ 128(DI),DX - MOVQ 56(DI),CX - MOVQ 136(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,48(DI) - MOVQ DX,128(DI) - MOVQ CX,56(DI) - MOVQ R8,136(DI) - MOVQ 64(DI),SI - MOVQ 144(DI),DX - MOVQ 72(DI),CX - MOVQ 152(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,64(DI) - MOVQ DX,144(DI) - MOVQ CX,72(DI) - MOVQ R8,152(DI) - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519.go b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 6918c47fc..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,841 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
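
The term package removed above exposes a single query, IsTty, implemented with the TIOCGETA/TCGETS ioctls on Unix and GetConsoleMode on Windows. A sketch of the kind of call site that uses it (the colour decision itself is hypothetical, not taken from this repo):

package main

import (
	"fmt"
	"os"

	"github.com/tendermint/log15/term"
)

func main() {
	// Only emit ANSI colour codes when stdout is really a terminal;
	// redirected output (files, pipes) gets plain text.
	if term.IsTty(os.Stdout.Fd()) {
		fmt.Println("\x1b[32mstdout is a terminal\x1b[0m")
	} else {
		fmt.Println("stdout is not a terminal")
	}
}
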
- -// We have a implementation in amd64 assembly so this code is only run on -// non-amd64 platforms. The amd64 assembly does not support gccgo. -// +build !amd64 gccgo appengine - -package curve25519 - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - var x fieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } - for i := range g { - g[i] ^= x[i] - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := load3(src[29:]) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). 
-// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go deleted file mode 100644 index 14b0ee87c..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/curve25519_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package curve25519 - -import ( - "fmt" - "testing" -) - -const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a" - -func TestBaseScalarMult(t *testing.T) { - var a, b [32]byte - in := &a - out := &b - a[0] = 1 - - for i := 0; i < 200; i++ { - ScalarBaseMult(out, in) - in, out = out, in - } - - result := fmt.Sprintf("%x", in[:]) - if result != expectedHex { - t.Errorf("incorrect result: got %s, want %s", result, expectedHex) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/doc.go b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/doc.go deleted file mode 100644 index f7db9c1ce..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package curve25519 provides an implementation of scalar multiplication on -// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html -package curve25519 - -// basePoint is the x coordinate of the generator of the curve. -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -// ScalarMult sets dst to the product in*base where dst and base are the x -// coordinates of group points and all values are in little-endian form. -func ScalarMult(dst, in, base *[32]byte) { - scalarMult(dst, in, base) -} - -// ScalarBaseMult sets dst to the product in*base where dst and base are the x -// coordinates of group points, base is the standard generator and all values -// are in little-endian form. -func ScalarBaseMult(dst, in *[32]byte) { - ScalarMult(dst, in, &basePoint) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/freeze_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/freeze_amd64.s deleted file mode 100644 index 37599fac0..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/freeze_amd64.s +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$96-8 - MOVQ inout+0(FP), DI - - MOVQ SP,R11 - MOVQ $31,CX - NOTQ CX - ANDQ CX,SP - ADDQ $32,SP - - MOVQ R11,0(SP) - MOVQ R12,8(SP) - MOVQ R13,16(SP) - MOVQ R14,24(SP) - MOVQ R15,32(SP) - MOVQ BX,40(SP) - MOVQ BP,48(SP) - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ ·REDMASK51(SB),AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - MOVQ 0(SP),R11 - MOVQ 8(SP),R12 - MOVQ 16(SP),R13 - MOVQ 24(SP),R14 - MOVQ 32(SP),R15 - MOVQ 40(SP),BX - MOVQ 48(SP),BP - MOVQ R11,SP - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s deleted file mode 100644 index 3949f9cfa..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s +++ /dev/null @@ -1,1398 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
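
doc.go above is the whole public surface of the vendored curve25519 package: ScalarBaseMult against the fixed base point (x = 9) and ScalarMult against an arbitrary point. A minimal Diffie-Hellman sketch using just those two calls; key handling here is illustrative and not how the p2p layer actually uses the package:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var alicePriv, bobPriv [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: each private scalar times the standard base point.
	// Clamping of the scalar happens inside the package.
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side multiplies its own scalar into the peer's public point;
	// both arrive at the same shared x coordinate.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println(aliceShared == bobShared) // true
}
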
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$384-8 - MOVQ inout+0(FP),DI - - MOVQ SP,R11 - MOVQ $31,CX - NOTQ CX - ANDQ CX,SP - ADDQ $32,SP - - MOVQ R11,0(SP) - MOVQ R12,8(SP) - MOVQ R13,16(SP) - MOVQ R14,24(SP) - MOVQ R15,32(SP) - MOVQ BX,40(SP) - MOVQ BP,48(SP) - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,56(SP) - MOVQ DX,64(SP) - MOVQ CX,72(SP) - MOVQ R8,80(SP) - MOVQ R9,88(SP) - MOVQ AX,96(SP) - MOVQ R10,104(SP) - MOVQ R11,112(SP) - MOVQ R12,120(SP) - MOVQ R13,128(SP) - MOVQ 96(SP),AX - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 96(SP),AX - SHLQ $1,AX - MULQ 104(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 96(SP),AX - SHLQ $1,AX - MULQ 112(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 96(SP),AX - SHLQ $1,AX - MULQ 120(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 96(SP),AX - SHLQ $1,AX - MULQ 128(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 104(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 104(SP),AX - SHLQ $1,AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(SP),AX - SHLQ $1,AX - MULQ 120(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 104(SP),DX - IMUL3Q $38,DX,AX - MULQ 128(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 112(SP),AX - MULQ 112(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 112(SP),DX - IMUL3Q $38,DX,AX - MULQ 120(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 112(SP),DX - IMUL3Q $38,DX,AX - MULQ 128(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 120(SP),DX - IMUL3Q $19,DX,AX - MULQ 120(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 120(SP),DX - IMUL3Q $38,DX,AX - MULQ 128(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 128(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,136(SP) - MOVQ R8,144(SP) - MOVQ R9,152(SP) - MOVQ AX,160(SP) - MOVQ R10,168(SP) - MOVQ 56(SP),AX - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 56(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 56(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 56(SP),AX - SHLQ $1,AX - MULQ 80(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 56(SP),AX - SHLQ $1,AX - MULQ 88(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 64(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 64(SP),AX - SHLQ $1,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 64(SP),AX - SHLQ $1,AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 72(SP),AX - MULQ 72(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 72(SP),DX - IMUL3Q 
$38,DX,AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 72(SP),DX - IMUL3Q $38,DX,AX - MULQ 88(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 80(SP),DX - IMUL3Q $19,DX,AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 80(SP),DX - IMUL3Q $38,DX,AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(SP),DX - IMUL3Q $19,DX,AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ AX,200(SP) - MOVQ R10,208(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 136(SP),SI - SUBQ 144(SP),DX - SUBQ 152(SP),CX - SUBQ 160(SP),R8 - SUBQ 168(SP),R9 - MOVQ SI,216(SP) - MOVQ DX,224(SP) - MOVQ CX,232(SP) - MOVQ R8,240(SP) - MOVQ R9,248(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,256(SP) - MOVQ DX,264(SP) - MOVQ CX,272(SP) - MOVQ R8,280(SP) - MOVQ R9,288(SP) - MOVQ AX,296(SP) - MOVQ R10,304(SP) - MOVQ R11,312(SP) - MOVQ R12,320(SP) - MOVQ R13,328(SP) - MOVQ 280(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,336(SP) - MULQ 112(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 288(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,344(SP) - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 96(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 104(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 256(SP),AX - MULQ 112(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 256(SP),AX - MULQ 120(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 256(SP),AX - MULQ 128(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 264(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 264(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 120(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 264(SP),DX - IMUL3Q $19,DX,AX - MULQ 128(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 272(SP),AX - MULQ 96(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 104(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 272(SP),AX - MULQ 112(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MULQ 120(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MULQ 128(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 280(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 336(SP),AX - MULQ 120(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 336(SP),AX - MULQ 128(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 344(SP),AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 344(SP),AX - MULQ 120(SP) 
- ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 344(SP),AX - MULQ 128(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,96(SP) - MOVQ R8,104(SP) - MOVQ R9,112(SP) - MOVQ AX,120(SP) - MOVQ R10,128(SP) - MOVQ 320(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,256(SP) - MULQ 72(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 328(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,264(SP) - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 296(SP),AX - MULQ 56(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 296(SP),AX - MULQ 64(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 296(SP),AX - MULQ 72(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 296(SP),AX - MULQ 80(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 296(SP),AX - MULQ 88(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 304(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 304(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 304(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 304(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 304(SP),DX - IMUL3Q $19,DX,AX - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 312(SP),AX - MULQ 56(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 312(SP),AX - MULQ 64(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 312(SP),AX - MULQ 72(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 312(SP),DX - IMUL3Q $19,DX,AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 312(SP),DX - IMUL3Q $19,DX,AX - MULQ 88(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 320(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 320(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 256(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 328(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 264(SP),AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 264(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 96(SP),SI - ADDQ 104(SP),R8 - ADDQ 112(SP),R9 - ADDQ 120(SP),AX - ADDQ 128(SP),R10 - SUBQ 96(SP),DX - SUBQ 104(SP),CX - SUBQ 112(SP),R11 - SUBQ 120(SP),R12 - SUBQ 128(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ 
$1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,56(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ 
AX,64(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 56(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 64(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 64(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 200(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,56(SP) - MULQ 152(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,64(SP) - MULQ 144(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(SP),AX - MULQ 136(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(SP),AX - MULQ 144(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 176(SP),AX - MULQ 152(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 176(SP),AX - MULQ 160(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 176(SP),AX - MULQ 168(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 184(SP),AX - MULQ 136(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(SP),AX - MULQ 144(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 184(SP),AX - MULQ 152(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(SP),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 184(SP),DX - IMUL3Q $19,DX,AX - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 192(SP),AX - MULQ 136(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(SP),AX - MULQ 144(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 192(SP),AX - MULQ 152(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 192(SP),DX - IMUL3Q $19,DX,AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 192(SP),DX - IMUL3Q $19,DX,AX - MULQ 168(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 136(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 200(SP),AX - MULQ 144(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 56(SP),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 136(SP) - ADDQ AX,R14 - ADCQ 
DX,R15 - MOVQ 64(SP),AX - MULQ 152(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 64(SP),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 216(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 224(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 232(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 248(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 136(SP),SI - ADDQ 144(SP),CX - ADDQ 152(SP),R8 - ADDQ 160(SP),R9 - ADDQ 168(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,56(SP) - MULQ 232(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,64(SP) - MULQ 224(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 216(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 224(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 232(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 240(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 248(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 216(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 224(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 232(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 240(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 248(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 216(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 224(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 232(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 240(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 248(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 216(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 224(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),AX - MULQ 240(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 56(SP),AX - MULQ 248(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 216(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 64(SP),AX - MULQ 232(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),AX - MULQ 240(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 64(SP),AX - MULQ 248(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ ·REDMASK51(SB),DX - SHLQ $13,CX:SI - ANDQ DX,SI - SHLQ $13,R9:R8 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R11:R10 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ 
$51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - MOVQ 0(SP),R11 - MOVQ 8(SP),R12 - MOVQ 16(SP),R13 - MOVQ 24(SP),R14 - MOVQ 32(SP),R15 - MOVQ 40(SP),BX - MOVQ 48(SP),BP - MOVQ R11,SP - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mont25519_amd64.go b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mont25519_amd64.go deleted file mode 100644 index 5822bd533..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mont25519_amd64.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. -func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. 
-func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. -func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mul_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mul_amd64.s deleted file mode 100644 index e48d183ee..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/mul_amd64.s +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
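
The invert routine above computes x^-1 as x^(p-2) = x^(2^255 - 21) mod p via a fixed square-and-multiply chain, relying on Fermat's little theorem with p = 2^255 - 19. A small cross-check of that identity with math/big, deliberately ignoring the 51-bit limb representation the assembly works in:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	x := big.NewInt(123456789)

	// Fermat's little theorem: x^(p-2) is x^-1 mod p for x not congruent to 0.
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

	// x * x^-1 mod p should be exactly 1.
	check := new(big.Int).Mul(x, inv)
	fmt.Println(check.Mod(check, p)) // 1
}
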
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$128-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ SP,R11 - MOVQ $31,CX - NOTQ CX - ANDQ CX,SP - ADDQ $32,SP - - MOVQ R11,0(SP) - MOVQ R12,8(SP) - MOVQ R13,16(SP) - MOVQ R14,24(SP) - MOVQ R15,32(SP) - MOVQ BX,40(SP) - MOVQ BP,48(SP) - MOVQ DI,56(SP) - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,64(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,72(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 64(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 64(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 72(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 72(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ ·REDMASK51(SB),SI - SHLQ $13,R9:R8 - ANDQ SI,R8 - SHLQ $13,R11:R10 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R13:R12 - ANDQ SI,R12 - ADDQ R11,R12 - SHLQ $13,R15:R14 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BP:BX - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - MOVQ 0(SP),R11 - MOVQ 8(SP),R12 - MOVQ 16(SP),R13 - MOVQ 24(SP),R14 - MOVQ 32(SP),R15 - MOVQ 40(SP),BX - MOVQ 48(SP),BP - MOVQ R11,SP - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/square_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/curve25519/square_amd64.s deleted file mode 100644 index 78d1a50dd..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/curve25519/square_amd64.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$96-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ SP,R11 - MOVQ $31,CX - NOTQ CX - ANDQ CX,SP - ADDQ $32, SP - - MOVQ R11,0(SP) - MOVQ R12,8(SP) - MOVQ R13,16(SP) - MOVQ R14,24(SP) - MOVQ R15,32(SP) - MOVQ BX,40(SP) - MOVQ BP,48(SP) - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ ·REDMASK51(SB),SI - SHLQ $13,R8:CX - ANDQ SI,CX - SHLQ $13,R10:R9 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R12:R11 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R14:R13 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,BX:R15 - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - MOVQ 0(SP),R11 - MOVQ 8(SP),R12 - MOVQ 16(SP),R13 - MOVQ 24(SP),R14 - MOVQ 32(SP),R15 - MOVQ 40(SP),BX - MOVQ 48(SP),BP - MOVQ R11,SP - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box.go deleted file mode 100644 index ffe00baf5..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package box authenticates and encrypts messages using public-key cryptography. - -Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate -messages. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -This package is interoperable with NaCl: http://nacl.cr.yp.to/box.html. -*/ -package box - -import ( - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/nacl/secretbox" - "golang.org/x/crypto/salsa20/salsa" - "io" -) - -// Overhead is the number of bytes of overhead when boxing a message. 
-const Overhead = secretbox.Overhead - -// GenerateKey generates a new public/private key pair suitable for use with -// Seal and Open. -func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { - publicKey = new([32]byte) - privateKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:]) - if err != nil { - publicKey = nil - privateKey = nil - return - } - - curve25519.ScalarBaseMult(publicKey, privateKey) - return -} - -var zeros [16]byte - -// Precompute calculates the shared key between peersPublicKey and privateKey -// and writes it to sharedKey. The shared key can be used with -// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing -// when using the same pair of keys repeatedly. -func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { - curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) - salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// will be Overhead bytes longer than the original and must not overlap. The -// nonce must be unique for each distinct message for a given pair of keys. -func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Seal(out, message, nonce, &sharedKey) -} - -// SealAfterPrecomputation performs the same actions as Seal, but takes a -// shared key as generated by Precompute. -func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { - return secretbox.Seal(out, message, nonce, sharedKey) -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. -func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { - var sharedKey [32]byte - Precompute(&sharedKey, peersPublicKey, privateKey) - return secretbox.Open(out, box, nonce, &sharedKey) -} - -// OpenAfterPrecomputation performs the same actions as Open, but takes a -// shared key as generated by Precompute. -func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { - return secretbox.Open(out, box, nonce, sharedKey) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box_test.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box_test.go deleted file mode 100644 index 481ade28a..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/box/box_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package box - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "testing" - - "golang.org/x/crypto/curve25519" -) - -func TestSealOpen(t *testing.T) { - publicKey1, privateKey1, _ := GenerateKey(rand.Reader) - publicKey2, privateKey2, _ := GenerateKey(rand.Reader) - - if *privateKey1 == *privateKey2 { - t.Fatalf("private keys are equal!") - } - if *publicKey1 == *publicKey2 { - t.Fatalf("public keys are equal!") - } - message := []byte("test message") - var nonce [24]byte - - box := Seal(nil, message, &nonce, publicKey1, privateKey2) - opened, ok := Open(nil, box, &nonce, publicKey2, privateKey1) - if !ok { - t.Fatalf("failed to open box") - } - - if !bytes.Equal(opened, message) { - t.Fatalf("got %x, want %x", opened, message) - } - - for i := range box { - box[i] ^= 0x40 - _, ok := Open(nil, box, &nonce, publicKey2, privateKey1) - if ok { - t.Fatalf("opened box with byte %d corrupted", i) - } - box[i] ^= 0x40 - } -} - -func TestBox(t *testing.T) { - var privateKey1, privateKey2 [32]byte - for i := range privateKey1[:] { - privateKey1[i] = 1 - } - for i := range privateKey2[:] { - privateKey2[i] = 2 - } - - var publicKey1 [32]byte - curve25519.ScalarBaseMult(&publicKey1, &privateKey1) - var message [64]byte - for i := range message[:] { - message[i] = 3 - } - - var nonce [24]byte - for i := range nonce[:] { - nonce[i] = 4 - } - - box := Seal(nil, message[:], &nonce, &publicKey1, &privateKey2) - - // expected was generated using the C implementation of NaCl. - expected, _ := hex.DecodeString("78ea30b19d2341ebbdba54180f821eec265cf86312549bea8a37652a8bb94f07b78a73ed1708085e6ddd0e943bbdeb8755079a37eb31d86163ce241164a47629c0539f330b4914cd135b3855bc2a2dfc") - - if !bytes.Equal(box, expected) { - t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go deleted file mode 100644 index ed46ba2f2..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package secretbox encrypts and authenticates small messages. - -Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with -secret-key cryptography. The length of messages is not hidden. - -It is the caller's responsibility to ensure the uniqueness of nonces—for -example, by using nonce 1 for the first message, nonce 2 for the second -message, etc. Nonces are long enough that randomly generated nonces have -negligible risk of collision. - -This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html. -*/ -package secretbox - -import ( - "golang.org/x/crypto/poly1305" - "golang.org/x/crypto/salsa20/salsa" -) - -// Overhead is the number of bytes of overhead when boxing a message. -const Overhead = poly1305.TagSize - -// setup produces a sub-key and Salsa20 counter given a nonce and key. -func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { - // We use XSalsa20 for encryption so first we need to generate a - // key and nonce with HSalsa20. - var hNonce [16]byte - copy(hNonce[:], nonce[:]) - salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) - - // The final 8 bytes of the original nonce form the new nonce. 
- copy(counter[:], nonce[16:]) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - -// Seal appends an encrypted and authenticated copy of message to out, which -// must not overlap message. The key and nonce pair must be unique for each -// distinct message and the output will be Overhead bytes longer than message. -func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - - ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) - - // We XOR up to 32 bytes of message with the keystream generated from - // the first block. - firstMessageBlock := message - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - - tagOut := out - out = out[poly1305.TagSize:] - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - message = message[len(firstMessageBlock):] - ciphertext := out - out = out[len(firstMessageBlock):] - - // Now encrypt the rest. - counter[8] = 1 - salsa.XORKeyStream(out, message, &counter, &subKey) - - var tag [poly1305.TagSize]byte - poly1305.Sum(&tag, ciphertext, &poly1305Key) - copy(tagOut, tag[:]) - - return ret -} - -// Open authenticates and decrypts a box produced by Seal and appends the -// message to out, which must not overlap box. The output will be Overhead -// bytes smaller than box. -func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { - if len(box) < Overhead { - return nil, false - } - - var subKey [32]byte - var counter [16]byte - setup(&subKey, &counter, nonce, key) - - // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since - // Salsa20 works with 64-byte blocks, we also generate 32 bytes of - // keystream as a side effect. - var firstBlock [64]byte - salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) - - var poly1305Key [32]byte - copy(poly1305Key[:], firstBlock[:]) - var tag [poly1305.TagSize]byte - copy(tag[:], box) - - if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { - return nil, false - } - - ret, out := sliceForAppend(out, len(box)-Overhead) - - // We XOR up to 32 bytes of box with the keystream generated from - // the first block. - box = box[Overhead:] - firstMessageBlock := box - if len(firstMessageBlock) > 32 { - firstMessageBlock = firstMessageBlock[:32] - } - for i, x := range firstMessageBlock { - out[i] = firstBlock[32+i] ^ x - } - - box = box[len(firstMessageBlock):] - out = out[len(firstMessageBlock):] - - // Now decrypt the rest. 
- counter[8] = 1 - salsa.XORKeyStream(out, box, &counter, &subKey) - - return ret, true -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go b/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go deleted file mode 100644 index 664dc1521..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package secretbox - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "testing" -) - -func TestSealOpen(t *testing.T) { - var key [32]byte - var nonce [24]byte - - rand.Reader.Read(key[:]) - rand.Reader.Read(nonce[:]) - - var box, opened []byte - - for msgLen := 0; msgLen < 128; msgLen += 17 { - message := make([]byte, msgLen) - rand.Reader.Read(message) - - box = Seal(box[:0], message, &nonce, &key) - var ok bool - opened, ok = Open(opened[:0], box, &nonce, &key) - if !ok { - t.Errorf("%d: failed to open box", msgLen) - continue - } - - if !bytes.Equal(opened, message) { - t.Errorf("%d: got %x, expected %x", msgLen, opened, message) - continue - } - } - - for i := range box { - box[i] ^= 0x20 - _, ok := Open(opened[:0], box, &nonce, &key) - if ok { - t.Errorf("box was opened after corrupting byte %d", i) - } - box[i] ^= 0x20 - } -} - -func TestSecretBox(t *testing.T) { - var key [32]byte - var nonce [24]byte - var message [64]byte - - for i := range key[:] { - key[i] = 1 - } - for i := range nonce[:] { - nonce[i] = 2 - } - for i := range message[:] { - message[i] = 3 - } - - box := Seal(nil, message[:], &nonce, &key) - // expected was generated using the C implementation of NaCl. - expected, _ := hex.DecodeString("8442bc313f4626f1359e3b50122b6ce6fe66ddfe7d39d14e637eb4fd5b45beadab55198df6ab5368439792a23c87db70acb6156dc5ef957ac04f6276cf6093b84be77ff0849cc33e34b7254d5a8f65ad") - - if !bytes.Equal(box, expected) { - t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected) - } -} - -func TestAppend(t *testing.T) { - var key [32]byte - var nonce [24]byte - var message [8]byte - - out := make([]byte, 4) - box := Seal(out, message[:], &nonce, &key) - if !bytes.Equal(box[:4], out[:4]) { - t.Fatalf("Seal didn't correctly append") - } - - out = make([]byte, 4, 100) - box = Seal(out, message[:], &nonce, &key) - if !bytes.Equal(box[:4], out[:4]) { - t.Fatalf("Seal didn't correctly append with sufficient capacity.") - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s deleted file mode 100644 index 8e861f337..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/const_amd64.s +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -DATA ·SCALE(SB)/8, $0x37F4000000000000 -GLOBL ·SCALE(SB), 8, $8 -DATA ·TWO32(SB)/8, $0x41F0000000000000 -GLOBL ·TWO32(SB), 8, $8 -DATA ·TWO64(SB)/8, $0x43F0000000000000 -GLOBL ·TWO64(SB), 8, $8 -DATA ·TWO96(SB)/8, $0x45F0000000000000 -GLOBL ·TWO96(SB), 8, $8 -DATA ·ALPHA32(SB)/8, $0x45E8000000000000 -GLOBL ·ALPHA32(SB), 8, $8 -DATA ·ALPHA64(SB)/8, $0x47E8000000000000 -GLOBL ·ALPHA64(SB), 8, $8 -DATA ·ALPHA96(SB)/8, $0x49E8000000000000 -GLOBL ·ALPHA96(SB), 8, $8 -DATA ·ALPHA130(SB)/8, $0x4C08000000000000 -GLOBL ·ALPHA130(SB), 8, $8 -DATA ·DOFFSET0(SB)/8, $0x4330000000000000 -GLOBL ·DOFFSET0(SB), 8, $8 -DATA ·DOFFSET1(SB)/8, $0x4530000000000000 -GLOBL ·DOFFSET1(SB), 8, $8 -DATA ·DOFFSET2(SB)/8, $0x4730000000000000 -GLOBL ·DOFFSET2(SB), 8, $8 -DATA ·DOFFSET3(SB)/8, $0x4930000000000000 -GLOBL ·DOFFSET3(SB), 8, $8 -DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000 -GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8 -DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB -GLOBL ·HOFFSET0(SB), 8, $8 -DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE -GLOBL ·HOFFSET1(SB), 8, $8 -DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE -GLOBL ·HOFFSET2(SB), 8, $8 -DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE -GLOBL ·HOFFSET3(SB), 8, $8 -DATA ·ROUNDING(SB)/2, $0x137f -GLOBL ·ROUNDING(SB), 8, $2 diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index 2270d2b38..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf. - -Poly1305 is a fast, one-time authentication function. It is infeasible for an -attacker to generate an authenticator for a message without the key. However, a -key must only be used for a single message. Authenticating two different -messages with the same key allows an attacker to forge authenticators for other -messages with the same key. - -Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -used with a fixed key in order to generate one-time keys from an nonce. -However, in this package AES isn't used and the one-time key is specified -directly. -*/ -package poly1305 - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Verify returns true if mac is a valid authenticator for m with the given -// key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s deleted file mode 100644 index f8d4ee928..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_amd64.s +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305(SB),0,$224-32 - MOVQ out+0(FP),DI - MOVQ m+8(FP),SI - MOVQ mlen+16(FP),DX - MOVQ key+24(FP),CX - - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP - - MOVQ R11,32(SP) - MOVQ R12,40(SP) - MOVQ R13,48(SP) - MOVQ R14,56(SP) - MOVQ R15,64(SP) - MOVQ BX,72(SP) - MOVQ BP,80(SP) - FLDCW ·ROUNDING(SB) - MOVL 0(CX),R8 - MOVL 4(CX),R9 - MOVL 8(CX),AX - MOVL 12(CX),R10 - MOVQ DI,88(SP) - MOVQ CX,96(SP) - MOVL $0X43300000,108(SP) - MOVL $0X45300000,116(SP) - MOVL $0X47300000,124(SP) - MOVL $0X49300000,132(SP) - ANDL $0X0FFFFFFF,R8 - ANDL $0X0FFFFFFC,R9 - ANDL $0X0FFFFFFC,AX - ANDL $0X0FFFFFFC,R10 - MOVL R8,104(SP) - MOVL R9,112(SP) - MOVL AX,120(SP) - MOVL R10,128(SP) - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 136(SP) - FXCHD F0, F1 - FMOVD F0, 144(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 152(SP) - FMOVD F0, 160(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 168(SP) - FMOVD F0, 176(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 184(SP) - FLDZ - FLDZ - FLDZ - FLDZ - CMPQ DX,$16 - JB ADDATMOST15BYTES - INITIALATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - CMPQ DX,$16 - JB MULTIPLYADDATMOST15BYTES - MULTIPLYADDATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FXCHD F0, F2 - FADDDP F0,F1 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD ·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F6 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 168(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FXCHD F0, F3 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FXCHD F0, F1 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - CMPQ DX,$16 - FXCHD F0, F2 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FADDDP F0,F1 - FXCHD F0, F1 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FADDDP 
F0,F1 - FXCHD F0, F3 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FADDDP F0,F1 - FXCHD F0, F2 - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FADDDP F0,F1 - JAE MULTIPLYADDATLEAST16BYTES - MULTIPLYADDATMOST15BYTES: - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F5 - FMOVD ·ALPHA96(SB), F0 - FADDD F7,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F7 - FXCHD F0, F7 - FADDDP F0,F1 - FXCHD F0, F5 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F1,F0 - FMOVD 160(SP), F0 - FMULD F2,F0 - FMOVD 144(SP), F0 - FMULD F3,F0 - FMOVD 136(SP), F0 - FMULDP F0,F4 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - ADDATMOST15BYTES: - CMPQ DX,$0 - JE NOMOREBYTES - MOVL $0,0(SP) - MOVL $0, 4 (SP) - MOVL $0, 8 (SP) - MOVL $0, 12 (SP) - LEAQ 0(SP),DI - MOVQ DX,CX - REP; MOVSB - MOVB $1,0(DI) - MOVL 12 (SP),DI - MOVL 8 (SP),SI - MOVL 4 (SP),DX - MOVL 0(SP),CX - MOVL DI,128(SP) - MOVL SI,120(SP) - MOVL DX,112(SP) - MOVL CX,104(SP) - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD ·ALPHA130(SB), F0 - FADDD F3,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F3 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F6 - FMOVD ·ALPHA96(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F5 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F6,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F6,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F6,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F6 - FXCHD F0, F5 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F2,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F2,F0 - FADDDP F0,F5 - FMOVD 168(SP), F0 - FMULD F2,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F2 - FXCHD F0, F1 - FADDDP F0,F3 - FXCHD F0, F3 - FXCHD F0, F2 - NOMOREBYTES: - MOVL $0,R10 - FMOVD ·ALPHA130(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F4 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD 
·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FXCHD F0, F6 - FSUBD F6,F0 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F1 - FXCHD F0, F2 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F3 - FADDD ·HOFFSET0(SB), F0 - FXCHD F0, F3 - FADDD ·HOFFSET1(SB), F0 - FXCHD F0, F1 - FADDD ·HOFFSET2(SB), F0 - FXCHD F0, F2 - FADDD ·HOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 104(SP) - FMOVDP F0, 112(SP) - FMOVDP F0, 120(SP) - FMOVDP F0, 128(SP) - MOVL 108(SP),DI - ANDL $63,DI - MOVL 116(SP),SI - ANDL $63,SI - MOVL 124(SP),DX - ANDL $63,DX - MOVL 132(SP),CX - ANDL $63,CX - MOVL 112(SP),R8 - ADDL DI,R8 - MOVQ R8,112(SP) - MOVL 120(SP),DI - ADCL SI,DI - MOVQ DI,120(SP) - MOVL 128(SP),DI - ADCL DX,DI - MOVQ DI,128(SP) - MOVL R10,DI - ADCL CX,DI - MOVQ DI,136(SP) - MOVQ $5,DI - MOVL 104(SP),SI - ADDL SI,DI - MOVQ DI,104(SP) - MOVL R10,DI - MOVQ 112(SP),DX - ADCL DX,DI - MOVQ DI,112(SP) - MOVL R10,DI - MOVQ 120(SP),CX - ADCL CX,DI - MOVQ DI,120(SP) - MOVL R10,DI - MOVQ 128(SP),R8 - ADCL R8,DI - MOVQ DI,128(SP) - MOVQ $0XFFFFFFFC,DI - MOVQ 136(SP),R9 - ADCL R9,DI - SARL $16,DI - MOVQ DI,R9 - XORL $0XFFFFFFFF,R9 - ANDQ DI,SI - MOVQ 104(SP),AX - ANDQ R9,AX - ORQ AX,SI - ANDQ DI,DX - MOVQ 112(SP),AX - ANDQ R9,AX - ORQ AX,DX - ANDQ DI,CX - MOVQ 120(SP),AX - ANDQ R9,AX - ORQ AX,CX - ANDQ DI,R8 - MOVQ 128(SP),DI - ANDQ R9,DI - ORQ DI,R8 - MOVQ 88(SP),DI - MOVQ 96(SP),R9 - ADDL 16(R9),SI - ADCL 20(R9),DX - ADCL 24(R9),CX - ADCL 28(R9),R8 - MOVL SI,0(DI) - MOVL DX,4(DI) - MOVL CX,8(DI) - MOVL R8,12(DI) - MOVQ 32(SP),R11 - MOVQ 40(SP),R12 - MOVQ 48(SP),R13 - MOVQ 56(SP),R14 - MOVQ 64(SP),R15 - MOVQ 72(SP),BX - MOVQ 80(SP),BP - MOVQ R11,SP - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_arm.s b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_arm.s deleted file mode 100644 index c9ceaeb8d..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_arm.s +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 5a from the public -// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. - -// +build arm,!gccgo,!appengine - -DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff -DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 -DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff -DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff -DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff -GLOBL poly1305_init_constants_armv6<>(SB), 8, $20 - -// Warning: the linker may use R11 to synthesize certain instructions. Please -// take care and verify that no synthetic instructions use it. 
- -TEXT poly1305_init_ext_armv6<>(SB),4,$-4 - MOVM.DB.W [R4-R11], (R13) - MOVM.IA.W (R1), [R2-R5] - MOVW $poly1305_init_constants_armv6<>(SB), R7 - MOVW R2, R8 - MOVW R2>>26, R9 - MOVW R3>>20, g - MOVW R4>>14, R11 - MOVW R5>>8, R12 - ORR R3<<6, R9, R9 - ORR R4<<12, g, g - ORR R5<<18, R11, R11 - MOVM.IA (R7), [R2-R6] - AND R8, R2, R2 - AND R9, R3, R3 - AND g, R4, R4 - AND R11, R5, R5 - AND R12, R6, R6 - MOVM.IA.W [R2-R6], (R0) - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - MOVM.IA.W [R2-R6], (R0) - MOVM.IA.W (R1), [R2-R5] - MOVM.IA [R2-R6], (R0) - MOVM.IA.W (R13), [R4-R11] - RET - -TEXT poly1305_blocks_armv6<>(SB),4,$-4 - MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13) - SUB $128, R13 - MOVW R0, 36(R13) - MOVW R1, 40(R13) - MOVW R2, 44(R13) - MOVW R1, R14 - MOVW R2, R12 - MOVW 56(R0), R8 - WORD $0xe1180008 // TST R8, R8 not working see issue 5921 - EOR R6, R6, R6 - MOVW.EQ $(1<<24), R6 - MOVW R6, 32(R13) - ADD $64, R13, g - MOVM.IA (R0), [R0-R9] - MOVM.IA [R0-R4], (g) - CMP $16, R12 - BLO poly1305_blocks_armv6_done -poly1305_blocks_armv6_mainloop: - MOVM.IA.W (R14), [R0-R3] - MOVW R0>>26, g - MOVW R1>>20, R11 - MOVW R2>>14, R12 - MOVW R14, 40(R13) - MOVW R3>>8, R4 - ORR R1<<6, g, g - ORR R2<<12, R11, R11 - ORR R3<<18, R12, R12 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, g, g - MOVW 32(R13), R3 - BIC $0xfc000000, R11, R11 - BIC $0xfc000000, R12, R12 - ADD R0, R5, R5 - ADD g, R6, R6 - ORR R3, R4, R4 - ADD R11, R7, R7 - ADD $64, R13, R14 - ADD R12, R8, R8 - ADD R4, R9, R9 - MOVM.IA (R14), [R0-R4] - MULLU R4, R5, (R11, g) - MULLU R3, R5, (R14, R12) - MULALU R3, R6, (R11, g) - MULALU R2, R6, (R14, R12) - MULALU R2, R7, (R11, g) - MULALU R1, R7, (R14, R12) - ADD R4<<2, R4, R4 - ADD R3<<2, R3, R3 - MULALU R1, R8, (R11, g) - MULALU R0, R8, (R14, R12) - MULALU R0, R9, (R11, g) - MULALU R4, R9, (R14, R12) - MOVW g, 24(R13) - MOVW R11, 28(R13) - MOVW R12, 16(R13) - MOVW R14, 20(R13) - MULLU R2, R5, (R11, g) - MULLU R1, R5, (R14, R12) - MULALU R1, R6, (R11, g) - MULALU R0, R6, (R14, R12) - MULALU R0, R7, (R11, g) - MULALU R4, R7, (R14, R12) - ADD R2<<2, R2, R2 - ADD R1<<2, R1, R1 - MULALU R4, R8, (R11, g) - MULALU R3, R8, (R14, R12) - MULALU R3, R9, (R11, g) - MULALU R2, R9, (R14, R12) - MOVW g, 8(R13) - MOVW R11, 12(R13) - MOVW R12, 0(R13) - MOVW R14, w+4(SP) - MULLU R0, R5, (R11, g) - MULALU R4, R6, (R11, g) - MULALU R3, R7, (R11, g) - MULALU R2, R8, (R11, g) - MULALU R1, R9, (R11, g) - MOVM.IA (R13), [R0-R7] - MOVW g>>26, R12 - MOVW R4>>26, R14 - ORR R11<<6, R12, R12 - ORR R5<<6, R14, R14 - BIC $0xfc000000, g, g - BIC $0xfc000000, R4, R4 - ADD.S R12, R0, R0 - ADC $0, R1, R1 - ADD.S R14, R6, R6 - ADC $0, R7, R7 - MOVW R0>>26, R12 - MOVW R6>>26, R14 - ORR R1<<6, R12, R12 - ORR R7<<6, R14, R14 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, R6, R6 - ADD R14<<2, R14, R14 - ADD.S R12, R2, R2 - ADC $0, R3, R3 - ADD R14, g, g - MOVW R2>>26, R12 - MOVW g>>26, R14 - ORR R3<<6, R12, R12 - BIC $0xfc000000, g, R5 - BIC $0xfc000000, R2, R7 - ADD R12, R4, R4 - ADD R14, R0, R0 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R8 - ADD R12, R6, R9 - MOVW w+44(SP), R12 - MOVW w+40(SP), R14 - MOVW R0, R6 - CMP $32, R12 - SUB $16, R12, R12 - MOVW R12, 44(R13) - BHS poly1305_blocks_armv6_mainloop -poly1305_blocks_armv6_done: - MOVW 36(R13), R12 - MOVW R5, 20(R12) - MOVW R6, 24(R12) - MOVW R7, 28(R12) - MOVW R8, 32(R12) - MOVW R9, 36(R12) - ADD $128, R13, R13 - MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14] - RET - -TEXT poly1305_finish_ext_armv6<>(SB),4,$-4 - MOVM.DB.W [R4, 
R5, R6, R7, R8, R9, g, R11, R14], (R13) - SUB $16, R13, R13 - MOVW R0, R5 - MOVW R1, R6 - MOVW R2, R7 - MOVW R3, R8 - AND.S R2, R2, R2 - BEQ poly1305_finish_ext_armv6_noremaining - EOR R0, R0 - MOVW R13, R9 - MOVW R0, 0(R13) - MOVW R0, 4(R13) - MOVW R0, 8(R13) - MOVW R0, 12(R13) - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8 - MOVM.IA.W (R1), [g-R11] - MOVM.IA.W [g-R11], (R9) -poly1305_finish_ext_armv6_skip8: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4 - MOVW.P 4(R1), g - MOVW.P g, 4(R9) -poly1305_finish_ext_armv6_skip4: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHU.P 2(R1), g - MOVH.P g, 2(R9) -poly1305_finish_ext_armv6_skip2: - WORD $0xe3120001 // TST $1, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip1 - MOVBU.P 1(R1), g - MOVBU.P g, 1(R9) -poly1305_finish_ext_armv6_skip1: - MOVW $1, R11 - MOVBU R11, 0(R9) - MOVW R11, 56(R5) - MOVW R5, R0 - MOVW R13, R1 - MOVW $16, R2 - BL poly1305_blocks_armv6<>(SB) -poly1305_finish_ext_armv6_noremaining: - MOVW 20(R5), R0 - MOVW 24(R5), R1 - MOVW 28(R5), R2 - MOVW 32(R5), R3 - MOVW 36(R5), R4 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R4 - ADD R12<<2, R12, R12 - ADD R12, R0, R0 - MOVW R0>>26, R12 - BIC $0xfc000000, R0, R0 - ADD R12, R1, R1 - MOVW R1>>26, R12 - BIC $0xfc000000, R1, R1 - ADD R12, R2, R2 - MOVW R2>>26, R12 - BIC $0xfc000000, R2, R2 - ADD R12, R3, R3 - MOVW R3>>26, R12 - BIC $0xfc000000, R3, R3 - ADD R12, R4, R4 - ADD $5, R0, R6 - MOVW R6>>26, R12 - BIC $0xfc000000, R6, R6 - ADD R12, R1, R7 - MOVW R7>>26, R12 - BIC $0xfc000000, R7, R7 - ADD R12, R2, g - MOVW g>>26, R12 - BIC $0xfc000000, g, g - ADD R12, R3, R11 - MOVW $-(1<<26), R12 - ADD R11>>26, R12, R12 - BIC $0xfc000000, R11, R11 - ADD R12, R4, R14 - MOVW R14>>31, R12 - SUB $1, R12 - AND R12, R6, R6 - AND R12, R7, R7 - AND R12, g, g - AND R12, R11, R11 - AND R12, R14, R14 - MVN R12, R12 - AND R12, R0, R0 - AND R12, R1, R1 - AND R12, R2, R2 - AND R12, R3, R3 - AND R12, R4, R4 - ORR R6, R0, R0 - ORR R7, R1, R1 - ORR g, R2, R2 - ORR R11, R3, R3 - ORR R14, R4, R4 - ORR R1<<26, R0, R0 - MOVW R1>>6, R1 - ORR R2<<20, R1, R1 - MOVW R2>>12, R2 - ORR R3<<14, R2, R2 - MOVW R3>>18, R3 - ORR R4<<8, R3, R3 - MOVW 40(R5), R6 - MOVW 44(R5), R7 - MOVW 48(R5), g - MOVW 52(R5), R11 - ADD.S R6, R0, R0 - ADC.S R7, R1, R1 - ADC.S g, R2, R2 - ADC.S R11, R3, R3 - MOVM.IA [R0-R3], (R8) - MOVW R5, R12 - EOR R0, R0, R0 - EOR R1, R1, R1 - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - EOR R7, R7, R7 - MOVM.IA.W [R0-R7], (R12) - MOVM.IA [R0-R7], (R12) - ADD $16, R13, R13 - MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14] - RET - -// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key) -TEXT ·poly1305_auth_armv6(SB),0,$280-16 - MOVW out+0(FP), R4 - MOVW m+4(FP), R5 - MOVW mlen+8(FP), R6 - MOVW key+12(FP), R7 - - MOVW R13, R8 - BIC $63, R13 - SUB $64, R13, R13 - MOVW R13, R0 - MOVW R7, R1 - BL poly1305_init_ext_armv6<>(SB) - BIC.S $15, R6, R2 - BEQ poly1305_auth_armv6_noblocks - MOVW R13, R0 - MOVW R5, R1 - ADD R2, R5, R5 - SUB R2, R6, R6 - BL poly1305_blocks_armv6<>(SB) -poly1305_auth_armv6_noblocks: - MOVW R13, R0 - MOVW R5, R1 - MOVW R6, R2 - MOVW R4, R3 - BL poly1305_finish_ext_armv6<>(SB) - MOVW R8, R13 - RET diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go deleted 
file mode 100644 index 2c6d1bc98..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/poly1305_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package poly1305 - -import ( - "bytes" - "testing" -) - -var testData = []struct { - in, k, correct []byte -}{ - { - []byte("Hello world!"), - []byte("this is 32-byte key for Poly1305"), - []byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0}, - }, - { - make([]byte, 32), - []byte("this is 32-byte key for Poly1305"), - []byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07}, - }, - { - make([]byte, 2007), - []byte("this is 32-byte key for Poly1305"), - []byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa}, - }, - { - make([]byte, 2007), - make([]byte, 32), - make([]byte, 16), - }, -} - -func TestSum(t *testing.T) { - var out [16]byte - var key [32]byte - - for i, v := range testData { - copy(key[:], v.k) - Sum(&out, v.in, &key) - if !bytes.Equal(out[:], v.correct) { - t.Errorf("%d: expected %x, got %x", i, v.correct, out[:]) - } - } -} - -func Benchmark1K(b *testing.B) { - b.StopTimer() - var out [16]byte - var key [32]byte - in := make([]byte, 1024) - b.SetBytes(int64(len(in))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&out, in, &key) - } -} - -func Benchmark64(b *testing.B) { - b.StopTimer() - var out [16]byte - var key [32]byte - in := make([]byte, 64) - b.SetBytes(int64(len(in))) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sum(&out, in, &key) - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index 6775c703f..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!gccgo,!appengine - -package poly1305 - -// This function is implemented in poly1305_amd64.s - -//go:noescape - -func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305(out, mPtr, uint64(len(m)), key) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_arm.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_arm.go deleted file mode 100644 index 50b979c24..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_arm.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine - -package poly1305 - -// This function is implemented in poly1305_arm.s - -//go:noescape - -func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. 
Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go b/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go deleted file mode 100644 index 0b24fc78b..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/poly1305/sum_ref.go +++ /dev/null @@ -1,1531 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm gccgo appengine - -package poly1305 - -// Based on original, public domain implementation from NaCl by D. J. -// Bernstein. - -import "math" - -const ( - alpham80 = 0.00000000558793544769287109375 - alpham48 = 24.0 - alpham16 = 103079215104.0 - alpha0 = 6755399441055744.0 - alpha18 = 1770887431076116955136.0 - alpha32 = 29014219670751100192948224.0 - alpha50 = 7605903601369376408980219232256.0 - alpha64 = 124615124604835863084731911901282304.0 - alpha82 = 32667107224410092492483962313449748299776.0 - alpha96 = 535217884764734955396857238543560676143529984.0 - alpha112 = 35076039295941670036888435985190792471742381031424.0 - alpha130 = 9194973245195333150150082162901855101712434733101613056.0 - scale = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125 - offset0 = 6755408030990331.0 - offset1 = 29014256564239239022116864.0 - offset2 = 124615283061160854719918951570079744.0 - offset3 = 535219245894202480694386063513315216128475136.0 -) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - r := key - s := key[16:] - var ( - y7 float64 - y6 float64 - y1 float64 - y0 float64 - y5 float64 - y4 float64 - x7 float64 - x6 float64 - x1 float64 - x0 float64 - y3 float64 - y2 float64 - x5 float64 - r3lowx0 float64 - x4 float64 - r0lowx6 float64 - x3 float64 - r3highx0 float64 - x2 float64 - r0highx6 float64 - r0lowx0 float64 - sr1lowx6 float64 - r0highx0 float64 - sr1highx6 float64 - sr3low float64 - r1lowx0 float64 - sr2lowx6 float64 - r1highx0 float64 - sr2highx6 float64 - r2lowx0 float64 - sr3lowx6 float64 - r2highx0 float64 - sr3highx6 float64 - r1highx4 float64 - r1lowx4 float64 - r0highx4 float64 - r0lowx4 float64 - sr3highx4 float64 - sr3lowx4 float64 - sr2highx4 float64 - sr2lowx4 float64 - r0lowx2 float64 - r0highx2 float64 - r1lowx2 float64 - r1highx2 float64 - r2lowx2 float64 - r2highx2 float64 - sr3lowx2 float64 - sr3highx2 float64 - z0 float64 - z1 float64 - z2 float64 - z3 float64 - m0 int64 - m1 int64 - m2 int64 - m3 int64 - m00 uint32 - m01 uint32 - m02 uint32 - m03 uint32 - m10 uint32 - m11 uint32 - m12 uint32 - m13 uint32 - m20 uint32 - m21 uint32 - m22 uint32 - m23 uint32 - m30 uint32 - m31 uint32 - m32 uint32 - m33 uint64 - lbelow2 int32 - lbelow3 int32 - lbelow4 int32 - lbelow5 int32 - lbelow6 int32 - lbelow7 int32 - lbelow8 int32 - lbelow9 int32 - lbelow10 int32 - lbelow11 int32 - lbelow12 int32 - lbelow13 int32 - lbelow14 int32 - lbelow15 int32 - s00 uint32 - s01 uint32 - s02 uint32 - s03 uint32 - s10 uint32 - s11 uint32 - s12 uint32 - s13 uint32 - s20 uint32 - s21 uint32 - s22 uint32 - s23 uint32 - s30 uint32 - s31 uint32 - s32 uint32 - s33 uint32 - bits32 uint64 - f uint64 - f0 uint64 - f1 uint64 - f2 uint64 - f3 uint64 - f4 uint64 - g uint64 - g0 uint64 - g1 uint64 - g2 uint64 - g3 uint64 - g4 uint64 - ) - - var p int32 - - l := int32(len(m)) - - r00 := uint32(r[0]) - - r01 := uint32(r[1]) - - r02 := uint32(r[2]) - r0 := int64(2151) - - r03 := uint32(r[3]) - r03 &= 15 - r0 <<= 51 - - r10 := uint32(r[4]) - r10 &= 252 - r01 <<= 8 - r0 += int64(r00) - - r11 := uint32(r[5]) - r02 <<= 16 - r0 += int64(r01) - - r12 := uint32(r[6]) - r03 <<= 24 - r0 += int64(r02) - - r13 := uint32(r[7]) - r13 &= 15 - r1 := int64(2215) - r0 += int64(r03) - - d0 := r0 - r1 <<= 51 - r2 := int64(2279) - - r20 := uint32(r[8]) - r20 &= 252 - r11 <<= 8 - r1 += int64(r10) - - r21 := uint32(r[9]) - r12 <<= 16 - r1 += int64(r11) - - r22 := uint32(r[10]) - r13 <<= 24 - r1 += int64(r12) - - r23 := uint32(r[11]) - r23 &= 15 - r2 <<= 51 - r1 += int64(r13) - - d1 := r1 - r21 <<= 8 - r2 += int64(r20) - - r30 := uint32(r[12]) - r30 &= 252 - r22 <<= 16 - r2 += int64(r21) - - r31 := uint32(r[13]) - r23 <<= 24 - r2 += int64(r22) - - r32 := uint32(r[14]) - r2 += int64(r23) - r3 := int64(2343) - - d2 := r2 - r3 <<= 51 - - r33 := uint32(r[15]) - r33 &= 15 - r31 <<= 8 - r3 += int64(r30) - - r32 <<= 16 - r3 += int64(r31) - - r33 <<= 24 - r3 += int64(r32) - - r3 += int64(r33) - h0 := alpha32 - alpha32 - - d3 := r3 - h1 := alpha32 - alpha32 - - h2 := alpha32 - alpha32 - - h3 := alpha32 - alpha32 - - h4 := alpha32 - alpha32 - - r0low := math.Float64frombits(uint64(d0)) - h5 := alpha32 - alpha32 - - r1low := math.Float64frombits(uint64(d1)) - h6 := alpha32 - alpha32 - - r2low := math.Float64frombits(uint64(d2)) - h7 := alpha32 - alpha32 - - r0low -= alpha0 - - r1low -= alpha32 - - r2low -= alpha64 - - r0high := r0low + alpha18 - - r3low := math.Float64frombits(uint64(d3)) - - r1high := r1low + alpha50 - sr1low := scale * r1low - - r2high := r2low + alpha82 
- sr2low := scale * r2low - - r0high -= alpha18 - r0high_stack := r0high - - r3low -= alpha96 - - r1high -= alpha50 - r1high_stack := r1high - - sr1high := sr1low + alpham80 - - r0low -= r0high - - r2high -= alpha82 - sr3low = scale * r3low - - sr2high := sr2low + alpham48 - - r1low -= r1high - r1low_stack := r1low - - sr1high -= alpham80 - sr1high_stack := sr1high - - r2low -= r2high - r2low_stack := r2low - - sr2high -= alpham48 - sr2high_stack := sr2high - - r3high := r3low + alpha112 - r0low_stack := r0low - - sr1low -= sr1high - sr1low_stack := sr1low - - sr3high := sr3low + alpham16 - r2high_stack := r2high - - sr2low -= sr2high - sr2low_stack := sr2low - - r3high -= alpha112 - r3high_stack := r3high - - sr3high -= alpham16 - sr3high_stack := sr3high - - r3low -= r3high - r3low_stack := r3low - - sr3low -= sr3high - sr3low_stack := sr3low - - if l < 16 { - goto addatmost15bytes - } - - m00 = uint32(m[p+0]) - m0 = 2151 - - m0 <<= 51 - m1 = 2215 - m01 = uint32(m[p+1]) - - m1 <<= 51 - m2 = 2279 - m02 = uint32(m[p+2]) - - m2 <<= 51 - m3 = 2343 - m03 = uint32(m[p+3]) - - m10 = uint32(m[p+4]) - m01 <<= 8 - m0 += int64(m00) - - m11 = uint32(m[p+5]) - m02 <<= 16 - m0 += int64(m01) - - m12 = uint32(m[p+6]) - m03 <<= 24 - m0 += int64(m02) - - m13 = uint32(m[p+7]) - m3 <<= 51 - m0 += int64(m03) - - m20 = uint32(m[p+8]) - m11 <<= 8 - m1 += int64(m10) - - m21 = uint32(m[p+9]) - m12 <<= 16 - m1 += int64(m11) - - m22 = uint32(m[p+10]) - m13 <<= 24 - m1 += int64(m12) - - m23 = uint32(m[p+11]) - m1 += int64(m13) - - m30 = uint32(m[p+12]) - m21 <<= 8 - m2 += int64(m20) - - m31 = uint32(m[p+13]) - m22 <<= 16 - m2 += int64(m21) - - m32 = uint32(m[p+14]) - m23 <<= 24 - m2 += int64(m22) - - m33 = uint64(m[p+15]) - m2 += int64(m23) - - d0 = m0 - m31 <<= 8 - m3 += int64(m30) - - d1 = m1 - m32 <<= 16 - m3 += int64(m31) - - d2 = m2 - m33 += 256 - - m33 <<= 24 - m3 += int64(m32) - - m3 += int64(m33) - d3 = m3 - - p += 16 - l -= 16 - - z0 = math.Float64frombits(uint64(d0)) - - z1 = math.Float64frombits(uint64(d1)) - - z2 = math.Float64frombits(uint64(d2)) - - z3 = math.Float64frombits(uint64(d3)) - - z0 -= alpha0 - - z1 -= alpha32 - - z2 -= alpha64 - - z3 -= alpha96 - - h0 += z0 - - h1 += z1 - - h3 += z2 - - h5 += z3 - - if l < 16 { - goto multiplyaddatmost15bytes - } - -multiplyaddatleast16bytes: - - m2 = 2279 - m20 = uint32(m[p+8]) - y7 = h7 + alpha130 - - m2 <<= 51 - m3 = 2343 - m21 = uint32(m[p+9]) - y6 = h6 + alpha130 - - m3 <<= 51 - m0 = 2151 - m22 = uint32(m[p+10]) - y1 = h1 + alpha32 - - m0 <<= 51 - m1 = 2215 - m23 = uint32(m[p+11]) - y0 = h0 + alpha32 - - m1 <<= 51 - m30 = uint32(m[p+12]) - y7 -= alpha130 - - m21 <<= 8 - m2 += int64(m20) - m31 = uint32(m[p+13]) - y6 -= alpha130 - - m22 <<= 16 - m2 += int64(m21) - m32 = uint32(m[p+14]) - y1 -= alpha32 - - m23 <<= 24 - m2 += int64(m22) - m33 = uint64(m[p+15]) - y0 -= alpha32 - - m2 += int64(m23) - m00 = uint32(m[p+0]) - y5 = h5 + alpha96 - - m31 <<= 8 - m3 += int64(m30) - m01 = uint32(m[p+1]) - y4 = h4 + alpha96 - - m32 <<= 16 - m02 = uint32(m[p+2]) - x7 = h7 - y7 - y7 *= scale - - m33 += 256 - m03 = uint32(m[p+3]) - x6 = h6 - y6 - y6 *= scale - - m33 <<= 24 - m3 += int64(m31) - m10 = uint32(m[p+4]) - x1 = h1 - y1 - - m01 <<= 8 - m3 += int64(m32) - m11 = uint32(m[p+5]) - x0 = h0 - y0 - - m3 += int64(m33) - m0 += int64(m00) - m12 = uint32(m[p+6]) - y5 -= alpha96 - - m02 <<= 16 - m0 += int64(m01) - m13 = uint32(m[p+7]) - y4 -= alpha96 - - m03 <<= 24 - m0 += int64(m02) - d2 = m2 - x1 += y7 - - m0 += int64(m03) - d3 = m3 - x0 += y6 - - m11 <<= 8 - m1 += 
int64(m10) - d0 = m0 - x7 += y5 - - m12 <<= 16 - m1 += int64(m11) - x6 += y4 - - m13 <<= 24 - m1 += int64(m12) - y3 = h3 + alpha64 - - m1 += int64(m13) - d1 = m1 - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - z2 = math.Float64frombits(uint64(d2)) - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - z3 = math.Float64frombits(uint64(d3)) - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - z2 -= alpha64 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - z3 -= alpha96 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - p += 16 - l -= 16 - h6 += r2lowx2 - - h7 += r2highx2 - - z1 = math.Float64frombits(uint64(d1)) - h0 += sr3lowx2 - - z0 = math.Float64frombits(uint64(d0)) - h1 += sr3highx2 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - if l >= 16 { - goto multiplyaddatleast16bytes - } - -multiplyaddatmost15bytes: - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - 
r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -addatmost15bytes: - - if l == 0 { - goto nomorebytes - } - - lbelow2 = l - 2 - - lbelow3 = l - 3 - - lbelow2 >>= 31 - lbelow4 = l - 4 - - m00 = uint32(m[p+0]) - lbelow3 >>= 31 - p += lbelow2 - - m01 = uint32(m[p+1]) - lbelow4 >>= 31 - p += lbelow3 - - m02 = uint32(m[p+2]) - p += lbelow4 - m0 = 2151 - - m03 = uint32(m[p+3]) - m0 <<= 51 - m1 = 2215 - - m0 += int64(m00) - m01 &^= uint32(lbelow2) - - m02 &^= uint32(lbelow3) - m01 -= uint32(lbelow2) - - m01 <<= 8 - m03 &^= uint32(lbelow4) - - m0 += int64(m01) - lbelow2 -= lbelow3 - - m02 += uint32(lbelow2) - lbelow3 -= lbelow4 - - m02 <<= 16 - m03 += uint32(lbelow3) - - m03 <<= 24 - m0 += int64(m02) - - m0 += int64(m03) - lbelow5 = l - 5 - - lbelow6 = l - 6 - lbelow7 = l - 7 - - lbelow5 >>= 31 - lbelow8 = l - 8 - - lbelow6 >>= 31 - p += lbelow5 - - m10 = uint32(m[p+4]) - lbelow7 >>= 31 - p += lbelow6 - - m11 = uint32(m[p+5]) - lbelow8 >>= 31 - p += lbelow7 - - m12 = uint32(m[p+6]) - m1 <<= 51 - p += lbelow8 - - m13 = uint32(m[p+7]) - m10 &^= uint32(lbelow5) - lbelow4 -= lbelow5 - - m10 += uint32(lbelow4) - lbelow5 -= lbelow6 - - m11 &^= uint32(lbelow6) - m11 += uint32(lbelow5) - - m11 <<= 8 - m1 += int64(m10) - - m1 += int64(m11) - m12 &^= uint32(lbelow7) - - lbelow6 -= lbelow7 - m13 &^= uint32(lbelow8) - - m12 += uint32(lbelow6) - lbelow7 -= lbelow8 - - m12 <<= 16 - m13 += uint32(lbelow7) - - m13 <<= 24 - m1 += int64(m12) - - m1 += int64(m13) - m2 = 2279 - - lbelow9 = l - 9 - m3 = 2343 - - lbelow10 = l - 10 - lbelow11 = l - 11 - - lbelow9 >>= 31 - lbelow12 = l - 12 - - lbelow10 >>= 31 - p += lbelow9 - - m20 = uint32(m[p+8]) - lbelow11 >>= 31 - p += lbelow10 - - m21 = uint32(m[p+9]) - lbelow12 >>= 31 - p += lbelow11 - - m22 = uint32(m[p+10]) - m2 <<= 51 - p += lbelow12 - - m23 = uint32(m[p+11]) - m20 &^= uint32(lbelow9) - lbelow8 -= lbelow9 - - m20 += uint32(lbelow8) - lbelow9 -= lbelow10 - - m21 &^= uint32(lbelow10) - m21 += uint32(lbelow9) - - m21 <<= 8 - m2 += int64(m20) - - m2 += int64(m21) - m22 &^= uint32(lbelow11) - - lbelow10 -= lbelow11 - m23 &^= uint32(lbelow12) - - m22 += uint32(lbelow10) - lbelow11 -= lbelow12 - - m22 <<= 16 - m23 += uint32(lbelow11) - - m23 <<= 24 - m2 += int64(m22) - - m3 <<= 51 - lbelow13 = l - 13 - - lbelow13 >>= 31 - lbelow14 = l - 14 - - lbelow14 >>= 31 - p += lbelow13 - lbelow15 = l - 15 - - m30 = uint32(m[p+12]) - lbelow15 >>= 31 - p += 
lbelow14 - - m31 = uint32(m[p+13]) - p += lbelow15 - m2 += int64(m23) - - m32 = uint32(m[p+14]) - m30 &^= uint32(lbelow13) - lbelow12 -= lbelow13 - - m30 += uint32(lbelow12) - lbelow13 -= lbelow14 - - m3 += int64(m30) - m31 &^= uint32(lbelow14) - - m31 += uint32(lbelow13) - m32 &^= uint32(lbelow15) - - m31 <<= 8 - lbelow14 -= lbelow15 - - m3 += int64(m31) - m32 += uint32(lbelow14) - d0 = m0 - - m32 <<= 16 - m33 = uint64(lbelow15 + 1) - d1 = m1 - - m33 <<= 24 - m3 += int64(m32) - d2 = m2 - - m3 += int64(m33) - d3 = m3 - - z3 = math.Float64frombits(uint64(d3)) - - z2 = math.Float64frombits(uint64(d2)) - - z1 = math.Float64frombits(uint64(d1)) - - z0 = math.Float64frombits(uint64(d0)) - - z3 -= alpha96 - - z2 -= alpha64 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -nomorebytes: - - y7 = h7 + alpha130 - - y0 = h0 + alpha32 - - y1 = h1 + alpha32 - - y2 = h2 + alpha64 - - y7 -= alpha130 - - y3 = h3 + alpha64 - - y4 = h4 + alpha96 - - y5 = h5 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - y0 -= alpha32 - - y1 -= alpha32 - - y2 -= alpha64 - - h6 += x7 - - y3 -= alpha64 - - y4 -= alpha96 - - y5 -= alpha96 - - y6 = h6 + alpha130 - - x0 = h0 - y0 - - x1 = h1 - y1 - - x2 = h2 - y2 - - y6 -= alpha130 
- - x0 += y7 - - x3 = h3 - y3 - - x4 = h4 - y4 - - x5 = h5 - y5 - - x6 = h6 - y6 - - y6 *= scale - - x2 += y0 - - x3 += y1 - - x4 += y2 - - x0 += y6 - - x5 += y3 - - x6 += y4 - - x2 += x3 - - x0 += x1 - - x4 += x5 - - x6 += y5 - - x2 += offset1 - d1 = int64(math.Float64bits(x2)) - - x0 += offset0 - d0 = int64(math.Float64bits(x0)) - - x4 += offset2 - d2 = int64(math.Float64bits(x4)) - - x6 += offset3 - d3 = int64(math.Float64bits(x6)) - - f0 = uint64(d0) - - f1 = uint64(d1) - bits32 = math.MaxUint64 - - f2 = uint64(d2) - bits32 >>= 32 - - f3 = uint64(d3) - f = f0 >> 32 - - f0 &= bits32 - f &= 255 - - f1 += f - g0 = f0 + 5 - - g = g0 >> 32 - g0 &= bits32 - - f = f1 >> 32 - f1 &= bits32 - - f &= 255 - g1 = f1 + g - - g = g1 >> 32 - f2 += f - - f = f2 >> 32 - g1 &= bits32 - - f2 &= bits32 - f &= 255 - - f3 += f - g2 = f2 + g - - g = g2 >> 32 - g2 &= bits32 - - f4 = f3 >> 32 - f3 &= bits32 - - f4 &= 255 - g3 = f3 + g - - g = g3 >> 32 - g3 &= bits32 - - g4 = f4 + g - - g4 = g4 - 4 - s00 = uint32(s[0]) - - f = uint64(int64(g4) >> 63) - s01 = uint32(s[1]) - - f0 &= f - g0 &^= f - s02 = uint32(s[2]) - - f1 &= f - f0 |= g0 - s03 = uint32(s[3]) - - g1 &^= f - f2 &= f - s10 = uint32(s[4]) - - f3 &= f - g2 &^= f - s11 = uint32(s[5]) - - g3 &^= f - f1 |= g1 - s12 = uint32(s[6]) - - f2 |= g2 - f3 |= g3 - s13 = uint32(s[7]) - - s01 <<= 8 - f0 += uint64(s00) - s20 = uint32(s[8]) - - s02 <<= 16 - f0 += uint64(s01) - s21 = uint32(s[9]) - - s03 <<= 24 - f0 += uint64(s02) - s22 = uint32(s[10]) - - s11 <<= 8 - f1 += uint64(s10) - s23 = uint32(s[11]) - - s12 <<= 16 - f1 += uint64(s11) - s30 = uint32(s[12]) - - s13 <<= 24 - f1 += uint64(s12) - s31 = uint32(s[13]) - - f0 += uint64(s03) - f1 += uint64(s13) - s32 = uint32(s[14]) - - s21 <<= 8 - f2 += uint64(s20) - s33 = uint32(s[15]) - - s22 <<= 16 - f2 += uint64(s21) - - s23 <<= 24 - f2 += uint64(s22) - - s31 <<= 8 - f3 += uint64(s30) - - s32 <<= 16 - f3 += uint64(s31) - - s33 <<= 24 - f3 += uint64(s32) - - f2 += uint64(s23) - f3 += uint64(s33) - - out[0] = byte(f0) - f0 >>= 8 - out[1] = byte(f0) - f0 >>= 8 - out[2] = byte(f0) - f0 >>= 8 - out[3] = byte(f0) - f0 >>= 8 - f1 += f0 - - out[4] = byte(f1) - f1 >>= 8 - out[5] = byte(f1) - f1 >>= 8 - out[6] = byte(f1) - f1 >>= 8 - out[7] = byte(f1) - f1 >>= 8 - f2 += f1 - - out[8] = byte(f2) - f2 >>= 8 - out[9] = byte(f2) - f2 >>= 8 - out[10] = byte(f2) - f2 >>= 8 - out[11] = byte(f2) - f2 >>= 8 - f3 += f2 - - out[12] = byte(f3) - f3 >>= 8 - out[13] = byte(f3) - f3 >>= 8 - out[14] = byte(f3) - f3 >>= 8 - out[15] = byte(f3) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160.go b/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160.go deleted file mode 100644 index da690f0b9..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ripemd160 implements the RIPEMD-160 hash algorithm. -package ripemd160 - -// RIPEMD-160 is designed by by Hans Dobbertin, Antoon Bosselaers, and Bart -// Preneel with specifications available at: -// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.RIPEMD160, New) -} - -// The size of the checksum in bytes. -const Size = 20 - -// The block size of the hash algorithm in bytes. 
-const BlockSize = 64 - -const ( - _s0 = 0x67452301 - _s1 = 0xefcdab89 - _s2 = 0x98badcfe - _s3 = 0x10325476 - _s4 = 0xc3d2e1f0 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - s [5]uint32 // running context - x [BlockSize]byte // temporary buffer - nx int // index into x - tc uint64 // total count of bytes processed -} - -func (d *digest) Reset() { - d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 - d.nx = 0 - d.tc = 0 -} - -// New returns a new hash.Hash computing the checksum. -func New() hash.Hash { - result := new(digest) - result.Reset() - return result -} - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.tc += uint64(nn) - if d.nx > 0 { - n := len(p) - if n > BlockSize-d.nx { - n = BlockSize - d.nx - } - for i := 0; i < n; i++ { - d.x[d.nx+i] = p[i] - } - d.nx += n - if d.nx == BlockSize { - _Block(d, d.x[0:]) - d.nx = 0 - } - p = p[n:] - } - n := _Block(d, p) - p = p[n:] - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := *d0 - - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - tc := d.tc - var tmp [64]byte - tmp[0] = 0x80 - if tc%64 < 56 { - d.Write(tmp[0 : 56-tc%64]) - } else { - d.Write(tmp[0 : 64+56-tc%64]) - } - - // Length in bits. - tc <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(tc >> (8 * i)) - } - d.Write(tmp[0:8]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - var digest [Size]byte - for i, s := range d.s { - digest[i*4] = byte(s) - digest[i*4+1] = byte(s >> 8) - digest[i*4+2] = byte(s >> 16) - digest[i*4+3] = byte(s >> 24) - } - - return append(in, digest[:]...) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160_test.go b/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160_test.go deleted file mode 100644 index 5df1b2593..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ripemd160 - -// Test vectors are from: -// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html - -import ( - "fmt" - "io" - "testing" -) - -type mdTest struct { - out string - in string -} - -var vectors = [...]mdTest{ - {"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""}, - {"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"}, - {"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"}, - {"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"}, - {"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"}, - {"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, - {"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, - {"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, -} - -func TestVectors(t *testing.T) { - for i := 0; i < len(vectors); i++ { - tv := vectors[i] - md := New() - for j := 0; j < 3; j++ { - if j < 2 { - io.WriteString(md, tv.in) - } else { - io.WriteString(md, tv.in[0:len(tv.in)/2]) - md.Sum(nil) - io.WriteString(md, tv.in[len(tv.in)/2:]) - } - s := fmt.Sprintf("%x", md.Sum(nil)) - if s != tv.out { - t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out) - } - md.Reset() - } - } -} - -func TestMillionA(t *testing.T) { - md := New() - for i := 0; i < 100000; i++ { - io.WriteString(md, "aaaaaaaaaa") - } - out := "52783243c1697bdbe16d37f97f68f08325dc1528" - s := fmt.Sprintf("%x", md.Sum(nil)) - if s != out { - t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out) - } - md.Reset() -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160block.go b/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160block.go deleted file mode 100644 index 7bc8e6c48..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/ripemd160/ripemd160block.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// RIPEMD-160 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. 
- -package ripemd160 - -// work buffer indices and roll amounts for one line -var _n = [80]uint{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, - 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, - 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, - 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, -} - -var _r = [80]uint{ - 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, - 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, - 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, - 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, - 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, -} - -// same for the other parallel one -var n_ = [80]uint{ - 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, - 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, - 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, - 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, - 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, -} - -var r_ = [80]uint{ - 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, - 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, - 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, - 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, - 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, -} - -func _Block(md *digest, p []byte) int { - n := 0 - var x [16]uint32 - var alpha, beta uint32 - for len(p) >= BlockSize { - a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] - aa, bb, cc, dd, ee := a, b, c, d, e - j := 0 - for i := 0; i < 16; i++ { - x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 - j += 4 - } - - // round 1 - i := 0 - for i < 16 { - alpha = a + (b ^ c ^ d) + x[_n[i]] - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 - a, b, c, d, e = e, alpha, b, beta, d - - // parallel line - alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 - aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd - - i++ - } - - // round 2 - for i < 32 { - alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 - a, b, c, d, e = e, alpha, b, beta, d - - // parallel line - alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 - aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd - - i++ - } - - // round 3 - for i < 48 { - alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 - a, b, c, d, e = e, alpha, b, beta, d - - // parallel line - alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 - aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd - - i++ - } - - // round 4 - for i < 64 { - alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 - a, b, c, d, e = e, alpha, b, beta, d - - // parallel line - alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 - aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd - - i++ - } - - // round 5 - for i < 80 { - alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 - a, b, c, d, e = e, alpha, b, beta, d - - // parallel line - alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] - s = r_[i] - alpha = (alpha<>(32-s)) + 
ee - beta = cc<<10 | cc>>22 - aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd - - i++ - } - - // combine results - dd += c + md.s[1] - md.s[1] = md.s[2] + d + ee - md.s[2] = md.s[3] + e + aa - md.s[3] = md.s[4] + a + bb - md.s[4] = md.s[0] + b + cc - md.s[0] = dd - - p = p[BlockSize:] - n += BlockSize - } - return n -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go deleted file mode 100644 index 4ba47d591..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package salsa provides low-level access to functions in the Salsa family. -package salsa - -// Sigma is the Salsa20 constant for 256-bit keys. -var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} - -// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte -// key k, and 16-byte constant c, and puts the result into the 32-byte array -// out. -func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - for i := 0; i < 20; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = 
x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x5) - out[5] = byte(x5 >> 8) - out[6] = byte(x5 >> 16) - out[7] = byte(x5 >> 24) - - out[8] = byte(x10) - out[9] = byte(x10 >> 8) - out[10] = byte(x10 >> 16) - out[11] = byte(x10 >> 24) - - out[12] = byte(x15) - out[13] = byte(x15 >> 8) - out[14] = byte(x15 >> 16) - out[15] = byte(x15 >> 24) - - out[16] = byte(x6) - out[17] = byte(x6 >> 8) - out[18] = byte(x6 >> 16) - out[19] = byte(x6 >> 24) - - out[20] = byte(x7) - out[21] = byte(x7 >> 8) - out[22] = byte(x7 >> 16) - out[23] = byte(x7 >> 24) - - out[24] = byte(x8) - out[25] = byte(x8 >> 8) - out[26] = byte(x8 >> 16) - out[27] = byte(x8 >> 24) - - out[28] = byte(x9) - out[29] = byte(x9 >> 8) - out[30] = byte(x9 >> 16) - out[31] = byte(x9 >> 24) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s deleted file mode 100644 index 6e1df9639..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!appengine,!gccgo - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -TEXT ·salsa2020XORKeyStream(SB),0,$512-40 - MOVQ out+0(FP),DI - MOVQ in+8(FP),SI - MOVQ n+16(FP),DX - MOVQ nonce+24(FP),CX - MOVQ key+32(FP),R8 - - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP - - MOVQ R11,352(SP) - MOVQ R12,360(SP) - MOVQ R13,368(SP) - MOVQ R14,376(SP) - MOVQ R15,384(SP) - MOVQ BX,392(SP) - MOVQ BP,400(SP) - MOVQ DX,R9 - MOVQ CX,DX - MOVQ R8,R10 - CMPQ R9,$0 - JBE DONE - START: - MOVL 20(R10),CX - MOVL 0(R10),R8 - MOVL 0(DX),AX - MOVL 16(R10),R11 - MOVL CX,0(SP) - MOVL R8, 4 (SP) - MOVL AX, 8 (SP) - MOVL R11, 12 (SP) - MOVL 8(DX),CX - MOVL 24(R10),R8 - MOVL 4(R10),AX - MOVL 4(DX),R11 - MOVL CX,16(SP) - MOVL R8, 20 (SP) - MOVL AX, 24 (SP) - MOVL R11, 28 (SP) - MOVL 12(DX),CX - MOVL 12(R10),DX - MOVL 28(R10),R8 - MOVL 8(R10),AX - MOVL DX,32(SP) - MOVL CX, 36 (SP) - MOVL R8, 40 (SP) - MOVL AX, 44 (SP) - MOVQ $1634760805,DX - MOVQ $857760878,CX - MOVQ $2036477234,R8 - MOVQ $1797285236,AX - MOVL DX,48(SP) - MOVL CX, 52 (SP) - MOVL R8, 56 (SP) - MOVL AX, 60 (SP) - CMPQ R9,$256 - JB BYTESBETWEEN1AND255 - MOVOA 48(SP),X0 - PSHUFL $0X55,X0,X1 - PSHUFL $0XAA,X0,X2 - PSHUFL $0XFF,X0,X3 - PSHUFL $0X00,X0,X0 - MOVOA X1,64(SP) - MOVOA X2,80(SP) - MOVOA X3,96(SP) - MOVOA X0,112(SP) - MOVOA 0(SP),X0 - PSHUFL $0XAA,X0,X1 - PSHUFL $0XFF,X0,X2 - PSHUFL $0X00,X0,X3 - PSHUFL $0X55,X0,X0 - MOVOA X1,128(SP) - MOVOA X2,144(SP) - MOVOA X3,160(SP) - MOVOA X0,176(SP) - MOVOA 16(SP),X0 - PSHUFL $0XFF,X0,X1 - PSHUFL $0X55,X0,X2 - PSHUFL $0XAA,X0,X0 - MOVOA X1,192(SP) - MOVOA X2,208(SP) - MOVOA X0,224(SP) - MOVOA 32(SP),X0 - PSHUFL $0X00,X0,X1 - PSHUFL $0XAA,X0,X2 - 
PSHUFL $0XFF,X0,X0 - MOVOA X1,240(SP) - MOVOA X2,256(SP) - MOVOA X0,272(SP) - BYTESATLEAST256: - MOVL 16(SP),DX - MOVL 36 (SP),CX - MOVL DX,288(SP) - MOVL CX,304(SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 292 (SP) - MOVL CX, 308 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 296 (SP) - MOVL CX, 312 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX, 300 (SP) - MOVL CX, 316 (SP) - ADDQ $1,DX - SHLQ $32,CX - ADDQ CX,DX - MOVQ DX,CX - SHRQ $32,CX - MOVL DX,16(SP) - MOVL CX, 36 (SP) - MOVQ R9,408(SP) - MOVQ $20,DX - MOVOA 64(SP),X0 - MOVOA 80(SP),X1 - MOVOA 96(SP),X2 - MOVOA 256(SP),X3 - MOVOA 272(SP),X4 - MOVOA 128(SP),X5 - MOVOA 144(SP),X6 - MOVOA 176(SP),X7 - MOVOA 192(SP),X8 - MOVOA 208(SP),X9 - MOVOA 224(SP),X10 - MOVOA 304(SP),X11 - MOVOA 112(SP),X12 - MOVOA 160(SP),X13 - MOVOA 240(SP),X14 - MOVOA 288(SP),X15 - MAINLOOP1: - MOVOA X1,320(SP) - MOVOA X2,336(SP) - MOVOA X13,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X14 - PSRLL $25,X2 - PXOR X2,X14 - MOVOA X7,X1 - PADDL X0,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X11 - PSRLL $25,X2 - PXOR X2,X11 - MOVOA X12,X1 - PADDL X14,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X15 - PSRLL $23,X2 - PXOR X2,X15 - MOVOA X0,X1 - PADDL X11,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X9 - PSRLL $23,X2 - PXOR X2,X9 - MOVOA X14,X1 - PADDL X15,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X13 - PSRLL $19,X2 - PXOR X2,X13 - MOVOA X11,X1 - PADDL X9,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X7 - PSRLL $19,X2 - PXOR X2,X7 - MOVOA X15,X1 - PADDL X13,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X12 - PSRLL $14,X2 - PXOR X2,X12 - MOVOA 320(SP),X1 - MOVOA X12,320(SP) - MOVOA X9,X2 - PADDL X7,X2 - MOVOA X2,X12 - PSLLL $18,X2 - PXOR X2,X0 - PSRLL $14,X12 - PXOR X12,X0 - MOVOA X5,X2 - PADDL X1,X2 - MOVOA X2,X12 - PSLLL $7,X2 - PXOR X2,X3 - PSRLL $25,X12 - PXOR X12,X3 - MOVOA 336(SP),X2 - MOVOA X0,336(SP) - MOVOA X6,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X4 - PSRLL $25,X12 - PXOR X12,X4 - MOVOA X1,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X10 - PSRLL $23,X12 - PXOR X12,X10 - MOVOA X2,X0 - PADDL X4,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X8 - PSRLL $23,X12 - PXOR X12,X8 - MOVOA X3,X0 - PADDL X10,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X5 - PSRLL $19,X12 - PXOR X12,X5 - MOVOA X4,X0 - PADDL X8,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X6 - PSRLL $19,X12 - PXOR X12,X6 - MOVOA X10,X0 - PADDL X5,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA 320(SP),X0 - MOVOA X1,320(SP) - MOVOA X4,X1 - PADDL X0,X1 - MOVOA X1,X12 - PSLLL $7,X1 - PXOR X1,X7 - PSRLL $25,X12 - PXOR X12,X7 - MOVOA X8,X1 - PADDL X6,X1 - MOVOA X1,X12 - PSLLL $18,X1 - PXOR X1,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 336(SP),X12 - MOVOA X2,336(SP) - MOVOA X14,X1 - PADDL X12,X1 - MOVOA X1,X2 - PSLLL $7,X1 - PXOR X1,X5 - PSRLL $25,X2 - PXOR X2,X5 - MOVOA X0,X1 - PADDL X7,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X10 - PSRLL $23,X2 - PXOR X2,X10 - MOVOA X12,X1 - PADDL X5,X1 - MOVOA X1,X2 - PSLLL $9,X1 - PXOR X1,X8 - PSRLL $23,X2 - PXOR X2,X8 - MOVOA X7,X1 - PADDL X10,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X4 - PSRLL $19,X2 - PXOR X2,X4 - MOVOA X5,X1 - PADDL X8,X1 - MOVOA X1,X2 - PSLLL $13,X1 - PXOR X1,X14 - PSRLL $19,X2 - PXOR X2,X14 - MOVOA X10,X1 - PADDL X4,X1 - MOVOA X1,X2 - PSLLL $18,X1 - PXOR X1,X0 - PSRLL $14,X2 - PXOR X2,X0 - MOVOA 320(SP),X1 - MOVOA X0,320(SP) - MOVOA X8,X0 - PADDL X14,X0 - MOVOA X0,X2 - PSLLL $18,X0 - PXOR X0,X12 - 
PSRLL $14,X2 - PXOR X2,X12 - MOVOA X11,X0 - PADDL X1,X0 - MOVOA X0,X2 - PSLLL $7,X0 - PXOR X0,X6 - PSRLL $25,X2 - PXOR X2,X6 - MOVOA 336(SP),X2 - MOVOA X12,336(SP) - MOVOA X3,X0 - PADDL X2,X0 - MOVOA X0,X12 - PSLLL $7,X0 - PXOR X0,X13 - PSRLL $25,X12 - PXOR X12,X13 - MOVOA X1,X0 - PADDL X6,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X15 - PSRLL $23,X12 - PXOR X12,X15 - MOVOA X2,X0 - PADDL X13,X0 - MOVOA X0,X12 - PSLLL $9,X0 - PXOR X0,X9 - PSRLL $23,X12 - PXOR X12,X9 - MOVOA X6,X0 - PADDL X15,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X11 - PSRLL $19,X12 - PXOR X12,X11 - MOVOA X13,X0 - PADDL X9,X0 - MOVOA X0,X12 - PSLLL $13,X0 - PXOR X0,X3 - PSRLL $19,X12 - PXOR X12,X3 - MOVOA X15,X0 - PADDL X11,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X1 - PSRLL $14,X12 - PXOR X12,X1 - MOVOA X9,X0 - PADDL X3,X0 - MOVOA X0,X12 - PSLLL $18,X0 - PXOR X0,X2 - PSRLL $14,X12 - PXOR X12,X2 - MOVOA 320(SP),X12 - MOVOA 336(SP),X0 - SUBQ $2,DX - JA MAINLOOP1 - PADDL 112(SP),X12 - PADDL 176(SP),X7 - PADDL 224(SP),X10 - PADDL 272(SP),X4 - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 0(SI),DX - XORL 4(SI),CX - XORL 8(SI),R8 - XORL 12(SI),R9 - MOVL DX,0(DI) - MOVL CX,4(DI) - MOVL R8,8(DI) - MOVL R9,12(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 64(SI),DX - XORL 68(SI),CX - XORL 72(SI),R8 - XORL 76(SI),R9 - MOVL DX,64(DI) - MOVL CX,68(DI) - MOVL R8,72(DI) - MOVL R9,76(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - PSHUFL $0X39,X12,X12 - PSHUFL $0X39,X7,X7 - PSHUFL $0X39,X10,X10 - PSHUFL $0X39,X4,X4 - XORL 128(SI),DX - XORL 132(SI),CX - XORL 136(SI),R8 - XORL 140(SI),R9 - MOVL DX,128(DI) - MOVL CX,132(DI) - MOVL R8,136(DI) - MOVL R9,140(DI) - MOVD X12,DX - MOVD X7,CX - MOVD X10,R8 - MOVD X4,R9 - XORL 192(SI),DX - XORL 196(SI),CX - XORL 200(SI),R8 - XORL 204(SI),R9 - MOVL DX,192(DI) - MOVL CX,196(DI) - MOVL R8,200(DI) - MOVL R9,204(DI) - PADDL 240(SP),X14 - PADDL 64(SP),X0 - PADDL 128(SP),X5 - PADDL 192(SP),X8 - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 16(SI),DX - XORL 20(SI),CX - XORL 24(SI),R8 - XORL 28(SI),R9 - MOVL DX,16(DI) - MOVL CX,20(DI) - MOVL R8,24(DI) - MOVL R9,28(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 80(SI),DX - XORL 84(SI),CX - XORL 88(SI),R8 - XORL 92(SI),R9 - MOVL DX,80(DI) - MOVL CX,84(DI) - MOVL R8,88(DI) - MOVL R9,92(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - PSHUFL $0X39,X14,X14 - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X5,X5 - PSHUFL $0X39,X8,X8 - XORL 144(SI),DX - XORL 148(SI),CX - XORL 152(SI),R8 - XORL 156(SI),R9 - MOVL DX,144(DI) - MOVL CX,148(DI) - MOVL R8,152(DI) - MOVL R9,156(DI) - MOVD X14,DX - MOVD X0,CX - MOVD X5,R8 - MOVD X8,R9 - XORL 208(SI),DX - XORL 212(SI),CX - XORL 216(SI),R8 - XORL 220(SI),R9 - MOVL DX,208(DI) - MOVL CX,212(DI) - MOVL R8,216(DI) - MOVL R9,220(DI) - PADDL 288(SP),X15 - PADDL 304(SP),X11 - PADDL 80(SP),X1 - PADDL 144(SP),X6 - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 32(SI),DX - XORL 36(SI),CX - XORL 40(SI),R8 - XORL 44(SI),R9 - MOVL DX,32(DI) - MOVL CX,36(DI) - MOVL R8,40(DI) - MOVL R9,44(DI) - MOVD X15,DX - MOVD X11,CX - MOVD 
X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 96(SI),DX - XORL 100(SI),CX - XORL 104(SI),R8 - XORL 108(SI),R9 - MOVL DX,96(DI) - MOVL CX,100(DI) - MOVL R8,104(DI) - MOVL R9,108(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - PSHUFL $0X39,X15,X15 - PSHUFL $0X39,X11,X11 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X6,X6 - XORL 160(SI),DX - XORL 164(SI),CX - XORL 168(SI),R8 - XORL 172(SI),R9 - MOVL DX,160(DI) - MOVL CX,164(DI) - MOVL R8,168(DI) - MOVL R9,172(DI) - MOVD X15,DX - MOVD X11,CX - MOVD X1,R8 - MOVD X6,R9 - XORL 224(SI),DX - XORL 228(SI),CX - XORL 232(SI),R8 - XORL 236(SI),R9 - MOVL DX,224(DI) - MOVL CX,228(DI) - MOVL R8,232(DI) - MOVL R9,236(DI) - PADDL 160(SP),X13 - PADDL 208(SP),X9 - PADDL 256(SP),X3 - PADDL 96(SP),X2 - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 48(SI),DX - XORL 52(SI),CX - XORL 56(SI),R8 - XORL 60(SI),R9 - MOVL DX,48(DI) - MOVL CX,52(DI) - MOVL R8,56(DI) - MOVL R9,60(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 112(SI),DX - XORL 116(SI),CX - XORL 120(SI),R8 - XORL 124(SI),R9 - MOVL DX,112(DI) - MOVL CX,116(DI) - MOVL R8,120(DI) - MOVL R9,124(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - PSHUFL $0X39,X13,X13 - PSHUFL $0X39,X9,X9 - PSHUFL $0X39,X3,X3 - PSHUFL $0X39,X2,X2 - XORL 176(SI),DX - XORL 180(SI),CX - XORL 184(SI),R8 - XORL 188(SI),R9 - MOVL DX,176(DI) - MOVL CX,180(DI) - MOVL R8,184(DI) - MOVL R9,188(DI) - MOVD X13,DX - MOVD X9,CX - MOVD X3,R8 - MOVD X2,R9 - XORL 240(SI),DX - XORL 244(SI),CX - XORL 248(SI),R8 - XORL 252(SI),R9 - MOVL DX,240(DI) - MOVL CX,244(DI) - MOVL R8,248(DI) - MOVL R9,252(DI) - MOVQ 408(SP),R9 - SUBQ $256,R9 - ADDQ $256,SI - ADDQ $256,DI - CMPQ R9,$256 - JAE BYTESATLEAST256 - CMPQ R9,$0 - JBE DONE - BYTESBETWEEN1AND255: - CMPQ R9,$64 - JAE NOCOPY - MOVQ DI,DX - LEAQ 416(SP),DI - MOVQ R9,CX - REP; MOVSB - LEAQ 416(SP),DI - LEAQ 416(SP),SI - NOCOPY: - MOVQ R9,408(SP) - MOVOA 48(SP),X0 - MOVOA 0(SP),X1 - MOVOA 16(SP),X2 - MOVOA 32(SP),X3 - MOVOA X1,X4 - MOVQ $20,CX - MAINLOOP2: - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X3 - PXOR X6,X3 - PADDL X3,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X3,X3 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X1 - PSHUFL $0X4E,X2,X2 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X3,X4 - MOVOA X5,X6 - PSLLL $18,X5 - 
PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X1,X1 - PXOR X6,X0 - PADDL X0,X4 - MOVOA X0,X5 - MOVOA X4,X6 - PSLLL $7,X4 - PSRLL $25,X6 - PXOR X4,X1 - PXOR X6,X1 - PADDL X1,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $9,X5 - PSRLL $23,X6 - PXOR X5,X2 - PSHUFL $0X93,X1,X1 - PXOR X6,X2 - PADDL X2,X4 - MOVOA X2,X5 - MOVOA X4,X6 - PSLLL $13,X4 - PSRLL $19,X6 - PXOR X4,X3 - PSHUFL $0X4E,X2,X2 - PXOR X6,X3 - SUBQ $4,CX - PADDL X3,X5 - MOVOA X1,X4 - MOVOA X5,X6 - PSLLL $18,X5 - PXOR X7,X7 - PSRLL $14,X6 - PXOR X5,X0 - PSHUFL $0X39,X3,X3 - PXOR X6,X0 - JA MAINLOOP2 - PADDL 48(SP),X0 - PADDL 0(SP),X1 - PADDL 16(SP),X2 - PADDL 32(SP),X3 - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 0(SI),CX - XORL 48(SI),R8 - XORL 32(SI),R9 - XORL 16(SI),AX - MOVL CX,0(DI) - MOVL R8,48(DI) - MOVL R9,32(DI) - MOVL AX,16(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 20(SI),CX - XORL 4(SI),R8 - XORL 52(SI),R9 - XORL 36(SI),AX - MOVL CX,20(DI) - MOVL R8,4(DI) - MOVL R9,52(DI) - MOVL AX,36(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - PSHUFL $0X39,X0,X0 - PSHUFL $0X39,X1,X1 - PSHUFL $0X39,X2,X2 - PSHUFL $0X39,X3,X3 - XORL 40(SI),CX - XORL 24(SI),R8 - XORL 8(SI),R9 - XORL 56(SI),AX - MOVL CX,40(DI) - MOVL R8,24(DI) - MOVL R9,8(DI) - MOVL AX,56(DI) - MOVD X0,CX - MOVD X1,R8 - MOVD X2,R9 - MOVD X3,AX - XORL 60(SI),CX - XORL 44(SI),R8 - XORL 28(SI),R9 - XORL 12(SI),AX - MOVL CX,60(DI) - MOVL R8,44(DI) - MOVL R9,28(DI) - MOVL AX,12(DI) - MOVQ 408(SP),R9 - MOVL 16(SP),CX - MOVL 36 (SP),R8 - ADDQ $1,CX - SHLQ $32,R8 - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $32,R8 - MOVL CX,16(SP) - MOVL R8, 36 (SP) - CMPQ R9,$64 - JA BYTESATLEAST65 - JAE BYTESATLEAST64 - MOVQ DI,SI - MOVQ DX,DI - MOVQ R9,CX - REP; MOVSB - BYTESATLEAST64: - DONE: - MOVQ 352(SP),R11 - MOVQ 360(SP),R12 - MOVQ 368(SP),R13 - MOVQ 376(SP),R14 - MOVQ 384(SP),R15 - MOVQ 392(SP),BX - MOVQ 400(SP),BP - MOVQ R11,SP - RET - BYTESATLEAST65: - SUBQ $64,R9 - ADDQ $64,DI - ADDQ $64,SI - JMP BYTESBETWEEN1AND255 diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go deleted file mode 100644 index 9bfc0927c..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa208.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package salsa - -// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts -// the result into the 64-byte array out. The input and output may be the same array. 
-func Core208(out *[64]byte, in *[64]byte) { - j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 - j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 - j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 - j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 - j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 - j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 - j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 - j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 - j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 - j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 - j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 - j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < 8; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) 
- out[18] = byte(x4 >> 16) - out[19] = byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go deleted file mode 100644 index 903c7858e..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!appengine,!gccgo - -package salsa - -// This function is implemented in salsa2020_amd64.s. - -//go:noescape - -func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out may be the same slice but otherwise should not overlap. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - if len(in) == 0 { - return - } - salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go deleted file mode 100644 index 95f8ca5bb..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine gccgo - -package salsa - -const rounds = 20 - -// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, -// and 16-byte constant c, and puts the result into 64-byte array out. 
-func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { - j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 - j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 - j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 - j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 - j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 - j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 - j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 - j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 - j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 - j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 - j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 - j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 - j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 - j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 - j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 - x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 - - for i := 0; i < rounds; i += 2 { - u := x0 + x12 - x4 ^= u<<7 | u>>(32-7) - u = x4 + x0 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x4 - x12 ^= u<<13 | u>>(32-13) - u = x12 + x8 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x1 - x9 ^= u<<7 | u>>(32-7) - u = x9 + x5 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x9 - x1 ^= u<<13 | u>>(32-13) - u = x1 + x13 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x6 - x14 ^= u<<7 | u>>(32-7) - u = x14 + x10 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x14 - x6 ^= u<<13 | u>>(32-13) - u = x6 + x2 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x11 - x3 ^= u<<7 | u>>(32-7) - u = x3 + x15 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x3 - x11 ^= u<<13 | u>>(32-13) - u = x11 + x7 - x15 ^= u<<18 | u>>(32-18) - - u = x0 + x3 - x1 ^= u<<7 | u>>(32-7) - u = x1 + x0 - x2 ^= u<<9 | u>>(32-9) - u = x2 + x1 - x3 ^= u<<13 | u>>(32-13) - u = x3 + x2 - x0 ^= u<<18 | u>>(32-18) - - u = x5 + x4 - x6 ^= u<<7 | u>>(32-7) - u = x6 + x5 - x7 ^= u<<9 | u>>(32-9) - u = x7 + x6 - x4 ^= u<<13 | u>>(32-13) - u = x4 + x7 - x5 ^= u<<18 | u>>(32-18) - - u = x10 + x9 - x11 ^= u<<7 | u>>(32-7) - u = x11 + x10 - x8 ^= u<<9 | u>>(32-9) - u = x8 + x11 - x9 ^= u<<13 | u>>(32-13) - u = x9 + x8 - x10 ^= u<<18 | u>>(32-18) - - u = x15 + x14 - x12 ^= u<<7 | u>>(32-7) - u = x12 + x15 - x13 ^= u<<9 | u>>(32-9) - u = x13 + x12 - x14 ^= u<<13 | u>>(32-13) - u = x14 + x13 - x15 ^= u<<18 | u>>(32-18) - } - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - x4 += j4 - x5 += j5 - x6 += j6 - x7 += j7 - x8 += j8 - x9 += j9 - x10 += j10 - x11 += j11 - x12 += j12 - x13 += j13 - x14 += j14 - x15 += j15 - - out[0] = byte(x0) - out[1] = byte(x0 >> 8) - out[2] = byte(x0 >> 16) - out[3] = byte(x0 >> 24) - - out[4] = byte(x1) - out[5] = byte(x1 >> 8) - out[6] = byte(x1 >> 16) - out[7] = byte(x1 >> 24) - - out[8] = byte(x2) - out[9] = byte(x2 >> 8) - out[10] = byte(x2 >> 16) - out[11] = byte(x2 >> 24) - - out[12] = byte(x3) - out[13] = byte(x3 >> 8) - out[14] = byte(x3 >> 16) - out[15] = byte(x3 >> 24) - - out[16] = byte(x4) - out[17] = byte(x4 >> 8) - out[18] = byte(x4 >> 16) - out[19] = 
byte(x4 >> 24) - - out[20] = byte(x5) - out[21] = byte(x5 >> 8) - out[22] = byte(x5 >> 16) - out[23] = byte(x5 >> 24) - - out[24] = byte(x6) - out[25] = byte(x6 >> 8) - out[26] = byte(x6 >> 16) - out[27] = byte(x6 >> 24) - - out[28] = byte(x7) - out[29] = byte(x7 >> 8) - out[30] = byte(x7 >> 16) - out[31] = byte(x7 >> 24) - - out[32] = byte(x8) - out[33] = byte(x8 >> 8) - out[34] = byte(x8 >> 16) - out[35] = byte(x8 >> 24) - - out[36] = byte(x9) - out[37] = byte(x9 >> 8) - out[38] = byte(x9 >> 16) - out[39] = byte(x9 >> 24) - - out[40] = byte(x10) - out[41] = byte(x10 >> 8) - out[42] = byte(x10 >> 16) - out[43] = byte(x10 >> 24) - - out[44] = byte(x11) - out[45] = byte(x11 >> 8) - out[46] = byte(x11 >> 16) - out[47] = byte(x11 >> 24) - - out[48] = byte(x12) - out[49] = byte(x12 >> 8) - out[50] = byte(x12 >> 16) - out[51] = byte(x12 >> 24) - - out[52] = byte(x13) - out[53] = byte(x13 >> 8) - out[54] = byte(x13 >> 16) - out[55] = byte(x13 >> 24) - - out[56] = byte(x14) - out[57] = byte(x14 >> 8) - out[58] = byte(x14 >> 16) - out[59] = byte(x14 >> 24) - - out[60] = byte(x15) - out[61] = byte(x15 >> 8) - out[62] = byte(x15 >> 16) - out[63] = byte(x15 >> 24) -} - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out may be the same slice but otherwise should not overlap. Counter -// contains the raw salsa20 counter bytes (both nonce and block counter). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - var block [64]byte - var counterCopy [16]byte - copy(counterCopy[:], counter[:]) - - for len(in) >= 64 { - core(&block, &counterCopy, key, &Sigma) - for i, x := range block { - out[i] = in[i] ^ x - } - u := uint32(1) - for i := 8; i < 16; i++ { - u += uint32(counterCopy[i]) - counterCopy[i] = byte(u) - u >>= 8 - } - in = in[64:] - out = out[64:] - } - - if len(in) > 0 { - core(&block, &counterCopy, key, &Sigma) - for i, v := range in { - out[i] = v ^ block[i] - } - } -} diff --git a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go b/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go deleted file mode 100644 index f8cecd9e6..000000000 --- a/Godeps/_workspace/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package salsa - -import "testing" - -func TestCore208(t *testing.T) { - in := [64]byte{ - 0x7e, 0x87, 0x9a, 0x21, 0x4f, 0x3e, 0xc9, 0x86, - 0x7c, 0xa9, 0x40, 0xe6, 0x41, 0x71, 0x8f, 0x26, - 0xba, 0xee, 0x55, 0x5b, 0x8c, 0x61, 0xc1, 0xb5, - 0x0d, 0xf8, 0x46, 0x11, 0x6d, 0xcd, 0x3b, 0x1d, - 0xee, 0x24, 0xf3, 0x19, 0xdf, 0x9b, 0x3d, 0x85, - 0x14, 0x12, 0x1e, 0x4b, 0x5a, 0xc5, 0xaa, 0x32, - 0x76, 0x02, 0x1d, 0x29, 0x09, 0xc7, 0x48, 0x29, - 0xed, 0xeb, 0xc6, 0x8d, 0xb8, 0xb8, 0xc2, 0x5e} - - out := [64]byte{ - 0xa4, 0x1f, 0x85, 0x9c, 0x66, 0x08, 0xcc, 0x99, - 0x3b, 0x81, 0xca, 0xcb, 0x02, 0x0c, 0xef, 0x05, - 0x04, 0x4b, 0x21, 0x81, 0xa2, 0xfd, 0x33, 0x7d, - 0xfd, 0x7b, 0x1c, 0x63, 0x96, 0x68, 0x2f, 0x29, - 0xb4, 0x39, 0x31, 0x68, 0xe3, 0xc9, 0xe6, 0xbc, - 0xfe, 0x6b, 0xc5, 0xb7, 0xa0, 0x6d, 0x96, 0xba, - 0xe4, 0x24, 0xcc, 0x10, 0x2c, 0x91, 0x74, 0x5c, - 0x24, 0xad, 0x67, 0x3d, 0xc7, 0x61, 0x8f, 0x81, - } - - Core208(&in, &in) - if in != out { - t.Errorf("expected %x, got %x", out, in) - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/.travis.yml b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/.travis.yml deleted file mode 100644 index b05e4c53f..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: 1.2 - diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/LICENSE.md b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/LICENSE.md deleted file mode 100644 index 25fdaf639..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/README.md b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/README.md deleted file mode 100644 index 23afdd98d..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/README.md +++ /dev/null @@ -1,245 +0,0 @@ -# Set [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/gopkg.in/fatih/set.v0) [![Build Status](http://img.shields.io/travis/fatih/set.svg?style=flat-square)](https://travis-ci.org/fatih/set) - -Set is a basic and simple, hash-based, **Set** data structure implementation -in Go (Golang). - -Set provides both threadsafe and non-threadsafe implementations of a generic -set data structure. The thread safety encompasses all operations on one set. 
-Operations on multiple sets are consistent in that the elements of each set -used was valid at exactly one point in time between the start and the end of -the operation. Because it's thread safe, you can use it concurrently with your -goroutines. - -For usage see examples below or click on the godoc badge. - -## Install and Usage - -Install the package with: - -```bash -go get gopkg.in/fatih/set.v0 -``` - -Import it with: - -```go -import "gopkg.in/fatih/set.v0" -``` - -and use `set` as the package name inside the code. - -## Examples - -#### Initialization of a new Set - -```go - -// create a set with zero items -s := set.New() -s := set.NewNonTS() // non thread-safe version - -// ... or with some initial values -s := set.New("istanbul", "frankfurt", 30.123, "san francisco", 1234) -s := set.NewNonTS("kenya", "ethiopia", "sumatra") - -``` - -#### Basic Operations - -```go -// add items -s.Add("istanbul") -s.Add("istanbul") // nothing happens if you add duplicate item - -// add multiple items -s.Add("ankara", "san francisco", 3.14) - -// remove item -s.Remove("frankfurt") -s.Remove("frankfurt") // nothing happes if you remove a nonexisting item - -// remove multiple items -s.Remove("barcelona", 3.14, "ankara") - -// removes an arbitary item and return it -item := s.Pop() - -// create a new copy -other := s.Copy() - -// remove all items -s.Clear() - -// number of items in the set -len := s.Size() - -// return a list of items -items := s.List() - -// string representation of set -fmt.Printf("set is %s", s.String()) - -``` - -#### Check Operations - -```go -// check for set emptiness, returns true if set is empty -s.IsEmpty() - -// check for a single item exist -s.Has("istanbul") - -// ... or for multiple items. This will return true if all of the items exist. -s.Has("istanbul", "san francisco", 3.14) - -// create two sets for the following checks... -s := s.New("1", "2", "3", "4", "5") -t := s.New("1", "2", "3") - - -// check if they are the same -if !s.IsEqual(t) { - fmt.Println("s is not equal to t") -} - -// if s contains all elements of t -if s.IsSubset(t) { - fmt.Println("t is a subset of s") -} - -// ... or if s is a superset of t -if t.IsSuperset(s) { - fmt.Println("s is a superset of t") -} - - -``` - -#### Set Operations - - -```go -// let us initialize two sets with some values -a := set.New("ankara", "berlin", "san francisco") -b := set.New("frankfurt", "berlin") - -// creates a new set with the items in a and b combined. -// [frankfurt, berlin, ankara, san francisco] -c := set.Union(a, b) - -// contains items which is in both a and b -// [berlin] -c := set.Intersection(a, b) - -// contains items which are in a but not in b -// [ankara, san francisco] -c := set.Difference(a, b) - -// contains items which are in one of either, but not in both. -// [frankfurt, ankara, san francisco] -c := set.SymmetricDifference(a, b) - -``` - -```go -// like Union but saves the result back into a. -a.Merge(b) - -// removes the set items which are in b from a and saves the result back into a. 
-a.Separate(b) - -``` - -#### Multiple Set Operations - -```go -a := set.New("1", "3", "4", "5") -b := set.New("2", "3", "4", "5") -c := set.New("4", "5", "6", "7") - -// creates a new set with items in a, b and c -// [1 2 3 4 5 6 7] -u := set.Union(a, b, c) - -// creates a new set with items in a but not in b and c -// [1] -u := set.Difference(a, b, c) - -// creates a new set with items that are common to a, b and c -// [5] -u := set.Intersection(a, b, c) -``` - -#### Helper methods - -The Slice functions below are a convenient way to extract or convert your Set data -into basic data types. - - -```go -// create a set of mixed types -s := set.New("ankara", "5", "8", "san francisco", 13, 21) - - -// convert s into a slice of strings (type is []string) -// [ankara 5 8 san francisco] -t := set.StringSlice(s) - - -// u contains a slice of ints (type is []int) -// [13, 21] -u := set.IntSlice(s) - -``` - -#### Concurrent safe usage - -Below is an example of a concurrent way that uses set. We call ten functions -concurrently and wait until they are finished. It basically creates a new -string for each goroutine and adds it to our set. - -```go -package main - -import ( - "fmt" - "github.com/fatih/set" - "strconv" - "sync" -) - -func main() { - var wg sync.WaitGroup // this is just for waiting until all goroutines finish - - // Initialize our thread safe Set - s := set.New() - - // Add items concurrently (item1, item2, and so on) - for i := 0; i < 10; i++ { - wg.Add(1) - go func(i int) { - item := "item" + strconv.Itoa(i) - fmt.Println("adding", item) - s.Add(item) - wg.Done() - }(i) - } - - // Wait until all concurrent calls finished and print our set - wg.Wait() - fmt.Println(s) -} -``` - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * [Arne Hormann](https://github.com/arnehormann) - * [Sam Boyer](https://github.com/sdboyer) - * [Ralph Loizzo](https://github.com/friartech) - -## License - -The MIT License (MIT) - see LICENSE.md for more details - diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set.go deleted file mode 100644 index ac0240ce7..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package set provides both threadsafe and non-threadsafe implementations of -// a generic set data structure. In the threadsafe set, safety encompasses all -// operations on one set. Operations on multiple sets are consistent in that -// the elements of each set used was valid at exactly one point in time -// between the start and the end of the operation. -package set - -// Interface is describing a Set. Sets are an unordered, unique list of values. -type Interface interface { - New(items ...interface{}) Interface - Add(items ...interface{}) - Remove(items ...interface{}) - Pop() interface{} - Has(items ...interface{}) bool - Size() int - Clear() - IsEmpty() bool - IsEqual(s Interface) bool - IsSubset(s Interface) bool - IsSuperset(s Interface) bool - Each(func(interface{}) bool) - String() string - List() []interface{} - Copy() Interface - Merge(s Interface) - Separate(s Interface) -} - -// helpful to not write everywhere struct{}{} -var keyExists = struct{}{} - -// Union is the merger of multiple sets. It returns a new set with all the -// elements present in all the sets that are passed. -// -// The dynamic type of the returned set is determined by the first passed set's -// implementation of the New() method. 
-func Union(set1, set2 Interface, sets ...Interface) Interface { - u := set1.Copy() - set2.Each(func(item interface{}) bool { - u.Add(item) - return true - }) - for _, set := range sets { - set.Each(func(item interface{}) bool { - u.Add(item) - return true - }) - } - - return u -} - -// Difference returns a new set which contains items which are in in the first -// set but not in the others. Unlike the Difference() method you can use this -// function separately with multiple sets. -func Difference(set1, set2 Interface, sets ...Interface) Interface { - s := set1.Copy() - s.Separate(set2) - for _, set := range sets { - s.Separate(set) // seperate is thread safe - } - return s -} - -// Intersection returns a new set which contains items that only exist in all given sets. -func Intersection(set1, set2 Interface, sets ...Interface) Interface { - all := Union(set1, set2, sets...) - result := Union(set1, set2, sets...) - - all.Each(func(item interface{}) bool { - if !set1.Has(item) || !set2.Has(item) { - result.Remove(item) - } - - for _, set := range sets { - if !set.Has(item) { - result.Remove(item) - } - } - return true - }) - return result -} - -// SymmetricDifference returns a new set which s is the difference of items which are in -// one of either, but not in both. -func SymmetricDifference(s Interface, t Interface) Interface { - u := Difference(s, t) - v := Difference(t, s) - return Union(u, v) -} - -// StringSlice is a helper function that returns a slice of strings of s. If -// the set contains mixed types of items only items of type string are returned. -func StringSlice(s Interface) []string { - slice := make([]string, 0) - for _, item := range s.List() { - v, ok := item.(string) - if !ok { - continue - } - - slice = append(slice, v) - } - return slice -} - -// IntSlice is a helper function that returns a slice of ints of s. If -// the set contains mixed types of items only items of type int are returned. -func IntSlice(s Interface) []int { - slice := make([]int, 0) - for _, item := range s.List() { - v, ok := item.(int) - if !ok { - continue - } - - slice = append(slice, v) - } - return slice -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots.go deleted file mode 100644 index ec1ab2285..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots.go +++ /dev/null @@ -1,195 +0,0 @@ -package set - -import ( - "fmt" - "strings" -) - -// Provides a common set baseline for both threadsafe and non-ts Sets. -type set struct { - m map[interface{}]struct{} // struct{} doesn't take up space -} - -// SetNonTS defines a non-thread safe set data structure. -type SetNonTS struct { - set -} - -// NewNonTS creates and initialize a new non-threadsafe Set. -// It accepts a variable number of arguments to populate the initial set. -// If nothing is passed a SetNonTS with zero size is created. -func NewNonTS(items ...interface{}) *SetNonTS { - s := &SetNonTS{} - s.m = make(map[interface{}]struct{}) - - // Ensure interface compliance - var _ Interface = s - - s.Add(items...) - return s -} - -// New creates and initalizes a new Set interface. It accepts a variable -// number of arguments to populate the initial set. If nothing is passed a -// zero size Set based on the struct is created. -func (s *set) New(items ...interface{}) Interface { - return NewNonTS(items...) -} - -// Add includes the specified items (one or more) to the set. The underlying -// Set s is modified. If passed nothing it silently returns. 
-func (s *set) Add(items ...interface{}) { - if len(items) == 0 { - return - } - - for _, item := range items { - s.m[item] = keyExists - } -} - -// Remove deletes the specified items from the set. The underlying Set s is -// modified. If passed nothing it silently returns. -func (s *set) Remove(items ...interface{}) { - if len(items) == 0 { - return - } - - for _, item := range items { - delete(s.m, item) - } -} - -// Pop deletes and return an item from the set. The underlying Set s is -// modified. If set is empty, nil is returned. -func (s *set) Pop() interface{} { - for item := range s.m { - delete(s.m, item) - return item - } - return nil -} - -// Has looks for the existence of items passed. It returns false if nothing is -// passed. For multiple items it returns true only if all of the items exist. -func (s *set) Has(items ...interface{}) bool { - // assume checked for empty item, which not exist - if len(items) == 0 { - return false - } - - has := true - for _, item := range items { - if _, has = s.m[item]; !has { - break - } - } - return has -} - -// Size returns the number of items in a set. -func (s *set) Size() int { - return len(s.m) -} - -// Clear removes all items from the set. -func (s *set) Clear() { - s.m = make(map[interface{}]struct{}) -} - -// IsEmpty reports whether the Set is empty. -func (s *set) IsEmpty() bool { - return s.Size() == 0 -} - -// IsEqual test whether s and t are the same in size and have the same items. -func (s *set) IsEqual(t Interface) bool { - // Force locking only if given set is threadsafe. - if conv, ok := t.(*Set); ok { - conv.l.RLock() - defer conv.l.RUnlock() - } - - // return false if they are no the same size - if sameSize := len(s.m) == t.Size(); !sameSize { - return false - } - - equal := true - t.Each(func(item interface{}) bool { - _, equal = s.m[item] - return equal // if false, Each() will end - }) - - return equal -} - -// IsSubset tests whether t is a subset of s. -func (s *set) IsSubset(t Interface) (subset bool) { - subset = true - - t.Each(func(item interface{}) bool { - _, subset = s.m[item] - return subset - }) - - return -} - -// IsSuperset tests whether t is a superset of s. -func (s *set) IsSuperset(t Interface) bool { - return t.IsSubset(s) -} - -// Each traverses the items in the Set, calling the provided function for each -// set member. Traversal will continue until all items in the Set have been -// visited, or if the closure returns false. -func (s *set) Each(f func(item interface{}) bool) { - for item := range s.m { - if !f(item) { - break - } - } -} - -// String returns a string representation of s -func (s *set) String() string { - t := make([]string, 0, len(s.List())) - for _, item := range s.List() { - t = append(t, fmt.Sprintf("%v", item)) - } - - return fmt.Sprintf("[%s]", strings.Join(t, ", ")) -} - -// List returns a slice of all items. There is also StringSlice() and -// IntSlice() methods for returning slices of type string or int. -func (s *set) List() []interface{} { - list := make([]interface{}, 0, len(s.m)) - - for item := range s.m { - list = append(list, item) - } - - return list -} - -// Copy returns a new Set with a copy of s. -func (s *set) Copy() Interface { - return NewNonTS(s.List()...) -} - -// Merge is like Union, however it modifies the current set it's applied on -// with the given t set. -func (s *set) Merge(t Interface) { - t.Each(func(item interface{}) bool { - s.m[item] = keyExists - return true - }) -} - -// it's not the opposite of Merge. 
-// Separate removes the set items containing in t from set s. Please aware that -func (s *set) Separate(t Interface) { - s.Remove(t.List()...) -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots_test.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots_test.go deleted file mode 100644 index fd599699f..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_nots_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package set - -import ( - "reflect" - "strings" - "testing" -) - -func TestSetNonTS_NewNonTS_parameters(t *testing.T) { - s := NewNonTS("string", "another_string", 1, 3.14) - - if s.Size() != 4 { - t.Error("NewNonTS: calling with parameters should create a set with size of four") - } -} - -func TestSetNonTS_Add(t *testing.T) { - s := NewNonTS() - s.Add(1) - s.Add(2) - s.Add(2) // duplicate - s.Add("fatih") - s.Add("zeynep") - s.Add("zeynep") // another duplicate - - if s.Size() != 4 { - t.Error("Add: items are not unique. The set size should be four") - } - - if !s.Has(1, 2, "fatih", "zeynep") { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSetNonTS_Add_multiple(t *testing.T) { - s := NewNonTS() - s.Add("ankara", "san francisco", 3.14) - - if s.Size() != 3 { - t.Error("Add: items are not unique. The set size should be three") - } - - if !s.Has("ankara", "san francisco", 3.14) { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSetNonTS_Remove(t *testing.T) { - s := NewNonTS() - s.Add(1) - s.Add(2) - s.Add("fatih") - - s.Remove(1) - if s.Size() != 2 { - t.Error("Remove: set size should be two after removing") - } - - s.Remove(1) - if s.Size() != 2 { - t.Error("Remove: set size should be not change after trying to remove a non-existing item") - } - - s.Remove(2) - s.Remove("fatih") - if s.Size() != 0 { - t.Error("Remove: set size should be zero") - } - - s.Remove("fatih") // try to remove something from a zero length set -} - -func TestSetNonTS_Remove_multiple(t *testing.T) { - s := NewNonTS() - s.Add("ankara", "san francisco", 3.14, "istanbul") - s.Remove("ankara", "san francisco", 3.14) - - if s.Size() != 1 { - t.Error("Remove: items are not unique. 
The set size should be four") - } - - if !s.Has("istanbul") { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSetNonTS_Pop(t *testing.T) { - s := NewNonTS() - s.Add(1) - s.Add(2) - s.Add("fatih") - - a := s.Pop() - if s.Size() != 2 { - t.Error("Pop: set size should be two after popping out") - } - - if s.Has(a) { - t.Error("Pop: returned item should not exist") - } - - s.Pop() - s.Pop() - b := s.Pop() - if b != nil { - t.Error("Pop: should return nil because set is empty") - } - - s.Pop() // try to remove something from a zero length set -} - -func TestSetNonTS_Has(t *testing.T) { - s := NewNonTS("1", "2", "3", "4") - - if !s.Has("1") { - t.Error("Has: the item 1 exist, but 'Has' is returning false") - } - - if !s.Has("1", "2", "3", "4") { - t.Error("Has: the items all exist, but 'Has' is returning false") - } -} - -func TestSetNonTS_Clear(t *testing.T) { - s := NewNonTS() - s.Add(1) - s.Add("istanbul") - s.Add("san francisco") - - s.Clear() - if s.Size() != 0 { - t.Error("Clear: set size should be zero") - } -} - -func TestSetNonTS_IsEmpty(t *testing.T) { - s := NewNonTS() - - empty := s.IsEmpty() - if !empty { - t.Error("IsEmpty: set is empty, it should be true") - } - - s.Add(2) - s.Add(3) - notEmpty := s.IsEmpty() - - if notEmpty { - t.Error("IsEmpty: set is filled, it should be false") - } -} - -func TestSetNonTS_IsEqual(t *testing.T) { - s := NewNonTS("1", "2", "3") - u := NewNonTS("1", "2", "3") - - ok := s.IsEqual(u) - if !ok { - t.Error("IsEqual: set s and t are equal. However it returns false") - } - - // same size, different content - a := NewNonTS("1", "2", "3") - b := NewNonTS("4", "5", "6") - - ok = a.IsEqual(b) - if ok { - t.Error("IsEqual: set a and b are now equal (1). However it returns true") - } - - // different size, similar content - a = NewNonTS("1", "2", "3") - b = NewNonTS("1", "2", "3", "4") - - ok = a.IsEqual(b) - if ok { - t.Error("IsEqual: set s and t are now equal (2). However it returns true") - } - -} - -func TestSetNonTS_IsSubset(t *testing.T) { - s := NewNonTS("1", "2", "3", "4") - u := NewNonTS("1", "2", "3") - - ok := s.IsSubset(u) - if !ok { - t.Error("IsSubset: u is a subset of s. However it returns false") - } - - ok = u.IsSubset(s) - if ok { - t.Error("IsSubset: s is not a subset of u. However it returns true") - } - -} - -func TestSetNonTS_IsSuperset(t *testing.T) { - s := NewNonTS("1", "2", "3", "4") - u := NewNonTS("1", "2", "3") - - ok := u.IsSuperset(s) - if !ok { - t.Error("IsSuperset: s is a superset of u. However it returns false") - } - - ok = s.IsSuperset(u) - if ok { - t.Error("IsSuperset: u is not a superset of u. 
However it returns true") - } - -} - -func TestSetNonTS_String(t *testing.T) { - s := NewNonTS() - if s.String() != "[]" { - t.Errorf("String: output is not what is excepted '%s'", s.String()) - } - - s.Add("1", "2", "3", "4") - if !strings.HasPrefix(s.String(), "[") { - t.Error("String: output should begin with a square bracket") - } - - if !strings.HasSuffix(s.String(), "]") { - t.Error("String: output should end with a square bracket") - } - -} - -func TestSetNonTS_List(t *testing.T) { - s := NewNonTS("1", "2", "3", "4") - - // this returns a slice of interface{} - if len(s.List()) != 4 { - t.Error("List: slice size should be four.") - } - - for _, item := range s.List() { - r := reflect.TypeOf(item) - if r.Kind().String() != "string" { - t.Error("List: slice item should be a string") - } - } -} - -func TestSetNonTS_Copy(t *testing.T) { - s := NewNonTS("1", "2", "3", "4") - r := s.Copy() - - if !s.IsEqual(r) { - t.Error("Copy: set s and r are not equal") - } -} - -func TestSetNonTS_Merge(t *testing.T) { - s := NewNonTS("1", "2", "3") - r := NewNonTS("3", "4", "5") - s.Merge(r) - - if s.Size() != 5 { - t.Error("Merge: the set doesn't have all items in it.") - } - - if !s.Has("1", "2", "3", "4", "5") { - t.Error("Merge: merged items are not availabile in the set.") - } -} - -func TestSetNonTS_Separate(t *testing.T) { - s := NewNonTS("1", "2", "3") - r := NewNonTS("3", "5") - s.Separate(r) - - if s.Size() != 2 { - t.Error("Separate: the set doesn't have all items in it.") - } - - if !s.Has("1", "2") { - t.Error("Separate: items after separation are not availabile in the set.") - } -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_test.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_test.go deleted file mode 100644 index 83dd5806d..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package set - -import ( - "reflect" - "testing" -) - -func Test_Union(t *testing.T) { - s := New("1", "2", "3") - r := New("3", "4", "5") - x := NewNonTS("5", "6", "7") - - u := Union(s, r, x) - if settype := reflect.TypeOf(u).String(); settype != "*set.Set" { - t.Error("Union should derive its set type from the first passed set, got", settype) - } - if u.Size() != 7 { - t.Error("Union: the merged set doesn't have all items in it.") - } - - if !u.Has("1", "2", "3", "4", "5", "6", "7") { - t.Error("Union: merged items are not availabile in the set.") - } - - z := Union(x, r) - if z.Size() != 5 { - t.Error("Union: Union of 2 sets doesn't have the proper number of items.") - } - if settype := reflect.TypeOf(z).String(); settype != "*set.SetNonTS" { - t.Error("Union should derive its set type from the first passed set, got", settype) - } - -} - -func Test_Difference(t *testing.T) { - s := New("1", "2", "3") - r := New("3", "4", "5") - x := New("5", "6", "7") - u := Difference(s, r, x) - - if u.Size() != 2 { - t.Error("Difference: the set doesn't have all items in it.") - } - - if !u.Has("1", "2") { - t.Error("Difference: items are not availabile in the set.") - } - - y := Difference(r, r) - if y.Size() != 0 { - t.Error("Difference: size should be zero") - } - -} - -func Test_Intersection(t *testing.T) { - s1 := New("1", "3", "4", "5") - s2 := New("2", "3", "5", "6") - s3 := New("4", "5", "6", "7") - u := Intersection(s1, s2, s3) - - if u.Size() != 1 { - t.Error("Intersection: the set doesn't have all items in it.") - } - - if !u.Has("5") { - t.Error("Intersection: items after intersection are not availabile in the set.") - } -} - -func 
Test_SymmetricDifference(t *testing.T) { - s := New("1", "2", "3") - r := New("3", "4", "5") - u := SymmetricDifference(s, r) - - if u.Size() != 4 { - t.Error("SymmetricDifference: the set doesn't have all items in it.") - } - - if !u.Has("1", "2", "4", "5") { - t.Error("SymmetricDifference: items are not availabile in the set.") - } -} - -func Test_StringSlice(t *testing.T) { - s := New("san francisco", "istanbul", 3.14, 1321, "ankara") - u := StringSlice(s) - - if len(u) != 3 { - t.Error("StringSlice: slice should only have three items") - } - - for _, item := range u { - r := reflect.TypeOf(item) - if r.Kind().String() != "string" { - t.Error("StringSlice: slice item should be a string") - } - } -} - -func Test_IntSlice(t *testing.T) { - s := New("san francisco", "istanbul", 3.14, 1321, "ankara", 8876) - u := IntSlice(s) - - if len(u) != 2 { - t.Error("IntSlice: slice should only have two items") - } - - for _, item := range u { - r := reflect.TypeOf(item) - if r.Kind().String() != "int" { - t.Error("Intslice: slice item should be a int") - } - } -} - -func BenchmarkSetEquality(b *testing.B) { - s := New() - u := New() - - for i := 0; i < b.N; i++ { - s.Add(i) - u.Add(i) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - s.IsEqual(u) - } -} - -func BenchmarkSubset(b *testing.B) { - s := New() - u := New() - - for i := 0; i < b.N; i++ { - s.Add(i) - u.Add(i) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - s.IsSubset(u) - } -} - -func benchmarkIntersection(b *testing.B, numberOfItems int) { - s1 := New() - s2 := New() - - for i := 0; i < numberOfItems/2; i++ { - s1.Add(i) - } - for i := 0; i < numberOfItems; i++ { - s2.Add(i) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - Intersection(s1, s2) - } -} - -func BenchmarkIntersection10(b *testing.B) { - benchmarkIntersection(b, 10) -} - -func BenchmarkIntersection100(b *testing.B) { - benchmarkIntersection(b, 100) -} - -func BenchmarkIntersection1000(b *testing.B) { - benchmarkIntersection(b, 1000) -} - -func BenchmarkIntersection10000(b *testing.B) { - benchmarkIntersection(b, 10000) -} - -func BenchmarkIntersection100000(b *testing.B) { - benchmarkIntersection(b, 100000) -} - -func BenchmarkIntersection1000000(b *testing.B) { - benchmarkIntersection(b, 1000000) -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts.go deleted file mode 100644 index 50f532565..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts.go +++ /dev/null @@ -1,200 +0,0 @@ -package set - -import ( - "sync" -) - -// Set defines a thread safe set data structure. -type Set struct { - set - l sync.RWMutex // we name it because we don't want to expose it -} - -// New creates and initialize a new Set. It's accept a variable number of -// arguments to populate the initial set. If nothing passed a Set with zero -// size is created. -func New(items ...interface{}) *Set { - s := &Set{} - s.m = make(map[interface{}]struct{}) - - // Ensure interface compliance - var _ Interface = s - - s.Add(items...) - return s -} - -// New creates and initalizes a new Set interface. It accepts a variable -// number of arguments to populate the initial set. If nothing is passed a -// zero size Set based on the struct is created. -func (s *Set) New(items ...interface{}) Interface { - return New(items...) -} - -// Add includes the specified items (one or more) to the set. The underlying -// Set s is modified. If passed nothing it silently returns. 
-func (s *Set) Add(items ...interface{}) { - if len(items) == 0 { - return - } - - s.l.Lock() - defer s.l.Unlock() - - for _, item := range items { - s.m[item] = keyExists - } -} - -// Remove deletes the specified items from the set. The underlying Set s is -// modified. If passed nothing it silently returns. -func (s *Set) Remove(items ...interface{}) { - if len(items) == 0 { - return - } - - s.l.Lock() - defer s.l.Unlock() - - for _, item := range items { - delete(s.m, item) - } -} - -// Pop deletes and return an item from the set. The underlying Set s is -// modified. If set is empty, nil is returned. -func (s *Set) Pop() interface{} { - s.l.RLock() - for item := range s.m { - s.l.RUnlock() - s.l.Lock() - delete(s.m, item) - s.l.Unlock() - return item - } - s.l.RUnlock() - return nil -} - -// Has looks for the existence of items passed. It returns false if nothing is -// passed. For multiple items it returns true only if all of the items exist. -func (s *Set) Has(items ...interface{}) bool { - // assume checked for empty item, which not exist - if len(items) == 0 { - return false - } - - s.l.RLock() - defer s.l.RUnlock() - - has := true - for _, item := range items { - if _, has = s.m[item]; !has { - break - } - } - return has -} - -// Size returns the number of items in a set. -func (s *Set) Size() int { - s.l.RLock() - defer s.l.RUnlock() - - l := len(s.m) - return l -} - -// Clear removes all items from the set. -func (s *Set) Clear() { - s.l.Lock() - defer s.l.Unlock() - - s.m = make(map[interface{}]struct{}) -} - -// IsEqual test whether s and t are the same in size and have the same items. -func (s *Set) IsEqual(t Interface) bool { - s.l.RLock() - defer s.l.RUnlock() - - // Force locking only if given set is threadsafe. - if conv, ok := t.(*Set); ok { - conv.l.RLock() - defer conv.l.RUnlock() - } - - // return false if they are no the same size - if sameSize := len(s.m) == t.Size(); !sameSize { - return false - } - - equal := true - t.Each(func(item interface{}) bool { - _, equal = s.m[item] - return equal // if false, Each() will end - }) - - return equal -} - -// IsSubset tests whether t is a subset of s. -func (s *Set) IsSubset(t Interface) (subset bool) { - s.l.RLock() - defer s.l.RUnlock() - - subset = true - - t.Each(func(item interface{}) bool { - _, subset = s.m[item] - return subset - }) - - return -} - -// Each traverses the items in the Set, calling the provided function for each -// set member. Traversal will continue until all items in the Set have been -// visited, or if the closure returns false. -func (s *Set) Each(f func(item interface{}) bool) { - s.l.RLock() - defer s.l.RUnlock() - - for item := range s.m { - if !f(item) { - break - } - } -} - -// List returns a slice of all items. There is also StringSlice() and -// IntSlice() methods for returning slices of type string or int. -func (s *Set) List() []interface{} { - s.l.RLock() - defer s.l.RUnlock() - - list := make([]interface{}, 0, len(s.m)) - - for item := range s.m { - list = append(list, item) - } - - return list -} - -// Copy returns a new Set with a copy of s. -func (s *Set) Copy() Interface { - return New(s.List()...) -} - -// Merge is like Union, however it modifies the current set it's applied on -// with the given t set. 
-func (s *Set) Merge(t Interface) { - s.l.Lock() - defer s.l.Unlock() - - t.Each(func(item interface{}) bool { - s.m[item] = keyExists - return true - }) -} diff --git a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts_test.go b/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts_test.go deleted file mode 100644 index 8d2f509d0..000000000 --- a/Godeps/_workspace/src/gopkg.in/fatih/set.v0/set_ts_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package set - -import ( - "reflect" - "strconv" - "strings" - "testing" -) - -func TestSet_New(t *testing.T) { - s := New() - - if s.Size() != 0 { - t.Error("New: calling without any parameters should create a set with zero size") - } - - u := s.New() - if u.Size() != 0 { - t.Error("New: creating a new set via s.New() should create a set with zero size") - } -} - -func TestSet_New_parameters(t *testing.T) { - s := New("string", "another_string", 1, 3.14) - - if s.Size() != 4 { - t.Error("New: calling with parameters should create a set with size of four") - } -} - -func TestSet_Add(t *testing.T) { - s := New() - s.Add(1) - s.Add(2) - s.Add(2) // duplicate - s.Add("fatih") - s.Add("zeynep") - s.Add("zeynep") // another duplicate - - if s.Size() != 4 { - t.Error("Add: items are not unique. The set size should be four") - } - - if !s.Has(1, 2, "fatih", "zeynep") { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSet_Add_multiple(t *testing.T) { - s := New() - s.Add("ankara", "san francisco", 3.14) - - if s.Size() != 3 { - t.Error("Add: items are not unique. The set size should be three") - } - - if !s.Has("ankara", "san francisco", 3.14) { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSet_Remove(t *testing.T) { - s := New() - s.Add(1) - s.Add(2) - s.Add("fatih") - - s.Remove(1) - if s.Size() != 2 { - t.Error("Remove: set size should be two after removing") - } - - s.Remove(1) - if s.Size() != 2 { - t.Error("Remove: set size should be not change after trying to remove a non-existing item") - } - - s.Remove(2) - s.Remove("fatih") - if s.Size() != 0 { - t.Error("Remove: set size should be zero") - } - - s.Remove("fatih") // try to remove something from a zero length set -} - -func TestSet_Remove_multiple(t *testing.T) { - s := New() - s.Add("ankara", "san francisco", 3.14, "istanbul") - s.Remove("ankara", "san francisco", 3.14) - - if s.Size() != 1 { - t.Error("Remove: items are not unique. 
The set size should be four") - } - - if !s.Has("istanbul") { - t.Error("Add: added items are not availabile in the set.") - } -} - -func TestSet_Pop(t *testing.T) { - s := New() - s.Add(1) - s.Add(2) - s.Add("fatih") - - a := s.Pop() - if s.Size() != 2 { - t.Error("Pop: set size should be two after popping out") - } - - if s.Has(a) { - t.Error("Pop: returned item should not exist") - } - - s.Pop() - s.Pop() - b := s.Pop() - if b != nil { - t.Error("Pop: should return nil because set is empty") - } - - s.Pop() // try to remove something from a zero length set -} - -func TestSet_Has(t *testing.T) { - s := New("1", "2", "3", "4") - - if !s.Has("1") { - t.Error("Has: the item 1 exist, but 'Has' is returning false") - } - - if !s.Has("1", "2", "3", "4") { - t.Error("Has: the items all exist, but 'Has' is returning false") - } -} - -func TestSet_Clear(t *testing.T) { - s := New() - s.Add(1) - s.Add("istanbul") - s.Add("san francisco") - - s.Clear() - if s.Size() != 0 { - t.Error("Clear: set size should be zero") - } -} - -func TestSet_IsEmpty(t *testing.T) { - s := New() - - empty := s.IsEmpty() - if !empty { - t.Error("IsEmpty: set is empty, it should be true") - } - - s.Add(2) - s.Add(3) - notEmpty := s.IsEmpty() - - if notEmpty { - t.Error("IsEmpty: set is filled, it should be false") - } -} - -func TestSet_IsEqual(t *testing.T) { - s := New("1", "2", "3") - u := New("1", "2", "3") - - ok := s.IsEqual(u) - if !ok { - t.Error("IsEqual: set s and t are equal. However it returns false") - } - - // same size, different content - a := New("1", "2", "3") - b := New("4", "5", "6") - - ok = a.IsEqual(b) - if ok { - t.Error("IsEqual: set a and b are now equal (1). However it returns true") - } - - // different size, similar content - a = New("1", "2", "3") - b = New("1", "2", "3", "4") - - ok = a.IsEqual(b) - if ok { - t.Error("IsEqual: set s and t are now equal (2). However it returns true") - } - -} - -func TestSet_IsSubset(t *testing.T) { - s := New("1", "2", "3", "4") - u := New("1", "2", "3") - - ok := s.IsSubset(u) - if !ok { - t.Error("IsSubset: u is a subset of s. However it returns false") - } - - ok = u.IsSubset(s) - if ok { - t.Error("IsSubset: s is not a subset of u. However it returns true") - } - -} - -func TestSet_IsSuperset(t *testing.T) { - s := New("1", "2", "3", "4") - u := New("1", "2", "3") - - ok := u.IsSuperset(s) - if !ok { - t.Error("IsSuperset: s is a superset of u. However it returns false") - } - - ok = s.IsSuperset(u) - if ok { - t.Error("IsSuperset: u is not a superset of u. 
However it returns true") - } - -} - -func TestSet_String(t *testing.T) { - s := New() - if s.String() != "[]" { - t.Errorf("String: output is not what is excepted '%s'", s.String()) - } - - s.Add("1", "2", "3", "4") - if !strings.HasPrefix(s.String(), "[") { - t.Error("String: output should begin with a square bracket") - } - - if !strings.HasSuffix(s.String(), "]") { - t.Error("String: output should end with a square bracket") - } -} - -func TestSet_List(t *testing.T) { - s := New("1", "2", "3", "4") - - // this returns a slice of interface{} - if len(s.List()) != 4 { - t.Error("List: slice size should be four.") - } - - for _, item := range s.List() { - r := reflect.TypeOf(item) - if r.Kind().String() != "string" { - t.Error("List: slice item should be a string") - } - } -} - -func TestSet_Copy(t *testing.T) { - s := New("1", "2", "3", "4") - r := s.Copy() - - if !s.IsEqual(r) { - t.Error("Copy: set s and r are not equal") - } -} - -func TestSet_Merge(t *testing.T) { - s := New("1", "2", "3") - r := New("3", "4", "5") - s.Merge(r) - - if s.Size() != 5 { - t.Error("Merge: the set doesn't have all items in it.") - } - - if !s.Has("1", "2", "3", "4", "5") { - t.Error("Merge: merged items are not availabile in the set.") - } -} - -func TestSet_Separate(t *testing.T) { - s := New("1", "2", "3") - r := New("3", "5") - s.Separate(r) - - if s.Size() != 2 { - t.Error("Separate: the set doesn't have all items in it.") - } - - if !s.Has("1", "2") { - t.Error("Separate: items after separation are not availabile in the set.") - } -} - -func TestSet_RaceAdd(t *testing.T) { - // Create two sets. Add concurrently items to each of them. Remove from the - // other one. - // "go test -race" should detect this if the library is not thread-safe. - s := New() - u := New() - - go func() { - for i := 0; i < 1000; i++ { - item := "item" + strconv.Itoa(i) - go func(i int) { - s.Add(item) - u.Add(item) - }(i) - } - }() - - for i := 0; i < 1000; i++ { - item := "item" + strconv.Itoa(i) - go func(i int) { - s.Add(item) - u.Add(item) - }(i) - } -} diff --git a/INSTALL/README.md b/INSTALL/README.md index ba888e1f8..88e416da5 100644 --- a/INSTALL/README.md +++ b/INSTALL/README.md @@ -22,9 +22,9 @@ WARNING: THIS STEP WILL GIVE CONTROL OF THE CURRENT USER TO THE DEV TEAM. go get -u github.com/tendermint/tendermint/cmd/barak nohup barak -config="$GOPATH/src/github.com/tendermint/tendermint/cmd/barak/seed" & -### Install/Update Tendermint +### Install/Update MintDB go get -u github.com/tendermint/tendermint/cmd/tendermint mkdir -p ~/.tendermint - cp $GOPATH/src/github.com/tendermint/go-config/tendermint/genesis.json ~/.tendermint/ + cp $GOPATH/src/github.com/tendermint/tendermint/config/tendermint/genesis.json ~/.tendermint/ tendermint node --seeds="goldenalchemist.chaintest.net:46656" diff --git a/LICENSE.md b/LICENSE.md index ddb473e82..1989fc548 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,206 +1,664 @@ -Tendermint Core +Tendermint MintDB Copyright (C) 2015 Tendermint -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program. If not, see . - -//-------------------------------------------------------------------------------- - -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. - -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. 
To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. - -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. - -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. -You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. 
Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. -A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. -d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
- -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. - -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. 
-You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”. 
- -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
- -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. -If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. 
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. -If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. - -END OF TERMS AND CONDITIONS + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. 
+ + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. \ No newline at end of file diff --git a/Makefile b/Makefile index a0648ff5b..df81f4819 100644 --- a/Makefile +++ b/Makefile @@ -41,11 +41,6 @@ list_deps: get_deps: go get github.com/tendermint/tendermint/... 
-gen_client: - go get -u github.com/ebuchman/go-rpc-gen - go install github.com/ebuchman/go-rpc-gen - go generate rpc/core_client/*.go - revision: -echo `git rev-parse --verify HEAD` > $(TMROOT)/revision -echo `git rev-parse --verify HEAD` >> $(TMROOT)/revision_history diff --git a/benchmarks/chan_test.go b/benchmarks/chan_test.go new file mode 100644 index 000000000..bb227b98b --- /dev/null +++ b/benchmarks/chan_test.go @@ -0,0 +1,19 @@ +package main + +import ( + "testing" +) + +func BenchmarkChanMakeClose(b *testing.B) { + b.StopTimer() + b.StartTimer() + + for j := 0; j < b.N; j++ { + foo := make(chan struct{}) + close(foo) + something, ok := <-foo + if ok { + b.Error(something, ok) + } + } +} diff --git a/benchmarks/map_test.go b/benchmarks/map_test.go new file mode 100644 index 000000000..a8fb13486 --- /dev/null +++ b/benchmarks/map_test.go @@ -0,0 +1,39 @@ +package main + +import ( + . "github.com/tendermint/go-common" + "testing" +) + +func BenchmarkSomething(b *testing.B) { + b.StopTimer() + numItems := 100000 + numChecks := 100000 + keys := make([]string, numItems) + for i := 0; i < numItems; i++ { + keys[i] = RandStr(32) + } + txs := make([]string, numChecks) + for i := 0; i < numChecks; i++ { + txs[i] = RandStr(32) + } + b.StartTimer() + + counter := 0 + for j := 0; j < b.N; j++ { + foo := make(map[string]string) + for _, key := range keys { + foo[key] = key + } + for _, tx := range txs { + if _, ok := foo[tx]; ok { + counter++ + } + } + for _, tx := range txs { + if _, ok := foo[tx]; ok { + counter++ + } + } + } +} diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 6ba6b66fc..0a9778308 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -11,6 +11,7 @@ import ( "github.com/tendermint/go-p2p" "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/events" + "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -43,6 +44,7 @@ type BlockchainReactor struct { sw *p2p.Switch state *sm.State + proxyAppCtx proxy.AppContext // same as consensus.proxyAppCtx store *BlockStore pool *BlockPool sync bool @@ -53,7 +55,7 @@ type BlockchainReactor struct { evsw events.Fireable } -func NewBlockchainReactor(state *sm.State, store *BlockStore, sync bool) *BlockchainReactor { +func NewBlockchainReactor(state *sm.State, proxyAppCtx proxy.AppContext, store *BlockStore, sync bool) *BlockchainReactor { if state.LastBlockHeight != store.Height() && state.LastBlockHeight != store.Height()-1 { // XXX double check this logic. PanicSanity(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) @@ -67,6 +69,7 @@ func NewBlockchainReactor(state *sm.State, store *BlockStore, sync bool) *Blockc ) bcR := &BlockchainReactor{ state: state, + proxyAppCtx: proxyAppCtx, store: store, pool: pool, sync: sync, @@ -226,11 +229,16 @@ FOR_LOOP: break SYNC_LOOP } else { bcR.pool.PopRequest() - err := sm.ExecBlock(bcR.state, first, firstPartsHeader) + err := bcR.state.ExecBlock(bcR.proxyAppCtx, first, firstPartsHeader) if err != nil { // TODO This is bad, are we zombie? PanicQ(Fmt("Failed to process committed block: %v", err)) } + err = bcR.state.Commit(bcR.proxyAppCtx) + if err != nil { + // TODO Handle gracefully. 
+ PanicQ(Fmt("Failed to commit block at application: %v", err)) + } bcR.store.SaveBlock(first, firstParts, second.LastValidation) bcR.state.Save() } diff --git a/config/tendermint/config.go b/config/tendermint/config.go index 2c35f98d1..a955cb80f 100644 --- a/config/tendermint/config.go +++ b/config/tendermint/config.go @@ -52,6 +52,7 @@ func GetConfig(rootDir string) cfg.Config { } mapConfig.SetRequired("chain_id") // blows up if you try to use it before setting. mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json") + mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:46658") mapConfig.SetDefault("moniker", "anonymous") mapConfig.SetDefault("node_laddr", "0.0.0.0:46656") // mapConfig.SetDefault("seeds", "goldenalchemist.chaintest.net:46656") @@ -72,6 +73,7 @@ func GetConfig(rootDir string) cfg.Config { var defaultConfigTmpl = `# This is a TOML config file. # For more information, see https://github.com/toml-lang/toml +proxy_app = "tcp://127.0.0.1:46658" moniker = "__MONIKER__" node_laddr = "0.0.0.0:46656" seeds = "" diff --git a/config/tendermint_test/config.go b/config/tendermint_test/config.go index ca06d731d..1a7754c71 100644 --- a/config/tendermint_test/config.go +++ b/config/tendermint_test/config.go @@ -58,6 +58,7 @@ func GetConfig(rootDir string) cfg.Config { } mapConfig.SetDefault("chain_id", "tendermint_test") mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json") + mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:36658") mapConfig.SetDefault("moniker", "anonymous") mapConfig.SetDefault("node_laddr", "0.0.0.0:36656") mapConfig.SetDefault("fast_sync", false) @@ -77,6 +78,7 @@ func GetConfig(rootDir string) cfg.Config { var defaultConfigTmpl = `# This is a TOML config file. # For more information, see https://github.com/toml-lang/toml +proxy_app = "tcp://127.0.0.1:36658" moniker = "__MONIKER__" node_laddr = "0.0.0.0:36656" seeds = "" diff --git a/consensus/common_test.go b/consensus/common_test.go new file mode 100644 index 000000000..0c75bc7fb --- /dev/null +++ b/consensus/common_test.go @@ -0,0 +1,335 @@ +package consensus + +import ( + "bytes" + "fmt" + "sort" + "testing" + "time" + + dbm "github.com/tendermint/go-db" + bc "github.com/tendermint/tendermint/blockchain" + _ "github.com/tendermint/tendermint/config/tendermint_test" + "github.com/tendermint/tendermint/events" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tmsp/example" +) + +var chainID string + +func init() { + chainID = config.GetString("chain_id") +} + +type validatorStub struct { + Height int + Round int + *types.PrivValidator +} + +func NewValidatorStub(privValidator *types.PrivValidator) *validatorStub { + return &validatorStub{ + PrivValidator: privValidator, + } +} + +func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { + vote := &types.Vote{ + Height: vs.Height, + Round: vs.Round, + Type: voteType, + BlockHash: hash, + BlockPartsHeader: header, + } + err := vs.PrivValidator.SignVote(chainID, vote) + return vote, err +} + +// convenienve function for testing +func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { + v, err := vs.signVote(voteType, hash, header) + if err != nil { + panic(fmt.Errorf("failed to sign vote: %v", err)) + } + return v +} + +// create proposal block from cs1 but sign it with vs +func 
decideProposal(cs1 *ConsensusState, cs2 *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) { + block, blockParts := cs1.createProposalBlock() + if block == nil { // on error + panic("error creating proposal block") + } + + // Make proposal + proposal = types.NewProposal(height, round, blockParts.Header(), cs1.Votes.POLRound()) + if err := cs2.SignProposal(chainID, proposal); err != nil { + panic(err) + } + return +} + +//------------------------------------------------------------------------------- +// utils + +func nilRound(t *testing.T, startRound int, cs1 *ConsensusState, vss ...*validatorStub) { + height, round := cs1.Height, cs1.Round + + waitFor(t, cs1, height, round, RoundStepPrevote) + + signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, cs1.ProposalBlockParts.Header(), vss...) + + waitFor(t, cs1, height, round, RoundStepPrecommit) + + signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, cs1.ProposalBlockParts.Header(), vss...) + + waitFor(t, cs1, height, round+1, RoundStepNewRound) +} + +// NOTE: this switches the proposer as far as `perspectiveOf` is concerned, +// but for simplicity we return a block it generated. +func changeProposer(t *testing.T, perspectiveOf *ConsensusState, newProposer *validatorStub) *types.Block { + _, v1 := perspectiveOf.Validators.GetByAddress(perspectiveOf.privValidator.Address) + v1.Accum, v1.VotingPower = 0, 0 + if updated := perspectiveOf.Validators.Update(v1); !updated { + t.Fatal("failed to update validator") + } + _, v2 := perspectiveOf.Validators.GetByAddress(newProposer.Address) + v2.Accum, v2.VotingPower = 100, 100 + if updated := perspectiveOf.Validators.Update(v2); !updated { + t.Fatal("failed to update validator") + } + + // make the proposal + propBlock, _ := perspectiveOf.createProposalBlock() + if propBlock == nil { + t.Fatal("Failed to create proposal block with cs2") + } + return propBlock +} + +func fixVotingPower(t *testing.T, cs1 *ConsensusState, addr2 []byte) { + _, v1 := cs1.Validators.GetByAddress(cs1.privValidator.Address) + _, v2 := cs1.Validators.GetByAddress(addr2) + v1.Accum, v1.VotingPower = v2.Accum, v2.VotingPower + if updated := cs1.Validators.Update(v1); !updated { + t.Fatal("failed to update validator") + } +} + +func addVoteToFromMany(to *ConsensusState, votes []*types.Vote, froms ...*validatorStub) { + if len(votes) != len(froms) { + panic("len(votes) and len(froms) must match") + } + for i, from := range froms { + addVoteToFrom(to, from, votes[i]) + } +} + +func addVoteToFrom(to *ConsensusState, from *validatorStub, vote *types.Vote) { + valIndex, _ := to.Validators.GetByAddress(from.PrivValidator.Address) + added, err := to.TryAddVote(valIndex, vote, "") + if _, ok := err.(*types.ErrVoteConflictingSignature); ok { + // let it fly + } else if !added { + fmt.Println("to, from, vote:", to.Height, from.Height, vote.Height) + panic(fmt.Sprintln("Failed to add vote. 
Err:", err)) + } else if err != nil { + panic(fmt.Sprintln("Failed to add vote:", err)) + } +} + +func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { + votes := make([]*types.Vote, len(vss)) + for i, vs := range vss { + votes[i] = signVote(vs, voteType, hash, header) + } + return votes +} + +// add vote to one cs from another +func signAddVoteToFromMany(voteType byte, to *ConsensusState, hash []byte, header types.PartSetHeader, froms ...*validatorStub) { + for _, from := range froms { + vote := signVote(from, voteType, hash, header) + addVoteToFrom(to, from, vote) + } +} + +func signAddVoteToFrom(voteType byte, to *ConsensusState, from *validatorStub, hash []byte, header types.PartSetHeader) *types.Vote { + vote := signVote(from, voteType, hash, header) + addVoteToFrom(to, from, vote) + return vote +} + +func ensureNoNewStep(t *testing.T, cs *ConsensusState) { + timeout := time.NewTicker(2 * time.Second) + select { + case <-timeout.C: + break + case <-cs.NewStepCh(): + panic("We should be stuck waiting for more votes, not moving to the next step") + } +} + +func ensureNewStep(t *testing.T, cs *ConsensusState) *RoundState { + timeout := time.NewTicker(2 * time.Second) + select { + case <-timeout.C: + panic("We should have gone to the next step, not be stuck waiting") + case rs := <-cs.NewStepCh(): + return rs + } +} + +func waitFor(t *testing.T, cs *ConsensusState, height int, round int, step RoundStepType) { + for { + rs := ensureNewStep(t, cs) + if CompareHRS(rs.Height, rs.Round, rs.Step, height, round, step) < 0 { + continue + } else { + break + } + } +} + +func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) { + prevotes := cs.Votes.Prevotes(round) + var vote *types.Vote + if vote = prevotes.GetByAddress(privVal.Address); vote == nil { + panic("Failed to find prevote from validator") + } + if blockHash == nil { + if vote.BlockHash != nil { + panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockHash)) + } + } else { + if !bytes.Equal(vote.BlockHash, blockHash) { + panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockHash)) + } + } +} + +func incrementHeight(vss ...*validatorStub) { + for _, vs := range vss { + vs.Height += 1 + } +} + +func incrementRound(vss ...*validatorStub) { + for _, vs := range vss { + vs.Round += 1 + } +} + +func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { + precommits := cs.Votes.Precommits(thisRound) + var vote *types.Vote + if vote = precommits.GetByAddress(privVal.Address); vote == nil { + panic("Failed to find precommit from validator") + } + + if votedBlockHash == nil { + if vote.BlockHash != nil { + panic("Expected precommit to be for nil") + } + } else { + if !bytes.Equal(vote.BlockHash, votedBlockHash) { + panic("Expected precommit to be for proposal block") + } + } + + if lockedBlockHash == nil { + if cs.LockedRound != lockRound || cs.LockedBlock != nil { + panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock)) + } + } else { + if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { + panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. 
Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash)) + } + } + +} + +func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { + // verify the prevote + validatePrevote(t, cs, thisRound, privVal, votedBlockHash) + // verify precommit + cs.mtx.Lock() + validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) + cs.mtx.Unlock() +} + +func simpleConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { + // Get State + state, privVals := randGenesisState(nValidators, false, 10) + + // fmt.Println(state.Validators) + + vss := make([]*validatorStub, nValidators) + + // make consensus state for lead validator + + // Get BlockStore + blockDB := dbm.NewMemDB() + blockStore := bc.NewBlockStore(blockDB) + + // one for mempool, one for consensus + app := example.NewCounterApplication() + appCMem := app.Open() + appCCon := app.Open() + proxyAppCtxMem := proxy.NewLocalAppContext(appCMem) + proxyAppCtxCon := proxy.NewLocalAppContext(appCCon) + + // Make Mempool + mempool := mempl.NewMempool(proxyAppCtxMem) + + // Make ConsensusReactor + cs := NewConsensusState(state, proxyAppCtxCon, blockStore, mempool) + cs.SetPrivValidator(privVals[0]) + + evsw := events.NewEventSwitch() + cs.SetFireable(evsw) + + // read off the NewHeightStep + <-cs.NewStepCh() + + for i := 0; i < nValidators; i++ { + vss[i] = NewValidatorStub(privVals[i]) + } + // since cs1 starts at 1 + incrementHeight(vss[1:]...) + + return cs, vss +} + +func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) { + db := dbm.NewMemDB() + genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) + s0 := sm.MakeGenesisState(db, genDoc) + s0.Save() + return s0, privValidators +} + +func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) { + validators := make([]types.GenesisValidator, numValidators) + privValidators := make([]*types.PrivValidator, numValidators) + for i := 0; i < numValidators; i++ { + val, privVal := types.RandValidator(randPower, minPower) + validators[i] = types.GenesisValidator{ + PubKey: val.PubKey, + Amount: val.VotingPower, + } + privValidators[i] = privVal + } + sort.Sort(types.PrivValidatorsByAddress(privValidators)) + return &types.GenesisDoc{ + GenesisTime: time.Now(), + ChainID: config.GetString("chain_id"), + Validators: validators, + }, privValidators + +} diff --git a/consensus/state.go b/consensus/state.go index 6a46a4e5e..867f019d0 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -12,6 +12,7 @@ import ( bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/events" mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -153,10 +154,11 @@ func (rs *RoundState) StringShort() string { type ConsensusState struct { BaseService - blockStore *bc.BlockStore - mempoolReactor *mempl.MempoolReactor - privValidator *types.PrivValidator - newStepCh chan *RoundState + proxyAppCtx proxy.AppContext + blockStore *bc.BlockStore + mempool *mempl.Mempool + privValidator *types.PrivValidator + newStepCh chan *RoundState mtx sync.Mutex RoundState @@ -166,16 +168,14 @@ type ConsensusState struct { evsw events.Fireable evc *events.EventCache // set in 
stageBlock and passed into state - - decideProposalFunc func(cs *ConsensusState, height int, round int) } -func NewConsensusState(state *sm.State, blockStore *bc.BlockStore, mempoolReactor *mempl.MempoolReactor) *ConsensusState { +func NewConsensusState(state *sm.State, proxyAppCtx proxy.AppContext, blockStore *bc.BlockStore, mempool *mempl.Mempool) *ConsensusState { cs := &ConsensusState{ - blockStore: blockStore, - mempoolReactor: mempoolReactor, - newStepCh: make(chan *RoundState, 10), - decideProposalFunc: decideProposal, + proxyAppCtx: proxyAppCtx, + blockStore: blockStore, + mempool: mempool, + newStepCh: make(chan *RoundState, 10), } cs.updateToState(state) // Don't call scheduleRound0 yet. @@ -185,10 +185,6 @@ func NewConsensusState(state *sm.State, blockStore *bc.BlockStore, mempoolReacto return cs } -func (cs *ConsensusState) SetDecideProposalFunc(f func(cs *ConsensusState, height int, round int)) { - cs.decideProposalFunc = f -} - // Reconstruct LastCommit from SeenValidation, which we saved along with the block, // (which happens even before saving the state) func (cs *ConsensusState) reconstructLastCommit(state *sm.State) { @@ -426,11 +422,6 @@ func (cs *ConsensusState) EnterPropose(height int, round int) { } func (cs *ConsensusState) decideProposal(height, round int) { - cs.decideProposalFunc(cs, height, round) -} - -// Decides on the next proposal and sets them onto cs.Proposal* -func decideProposal(cs *ConsensusState, height, round int) { var block *types.Block var blockParts *types.PartSet @@ -441,6 +432,9 @@ func decideProposal(cs *ConsensusState, height, round int) { } else { // Create a new proposal block from state/txs from the mempool. block, blockParts = cs.createProposalBlock() + if block == nil { // on error + return + } } // Make proposal @@ -476,6 +470,7 @@ func (cs *ConsensusState) isProposalComplete() bool { } // Create the next block to propose and return it. +// Returns nil block upon error. // NOTE: keep it side-effect free for clarity. func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { var validation *types.Validation @@ -491,7 +486,14 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts log.Error("EnterPropose: Cannot propose anything: No validation for the previous block.") return } - txs := cs.mempoolReactor.Mempool.GetProposalTxs() + + // Mempool run transactions and the resulting hash + txs, hash, err := cs.mempool.Reap() + if err != nil { + log.Warn("createProposalBlock: Error getting proposal txs", "error", err) + return nil, nil + } + block = &types.Block{ Header: &types.Header{ ChainID: cs.state.ChainID, @@ -501,7 +503,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts NumTxs: len(txs), LastBlockHash: cs.state.LastBlockHash, LastBlockParts: cs.state.LastBlockParts, - StateHash: nil, // Will set afterwards. + ValidatorsHash: cs.state.Validators.Hash(), + AppHash: hash, }, LastValidation: validation, Data: &types.Data{ @@ -509,15 +512,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts }, } block.FillHeader() - - // Set the block.Header.StateHash. 
- err := cs.state.ComputeBlockStateHash(block) - if err != nil { - log.Error("EnterPropose: Error setting state hash", "error", err) - return - } - blockParts = block.MakePartSet() + return block, blockParts } @@ -940,13 +936,13 @@ func (cs *ConsensusState) TryAddVote(valIndex int, vote *types.Vote, peerKey str return added, err } else if _, ok := err.(*types.ErrVoteConflictingSignature); ok { log.Warn("Found conflicting vote. Publish evidence") - /* XXX + /* TODO evidenceTx := &types.DupeoutTx{ Address: address, VoteA: *errDupe.VoteA, VoteB: *errDupe.VoteB, } - cs.mempoolReactor.BroadcastTx(evidenceTx) // shouldn't need to check returned err + cs.mempool.BroadcastTx(evidenceTx) // shouldn't need to check returned err */ return added, err } else { @@ -998,7 +994,7 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string switch vote.Type { case types.VoteTypePrevote: prevotes := cs.Votes.Prevotes(vote.Round) - log.Info(Fmt("Added to prevotes: %v", prevotes.StringShort())) + log.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) // First, unlock if prevotes is a valid POL. // >> lockRound < POLRound <= unlockOrChangeLockRound (see spec) // NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound), @@ -1033,7 +1029,7 @@ func (cs *ConsensusState) addVote(valIndex int, vote *types.Vote, peerKey string } case types.VoteTypePrecommit: precommits := cs.Votes.Precommits(vote.Round) - log.Info(Fmt("Added to precommit: %v", precommits.StringShort())) + log.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) hash, _, ok := precommits.TwoThirdsMajority() if ok { go func() { @@ -1078,22 +1074,26 @@ func (cs *ConsensusState) stageBlock(block *types.Block, blockParts *types.PartS return nil } + // Create a new event cache to cache all events. + cs.evc = events.NewEventCache(cs.evsw) + // Create a copy of the state for staging stateCopy := cs.state.Copy() - // reset the event cache and pass it into the state - cs.evc = events.NewEventCache(cs.evsw) stateCopy.SetFireable(cs.evc) - // Commit block onto the copied state. - // NOTE: Basic validation is done in state.AppendBlock(). - err := sm.ExecBlock(stateCopy, block, blockParts.Header()) + // Run the block on the State: + // + update validator sets + // + first rolls back proxyAppCtx + // + run txs on the proxyAppCtx or rollback + err := stateCopy.ExecBlock(cs.proxyAppCtx, block, blockParts.Header()) if err != nil { return err - } else { - cs.stagedBlock = block - cs.stagedState = stateCopy - return nil } + + // Everything looks good! + cs.stagedBlock = block + cs.stagedState = stateCopy + return nil } func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { @@ -1139,11 +1139,18 @@ func (cs *ConsensusState) saveBlock(block *types.Block, blockParts *types.PartSe cs.blockStore.SaveBlock(block, blockParts, seenValidation) } + // Commit to proxyAppCtx + err := cs.stagedState.Commit(cs.proxyAppCtx) + if err != nil { + // TODO: handle this gracefully. + PanicQ(Fmt("Commit failed for application")) + } + // Save the state. cs.stagedState.Save() // Update mempool.
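// The commit path now runs through the proxy app context. A condensed sketch of
// the sequence spelled out by stageBlock/saveBlock above (the helper name and the
// seenValidation parameter type are assumptions; error handling is trimmed):
func commitBlockSketch(cs *ConsensusState, block *types.Block, parts *types.PartSet, seenValidation *types.Validation) error {
	// 1. Stage: copy the state and run the block's txs against the proxy app.
	if err := cs.stageBlock(block, parts); err != nil {
		return err
	}
	// 2. Persist the block and its parts.
	cs.blockStore.SaveBlock(block, parts, seenValidation)
	// 3. Commit the application state via the proxy app context.
	if err := cs.stagedState.Commit(cs.proxyAppCtx); err != nil {
		return err
	}
	// 4. Persist the new tendermint state, then prune/re-check the mempool.
	cs.stagedState.Save()
	return cs.mempool.Update(block)
}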
- cs.mempoolReactor.ResetForBlockAndState(block, cs.stagedState) + cs.mempool.Update(block) // Fire off event if cs.evsw != nil && cs.evc != nil { @@ -1161,3 +1168,22 @@ func (cs *ConsensusState) SetFireable(evsw events.Fireable) { func (cs *ConsensusState) String() string { return Fmt("ConsensusState(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) } + +func CompareHRS(h1, r1 int, s1 RoundStepType, h2, r2 int, s2 RoundStepType) int { + if h1 < h2 { + return -1 + } else if h1 > h2 { + return 1 + } + if r1 < r2 { + return -1 + } else if r1 > r2 { + return 1 + } + if s1 < s2 { + return -1 + } else if s1 > s2 { + return 1 + } + return 0 +} diff --git a/consensus/state_test.go b/consensus/state_test.go index 5adc940e7..ca12cbf1b 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -7,6 +7,7 @@ import ( "time" _ "github.com/tendermint/tendermint/config/tendermint_test" + //"github.com/tendermint/tendermint/events" "github.com/tendermint/tendermint/events" "github.com/tendermint/tendermint/types" ) @@ -14,7 +15,8 @@ import ( /* ProposeSuite -x * TestProposerSelection - round robin ordering +x * TestProposerSelection0 - round robin ordering, round 0 +x * TestProposerSelection2 - round robin ordering, round 2++ x * TestEnterProposeNoValidator - timeout into prevote round x * TestEnterPropose - finish propose without timing out (we have the proposal) x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil @@ -48,154 +50,131 @@ func init() { timeoutPropose = 500 * time.Millisecond } -func TestProposerSelection(t *testing.T) { - css, _ := simpleConsensusState(3) // test needs more work for more than 3 validators - cs1 := css[0] +func TestProposerSelection0(t *testing.T) { + cs1, vss := simpleConsensusState(3) // test needs more work for more than 3 validators cs1.newStepCh = make(chan *RoundState) // so it blocks + height, round := cs1.Height, cs1.Round - cs1.SetDecideProposalFunc(nilProposal) + cs1.EnterNewRound(height, round, false) - cs1.EnterNewRound(cs1.Height, 0, false) - - // everyone just votes nil. we get a new proposer each round - for i := 0; i < len(css); i++ { - if i == len(css)-1 { - // reset cs1's decideProposal function for later - cs1.SetDecideProposalFunc(decideProposal) - } - prop := cs1.Validators.Proposer() - if !bytes.Equal(prop.Address, css[i].privValidator.Address) { - t.Fatalf("expected proposer to be validator %d. Got %X", i, prop.Address) - } - nilRound(t, 0, cs1, css[1:]...) - incrementRound(css[1:]...) - - } - - // now we should be back at first validator. // lets commit a block and ensure proposer for the next height is correct - height := cs1.Height prop := cs1.Validators.Proposer() if !bytes.Equal(prop.Address, cs1.privValidator.Address) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } - signAddVoteToFromMany(types.VoteTypePrevote, cs1, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), css[1:]...) - <-cs1.NewStepCh() // prevotes - signAddVoteToFromMany(types.VoteTypePrecommit, cs1, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), css[1:]...) - <-cs1.NewStepCh() // - <-cs1.NewStepCh() // go to next round - if cs1.Height != height+1 { - t.Fatal("Expected height to increment. Got", cs1.Height) - } + + waitFor(t, cs1, height, round, RoundStepPrevote) + + signAddVoteToFromMany(types.VoteTypePrevote, cs1, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vss[1:]...) 
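// The rewritten tests in this file synchronize on (height, round, step) via a
// waitFor helper that is not part of this hunk. A guess at its shape, built on
// the new CompareHRS function above (name, timeout handling, and the RoundState
// field types are assumptions, not the actual helper):
func waitForSketch(t *testing.T, cs *ConsensusState, height, round int, step RoundStepType) {
	for {
		rs := <-cs.NewStepCh() // block until the state machine reports a new step
		if CompareHRS(rs.Height, rs.Round, rs.Step, height, round, step) >= 0 {
			return // reached (or passed) the requested height/round/step
		}
	}
}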
+ + waitFor(t, cs1, height, round, RoundStepPrecommit) + + signAddVoteToFromMany(types.VoteTypePrecommit, cs1, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vss[1:]...) + + waitFor(t, cs1, height, round, RoundStepPrecommit) + waitFor(t, cs1, height, round+1, RoundStepPropose) prop = cs1.Validators.Proposer() - if !bytes.Equal(prop.Address, css[1].privValidator.Address) { + if !bytes.Equal(prop.Address, vss[1].Address) { t.Fatalf("expected proposer to be validator %d. Got %X", 1, prop.Address) } +} - // Now let's do it all again, but starting from round 2 instead of 0 - - css, _ = simpleConsensusState(3) // test needs more work for more than 3 validators - cs1 = css[0] +// Now let's do it all again, but starting from round 2 instead of 0 +func TestProposerSelection2(t *testing.T) { + cs1, vss := simpleConsensusState(3) // test needs more work for more than 3 validators cs1.newStepCh = make(chan *RoundState) // so it blocks - cs1.SetDecideProposalFunc(nilProposal) - // this time we jump in at round 2 - incrementRound(css[1:]...) - incrementRound(css[1:]...) + incrementRound(vss[1:]...) + incrementRound(vss[1:]...) cs1.EnterNewRound(cs1.Height, 2, false) // everyone just votes nil. we get a new proposer each round - for i := 0; i < len(css); i++ { + for i := 0; i < len(vss); i++ { prop := cs1.Validators.Proposer() - if !bytes.Equal(prop.Address, css[(i+2)%len(css)].privValidator.Address) { - t.Fatalf("expected proposer to be validator %d. Got %X", (i+2)%len(css), prop.Address) + if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].Address) { + t.Fatalf("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address) } - nilRound(t, 2, cs1, css[1:]...) - incrementRound(css[1:]...) + nilRound(t, 2, cs1, vss[1:]...) + incrementRound(vss[1:]...) } } // a non-validator should timeout into the prevote round func TestEnterProposeNoPrivValidator(t *testing.T) { - css, _ := simpleConsensusState(1) - cs := css[0] + cs, _ := simpleConsensusState(1) cs.SetPrivValidator(nil) + height, round := cs.Height, cs.Round - timeoutChan := make(chan struct{}) + // Listen for propose timeout event + timeoutEventReceived := false evsw := events.NewEventSwitch() evsw.OnStart() evsw.AddListenerForEvent("tester", types.EventStringTimeoutPropose(), func(data types.EventData) { - timeoutChan <- struct{}{} + timeoutEventReceived = true }) cs.SetFireable(evsw) // starts a go routine for EnterPropose - cs.EnterNewRound(cs.Height, 0, false) + cs.EnterNewRound(height, round, false) - // go to prevote - <-cs.NewStepCh() + // Wait until the prevote step + waitFor(t, cs, height, round, RoundStepPrevote) // if we're not a validator, EnterPropose should timeout - select { - case rs := <-cs.NewStepCh(): - log.Info(rs.String()) + if timeoutEventReceived == false { t.Fatal("Expected EnterPropose to timeout") - case <-timeoutChan: - rs := cs.GetRoundState() - if rs.Proposal != nil { - t.Error("Expected to make no proposal, since no privValidator") - } - break + } + if cs.GetRoundState().Proposal != nil { + t.Error("Expected to make no proposal, since no privValidator") } } // a validator should not timeout of the prevote round (TODO: unless the block is really big!) 
func TestEnterPropose(t *testing.T) { - css, _ := simpleConsensusState(1) - cs := css[0] + cs, _ := simpleConsensusState(1) + height, round := cs.Height, cs.Round - timeoutChan := make(chan struct{}) + // Listen for propose timeout event + timeoutEventReceived := false evsw := events.NewEventSwitch() evsw.OnStart() evsw.AddListenerForEvent("tester", types.EventStringTimeoutPropose(), func(data types.EventData) { - timeoutChan <- struct{}{} + timeoutEventReceived = true }) cs.SetFireable(evsw) // starts a go routine for EnterPropose - cs.EnterNewRound(cs.Height, 0, false) + cs.EnterNewRound(height, round, false) - // go to prevote - <-cs.NewStepCh() + // Wait until the prevote step + waitFor(t, cs, height, round, RoundStepPrevote) - // if we are a validator, we expect it not to timeout - select { - case <-cs.NewStepCh(): - rs := cs.GetRoundState() + // Check that Proposal, ProposalBlock, ProposalBlockParts are set. + rs := cs.GetRoundState() + if rs.Proposal == nil { + t.Error("rs.Proposal should be set") + } + if rs.ProposalBlock == nil { + t.Error("rs.ProposalBlock should be set") + } + if rs.ProposalBlockParts.Total() == 0 { + t.Error("rs.ProposalBlockParts should be set") + } - // Check that Proposal, ProposalBlock, ProposalBlockParts are set. - if rs.Proposal == nil { - t.Error("rs.Proposal should be set") - } - if rs.ProposalBlock == nil { - t.Error("rs.ProposalBlock should be set") - } - if rs.ProposalBlockParts.Total() == 0 { - t.Error("rs.ProposalBlockParts should be set") - } - break - case <-timeoutChan: + // since we are a validator, EnterPropose should not have timed out + if timeoutEventReceived == true { t.Fatal("Expected EnterPropose not to timeout") } } func TestBadProposal(t *testing.T) { - css, privVals := simpleConsensusState(2) - cs1, cs2 := css[0], css[1] + cs1, vss := simpleConsensusState(2) cs1.newStepCh = make(chan *RoundState) // so it blocks + height, round := cs1.Height, cs1.Round + cs2 := vss[1] timeoutChan := make(chan struct{}) evsw := events.NewEventSwitch() @@ -212,36 +191,41 @@ func TestBadProposal(t *testing.T) { propBlock := changeProposer(t, cs1, cs2) // make the block bad by tampering with statehash - stateHash := propBlock.StateHash + stateHash := propBlock.AppHash stateHash[0] = byte((stateHash[0] + 1) % 255) - propBlock.StateHash = stateHash + propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet() - proposal := types.NewProposal(cs2.Height, cs2.Round, propBlockParts.Header(), cs2.Votes.POLRound()) - if err := cs2.privValidator.SignProposal(cs2.state.ChainID, proposal); err != nil { + proposal := types.NewProposal(cs2.Height, cs2.Round, propBlockParts.Header(), -1) + if err := cs2.SignProposal(chainID, proposal); err != nil { t.Fatal("failed to sign bad proposal", err) } // start round - cs1.EnterNewRound(cs1.Height, 0, false) + cs1.EnterNewRound(height, round, false) // now we're on a new round and not the proposer - <-cs1.NewStepCh() + waitFor(t, cs1, height, round, RoundStepPropose) // so set the proposal block (and fix voting power) cs1.mtx.Lock() cs1.Proposal, cs1.ProposalBlock, cs1.ProposalBlockParts = proposal, propBlock, propBlockParts - fixVotingPower(t, cs1, privVals[1].Address) + fixVotingPower(t, cs1, vss[1].Address) cs1.mtx.Unlock() // and wait for timeout <-timeoutChan // go to prevote, prevote for nil (proposal is bad) - <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], nil) + waitFor(t, cs1, height, round, RoundStepPrevote) + + validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from cs2.
we should precommit nil signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header()) - _, _, _ = <-cs1.NewStepCh(), <-timeoutChan, <-cs1.NewStepCh() - validatePrecommit(t, cs1, 0, 0, privVals[0], nil, nil) + + waitFor(t, cs1, height, round, RoundStepPrevoteWait) + <-timeoutChan + waitFor(t, cs1, height, round, RoundStepPrecommit) + + validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header()) } @@ -250,13 +234,14 @@ func TestBadProposal(t *testing.T) { // propose, prevote, and precommit a block func TestFullRound1(t *testing.T) { - css, privVals := simpleConsensusState(1) - cs := css[0] + cs, vss := simpleConsensusState(1) + height, round := cs.Height, cs.Round // starts a go routine for EnterPropose - cs.EnterNewRound(cs.Height, 0, false) + cs.EnterNewRound(height, round, false) + // wait to finish propose and prevote - _, _ = <-cs.NewStepCh(), <-cs.NewStepCh() + waitFor(t, cs, height, round, RoundStepPrevote) // we should now be in precommit // verify our prevote is there @@ -264,42 +249,39 @@ func TestFullRound1(t *testing.T) { propBlockHash := cs.ProposalBlock.Hash() cs.mtx.Unlock() + // Wait until Precommit + waitFor(t, cs, height, round, RoundStepPrecommit) + // the proposed block should be prevoted, precommitted, and locked - validatePrevoteAndPrecommit(t, cs, 0, 0, privVals[0], propBlockHash, propBlockHash, nil) + validatePrevoteAndPrecommit(t, cs, round, round, vss[0], propBlockHash, propBlockHash) } // nil is proposed, so prevote and precommit nil func TestFullRoundNil(t *testing.T) { - css, privVals := simpleConsensusState(1) - cs := css[0] - cs.newStepCh = make(chan *RoundState) // so it blocks - - cs.SetDecideProposalFunc(nilProposal) + cs, vss := simpleConsensusState(1) + height, round := cs.Height, cs.Round - // starts a go routine for EnterPropose - cs.EnterNewRound(cs.Height, 0, false) + // Skip the propose step + cs.EnterPrevote(height, round, true) - // wait to finish propose (we should time out) - <-cs.NewStepCh() - - // wait to finish prevote - <-cs.NewStepCh() + // Wait until Precommit + waitFor(t, cs, height, round, RoundStepPrecommit) // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, 0, 0, privVals[0], nil, nil, nil) + validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) } // run through propose, prevote, precommit commit with two validators // where the first validator has to wait for votes from the second func TestFullRound2(t *testing.T) { - css, privVals := simpleConsensusState(2) - cs1, cs2 := css[0], css[1] + cs1, vss := simpleConsensusState(2) + cs2 := vss[1] cs1.newStepCh = make(chan *RoundState) // so it blocks - cs2.newStepCh = make(chan *RoundState) // so it blocks + height, round := cs1.Height, cs1.Round // start round and wait for propose and prevote - cs1.EnterNewRound(cs1.Height, 0, false) - _, _ = <-cs1.NewStepCh(), <-cs1.NewStepCh() + cs1.EnterNewRound(height, round, false) + waitFor(t, cs1, height, round, RoundStepPrevote) // we should now be stuck in limbo forever, waiting for more prevotes ensureNoNewStep(t, cs1) @@ -310,10 +292,10 @@ func TestFullRound2(t *testing.T) { signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlockHash, propPartsHeader) // wait to finish precommit - <-cs1.NewStepCh() + waitFor(t, cs1, cs1.Height, 0, RoundStepPrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], 
propBlockHash, propBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) // we should now be stuck in limbo forever, waiting for more precommits ensureNoNewStep(t, cs1) @@ -322,10 +304,7 @@ func TestFullRound2(t *testing.T) { signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlockHash, propPartsHeader) // wait to finish commit, propose in next height - _, rs := <-cs1.NewStepCh(), <-cs1.NewStepCh() - if rs.Height != 2 { - t.Fatal("Expected height to increment") - } + waitFor(t, cs1, height+1, 0, RoundStepNewHeight) } //------------------------------------------------------------------------------------------ @@ -335,9 +314,10 @@ func TestFullRound2(t *testing.T) { // val1 proposes the first 2 rounds, and is locked in the first. // val2 proposes the next two. val1 should precommit nil on all (except first where he locks) func TestLockNoPOL(t *testing.T) { - css, privVals := simpleConsensusState(2) - cs1, cs2 := css[0], css[1] + cs1, vss := simpleConsensusState(2) + cs2 := vss[1] cs1.newStepCh = make(chan *RoundState) // so it blocks + height := cs1.Height timeoutChan := make(chan struct{}) evsw := events.NewEventSwitch() @@ -355,8 +335,8 @@ func TestLockNoPOL(t *testing.T) { */ // start round and wait for propose and prevote - cs1.EnterNewRound(cs1.Height, 0, false) - _, _ = <-cs1.NewStepCh(), <-cs1.NewStepCh() + cs1.EnterNewRound(height, 0, false) + waitFor(t, cs1, height, 0, RoundStepPrevote) // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from cs2: @@ -367,10 +347,10 @@ func TestLockNoPOL(t *testing.T) { cs1.mtx.Unlock() // wait to finish precommit - <-cs1.NewStepCh() + waitFor(t, cs1, height, 0, RoundStepPrecommit) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block @@ -381,7 +361,8 @@ func TestLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round) // but with invalid args. then we EnterPrecommitWait, and the timeout to new round - _, _ = <-cs1.NewStepCh(), <-timeoutChan + waitFor(t, cs1, height, 0, RoundStepPrecommitWait) + <-timeoutChan log.Info("#### ONTO ROUND 2") /* @@ -391,27 +372,31 @@ func TestLockNoPOL(t *testing.T) { incrementRound(cs2) // now we're on a new round and not the proposer, so wait for timeout - _, _ = <-cs1.NewStepCh(), <-timeoutChan + waitFor(t, cs1, height, 1, RoundStepPropose) + <-timeoutChan + if cs1.ProposalBlock != nil { t.Fatal("Expected proposal block to be nil") } // wait to finish prevote - <-cs1.NewStepCh() + waitFor(t, cs1, height, 1, RoundStepPrevote) // we should have prevoted our locked block - validatePrevote(t, cs1, 1, privVals[0], cs1.LockedBlock.Hash()) + validatePrevote(t, cs1, 1, vss[0], cs1.LockedBlock.Hash()) // add a conflicting prevote from the other validator signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, cs1.ProposalBlockParts.Header()) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit - _, _, _ = <-cs1.NewStepCh(), <-timeoutChan, <-cs1.NewStepCh() + waitFor(t, cs1, height, 1, RoundStepPrevoteWait) + <-timeoutChan + waitFor(t, cs1, height, 1, RoundStepPrecommit) // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, 1, 0, privVals[0], nil, theBlockHash) + validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash) // add conflicting precommit from cs2 // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round @@ -419,7 +404,8 @@ func TestLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round, but with invalid args // then we EnterPrecommitWait and timeout into NewRound - _, _ = <-cs1.NewStepCh(), <-timeoutChan + waitFor(t, cs1, height, 1, RoundStepPrecommitWait) + <-timeoutChan log.Info("#### ONTO ROUND 3") /* @@ -428,7 +414,7 @@ func TestLockNoPOL(t *testing.T) { incrementRound(cs2) - <-cs1.newStepCh + waitFor(t, cs1, height, 2, RoundStepPropose) // now we're on a new round and are the proposer if cs1.ProposalBlock != cs1.LockedBlock { @@ -436,20 +422,24 @@ func TestLockNoPOL(t *testing.T) { } // go to prevote, prevote for locked block - <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], cs1.LockedBlock.Hash()) + waitFor(t, cs1, height, 2, RoundStepPrevote) + + validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) // TODO: quick fastforward to new round, set proposer signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, cs1.ProposalBlockParts.Header()) - _, _, _ = <-cs1.NewStepCh(), <-timeoutChan, <-cs1.NewStepCh() - validatePrecommit(t, cs1, 2, 0, privVals[0], nil, theBlockHash) // precommit nil but be locked on proposal + + waitFor(t, cs1, height, 2, RoundStepPrevoteWait) + <-timeoutChan + waitFor(t, cs1, height, 2, RoundStepPrecommit) + + validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, cs1.ProposalBlockParts.Header()) // NOTE: conflicting precommits at same height - <-cs1.NewStepCh() + waitFor(t, cs1, height, 2, RoundStepPrecommitWait) // before we time out into new round, set next proposal block - cs2.decideProposal(cs2.Height, cs2.Round+1) - prop, propBlock := cs2.Proposal, cs2.ProposalBlock + prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with cs2") } @@ -464,27 +454,34 @@ func TestLockNoPOL(t *testing.T) { */ // now we're on a new round and not the proposer - <-cs1.NewStepCh() + waitFor(t, cs1, height, 3, RoundStepPropose) + // so set the proposal block cs1.mtx.Lock() cs1.Proposal, cs1.ProposalBlock = prop, propBlock cs1.mtx.Unlock() + // and wait for timeout <-timeoutChan // go to prevote, prevote for locked block (not proposal) - <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], cs1.LockedBlock.Hash()) + waitFor(t, cs1, height, 3, RoundStepPrevote) + + validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header()) - _, _, _ = <-cs1.NewStepCh(), <-timeoutChan, <-cs1.NewStepCh() - validatePrecommit(t, cs1, 2, 0, privVals[0], nil, theBlockHash) // precommit nil but locked on proposal + + waitFor(t, cs1, height, 3, RoundStepPrevoteWait) + <-timeoutChan + waitFor(t, cs1, height, 3, RoundStepPrecommit) + + 
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header()) // NOTE: conflicting precommits at same height } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestLockPOLRelock(t *testing.T) { - css, privVals := simpleConsensusState(4) - cs1, cs2, cs3, cs4 := css[0], css[1], css[2], css[3] + cs1, vss := simpleConsensusState(4) + cs2, cs3, cs4 := vss[1], vss[2], vss[3] cs1.newStepCh = make(chan *RoundState) // so it blocks timeoutChan := make(chan *types.EventDataRoundState) @@ -534,7 +531,7 @@ func TestLockPOLRelock(t *testing.T) { <-donePrecommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) donePrecommitWait := make(chan struct{}) go func() { @@ -550,17 +547,13 @@ func TestLockPOLRelock(t *testing.T) { // before we time out into new round, set next proposer // and next proposal block - _, v1 := cs1.Validators.GetByAddress(privVals[0].Address) + _, v1 := cs1.Validators.GetByAddress(vss[0].Address) v1.VotingPower = 1 if updated := cs1.Validators.Update(v1); !updated { t.Fatal("failed to update validator") } - cs2.decideProposal(cs2.Height, cs2.Round+1) - prop, propBlock := cs2.Proposal, cs2.ProposalBlock - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with cs2") - } + prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1) incrementRound(cs2, cs3, cs4) @@ -592,7 +585,7 @@ func TestLockPOLRelock(t *testing.T) { } // go to prevote, prevote for locked block (not proposal), move on _, _ = <-voteChan, <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], theBlockHash) + validatePrevote(t, cs1, 0, vss[0], theBlockHash) donePrecommit = make(chan struct{}) go func() { @@ -610,7 +603,7 @@ func TestLockPOLRelock(t *testing.T) { <-donePrecommit // we should have unlocked and locked on the new block - validatePrecommit(t, cs1, 1, 1, privVals[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) donePrecommitWait = make(chan struct{}) go func() { @@ -635,8 +628,8 @@ func TestLockPOLRelock(t *testing.T) { // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestLockPOLUnlock(t *testing.T) { - css, privVals := simpleConsensusState(4) - cs1, cs2, cs3, cs4 := css[0], css[1], css[2], css[3] + cs1, vss := simpleConsensusState(4) + cs2, cs3, cs4 := vss[1], vss[2], vss[3] cs1.newStepCh = make(chan *RoundState) // so it blocks timeoutChan := make(chan *types.EventDataRoundState) @@ -682,7 +675,7 @@ func TestLockPOLUnlock(t *testing.T) { <-donePrecommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) donePrecommitWait := make(chan struct{}) go func() { @@ -696,17 +689,13 @@ func TestLockPOLUnlock(t *testing.T) { // before we time out into new round, set next proposer // and next proposal block - _, v1 := cs1.Validators.GetByAddress(privVals[0].Address) + _, v1 := cs1.Validators.GetByAddress(vss[0].Address) v1.VotingPower = 1 if updated := cs1.Validators.Update(v1); !updated { t.Fatal("failed to update validator") } - cs2.decideProposal(cs2.Height, 
cs2.Round+1) - prop, propBlock := cs2.Proposal, cs2.ProposalBlock - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with cs2") - } + prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1) incrementRound(cs2, cs3, cs4) @@ -732,7 +721,7 @@ func TestLockPOLUnlock(t *testing.T) { // go to prevote, prevote for locked block (not proposal) _, _ = <-voteChan, <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], lockedBlockHash) + validatePrevote(t, cs1, 0, vss[0], lockedBlockHash) donePrecommit = make(chan struct{}) go func() { @@ -749,7 +738,7 @@ func TestLockPOLUnlock(t *testing.T) { // we should have unlocked // NOTE: we don't lock on nil, so LockedRound is still 0 - validatePrecommit(t, cs1, 1, 0, privVals[0], nil, nil) + validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil) donePrecommitWait = make(chan struct{}) go func() { @@ -767,8 +756,8 @@ func TestLockPOLUnlock(t *testing.T) { // then a polka at round 2 that we lock on // then we see the polka from round 1 but shouldn't unlock func TestLockPOLSafety1(t *testing.T) { - css, privVals := simpleConsensusState(4) - cs1, cs2, cs3, cs4 := css[0], css[1], css[2], css[3] + cs1, vss := simpleConsensusState(4) + cs2, cs3, cs4 := vss[1], vss[2], vss[3] cs1.newStepCh = make(chan *RoundState) // so it blocks timeoutChan := make(chan *types.EventDataRoundState) @@ -796,14 +785,14 @@ func TestLockPOLSafety1(t *testing.T) { propBlock := cs1.ProposalBlock - validatePrevote(t, cs1, 0, privVals[0], cs1.ProposalBlock.Hash()) + validatePrevote(t, cs1, 0, vss[0], cs1.ProposalBlock.Hash()) // the others sign a polka but we don't see it prevotes := signVoteMany(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet().Header(), cs2, cs3, cs4) // before we time out into new round, set next proposer // and next proposal block - _, v1 := cs1.Validators.GetByAddress(privVals[0].Address) + _, v1 := cs1.Validators.GetByAddress(vss[0].Address) v1.VotingPower = 1 if updated := cs1.Validators.Update(v1); !updated { t.Fatal("failed to update validator") @@ -814,11 +803,7 @@ func TestLockPOLSafety1(t *testing.T) { // we do see them precommit nil signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, cs2, cs3, cs4) - cs2.decideProposal(cs2.Height, cs2.Round+1) - prop, propBlock := cs2.Proposal, cs2.ProposalBlock - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with cs2") - } + prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1) incrementRound(cs2, cs3, cs4) @@ -843,7 +828,7 @@ func TestLockPOLSafety1(t *testing.T) { log.Warn("new prop", "hash", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block _, _ = <-voteChan, <-cs1.NewStepCh() - validatePrevote(t, cs1, 1, privVals[0], propBlockHash) + validatePrevote(t, cs1, 1, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it donePrecommit := make(chan struct{}) @@ -861,7 +846,7 @@ func TestLockPOLSafety1(t *testing.T) { <-donePrecommit // we should have precommitted - validatePrecommit(t, cs1, 1, 1, privVals[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) // now we see precommits for nil donePrecommitWait := make(chan struct{}) @@ -889,7 +874,7 @@ func TestLockPOLSafety1(t *testing.T) { _, _ = <-voteChan, <-cs1.NewStepCh() // we should prevote what we're locked on - validatePrevote(t, cs1, 2, privVals[0], propBlockHash) + validatePrevote(t, cs1, 2, vss[0], propBlockHash) // add 
prevotes from the earlier round addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4) @@ -904,8 +889,8 @@ func TestLockPOLSafety1(t *testing.T) { // we lock on P1 at R1, don't see P2, and unlock using P3 at R3 // then we should make sure we don't lock using P2 func TestLockPOLSafety2(t *testing.T) { - css, privVals := simpleConsensusState(4) - cs1, cs2, cs3, cs4 := css[0], css[1], css[2], css[3] + cs1, vss := simpleConsensusState(4) + cs2, cs3, cs4 := vss[1], vss[2], vss[3] cs1.newStepCh = make(chan *RoundState) // so it blocks timeoutChan := make(chan *types.EventDataRoundState) @@ -943,7 +928,7 @@ func TestLockPOLSafety2(t *testing.T) { <-donePrecommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) donePrecommitWait := make(chan struct{}) go func() { @@ -957,17 +942,13 @@ func TestLockPOLSafety2(t *testing.T) { // before we time out into new round, set next proposer // and next proposal block - _, v1 := cs1.Validators.GetByAddress(privVals[0].Address) + _, v1 := cs1.Validators.GetByAddress(vss[0].Address) v1.VotingPower = 1 if updated := cs1.Validators.Update(v1); !updated { t.Fatal("failed to update validator") } - cs2.decideProposal(cs2.Height, cs2.Round+1) - prop, propBlock := cs2.Proposal, cs2.ProposalBlock - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with cs2") - } + prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1) incrementRound(cs2, cs3, cs4) @@ -984,7 +965,7 @@ func TestLockPOLSafety2(t *testing.T) { _, _ = <-cs1.NewStepCh(), <-timeoutChan // go to prevote, prevote for locked block _, _ = <-voteChan, <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], cs1.LockedBlock.Hash()) + validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) // the others sign a polka but we don't see it prevotes := signVoteMany(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet().Header(), cs2, cs3, cs4) @@ -1015,7 +996,7 @@ func TestLockPOLSafety2(t *testing.T) { // we should have unlocked // NOTE: we don't lock on nil, so LockedRound is still 0 - validatePrecommit(t, cs1, 2, 0, privVals[0], nil, nil) + validatePrecommit(t, cs1, 2, 0, vss[0], nil, nil) donePrecommitWait = make(chan struct{}) go func() { @@ -1048,7 +1029,7 @@ func TestLockPOLSafety2(t *testing.T) { log.Warn("Done adding prevotes!") // we should prevote it now - validatePrevote(t, cs1, 3, privVals[0], cs1.ProposalBlock.Hash()) + validatePrevote(t, cs1, 3, vss[0], cs1.ProposalBlock.Hash()) // but we shouldn't precommit it precommits := cs1.Votes.Precommits(3) @@ -1062,8 +1043,8 @@ func TestLockPOLSafety2(t *testing.T) { // SlashingSuite func TestSlashingPrevotes(t *testing.T) { - css, _ := simpleConsensusState(2) - cs1, cs2 := css[0], css[1] + cs1, vss := simpleConsensusState(2) + cs2 := vss[1] cs1.newStepCh = make(chan *RoundState) // so it blocks // start round and wait for propose and prevote @@ -1089,8 +1070,8 @@ func TestSlashingPrevotes(t *testing.T) { } func TestSlashingPrecommits(t *testing.T) { - css, _ := simpleConsensusState(2) - cs1, cs2 := css[0], css[1] + cs1, vss := simpleConsensusState(2) + cs2 := vss[1] cs1.newStepCh = make(chan *RoundState) // so it blocks // start round and wait for propose and prevote @@ -1130,8 +1111,8 @@ func TestSlashingPrecommits(t *testing.T) { // 4 vals. // we receive a final precommit after going into next round, but others might have gone to commit already! 
func TestHalt1(t *testing.T) { - css, privVals := simpleConsensusState(4) - cs1, cs2, cs3, cs4 := css[0], css[1], css[2], css[3] + cs1, vss := simpleConsensusState(4) + cs2, cs3, cs4 := vss[1], vss[2], vss[3] cs1.newStepCh = make(chan *RoundState) // so it blocks timeoutChan := make(chan struct{}) @@ -1157,7 +1138,7 @@ func TestHalt1(t *testing.T) { <-donePrecommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, privVals[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) donePrecommitWait := make(chan struct{}) go func() { @@ -1184,7 +1165,7 @@ func TestHalt1(t *testing.T) { // go to prevote, prevote for locked block _, _ = <-cs1.NewStepCh(), <-cs1.NewStepCh() - validatePrevote(t, cs1, 0, privVals[0], cs1.LockedBlock.Hash()) + validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) // now we receive the precommit from the previous round addVoteToFrom(cs1, cs4, precommit4) diff --git a/consensus/test.go b/consensus/test.go deleted file mode 100644 index 6f210aa81..000000000 --- a/consensus/test.go +++ /dev/null @@ -1,258 +0,0 @@ -package consensus - -import ( - "bytes" - "fmt" - "testing" - "time" - - dbm "github.com/tendermint/go-db" - "github.com/tendermint/go-p2p" - bc "github.com/tendermint/tendermint/blockchain" - "github.com/tendermint/tendermint/events" - mempl "github.com/tendermint/tendermint/mempool" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -//------------------------------------------------------------------------------- -// utils - -func nilProposal(cs *ConsensusState, height, round int) { - // Make proposal - proposal := types.NewProposal(height, round, types.PartSetHeader{}, -1) - err := cs.privValidator.SignProposal(cs.state.ChainID, proposal) - if err == nil { - log.Notice("Signed and set proposal", "height", height, "round", round, "proposal", proposal) - // Set fields - cs.Proposal = proposal - cs.ProposalBlock = nil - cs.ProposalBlockParts = nil - } else { - log.Warn("EnterPropose: Error signing proposal", "height", height, "round", round, "error", err) - } -} - -func nilRound(t *testing.T, startRound int, cs1 *ConsensusState, css ...*ConsensusState) { - round := cs1.Round - if round == startRound { - _, _ = <-cs1.NewStepCh(), <-cs1.NewStepCh() - } - signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, cs1.ProposalBlockParts.Header(), css...) - <-cs1.NewStepCh() // prevotes - signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, cs1.ProposalBlockParts.Header(), css...) - <-cs1.NewStepCh() // - <-cs1.NewStepCh() // go to next round - if cs1.Round != round+1 { - t.Fatal("Expected round to increment. 
Got", cs1.Round) - } -} - -func changeProposer(t *testing.T, perspectiveOf, newProposer *ConsensusState) *types.Block { - _, v1 := perspectiveOf.Validators.GetByAddress(perspectiveOf.privValidator.Address) - v1.Accum, v1.VotingPower = 0, 0 - if updated := perspectiveOf.Validators.Update(v1); !updated { - t.Fatal("failed to update validator") - } - _, v2 := perspectiveOf.Validators.GetByAddress(newProposer.privValidator.Address) - v2.Accum, v2.VotingPower = 100, 100 - if updated := perspectiveOf.Validators.Update(v2); !updated { - t.Fatal("failed to update validator") - } - - // make the proposal - propBlock, _ := newProposer.createProposalBlock() - if propBlock == nil { - t.Fatal("Failed to create proposal block with cs2") - } - return propBlock -} - -func fixVotingPower(t *testing.T, cs1 *ConsensusState, addr2 []byte) { - _, v1 := cs1.Validators.GetByAddress(cs1.privValidator.Address) - _, v2 := cs1.Validators.GetByAddress(addr2) - v1.Accum, v1.VotingPower = v2.Accum, v2.VotingPower - if updated := cs1.Validators.Update(v1); !updated { - t.Fatal("failed to update validator") - } -} - -func addVoteToFromMany(to *ConsensusState, votes []*types.Vote, froms ...*ConsensusState) { - if len(votes) != len(froms) { - panic("len(votes) and len(froms) must match") - } - for i, from := range froms { - addVoteToFrom(to, from, votes[i]) - } -} - -func addVoteToFrom(to, from *ConsensusState, vote *types.Vote) { - valIndex, _ := to.Validators.GetByAddress(from.privValidator.Address) - added, err := to.TryAddVote(valIndex, vote, "") - if _, ok := err.(*types.ErrVoteConflictingSignature); ok { - // let it fly - } else if !added { - panic(fmt.Sprintln("Failed to add vote. Err:", err)) - } else if err != nil { - panic(fmt.Sprintln("Failed to add vote:", err)) - } -} - -func signVote(from *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { - vote, err := from.signVote(voteType, hash, header) - if err != nil { - panic(fmt.Sprintln("Failed to sign vote", err)) - } - return vote -} - -func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, css ...*ConsensusState) []*types.Vote { - votes := make([]*types.Vote, len(css)) - for i, cs := range css { - votes[i] = signVote(cs, voteType, hash, header) - } - return votes -} - -// add vote to one cs from another -func signAddVoteToFromMany(voteType byte, to *ConsensusState, hash []byte, header types.PartSetHeader, froms ...*ConsensusState) { - for _, from := range froms { - vote := signVote(from, voteType, hash, header) - addVoteToFrom(to, from, vote) - } -} - -func signAddVoteToFrom(voteType byte, to, from *ConsensusState, hash []byte, header types.PartSetHeader) *types.Vote { - vote := signVote(from, voteType, hash, header) - addVoteToFrom(to, from, vote) - return vote -} - -func ensureNoNewStep(t *testing.T, cs *ConsensusState) { - timeout := time.NewTicker(2 * time.Second) - select { - case <-timeout.C: - break - case <-cs.NewStepCh(): - panic("We should be stuck waiting for more votes, not moving to the next step") - } -} - -func ensureNewStep(t *testing.T, cs *ConsensusState) { - timeout := time.NewTicker(2 * time.Second) - select { - case <-timeout.C: - panic("We should have gone to the next step, not be stuck waiting") - case <-cs.NewStepCh(): - break - } -} - -func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *types.PrivValidator, blockHash []byte) { - prevotes := cs.Votes.Prevotes(round) - var vote *types.Vote - if vote = prevotes.GetByAddress(privVal.Address); vote == nil { - 
panic("Failed to find prevote from validator") - } - if blockHash == nil { - if vote.BlockHash != nil { - panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockHash)) - } - } else { - if !bytes.Equal(vote.BlockHash, blockHash) { - panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockHash)) - } - } -} - -func incrementRound(css ...*ConsensusState) { - for _, cs := range css { - cs.Round += 1 - } -} - -func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *types.PrivValidator, votedBlockHash, lockedBlockHash []byte) { - precommits := cs.Votes.Precommits(thisRound) - var vote *types.Vote - if vote = precommits.GetByAddress(privVal.Address); vote == nil { - panic("Failed to find precommit from validator") - } - - if votedBlockHash == nil { - if vote.BlockHash != nil { - panic("Expected precommit to be for nil") - } - } else { - if !bytes.Equal(vote.BlockHash, votedBlockHash) { - panic("Expected precommit to be for proposal block") - } - } - - if lockedBlockHash == nil { - if cs.LockedRound != lockRound || cs.LockedBlock != nil { - panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock)) - } - } else { - if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { - panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash)) - } - } - -} - -func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *types.PrivValidator, votedBlockHash, lockedBlockHash []byte, f func()) { - // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) - if f != nil { - f() - } - // wait to finish precommit - <-cs.NewStepCh() - // verify precommit - cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() -} - -func simpleConsensusState(nValidators int) ([]*ConsensusState, []*types.PrivValidator) { - // Get State - state, privVals := sm.RandGenesisState(nValidators, false, 10) - - fmt.Println(state.Validators) - - css := make([]*ConsensusState, nValidators) - for i := 0; i < nValidators; i++ { - // Get BlockStore - blockDB := dbm.NewMemDB() - blockStore := bc.NewBlockStore(blockDB) - - // Make MempoolReactor - mempool := mempl.NewMempool(state.Copy()) - mempoolReactor := mempl.NewMempoolReactor(mempool) - - mempoolReactor.SetSwitch(p2p.NewSwitch()) - - // Make ConsensusReactor - cs := NewConsensusState(state, blockStore, mempoolReactor) - cs.SetPrivValidator(privVals[i]) - - evsw := events.NewEventSwitch() - cs.SetFireable(evsw) - - // read off the NewHeightStep - <-cs.NewStepCh() - - css[i] = cs - } - - return css, privVals -} - -func randConsensusState() (*ConsensusState, []*types.PrivValidator) { - state, privValidators := sm.RandGenesisState(10, false, 1000) - blockStore := bc.NewBlockStore(dbm.NewMemDB()) - mempool := mempl.NewMempool(state) - mempoolReactor := mempl.NewMempoolReactor(mempool) - cs := NewConsensusState(state, blockStore, mempoolReactor) - return cs, privValidators -} diff --git a/mempool/mempool.go b/mempool/mempool.go index 66b2e2e44..b135cd5cb 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -1,84 +1,167 @@ -/* -Mempool receives new transactions and applies them to the latest committed state. 
-If the transaction is acceptable, then it broadcasts the tx to peers. - -When this node happens to be the next proposer, it simply uses the recently -modified state (and the associated transactions) to construct a proposal. -*/ - package mempool import ( + "bytes" "sync" + "sync/atomic" - sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/go-clist" + . "github.com/tendermint/go-common" + "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" + tmsp "github.com/tendermint/tmsp/types" ) +/* + +The mempool pushes new txs onto the proxyAppCtx. +It gets a stream of (req, res) tuples from the proxy. +The mempool stores good txs in a concurrent linked-list. + +Multiple concurrent go-routines can traverse this linked-list +safely by calling .NextWait() on each element. + +So we have several go-routines: +1. Consensus calling Update() and Reap() synchronously +2. Many mempool reactor's peer routines calling AppendTx() +3. Many mempool reactor's peer routines traversing the txs linked list +4. Another goroutine calling GarbageCollectTxs() periodically + +To manage these goroutines, there are three methods of locking. +1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe) +2. Mutations to the linked-list elements are atomic +3. AppendTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx + +Garbage collection of old elements from mempool.txs is handled via +the DetachPrev() call, which makes old elements unreachable by +peer broadcastTxRoutine(), so they are automatically garbage collected. + + + +*/ + type Mempool struct { - mtx sync.Mutex - state *sm.State - txs []types.Tx // TODO: we need to add a map to facilitate replace-by-fee + proxyMtx sync.Mutex + proxyAppCtx proxy.AppContext + txs *clist.CList // concurrent linked-list of good txs + counter int64 // simple incrementing counter + height int // the last block Update()'d to + expected *clist.CElement // pointer to .txs for next response } -func NewMempool(state *sm.State) *Mempool { - return &Mempool{ - state: state, +func NewMempool(proxyAppCtx proxy.AppContext) *Mempool { + mempool := &Mempool{ + proxyAppCtx: proxyAppCtx, + txs: clist.New(), + counter: 0, + height: 0, + expected: nil, } + proxyAppCtx.SetResponseCallback(mempool.resCb) + return mempool } -func (mem *Mempool) GetState() *sm.State { - return mem.state +// Return the first element of mem.txs for peer goroutines to call .NextWait() on. +// Blocks until txs has elements. +func (mem *Mempool) TxsFrontWait() *clist.CElement { + return mem.txs.FrontWait() } -func (mem *Mempool) GetHeight() int { - mem.mtx.Lock() - defer mem.mtx.Unlock() - return mem.state.LastBlockHeight -} +// Try a new transaction in the mempool. +// Potentially blocking if we're blocking on Update() or Reap(). +func (mem *Mempool) AppendTx(tx types.Tx) (err error) { + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() -// Apply tx to the state and remember it.
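// A minimal sketch of the traversal pattern described in the comment above: a
// consumer goroutine walks the concurrent list via TxsFrontWait()/NextWait(),
// and starts over from the front when the element it held is removed from under
// it (illustrative only; the real consumer is the reactor's broadcastTxRoutine):
func drainTxsSketch(mem *Mempool, quit <-chan struct{}) {
	var next *clist.CElement
	for {
		select {
		case <-quit:
			return
		default:
		}
		if next == nil {
			next = mem.TxsFrontWait() // blocks until the list is non-empty
		}
		memTx := next.Value.(*mempoolTx)
		_ = memTx.tx           // e.g. hand the raw tx bytes to a peer
		next = next.NextWait() // nil if our element was garbage collected; restart from the front
	}
}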
-func (mem *Mempool) AddTx(tx types.Tx) (err error) { - mem.mtx.Lock() - defer mem.mtx.Unlock() - err = sm.ExecTx(mem.state, tx, nil) - if err != nil { - log.Info("AddTx() error", "tx", tx, "error", err) + if err = mem.proxyAppCtx.Error(); err != nil { return err - } else { - log.Info("AddTx() success", "tx", tx) - mem.txs = append(mem.txs, tx) - return nil } + mem.proxyAppCtx.AppendTxAsync(tx) + return nil } -func (mem *Mempool) GetProposalTxs() []types.Tx { - mem.mtx.Lock() - defer mem.mtx.Unlock() - log.Info("GetProposalTxs:", "txs", mem.txs) - return mem.txs +// TMSP callback function +// CONTRACT: No other goroutines mutate mem.expected concurrently. +func (mem *Mempool) resCb(req tmsp.Request, res tmsp.Response) { + switch res := res.(type) { + case tmsp.ResponseAppendTx: + reqAppendTx := req.(tmsp.RequestAppendTx) + if mem.expected == nil { // Normal operation + if res.RetCode == tmsp.RetCodeOK { + mem.counter++ + memTx := &mempoolTx{ + counter: mem.counter, + height: int64(mem.height), + tx: reqAppendTx.TxBytes, + } + mem.txs.PushBack(memTx) + } else { + // ignore bad transaction + // TODO: handle other retcodes + } + } else { // During Update() + // TODO Log sane warning if mem.expected is nil. + memTx := mem.expected.Value.(*mempoolTx) + if !bytes.Equal(reqAppendTx.TxBytes, memTx.tx) { + PanicSanity("Unexpected tx response from proxy") + } + if res.RetCode == tmsp.RetCodeOK { + // Good, nothing to do. + } else { + // TODO: handle other retcodes + // Tx became invalidated due to newly committed block. + // NOTE: Concurrent traversal of mem.txs via CElement.Next() still works. + mem.txs.Remove(mem.expected) + mem.expected.DetachPrev() + } + mem.expected = mem.expected.Next() + } + default: + // ignore other messages + } } -// We use this to inform peer routines of how the mempool has been updated -type ResetInfo struct { - Height int - Included []Range - Invalid []Range +// Get the valid transactions run so far, and the hash of +// the application state that results from those transactions. +func (mem *Mempool) Reap() ([]types.Tx, []byte, error) { + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + // First, get the hash of txs run so far + hash, err := mem.proxyAppCtx.GetHashSync() + if err != nil { + return nil, nil, err + } + + // And collect all the transactions. + txs := mem.collectTxs() + + return txs, hash, nil } -type Range struct { - Start int - Length int +func (mem *Mempool) collectTxs() []types.Tx { + txs := make([]types.Tx, 0, mem.txs.Len()) + for e := mem.txs.Front(); e != nil; e = e.Next() { + memTx := e.Value.(*mempoolTx) + txs = append(txs, memTx.tx) + } + return txs } -// "block" is the new block being committed. -// "state" is the result of state.AppendBlock("block"). +// "block" is the new block that was committed. // Txs that are present in "block" are discarded from mempool. -// Txs that have become invalid in the new "state" are also discarded. -func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) ResetInfo { - mem.mtx.Lock() - defer mem.mtx.Unlock() - mem.state = state.Copy() +// NOTE: this should be called *after* block is committed by consensus. +// CONTRACT: block is valid and next in sequence. +func (mem *Mempool) Update(block *types.Block) error { + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + // Rollback mempool synchronously + // TODO: test that proxyAppCtx's state matches the block's + err := mem.proxyAppCtx.RollbackSync() + if err != nil { + return err + } // First, create a lookup map of txns in new block. 
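// How the pieces above fit together over one block, as a hypothetical driver
// (names are illustrative; error handling elided): txs enter through AppendTx(),
// the proposer snapshots them with Reap(), and once the block is committed
// Update() drops the included txs and re-submits the rest to the proxy app.
func mempoolLifecycleSketch(mem *Mempool, incoming []types.Tx, committed *types.Block) {
	for _, tx := range incoming {
		_ = mem.AppendTx(tx) // queued to the proxy app; rejected txs simply never show up in Reap()
	}
	txs, appHash, _ := mem.Reap() // valid txs run so far + resulting app-state hash
	_, _ = txs, appHash           // would go into the proposed block's Data and Header.AppHash
	_ = mem.Update(committed)     // after consensus commits the block
}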
blockTxsMap := make(map[string]struct{}) @@ -86,50 +169,58 @@ func (mem *Mempool) ResetForBlockAndState(block *types.Block, state *sm.State) R blockTxsMap[string(tx)] = struct{}{} } - // Now we filter all txs from mem.txs that are in blockTxsMap, - // and ExecTx on what remains. Only valid txs are kept. - // We track the ranges of txs included in the block and invalidated by it - // so we can tell peer routines - var ri = ResetInfo{Height: block.Height} - var validTxs []types.Tx - includedStart, invalidStart := -1, -1 - for i, tx := range mem.txs { - if _, ok := blockTxsMap[string(tx)]; ok { - startRange(&includedStart, i) // start counting included txs - endRange(&invalidStart, i, &ri.Invalid) // stop counting invalid txs - log.Info("Filter out, already committed", "tx", tx) - } else { - endRange(&includedStart, i, &ri.Included) // stop counting included txs - err := sm.ExecTx(mem.state, tx, nil) - if err != nil { - startRange(&invalidStart, i) // start counting invalid txs - log.Info("Filter out, no longer valid", "tx", tx, "error", err) - } else { - endRange(&invalidStart, i, &ri.Invalid) // stop counting invalid txs - log.Info("Filter in, new, valid", "tx", tx) - validTxs = append(validTxs, tx) - } + // Remove transactions that are already in block. + // Return the remaining potentially good txs. + goodTxs := mem.filterTxs(block.Height, blockTxsMap) + + // Set height and expected + mem.height = block.Height + mem.expected = mem.txs.Front() + + // Push good txs to proxyAppCtx + // NOTE: resCb() may be called concurrently. + for _, tx := range goodTxs { + mem.proxyAppCtx.AppendTxAsync(tx) + if err := mem.proxyAppCtx.Error(); err != nil { + return err } } - endRange(&includedStart, len(mem.txs)-1, &ri.Included) // stop counting included txs - endRange(&invalidStart, len(mem.txs)-1, &ri.Invalid) // stop counting invalid txs - // We're done! - log.Info("New txs", "txs", validTxs, "oldTxs", mem.txs) - mem.txs = validTxs - return ri + // NOTE: Even though we return immediately without e.g. + // calling mem.proxyAppCtx.FlushSync(), + // New mempool txs will still have to wait until + // all goodTxs are re-processed. + // So we could make synchronous calls here to proxyAppCtx. + + return nil } -func startRange(start *int, i int) { - if *start < 0 { - *start = i +func (mem *Mempool) filterTxs(height int, blockTxsMap map[string]struct{}) []types.Tx { + goodTxs := make([]types.Tx, 0, mem.txs.Len()) + for e := mem.txs.Front(); e != nil; e = e.Next() { + memTx := e.Value.(*mempoolTx) + if _, ok := blockTxsMap[string(memTx.tx)]; ok { + // Remove the tx since already in block. + mem.txs.Remove(e) + e.DetachPrev() + continue + } + // Good tx! 
+ atomic.StoreInt64(&memTx.height, int64(height)) + goodTxs = append(goodTxs, memTx.tx) } + return goodTxs } -func endRange(start *int, i int, ranger *[]Range) { - if *start >= 0 { - length := i - *start - *ranger = append(*ranger, Range{*start, length}) - *start = -1 - } +//-------------------------------------------------------------------------------- + +// A transaction that successfully ran +type mempoolTx struct { + counter int64 // a simple incrementing counter + height int64 // height that this tx had been validated in + tx types.Tx // +} + +func (memTx *mempoolTx) Height() int { + return int(atomic.LoadInt64(&memTx.height)) } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go new file mode 100644 index 000000000..4cc55283c --- /dev/null +++ b/mempool/mempool_test.go @@ -0,0 +1,118 @@ +package mempool + +import ( + "encoding/binary" + "testing" + + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tmsp/example" + tmsp "github.com/tendermint/tmsp/types" +) + +func TestSerialReap(t *testing.T) { + + app := example.NewCounterApplication() + appCtxMempool := app.Open() + appCtxMempool.SetOption("serial", "on") + proxyAppCtx := proxy.NewLocalAppContext(appCtxMempool) + mempool := NewMempool(proxyAppCtx) + + // Create another AppContext for committing. + appCtxConsensus := app.Open() + appCtxConsensus.SetOption("serial", "on") + + appendTxsRange := func(start, end int) { + // Append some txs. + for i := start; i < end; i++ { + + // This will succeed + txBytes := make([]byte, 32) + _ = binary.PutVarint(txBytes, int64(i)) + err := mempool.AppendTx(txBytes) + if err != nil { + t.Fatalf("Error after AppendTx: %v", err) + } + + // This will fail because not serial (incrementing) + // However, error should still be nil. + // It just won't show up on Reap(). + err = mempool.AppendTx(txBytes) + if err != nil { + t.Fatalf("Error after AppendTx: %v", err) + } + + } + } + + reapCheck := func(exp int) { + txs, _, err := mempool.Reap() + if err != nil { + t.Error("Error in mempool.Reap()", err) + } + if len(txs) != exp { + t.Fatalf("Expected to reap %v txs but got %v", exp, len(txs)) + } + } + + updateRange := func(start, end int) { + txs := make([]types.Tx, 0) + for i := start; i < end; i++ { + txBytes := make([]byte, 32) + _ = binary.PutVarint(txBytes, int64(i)) + txs = append(txs, txBytes) + } + blockHeader := &types.Header{Height: 0} + blockData := &types.Data{Txs: txs} + block := &types.Block{Header: blockHeader, Data: blockData} + err := mempool.Update(block) + if err != nil { + t.Error("Error in mempool.Update()", err) + } + } + + commitRange := func(start, end int) { + // Append some txs. + for i := start; i < end; i++ { + txBytes := make([]byte, 32) + _ = binary.PutVarint(txBytes, int64(i)) + _, retCode := appCtxConsensus.AppendTx(txBytes) + if retCode != tmsp.RetCodeOK { + t.Error("Error committing tx", retCode) + } + } + retCode := appCtxConsensus.Commit() + if retCode != tmsp.RetCodeOK { + t.Error("Error committing range", retCode) + } + } + + //---------------------------------------- + + // Append some txs. + appendTxsRange(0, 100) + + // Reap the txs. + reapCheck(100) + + // Reap again. We should get the same amount + reapCheck(100) + + // Append 0 to 999, we should reap 900 txs + // because 100 were already counted. + appendTxsRange(0, 1000) + + // Reap the txs. + reapCheck(1000) + + // Reap again.
We should get the same amount + reapCheck(1000) + + // Commit from the consensus AppContext + commitRange(0, 500) + updateRange(0, 500) + + // We should have 500 left. + reapCheck(500) + +} diff --git a/mempool/reactor.go b/mempool/reactor.go index a1f61a4c5..ed8b8b260 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -2,35 +2,30 @@ package mempool import ( "bytes" - "errors" "fmt" "reflect" "time" + "github.com/tendermint/go-clist" . "github.com/tendermint/go-common" "github.com/tendermint/go-p2p" "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/events" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) -var ( +const ( MempoolChannel = byte(0x30) - checkExecutedTxsMilliseconds = 1 // check for new mempool txs to send to peer - txsToSendPerCheck = 64 // send up to this many txs from the mempool per check - newBlockChCapacity = 100 // queue to process this many ResetInfos per peer - maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable + maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable + peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount ) // MempoolReactor handles mempool tx broadcasting amongst peers. type MempoolReactor struct { p2p.BaseReactor - - Mempool *Mempool - - evsw events.Fireable + Mempool *Mempool // TODO: un-expose + evsw events.Fireable } func NewMempoolReactor(mempool *Mempool) *MempoolReactor { @@ -53,11 +48,7 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { // Implements Reactor func (memR *MempoolReactor) AddPeer(peer *p2p.Peer) { - // Each peer gets a go routine on which we broadcast transactions in the same order we applied them to our state. - newBlockChan := make(chan ResetInfo, newBlockChCapacity) - peer.Data.Set(types.PeerMempoolChKey, newBlockChan) - timer := time.NewTicker(time.Millisecond * time.Duration(checkExecutedTxsMilliseconds)) - go memR.broadcastTxRoutine(timer.C, newBlockChan, peer) + go memR.broadcastTxRoutine(peer) } // Implements Reactor @@ -76,7 +67,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) { switch msg := msg.(type) { case *TxMessage: - err := memR.Mempool.AddTx(msg.Tx) + err := memR.Mempool.AppendTx(msg.Tx) if err != nil { // Bad, seen, or conflicting tx. log.Info("Could not add tx", "tx", msg.Tx) @@ -90,30 +81,9 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) { } } -// "block" is the new block being committed. -// "state" is the result of state.AppendBlock("block"). -// Txs that are present in "block" are discarded from mempool. -// Txs that have become invalid in the new "state" are also discarded.
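// The catch-up rule used by the rewritten broadcastTxRoutine below, pulled out as
// a tiny predicate (illustrative only; the real routine inlines this check and
// sleeps peerCatchupSleepIntervalMS before retrying): a tx validated at height h
// is only sent to peers that have reached at least height h-1.
func peerCaughtUpSketch(peerHeight, txHeight int) bool {
	return peerHeight >= txHeight-1 // allow the peer to lag by one block
}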
-func (memR *MempoolReactor) ResetForBlockAndState(block *types.Block, state *sm.State) { - ri := memR.Mempool.ResetForBlockAndState(block, state) - for _, peer := range memR.Switch.Peers().List() { - peerMempoolChI := peer.Data.Get(types.PeerMempoolChKey) - if peerMempoolChI == nil { - // peer was added to switch but not yet to the memR - continue - } - peerMempoolCh := peerMempoolChI.(chan ResetInfo) - select { - case peerMempoolCh <- ri: - default: - memR.Switch.StopPeerForError(peer, errors.New("Peer's mempool push channel full")) - } - } -} - -// Just an alias for AddTx since broadcasting happens in peer routines +// Just an alias for AppendTx since broadcasting happens in peer routines func (memR *MempoolReactor) BroadcastTx(tx types.Tx) error { - return memR.Mempool.AddTx(tx) + return memR.Mempool.AppendTx(tx) } type PeerState interface { @@ -126,91 +96,42 @@ type Peer interface { Get(string) interface{} } -// send new mempool txs to peer, strictly in order we applied them to our state. -// new blocks take chunks out of the mempool, but we've already sent some txs to the peer. -// so we wait to hear that the peer has progressed to the new height, and then continue sending txs from where we left off -func (memR *MempoolReactor) broadcastTxRoutine(tickerChan <-chan time.Time, newBlockChan chan ResetInfo, peer Peer) { - var height = memR.Mempool.GetHeight() - var txsSent int // new txs sent for height. (reset every new height) - +// Send new mempool txs to peer. +// TODO: Handle mempool or reactor shutdown? +// As is this routine may block forever if no new txs come in. +func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) { + var next *clist.CElement for { - select { - case <-tickerChan: - if !peer.IsRunning() { - return - } - - // make sure the peer is up to date - if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil { - peerState := peerState_i.(PeerState) - if peerState.GetHeight() < height { - continue - } - } else { + if !memR.IsRunning() { + return // Quit! + } + if next == nil { + // This happens because the CElement we were looking at got + // garbage collected (removed). That is, .NextWait() returned nil. + // Go ahead and start from the beginning. 
+ next = memR.Mempool.TxsFrontWait() // Wait until a tx is available + } + memTx := next.Value.(*mempoolTx) + // make sure the peer is up to date + height := memTx.Height() + if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil { + peerState := peerState_i.(PeerState) + if peerState.GetHeight() < height-1 { // Allow for a lag of 1 block + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue } - - // check the mempool for new transactions - newTxs := memR.getNewTxs(height) - txsSentLoop := 0 - start := time.Now() - - TX_LOOP: - for i := txsSent; i < len(newTxs) && txsSentLoop < txsToSendPerCheck; i++ { - tx := newTxs[i] - msg := &TxMessage{Tx: tx} - success := peer.Send(MempoolChannel, msg) - if !success { - break TX_LOOP - } else { - txsSentLoop += 1 - } - } - - if txsSentLoop > 0 { - txsSent += txsSentLoop - log.Info("Sent txs to peer", "txsSentLoop", txsSentLoop, - "took", time.Since(start), "txsSent", txsSent, "newTxs", len(newTxs)) - } - - case ri := <-newBlockChan: - height = ri.Height - - // find out how many txs below what we've sent were included in a block and how many became invalid - included := tallyRangesUpTo(ri.Included, txsSent) - invalidated := tallyRangesUpTo(ri.Invalid, txsSent) - - txsSent -= included + invalidated - } - } -} - -// fetch new txs from the mempool -func (memR *MempoolReactor) getNewTxs(height int) (txs []types.Tx) { - memR.Mempool.mtx.Lock() - defer memR.Mempool.mtx.Unlock() - - // if the mempool got ahead of us just return empty txs - if memR.Mempool.state.LastBlockHeight != height { - return - } - return memR.Mempool.txs -} - -// return the size of ranges less than upTo -func tallyRangesUpTo(ranger []Range, upTo int) int { - totalUpTo := 0 - for _, r := range ranger { - if r.Start >= upTo { - break } - if r.Start+r.Length >= upTo { - totalUpTo += upTo - r.Start - break + // send memTx + msg := &TxMessage{Tx: memTx.tx} + success := peer.Send(MempoolChannel, msg) + if !success { + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + continue } - totalUpTo += r.Length + + next = next.NextWait() + continue } - return totalUpTo } // implements events.Eventable diff --git a/node/node.go b/node/node.go index 8b4106773..77ff70544 100644 --- a/node/node.go +++ b/node/node.go @@ -19,6 +19,7 @@ import ( "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/events" mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc" "github.com/tendermint/tendermint/rpc/core" "github.com/tendermint/tendermint/rpc/server" @@ -39,7 +40,7 @@ type Node struct { consensusState *consensus.ConsensusState consensusReactor *consensus.ConsensusReactor privValidator *types.PrivValidator - genDoc *types.GenesisDoc + genesisDoc *types.GenesisDoc privKey crypto.PrivKeyEd25519 } @@ -49,27 +50,14 @@ func NewNode() *Node { blockStore := bc.NewBlockStore(blockStoreDB) // Get State - stateDB := dbm.GetDB("state") - state := sm.LoadState(stateDB) - var genDoc *types.GenesisDoc - if state == nil { - genDoc, state = sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file")) - state.Save() - // write the gendoc to db - buf, n, err := new(bytes.Buffer), new(int), new(error) - wire.WriteJSON(genDoc, buf, n, err) - stateDB.Set(types.GenDocKey, buf.Bytes()) - if *err != nil { - Exit(Fmt("Unable to write gendoc to db: %v", err)) - } - } else { - genDocBytes := stateDB.Get(types.GenDocKey) - err := new(error) - wire.ReadJSONPtr(&genDoc, genDocBytes, err) - if 
*err != nil { - Exit(Fmt("Unable to read gendoc from db: %v", err)) - } - } + state := getState() + + // Create two proxyAppCtx connections, + // one for the consensus and one for the mempool. + proxyAddr := config.GetString("proxy_app") + proxyAppCtxMempool := getProxyApp(proxyAddr, state.LastAppHash) + proxyAppCtxConsensus := getProxyApp(proxyAddr, state.LastAppHash) + // add the chainid to the global config config.Set("chain_id", state.ChainID) @@ -92,14 +80,14 @@ func NewNode() *Node { pexReactor := p2p.NewPEXReactor(book) // Make BlockchainReactor - bcReactor := bc.NewBlockchainReactor(state.Copy(), blockStore, config.GetBool("fast_sync")) + bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyAppCtxConsensus, blockStore, config.GetBool("fast_sync")) // Make MempoolReactor - mempool := mempl.NewMempool(state.Copy()) + mempool := mempl.NewMempool(proxyAppCtxMempool) mempoolReactor := mempl.NewMempoolReactor(mempool) // Make ConsensusReactor - consensusState := consensus.NewConsensusState(state.Copy(), blockStore, mempoolReactor) + consensusState := consensus.NewConsensusState(state.Copy(), proxyAppCtxConsensus, blockStore, mempool) consensusReactor := consensus.NewConsensusReactor(consensusState, blockStore, config.GetBool("fast_sync")) if privValidator != nil { consensusReactor.SetPrivValidator(privValidator) @@ -135,7 +123,7 @@ func NewNode() *Node { consensusState: consensusState, consensusReactor: consensusReactor, privValidator: privValidator, - genDoc: genDoc, + genesisDoc: state.GenesisDoc, privKey: privKey, } } @@ -207,7 +195,7 @@ func (n *Node) StartRPC() (net.Listener, error) { core.SetMempoolReactor(n.mempoolReactor) core.SetSwitch(n.sw) core.SetPrivValidator(n.privValidator) - core.SetGenDoc(n.genDoc) + core.SetGenesisDoc(n.genesisDoc) listenAddr := config.GetString("rpc_laddr") @@ -289,8 +277,8 @@ func RunNode() { log.Notice(Fmt("Waiting for genesis file %v...", genDocFile)) for { time.Sleep(time.Second) - if FileExists(genDocFile) { - break + if !FileExists(genDocFile) { + continue } jsonBlob, err := ioutil.ReadFile(genDocFile) if err != nil { @@ -334,3 +322,38 @@ func RunNode() { n.Stop() }) } + +// Load the most recent state from "state" db, +// or create a new one (and save) from genesis. +func getState() *sm.State { + stateDB := dbm.GetDB("state") + state := sm.LoadState(stateDB) + if state == nil { + state = sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file")) + state.Save() + } + return state +} + +// Get a connection to the proxyAppCtx addr. +// Check the current hash, and panic if it doesn't match. +func getProxyApp(addr string, hash []byte) proxy.AppContext { + proxyConn, err := Connect(addr) + if err != nil { + Exit(Fmt("Failed to connect to proxy for mempool: %v", err)) + } + proxyAppCtx := proxy.NewRemoteAppContext(proxyConn, 1024) + + proxyAppCtx.Start() + + // Check the hash + currentHash, err := proxyAppCtx.GetHashSync() + if err != nil { + PanicCrisis(Fmt("Error in getting proxyAppCtx hash: %v", err)) + } + if !bytes.Equal(hash, currentHash) { + PanicCrisis(Fmt("ProxyApp hash does not match. Expected %X, got %X", hash, currentHash)) + } + + return proxyAppCtx +} diff --git a/node/node_test.go b/node/node_test.go index b94d3b95b..49e7d3ddf 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -4,11 +4,25 @@ import ( "testing" "time" + . 
"github.com/tendermint/go-common" "github.com/tendermint/go-p2p" _ "github.com/tendermint/tendermint/config/tendermint_test" + "github.com/tendermint/tmsp/example" + "github.com/tendermint/tmsp/server" ) func TestNodeStartStop(t *testing.T) { + + // Start a dummy app + go func() { + _, err := server.StartListener(config.GetString("proxy_app"), example.NewDummyApplication()) + if err != nil { + Exit(err.Error()) + } + }() + // wait for the server + time.Sleep(time.Second * 2) + // Create & start node n := NewNode() l := p2p.NewDefaultListener("tcp", config.GetString("node_laddr"), config.GetBool("skip_upnp")) diff --git a/proxy/app_context.go b/proxy/app_context.go new file mode 100644 index 000000000..85ad1367c --- /dev/null +++ b/proxy/app_context.go @@ -0,0 +1,28 @@ +package proxy + +import ( + tmsp "github.com/tendermint/tmsp/types" +) + +type Callback func(tmsp.Request, tmsp.Response) + +type AppContext interface { + SetResponseCallback(Callback) + Error() error + + EchoAsync(msg string) + FlushAsync() + AppendTxAsync(tx []byte) + GetHashAsync() + CommitAsync() + RollbackAsync() + SetOptionAsync(key string, value string) + AddListenerAsync(key string) + RemListenerAsync(key string) + + InfoSync() (info []string, err error) + FlushSync() error + GetHashSync() (hash []byte, err error) + CommitSync() error + RollbackSync() error +} diff --git a/proxy/local_app_context.go b/proxy/local_app_context.go new file mode 100644 index 000000000..b6d1b2678 --- /dev/null +++ b/proxy/local_app_context.go @@ -0,0 +1,123 @@ +package proxy + +import ( + tmsp "github.com/tendermint/tmsp/types" +) + +type localAppContext struct { + tmsp.AppContext + Callback +} + +func NewLocalAppContext(app tmsp.AppContext) *localAppContext { + return &localAppContext{ + AppContext: app, + } +} + +func (app *localAppContext) SetResponseCallback(cb Callback) { + app.Callback = cb +} + +// TODO: change tmsp.AppContext to include Error()? 
+func (app *localAppContext) Error() error { + return nil +} + +func (app *localAppContext) EchoAsync(msg string) { + msg2 := app.AppContext.Echo(msg) + app.Callback( + tmsp.RequestEcho{msg}, + tmsp.ResponseEcho{msg2}, + ) +} + +func (app *localAppContext) FlushAsync() { + // Do nothing +} + +func (app *localAppContext) SetOptionAsync(key string, value string) { + retCode := app.AppContext.SetOption(key, value) + app.Callback( + tmsp.RequestSetOption{key, value}, + tmsp.ResponseSetOption{retCode}, + ) +} + +func (app *localAppContext) AppendTxAsync(tx []byte) { + events, retCode := app.AppContext.AppendTx(tx) + app.Callback( + tmsp.RequestAppendTx{tx}, + tmsp.ResponseAppendTx{retCode}, + ) + for _, event := range events { + app.Callback( + nil, + tmsp.ResponseEvent{event}, + ) + } +} + +func (app *localAppContext) GetHashAsync() { + hash, retCode := app.AppContext.GetHash() + app.Callback( + tmsp.RequestGetHash{}, + tmsp.ResponseGetHash{retCode, hash}, + ) +} + +func (app *localAppContext) CommitAsync() { + retCode := app.AppContext.Commit() + app.Callback( + tmsp.RequestCommit{}, + tmsp.ResponseCommit{retCode}, + ) +} + +func (app *localAppContext) RollbackAsync() { + retCode := app.AppContext.Rollback() + app.Callback( + tmsp.RequestRollback{}, + tmsp.ResponseRollback{retCode}, + ) +} + +func (app *localAppContext) AddListenerAsync(key string) { + retCode := app.AppContext.AddListener(key) + app.Callback( + tmsp.RequestAddListener{key}, + tmsp.ResponseAddListener{retCode}, + ) +} + +func (app *localAppContext) RemListenerAsync(key string) { + retCode := app.AppContext.RemListener(key) + app.Callback( + tmsp.RequestRemListener{key}, + tmsp.ResponseRemListener{retCode}, + ) +} + +func (app *localAppContext) InfoSync() (info []string, err error) { + info = app.AppContext.Info() + return info, nil +} + +func (app *localAppContext) FlushSync() error { + return nil +} + +func (app *localAppContext) GetHashSync() (hash []byte, err error) { + hash, retCode := app.AppContext.GetHash() + return hash, retCode.Error() +} + +func (app *localAppContext) CommitSync() (err error) { + retCode := app.AppContext.Commit() + return retCode.Error() +} + +func (app *localAppContext) RollbackSync() (err error) { + retCode := app.AppContext.Rollback() + return retCode.Error() +} diff --git a/proxy/proxy_app.go b/proxy/proxy_app.go deleted file mode 100644 index 10afee609..000000000 --- a/proxy/proxy_app.go +++ /dev/null @@ -1,349 +0,0 @@ -package proxy - -import ( - "bufio" - "container/list" - "errors" - "fmt" - "net" - "reflect" - "sync" - - . "github.com/tendermint/go-common" - "github.com/tendermint/go-wire" - tmsp "github.com/tendermint/tmsp/types" -) - -const maxResponseSize = 1048576 // 1MB - -// This is goroutine-safe, but users should beware that -// the application in general is not meant to be interfaced -// with concurrent callers. -// In other words, the mempool and consensus modules need to -// exclude each other w/ an external mutex. 
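The note above is the reason node.go now opens two separate proxy connections, one for the mempool and one for consensus: a single app connection is not meant to serve concurrent callers. If only one connection were available, the callers would have to exclude each other with an external mutex, roughly as in this sketch (guardedApp and rawApp are illustrative stand-ins, not types from this diff):

```go
package main

import (
	"fmt"
	"sync"
)

// rawApp stands in for a single, non-thread-safe app connection.
type rawApp struct{ txs int }

func (a *rawApp) AppendTx(tx []byte) { a.txs++ }
func (a *rawApp) Count() int         { return a.txs }

// guardedApp serializes access from multiple modules
// (e.g. mempool and consensus) with one external mutex.
type guardedApp struct {
	mtx sync.Mutex
	app *rawApp
}

func (g *guardedApp) AppendTx(tx []byte) {
	g.mtx.Lock()
	defer g.mtx.Unlock()
	g.app.AppendTx(tx)
}

func main() {
	g := &guardedApp{app: &rawApp{}}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			g.AppendTx([]byte{0x01})
		}()
	}
	wg.Wait()
	fmt.Println("txs applied:", g.app.Count()) // 10
}
```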
-type ProxyApp struct { - QuitService - sync.Mutex - - reqQueue chan QueuedRequest - - mtx sync.Mutex - conn net.Conn - bufWriter *bufio.Writer - err error - reqSent *list.List - reqPending *list.Element // Next element in reqSent waiting for response - resReceived *list.List - eventsReceived *list.List -} - -func NewProxyApp(conn net.Conn, bufferSize int) *ProxyApp { - p := &ProxyApp{ - reqQueue: make(chan QueuedRequest, bufferSize), - conn: conn, - bufWriter: bufio.NewWriter(conn), - reqSent: list.New(), - reqPending: nil, - resReceived: list.New(), - eventsReceived: list.New(), - } - p.QuitService = *NewQuitService(nil, "ProxyApp", p) - return p -} - -func (p *ProxyApp) OnStart() error { - p.QuitService.OnStart() - go p.sendRequestsRoutine() - go p.recvResponseRoutine() - return nil -} - -func (p *ProxyApp) OnStop() { - p.QuitService.OnStop() - p.conn.Close() -} - -func (p *ProxyApp) StopForError(err error) { - p.mtx.Lock() - fmt.Println("Stopping ProxyApp for error:", err) - if p.err == nil { - p.err = err - } - p.mtx.Unlock() - p.Stop() -} - -func (p *ProxyApp) Error() error { - p.mtx.Lock() - defer p.mtx.Unlock() - return p.err -} - -//---------------------------------------- - -func (p *ProxyApp) sendRequestsRoutine() { - for { - var n int - var err error - select { - case <-p.QuitService.Quit: - return - case qreq := <-p.reqQueue: - wire.WriteBinary(qreq.Request, p.bufWriter, &n, &err) - if err != nil { - p.StopForError(err) - return - } - if _, ok := qreq.Request.(tmsp.RequestFlush); ok { - err = p.bufWriter.Flush() - if err != nil { - p.StopForError(err) - return - } - } - p.didSendReq(qreq) - } - } -} - -func (p *ProxyApp) recvResponseRoutine() { - r := bufio.NewReader(p.conn) // Buffer reads - for { - var res tmsp.Response - var n int - var err error - wire.ReadBinaryPtr(&res, r, maxResponseSize, &n, &err) - if err != nil { - p.StopForError(err) - return - } - switch res := res.(type) { - case tmsp.ResponseException: - p.StopForError(errors.New(res.Error)) - case tmsp.ResponseEvent: - p.didRecvEvent(res.Event) - default: - err := p.didRecvResponse(res) - if err != nil { - p.StopForError(err) - } - } - } -} - -func (p *ProxyApp) didSendReq(qreq QueuedRequest) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.reqSent.PushBack(qreq) - if p.reqPending == nil { - p.reqPending = p.reqSent.Front() - } -} - -func (p *ProxyApp) didRecvResponse(res tmsp.Response) error { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.reqPending == nil { - return fmt.Errorf("Unexpected result type %v when nothing expected", - reflect.TypeOf(res)) - } else { - qreq := p.reqPending.Value.(QueuedRequest) - if !resMatchesReq(qreq.Request, res) { - return fmt.Errorf("Unexpected result type %v when response to %v expected", - reflect.TypeOf(res), reflect.TypeOf(qreq.Request)) - } - if qreq.Sync { - qreq.Done() - } - p.reqPending = p.reqPending.Next() - } - p.resReceived.PushBack(res) - return nil -} - -func (p *ProxyApp) didRecvEvent(event tmsp.Event) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.eventsReceived.PushBack(event) -} - -//---------------------------------------- - -func (p *ProxyApp) EchoAsync(key string) { - p.queueRequestAsync(tmsp.RequestEcho{key}) -} - -func (p *ProxyApp) FlushAsync() { - p.queueRequestAsync(tmsp.RequestFlush{}) -} - -func (p *ProxyApp) AppendTxAsync(tx []byte) { - p.queueRequestAsync(tmsp.RequestAppendTx{tx}) -} - -func (p *ProxyApp) GetHashAsync() { - p.queueRequestAsync(tmsp.RequestGetHash{}) -} - -/* -func (p *ProxyApp) CommitAsync() { - 
p.queueRequestAsync(tmsp.RequestCommit{}) -} - -func (p *ProxyApp) RollbackAsync() { - p.queueRequestAsync(tmsp.RequestRollback{}) -} -*/ - -func (p *ProxyApp) SetEventsModeAsync(mode tmsp.EventsMode) { - p.queueRequestAsync(tmsp.RequestSetEventsMode{mode}) -} - -func (p *ProxyApp) AddListenerAsync(key string) { - p.queueRequestAsync(tmsp.RequestAddListener{key}) -} - -func (p *ProxyApp) RemListenerAsync(key string) { - p.queueRequestAsync(tmsp.RequestRemListener{key}) -} - -//---------------------------------------- - -// Get valid txs, root hash, events; or error -// Clears internal buffers -func (p *ProxyApp) ReapSync(commit bool) (txs [][]byte, hash []byte, events []tmsp.Event, err error) { - if commit { - // Send asynchronous commit - p.queueRequestAsync(tmsp.RequestCommit{}) - // NOTE: we're assuming that there won't be a race condition. - } - // Get hash. - p.queueRequestAsync(tmsp.RequestGetHash{}) - // Flush everything. - p.queueRequestSync(tmsp.RequestFlush{}) - // Maybe there was an error in response matching - if p.err != nil { - return nil, nil, nil, p.err - } - // Process the resReceived/reqSent/reqPending. - if p.resReceived.Len() != p.reqSent.Len() { - PanicSanity("Unmatched requests & responses") - } - var commitCounter = 0 - txs = make([][]byte, 0, p.reqSent.Len()) - events = make([]tmsp.Event, 0, p.eventsReceived.Len()) - reqE, resE := p.reqSent.Front(), p.resReceived.Front() - for ; reqE != nil; reqE, resE = reqE.Next(), resE.Next() { - req, res := reqE.Value.(tmsp.Request), resE.Value.(tmsp.Response) - switch req := req.(type) { - case tmsp.RequestAppendTx: - txs = append(txs, req.TxBytes) - case tmsp.RequestGetHash: - hash = res.(tmsp.ResponseGetHash).Hash - case tmsp.RequestCommit: - if commitCounter > 0 { - PanicSanity("Unexpected Commit response") - } - commitCounter++ - case tmsp.RequestRollback: - PanicSanity("Unexpected Rollback response") - default: - // ignore other messages - } - } - for eE := p.eventsReceived.Front(); eE != nil; eE = eE.Next() { - events = append(events, eE.Value.(tmsp.Event)) - } - - return txs, hash, events, nil -} - -// Rollback or error -// Clears internal buffers -func (p *ProxyApp) RollbackSync() (err error) { - // Get hash. - p.queueRequestAsync(tmsp.RequestRollback{}) - // Flush everything. 
- p.queueRequestSync(tmsp.RequestFlush{}) - // Maybe there was an error in response matching - if p.err != nil { - return p.err - } - p.reqSent = list.New() - p.reqPending = nil - p.resReceived = list.New() - p.eventsReceived = list.New() - return nil -} - -func (p *ProxyApp) InfoSync() []string { - p.queueRequestAsync(tmsp.RequestInfo{}) - p.queueRequestSync(tmsp.RequestFlush{}) - return p.resReceived.Back().Prev().Value.(tmsp.ResponseInfo).Data -} - -func (p *ProxyApp) FlushSync() { - p.queueRequestSync(tmsp.RequestFlush{}) -} - -//---------------------------------------- - -func (p *ProxyApp) queueRequestAsync(req tmsp.Request) { - qreq := QueuedRequest{Request: req} - p.reqQueue <- qreq -} - -func (p *ProxyApp) queueRequestSync(req tmsp.Request) { - qreq := QueuedRequest{ - req, - true, - waitGroup1(), - } - p.reqQueue <- qreq - qreq.Wait() -} - -//---------------------------------------- - -func waitGroup1() (wg *sync.WaitGroup) { - wg = &sync.WaitGroup{} - wg.Add(1) - return -} - -func resMatchesReq(req tmsp.Request, res tmsp.Response) (ok bool) { - switch req.(type) { - case tmsp.RequestEcho: - _, ok = res.(tmsp.ResponseEcho) - case tmsp.RequestFlush: - _, ok = res.(tmsp.ResponseFlush) - case tmsp.RequestInfo: - _, ok = res.(tmsp.ResponseInfo) - case tmsp.RequestAppendTx: - _, ok = res.(tmsp.ResponseAppendTx) - case tmsp.RequestGetHash: - _, ok = res.(tmsp.ResponseGetHash) - case tmsp.RequestCommit: - _, ok = res.(tmsp.ResponseCommit) - case tmsp.RequestRollback: - _, ok = res.(tmsp.ResponseRollback) - case tmsp.RequestSetEventsMode: - _, ok = res.(tmsp.ResponseSetEventsMode) - case tmsp.RequestAddListener: - _, ok = res.(tmsp.ResponseAddListener) - case tmsp.RequestRemListener: - _, ok = res.(tmsp.ResponseRemListener) - default: - return false - } - return -} - -type QueuedRequest struct { - tmsp.Request - Sync bool - *sync.WaitGroup -} diff --git a/proxy/remote_app_context.go b/proxy/remote_app_context.go new file mode 100644 index 000000000..09a7db5a5 --- /dev/null +++ b/proxy/remote_app_context.go @@ -0,0 +1,306 @@ +package proxy + +import ( + "bufio" + "container/list" + "errors" + "fmt" + "net" + "reflect" + "sync" + + . "github.com/tendermint/go-common" + "github.com/tendermint/go-wire" + tmsp "github.com/tendermint/tmsp/types" +) + +const maxResponseSize = 1048576 // 1MB + +// This is goroutine-safe, but users should beware that +// the application in general is not meant to be interfaced +// with concurrent callers. 
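The remote app context below pipelines requests over one connection: every queued request is remembered in a FIFO list, each incoming response is matched against the oldest pending request, and synchronous callers block on a WaitGroup until their response has been filled in. A stripped-down sketch of that bookkeeping, with a buffered channel standing in for the TCP connection and plain strings standing in for tmsp requests and responses:

```go
package main

import (
	"container/list"
	"fmt"
	"sync"
)

// reqRes pairs a request with its eventual response; Wait() blocks
// until the receive routine has set res and called Done().
type reqRes struct {
	req string
	res string
	wg  sync.WaitGroup
}

type client struct {
	mtx     sync.Mutex
	pending *list.List  // FIFO of *reqRes awaiting responses
	wire    chan string // stand-in for the network connection
}

func newClient() *client {
	c := &client{pending: list.New(), wire: make(chan string, 64)}
	go c.recvRoutine()
	return c
}

// queueRequest records the request as pending, then "sends" it.
func (c *client) queueRequest(req string) *reqRes {
	rr := &reqRes{req: req}
	rr.wg.Add(1)
	c.mtx.Lock()
	c.pending.PushBack(rr)
	c.mtx.Unlock()
	c.wire <- req // the fake server echoes each request back as a response
	return rr
}

// recvRoutine matches every response to the oldest pending request.
func (c *client) recvRoutine() {
	for res := range c.wire {
		c.mtx.Lock()
		front := c.pending.Front()
		c.pending.Remove(front)
		c.mtx.Unlock()
		rr := front.Value.(*reqRes)
		rr.res = "ok:" + res
		rr.wg.Done() // release the synchronous waiter, if any
	}
}

// callSync is the synchronous form: queue, then wait for the match.
func (c *client) callSync(req string) string {
	rr := c.queueRequest(req)
	rr.wg.Wait()
	return rr.res
}

func main() {
	c := newClient()
	fmt.Println(c.callSync("GetHash")) // ok:GetHash
	fmt.Println(c.callSync("Commit"))  // ok:Commit
}
```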
+type remoteAppContext struct { + QuitService + sync.Mutex + + reqQueue chan *reqRes + + mtx sync.Mutex + conn net.Conn + bufWriter *bufio.Writer + err error + reqSent *list.List + resCb func(tmsp.Request, tmsp.Response) +} + +func NewRemoteAppContext(conn net.Conn, bufferSize int) *remoteAppContext { + app := &remoteAppContext{ + reqQueue: make(chan *reqRes, bufferSize), + conn: conn, + bufWriter: bufio.NewWriter(conn), + reqSent: list.New(), + resCb: nil, + } + app.QuitService = *NewQuitService(nil, "remoteAppContext", app) + return app +} + +func (app *remoteAppContext) OnStart() error { + app.QuitService.OnStart() + go app.sendRequestsRoutine() + go app.recvResponseRoutine() + return nil +} + +func (app *remoteAppContext) OnStop() { + app.QuitService.OnStop() + app.conn.Close() +} + +func (app *remoteAppContext) SetResponseCallback(resCb Callback) { + app.mtx.Lock() + defer app.mtx.Unlock() + app.resCb = resCb +} + +func (app *remoteAppContext) StopForError(err error) { + app.mtx.Lock() + fmt.Println("Stopping remoteAppContext for error:", err) + if app.err == nil { + app.err = err + } + app.mtx.Unlock() + app.Stop() +} + +func (app *remoteAppContext) Error() error { + app.mtx.Lock() + defer app.mtx.Unlock() + return app.err +} + +//---------------------------------------- + +func (app *remoteAppContext) sendRequestsRoutine() { + for { + var n int + var err error + select { + case <-app.QuitService.Quit: + return + case reqres := <-app.reqQueue: + wire.WriteBinary(reqres.Request, app.bufWriter, &n, &err) + if err != nil { + app.StopForError(err) + return + } + if _, ok := reqres.Request.(tmsp.RequestFlush); ok { + err = app.bufWriter.Flush() + if err != nil { + app.StopForError(err) + return + } + } + app.didSendReq(reqres) + } + } +} + +func (app *remoteAppContext) recvResponseRoutine() { + r := bufio.NewReader(app.conn) // Buffer reads + for { + var res tmsp.Response + var n int + var err error + wire.ReadBinaryPtr(&res, r, maxResponseSize, &n, &err) + if err != nil { + app.StopForError(err) + return + } + switch res := res.(type) { + case tmsp.ResponseException: + app.StopForError(errors.New(res.Error)) + default: + err := app.didRecvResponse(res) + if err != nil { + app.StopForError(err) + } + } + } +} + +func (app *remoteAppContext) didSendReq(reqres *reqRes) { + app.mtx.Lock() + defer app.mtx.Unlock() + app.reqSent.PushBack(reqres) +} + +func (app *remoteAppContext) didRecvResponse(res tmsp.Response) error { + app.mtx.Lock() + defer app.mtx.Unlock() + + // Special logic for events which have no corresponding requests. 
+ if _, ok := res.(tmsp.ResponseEvent); ok && app.resCb != nil { + app.resCb(nil, res) + return nil + } + + // Get the first reqRes + next := app.reqSent.Front() + if next == nil { + return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res)) + } + reqres := next.Value.(*reqRes) + if !resMatchesReq(reqres.Request, res) { + return fmt.Errorf("Unexpected result type %v when response to %v expected", + reflect.TypeOf(res), reflect.TypeOf(reqres.Request)) + } + + reqres.Response = res // Set response + reqres.Done() // Release waiters + app.reqSent.Remove(next) // Pop first item from linked list + + // Callback if there is a listener + if app.resCb != nil { + app.resCb(reqres.Request, res) + } + + return nil +} + +//---------------------------------------- + +func (app *remoteAppContext) EchoAsync(msg string) { + app.queueRequest(tmsp.RequestEcho{msg}) +} + +func (app *remoteAppContext) FlushAsync() { + app.queueRequest(tmsp.RequestFlush{}) +} + +func (app *remoteAppContext) SetOptionAsync(key string, value string) { + app.queueRequest(tmsp.RequestSetOption{key, value}) +} + +func (app *remoteAppContext) AppendTxAsync(tx []byte) { + app.queueRequest(tmsp.RequestAppendTx{tx}) +} + +func (app *remoteAppContext) GetHashAsync() { + app.queueRequest(tmsp.RequestGetHash{}) +} + +func (app *remoteAppContext) CommitAsync() { + app.queueRequest(tmsp.RequestCommit{}) +} + +func (app *remoteAppContext) RollbackAsync() { + app.queueRequest(tmsp.RequestRollback{}) +} + +func (app *remoteAppContext) AddListenerAsync(key string) { + app.queueRequest(tmsp.RequestAddListener{key}) +} + +func (app *remoteAppContext) RemListenerAsync(key string) { + app.queueRequest(tmsp.RequestRemListener{key}) +} + +//---------------------------------------- + +func (app *remoteAppContext) InfoSync() (info []string, err error) { + reqres := app.queueRequest(tmsp.RequestInfo{}) + app.FlushSync() + if app.err != nil { + return nil, app.err + } + return reqres.Response.(tmsp.ResponseInfo).Data, nil +} + +func (app *remoteAppContext) FlushSync() error { + app.queueRequest(tmsp.RequestFlush{}).Wait() + return app.err +} + +func (app *remoteAppContext) GetHashSync() (hash []byte, err error) { + reqres := app.queueRequest(tmsp.RequestGetHash{}) + app.FlushSync() + if app.err != nil { + return nil, app.err + } + return reqres.Response.(tmsp.ResponseGetHash).Hash, nil +} + +// Commits or error +func (app *remoteAppContext) CommitSync() (err error) { + app.queueRequest(tmsp.RequestCommit{}) + app.FlushSync() + return app.err +} + +// Rollback or error +// Clears internal buffers +func (app *remoteAppContext) RollbackSync() (err error) { + app.queueRequest(tmsp.RequestRollback{}) + app.FlushSync() + return app.err +} + +//---------------------------------------- + +func (app *remoteAppContext) queueRequest(req tmsp.Request) *reqRes { + reqres := NewreqRes(req) + // TODO: set app.err if reqQueue times out + app.reqQueue <- reqres + return reqres +} + +//---------------------------------------- + +func resMatchesReq(req tmsp.Request, res tmsp.Response) (ok bool) { + switch req.(type) { + case tmsp.RequestEcho: + _, ok = res.(tmsp.ResponseEcho) + case tmsp.RequestFlush: + _, ok = res.(tmsp.ResponseFlush) + case tmsp.RequestInfo: + _, ok = res.(tmsp.ResponseInfo) + case tmsp.RequestSetOption: + _, ok = res.(tmsp.ResponseSetOption) + case tmsp.RequestAppendTx: + _, ok = res.(tmsp.ResponseAppendTx) + case tmsp.RequestGetHash: + _, ok = res.(tmsp.ResponseGetHash) + case tmsp.RequestCommit: + _, ok = 
res.(tmsp.ResponseCommit) + case tmsp.RequestRollback: + _, ok = res.(tmsp.ResponseRollback) + case tmsp.RequestAddListener: + _, ok = res.(tmsp.ResponseAddListener) + case tmsp.RequestRemListener: + _, ok = res.(tmsp.ResponseRemListener) + default: + return false + } + return +} + +type reqRes struct { + tmsp.Request + *sync.WaitGroup + tmsp.Response // Not set atomically, so be sure to use WaitGroup. +} + +func NewreqRes(req tmsp.Request) *reqRes { + return &reqRes{ + Request: req, + WaitGroup: waitGroup1(), + Response: nil, + } +} + +func waitGroup1() (wg *sync.WaitGroup) { + wg = &sync.WaitGroup{} + wg.Add(1) + return +} diff --git a/proxy/proxy_app_test.go b/proxy/remote_app_context_test.go similarity index 81% rename from proxy/proxy_app_test.go rename to proxy/remote_app_context_test.go index c387bbbba..15e97eee7 100644 --- a/proxy/proxy_app_test.go +++ b/proxy/remote_app_context_test.go @@ -26,7 +26,8 @@ func TestEcho(t *testing.T) { logBuffer := bytes.NewBuffer(nil) logConn := logio.NewLoggedConn(conn, logBuffer) - proxy := NewProxyApp(logConn, 10) + proxy := NewRemoteAppContext(logConn, 10) + proxy.SetResponseCallback(nil) proxy.Start() for i := 0; i < 1000; i++ { @@ -34,17 +35,11 @@ func TestEcho(t *testing.T) { } proxy.FlushSync() - if proxy.reqSent.Len() != 1001 { - t.Error(Fmt("Expected 1001 requests sent, got %v", - proxy.reqSent.Len())) - } - if proxy.resReceived.Len() != 1001 { - t.Error(Fmt("Expected 1001 responses received, got %v", - proxy.resReceived.Len())) - } - if t.Failed() { - logio.PrintReader(logBuffer) - } + /* + if t.Failed() { + logio.PrintReader(logBuffer) + } + */ } func BenchmarkEcho(b *testing.B) { @@ -61,7 +56,7 @@ func BenchmarkEcho(b *testing.B) { b.Log("Connected") } - proxy := NewProxyApp(conn, 10) + proxy := NewRemoteAppContext(conn, 10) proxy.Start() echoString := strings.Repeat(" ", 200) b.StartTimer() // Start benchmarking tests @@ -91,10 +86,12 @@ func TestInfo(t *testing.T) { logBuffer := bytes.NewBuffer(nil) logConn := logio.NewLoggedConn(conn, logBuffer) - proxy := NewProxyApp(logConn, 10) + proxy := NewRemoteAppContext(logConn, 10) proxy.Start() - data := proxy.InfoSync() - + data, err := proxy.InfoSync() + if err != nil { + t.Errorf("Unexpected error: %v", err) + } if data[0] != "size:0" { t.Error("Expected ResponseInfo with one element 'size:0' but got something else") } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index cf9119835..fa294f719 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -18,6 +18,6 @@ func BroadcastTx(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { } func ListUnconfirmedTxs() (*ctypes.ResultListUnconfirmedTxs, error) { - txs := mempoolReactor.Mempool.GetProposalTxs() - return &ctypes.ResultListUnconfirmedTxs{len(txs), txs}, nil + txs, _, err := mempoolReactor.Mempool.Reap() + return &ctypes.ResultListUnconfirmedTxs{len(txs), txs}, err } diff --git a/rpc/core/net.go b/rpc/core/net.go index 3dd9118ef..1413595c1 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -1,23 +1,14 @@ package core import ( - dbm "github.com/tendermint/go-db" ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- -// cache the genesis state -var genesisState *sm.State - +// TODO Move to status.go or node.go func Status() (*ctypes.ResultStatus, error) { - db := dbm.NewMemDB() - if genesisState == nil { - genesisState = sm.MakeGenesisState(db, genDoc) - } 
- genesisHash := genesisState.Hash() latestHeight := blockStore.Height() var ( latestBlockMeta *types.BlockMeta @@ -32,7 +23,6 @@ func Status() (*ctypes.ResultStatus, error) { return &ctypes.ResultStatus{ NodeInfo: p2pSwitch.NodeInfo(), - GenesisHash: genesisHash, PubKey: privValidator.PubKey, LatestBlockHash: latestBlockHash, LatestBlockHeight: latestHeight, diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 5fbb22d83..ae00d7b47 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -40,6 +40,6 @@ func SetPrivValidator(pv *types.PrivValidator) { privValidator = pv } -func SetGenDoc(doc *types.GenesisDoc) { +func SetGenesisDoc(doc *types.GenesisDoc) { genDoc = doc } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 9867842f7..dccd0be9f 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -19,7 +19,6 @@ type ResultGetBlock struct { type ResultStatus struct { NodeInfo *p2p.NodeInfo `json:"node_info"` - GenesisHash []byte `json:"genesis_hash"` PubKey crypto.PubKey `json:"pub_key"` LatestBlockHash []byte `json:"latest_block_hash"` LatestBlockHeight int `json:"latest_block_height"` diff --git a/state/execution.go b/state/execution.go index 9c43e1c9a..69b533c89 100644 --- a/state/execution.go +++ b/state/execution.go @@ -3,32 +3,109 @@ package state import ( "bytes" "errors" + "fmt" . "github.com/tendermint/go-common" - "github.com/tendermint/tendermint/events" + "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" + tmsp "github.com/tendermint/tmsp/types" ) -// NOTE: If an error occurs during block execution, state will be left -// at an invalid state. Copy the state before calling ExecBlock! -func ExecBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error { - err := execBlock(s, block, blockPartsHeader) +// Execute the block to mutate State. +// Also, execute txs on the proxyAppCtx and validate apphash +// Rolls back before executing transactions. +// Rolls back if invalid, but never commits. +func (s *State) ExecBlock(proxyAppCtx proxy.AppContext, block *types.Block, blockPartsHeader types.PartSetHeader) error { + + // Validate the block. + err := s.validateBlock(block) if err != nil { return err } - // State.Hash should match block.StateHash - stateHash := s.Hash() - if !bytes.Equal(stateHash, block.StateHash) { - return errors.New(Fmt("Invalid state hash. Expected %X, got %X", - stateHash, block.StateHash)) + + // Update the validator set + valSet := s.Validators.Copy() + // Update valSet with signatures from block. + updateValidatorsWithBlock(s.LastValidators, valSet, block) + // TODO: Update the validator set (e.g. block.Data.ValidatorUpdates?) + nextValSet := valSet.Copy() + + // First, rollback. + if err != nil { + proxyAppCtx.RollbackSync() + return err } + + // Execute, or rollback. (Does not commit) + err = s.execBlockOnProxyApp(proxyAppCtx, block) + if err != nil { + proxyAppCtx.RollbackSync() + return err + } + + // All good! + nextValSet.IncrementAccum(1) + s.Validators = nextValSet + s.LastValidators = valSet + s.LastAppHash = block.AppHash + s.LastBlockHeight = block.Height + s.LastBlockHash = block.Hash() + s.LastBlockParts = blockPartsHeader + s.LastBlockTime = block.Time + return nil } -// executes transactions of a block, does not check block.StateHash -// NOTE: If an error occurs during block execution, state will be left -// at an invalid state. Copy the state before calling execBlock! 
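The new execution path hands every transaction to the proxy app (execBlockOnProxyApp, below), asks the app for its resulting hash, and then requires that hash to match the AppHash committed to in the block header. A minimal sketch of that final check; the error type mirrors the InvalidAppHashError introduced in this diff, while the helper around it is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
)

// invalidAppHashError mirrors the shape of InvalidAppHashError in this diff.
type invalidAppHashError struct {
	Expected []byte
	Got      []byte
}

func (e invalidAppHashError) Error() string {
	return fmt.Sprintf("Invalid hash: [%X] got: [%X]", e.Expected, e.Got)
}

// checkAppHash compares the app hash the block header claims against
// the hash the application reported after executing the block's txs.
func checkAppHash(headerAppHash, computed []byte) error {
	if !bytes.Equal(headerAppHash, computed) {
		return invalidAppHashError{Expected: headerAppHash, Got: computed}
	}
	return nil
}

func main() {
	fmt.Println(checkAppHash([]byte{0x01}, []byte{0x01})) // <nil>
	fmt.Println(checkAppHash([]byte{0x01}, []byte{0x02})) // Invalid hash: [01] got: [02]
}
```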
-func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeader) error { +// Commits block on proxyAppCtx. +func (s *State) Commit(proxyAppCtx proxy.AppContext) error { + err := proxyAppCtx.CommitSync() + return err +} + +// Executes transactions on proxyAppCtx. +func (s *State) execBlockOnProxyApp(proxyAppCtx proxy.AppContext, block *types.Block) error { + // Execute transactions and get hash + var invalidTxErr error + proxyCb := func(req tmsp.Request, res tmsp.Response) { + switch res := res.(type) { + case tmsp.ResponseAppendTx: + reqAppendTx := req.(tmsp.RequestAppendTx) + if res.RetCode != tmsp.RetCodeOK { + if invalidTxErr == nil { + invalidTxErr = InvalidTxError{reqAppendTx.TxBytes, res.RetCode} + } + } + case tmsp.ResponseEvent: + s.evc.FireEvent(types.EventStringApp(), types.EventDataApp{res.Key, res.Data}) + } + } + proxyAppCtx.SetResponseCallback(proxyCb) + for _, tx := range block.Data.Txs { + proxyAppCtx.AppendTxAsync(tx) + if err := proxyAppCtx.Error(); err != nil { + return err + } + } + hash, err := proxyAppCtx.GetHashSync() + if err != nil { + log.Warn("Error computing proxyAppCtx hash", "error", err) + return err + } + if invalidTxErr != nil { + log.Warn("Invalid transaction in block") + return invalidTxErr + } + + // Check that appHash matches + if !bytes.Equal(block.AppHash, hash) { + log.Warn(Fmt("App hash in proposal was %X, computed %X instead", block.AppHash, hash)) + return InvalidAppHashError{block.AppHash, hash} + } + + return nil +} + +func (s *State) validateBlock(block *types.Block) error { // Basic block validation. err := block.ValidateBasic(s.ChainID, s.LastBlockHeight, s.LastBlockHash, s.LastBlockParts, s.LastBlockTime) if err != nil { @@ -42,8 +119,8 @@ func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeade } } else { if len(block.LastValidation.Precommits) != s.LastValidators.Size() { - return errors.New(Fmt("Invalid block validation size. Expected %v, got %v", - s.LastValidators.Size(), len(block.LastValidation.Precommits))) + return fmt.Errorf("Invalid block validation size. Expected %v, got %v", + s.LastValidators.Size(), len(block.LastValidation.Precommits)) } err := s.LastValidators.VerifyValidation( s.ChainID, s.LastBlockHash, s.LastBlockParts, block.Height-1, block.LastValidation) @@ -52,66 +129,53 @@ func execBlock(s *State, block *types.Block, blockPartsHeader types.PartSetHeade } } - // Update Validator.LastCommitHeight as necessary. + return nil +} + +// Updates the LastCommitHeight of the validators in valSet, in place. +// Assumes that lastValSet matches the valset of block.LastValidators +// CONTRACT: lastValSet is not mutated. +func updateValidatorsWithBlock(lastValSet *types.ValidatorSet, valSet *types.ValidatorSet, block *types.Block) { + for i, precommit := range block.LastValidation.Precommits { if precommit == nil { continue } - _, val := s.LastValidators.GetByIndex(i) + _, val := lastValSet.GetByIndex(i) if val == nil { PanicCrisis(Fmt("Failed to fetch validator at index %v", i)) } - if _, val_ := s.Validators.GetByAddress(val.Address); val_ != nil { + if _, val_ := valSet.GetByAddress(val.Address); val_ != nil { val_.LastCommitHeight = block.Height - 1 - updated := s.Validators.Update(val_) + updated := valSet.Update(val_) if !updated { PanicCrisis("Failed to update validator LastCommitHeight") } } else { + // XXX This is not an error if validator was removed. + // But, we don't mutate validators yet so go ahead and panic. 
PanicCrisis("Could not find validator") } } - // Remember LastValidators - s.LastValidators = s.Validators.Copy() - - // Execute each tx - for _, tx := range block.Data.Txs { - err := ExecTx(s, tx, s.evc) - if err != nil { - return InvalidTxError{tx, err} - } - } - - // Increment validator AccumPowers - s.Validators.IncrementAccum(1) - s.LastBlockHeight = block.Height - s.LastBlockHash = block.Hash() - s.LastBlockParts = blockPartsHeader - s.LastBlockTime = block.Time - return nil -} - -// If the tx is invalid, an error will be returned. -// Unlike ExecBlock(), state will not be altered. -func ExecTx(s *State, tx types.Tx, evc events.Fireable) (err error) { - - // TODO: do something with fees - //fees := int64(0) - //_s := blockCache.State() // hack to access validators and block height - - // XXX Query ledger application - return nil - } //----------------------------------------------------------------------------- type InvalidTxError struct { - Tx types.Tx - Reason error + Tx types.Tx + tmsp.RetCode } func (txErr InvalidTxError) Error() string { - return Fmt("Invalid tx: [%v] reason: [%v]", txErr.Tx, txErr.Reason) + return Fmt("Invalid tx: [%v] code: [%v]", txErr.Tx, txErr.RetCode) +} + +type InvalidAppHashError struct { + Expected []byte + Got []byte +} + +func (hashErr InvalidAppHashError) Error() string { + return Fmt("Invalid hash: [%X] got: [%X]", hashErr.Expected, hashErr.Got) } diff --git a/state/state.go b/state/state.go index c9153da80..32a6bf179 100644 --- a/state/state.go +++ b/state/state.go @@ -3,11 +3,11 @@ package state import ( "bytes" "io/ioutil" + "sync" "time" . "github.com/tendermint/go-common" dbm "github.com/tendermint/go-db" - "github.com/tendermint/go-merkle" "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/events" "github.com/tendermint/tendermint/types" @@ -21,7 +21,9 @@ var ( // NOTE: not goroutine-safe. 
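LoadState and Save below serialize the whole State struct under a single fixed key and read it back on startup, falling back to genesis when nothing is stored yet. The real code uses go-wire and a database handle; this standalone sketch substitutes encoding/json and an in-memory map just to show the shape of the round trip:

```go
package main

import (
	"encoding/json"
	"fmt"
)

var stateKey = []byte("stateKey")

// kvStore is a stand-in for the "state" database.
type kvStore map[string][]byte

// miniState carries only a few representative fields.
type miniState struct {
	ChainID         string `json:"chain_id"`
	LastBlockHeight int    `json:"last_block_height"`
	LastAppHash     []byte `json:"last_app_hash"`
}

func (s *miniState) save(db kvStore) error {
	buf, err := json.Marshal(s)
	if err != nil {
		return err
	}
	db[string(stateKey)] = buf
	return nil
}

// loadState returns nil when no state has been saved yet, in which
// case the caller falls back to the genesis state.
func loadState(db kvStore) (*miniState, error) {
	buf, ok := db[string(stateKey)]
	if !ok || len(buf) == 0 {
		return nil, nil
	}
	s := new(miniState)
	if err := json.Unmarshal(buf, s); err != nil {
		return nil, err
	}
	return s, nil
}

func main() {
	db := kvStore{}
	(&miniState{ChainID: "test-chain", LastBlockHeight: 7}).save(db)
	s, _ := loadState(db)
	fmt.Println(s.ChainID, s.LastBlockHeight) // test-chain 7
}
```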
type State struct { - DB dbm.DB + mtx sync.Mutex + db dbm.DB + GenesisDoc *types.GenesisDoc ChainID string LastBlockHeight int LastBlockHash []byte @@ -29,24 +31,19 @@ type State struct { LastBlockTime time.Time Validators *types.ValidatorSet LastValidators *types.ValidatorSet + LastAppHash []byte evc events.Fireable // typically an events.EventCache } func LoadState(db dbm.DB) *State { - s := &State{DB: db} + s := &State{db: db} buf := db.Get(stateKey) if len(buf) == 0 { return nil } else { r, n, err := bytes.NewReader(buf), new(int), new(error) - s.ChainID = wire.ReadString(r, 0, n, err) - s.LastBlockHeight = wire.ReadVarint(r, n, err) - s.LastBlockHash = wire.ReadByteSlice(r, 0, n, err) - s.LastBlockParts = wire.ReadBinary(types.PartSetHeader{}, r, 0, n, err).(types.PartSetHeader) - s.LastBlockTime = wire.ReadTime(r, n, err) - s.Validators = wire.ReadBinary(&types.ValidatorSet{}, r, 0, n, err).(*types.ValidatorSet) - s.LastValidators = wire.ReadBinary(&types.ValidatorSet{}, r, 0, n, err).(*types.ValidatorSet) + wire.ReadBinaryPtr(&s, r, 0, n, err) if *err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED Exit(Fmt("Data has been corrupted or its spec has changed: %v\n", *err)) @@ -56,77 +53,52 @@ func LoadState(db dbm.DB) *State { return s } -func (s *State) Save() { - buf, n, err := new(bytes.Buffer), new(int), new(error) - wire.WriteString(s.ChainID, buf, n, err) - wire.WriteVarint(s.LastBlockHeight, buf, n, err) - wire.WriteByteSlice(s.LastBlockHash, buf, n, err) - wire.WriteBinary(s.LastBlockParts, buf, n, err) - wire.WriteTime(s.LastBlockTime, buf, n, err) - wire.WriteBinary(s.Validators, buf, n, err) - wire.WriteBinary(s.LastValidators, buf, n, err) - if *err != nil { - PanicCrisis(*err) - } - s.DB.Set(stateKey, buf.Bytes()) -} - -// CONTRACT: -// Copy() is a cheap way to take a snapshot, -// as if State were copied by value. func (s *State) Copy() *State { return &State{ - DB: s.DB, + db: s.db, + GenesisDoc: s.GenesisDoc, ChainID: s.ChainID, LastBlockHeight: s.LastBlockHeight, LastBlockHash: s.LastBlockHash, LastBlockParts: s.LastBlockParts, LastBlockTime: s.LastBlockTime, - Validators: s.Validators.Copy(), // TODO remove need for Copy() here. - LastValidators: s.LastValidators.Copy(), // That is, make updates to the validator set + Validators: s.Validators.Copy(), + LastValidators: s.LastValidators.Copy(), + LastAppHash: s.LastAppHash, evc: nil, } } -// Returns a hash that represents the state data, excluding Last* -func (s *State) Hash() []byte { - return merkle.SimpleHashFromMap(map[string]interface{}{ - "Validators": s.Validators, - }) -} +func (s *State) Save() { + s.mtx.Lock() + defer s.mtx.Unlock() -// Mutates the block in place and updates it with new state hash. -func (s *State) ComputeBlockStateHash(block *types.Block) error { - sCopy := s.Copy() - // sCopy has no event cache in it, so this won't fire events - err := execBlock(sCopy, block, types.PartSetHeader{}) - if err != nil { - return err + buf, n, err := new(bytes.Buffer), new(int), new(error) + wire.WriteBinary(s, buf, n, err) + if *err != nil { + PanicCrisis(*err) } - // Set block.StateHash - block.StateHash = sCopy.Hash() - return nil -} - -func (s *State) SetDB(db dbm.DB) { - s.DB = db + s.db.Set(stateKey, buf.Bytes()) } // Implements events.Eventable. 
Typically uses events.EventCache func (s *State) SetFireable(evc events.Fireable) { + s.mtx.Lock() + defer s.mtx.Unlock() + s.evc = evc } //----------------------------------------------------------------------------- // Genesis -func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) (*types.GenesisDoc, *State) { - jsonBlob, err := ioutil.ReadFile(genDocFile) +func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) *State { + genDocJSON, err := ioutil.ReadFile(genDocFile) if err != nil { Exit(Fmt("Couldn't read GenesisDoc file: %v", err)) } - genDoc := types.GenesisDocFromJSON(jsonBlob) - return genDoc, MakeGenesisState(db, genDoc) + genDoc := types.GenesisDocFromJSON(genDocJSON) + return MakeGenesisState(db, genDoc) } func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State { @@ -138,8 +110,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State { genDoc.GenesisTime = time.Now() } - // XXX Speak to application, ensure genesis state. - // Make validators slice validators := make([]*types.Validator, len(genDoc.Validators)) for i, val := range genDoc.Validators { @@ -155,7 +125,8 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State { } return &State{ - DB: db, + db: db, + GenesisDoc: genDoc, ChainID: genDoc.ChainID, LastBlockHeight: 0, LastBlockHash: nil, @@ -163,13 +134,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) *State { LastBlockTime: genDoc.GenesisTime, Validators: types.NewValidatorSet(validators), LastValidators: types.NewValidatorSet(nil), + LastAppHash: genDoc.AppHash, } } - -func RandGenesisState(numValidators int, randPower bool, minPower int64) (*State, []*types.PrivValidator) { - db := dbm.NewMemDB() - genDoc, privValidators := types.RandGenesisDoc(numValidators, randPower, minPower) - s0 := MakeGenesisState(db, genDoc) - s0.Save() - return s0, privValidators -} diff --git a/types/block.go b/types/block.go index 20d139ff5..752d6a562 100644 --- a/types/block.go +++ b/types/block.go @@ -57,7 +57,7 @@ func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockHash if !bytes.Equal(b.DataHash, b.Data.Hash()) { return errors.New(Fmt("Wrong Block.Header.DataHash. Expected %X, got %X", b.DataHash, b.Data.Hash())) } - // NOTE: the StateHash is validated later. + // NOTE: the AppHash and ValidatorsHash are validated later. return nil } @@ -67,8 +67,7 @@ func (b *Block) FillHeader() { } // Computes and returns the block hash. -// If the block is incomplete (e.g. missing Header.StateHash) -// then the hash is nil, to prevent the usage of that hash. +// If the block is incomplete, block hash is nil for safety. func (b *Block) Hash() []byte { if b.Header == nil || b.Data == nil || b.LastValidation == nil { return nil @@ -133,12 +132,13 @@ type Header struct { LastBlockParts PartSetHeader `json:"last_block_parts"` LastValidationHash []byte `json:"last_validation_hash"` DataHash []byte `json:"data_hash"` - StateHash []byte `json:"state_hash"` + ValidatorsHash []byte `json:"validators_hash"` + AppHash []byte `json:"app_hash"` } // NOTE: hash is nil if required fields are missing. 
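Header.Hash below commits to each named header field through merkle.SimpleHashFromMap, so replacing StateHash with ValidatorsHash and AppHash changes what the block hash covers. As a rough illustration of the underlying idea only, and not go-merkle's actual construction, a deterministic hash over a map of fields can be built by sorting the keys and folding them into one digest:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// hashFromMap folds sorted key/value pairs into a single SHA-256 digest.
// This only illustrates determinism over named fields; the real code
// builds a simple Merkle tree over them instead.
func hashFromMap(fields map[string][]byte) []byte {
	keys := make([]string, 0, len(fields))
	for k := range fields {
		keys = append(keys, k)
	}
	sort.Strings(keys) // map iteration order is random; sort for determinism
	h := sha256.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write(fields[k])
	}
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%X\n", hashFromMap(map[string][]byte{
		"ChainID":    []byte("test-chain"),
		"Validators": []byte{0x01},
		"App":        []byte{0x02},
	}))
}
```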
func (h *Header) Hash() []byte { - if len(h.StateHash) == 0 { + if len(h.ValidatorsHash) == 0 { return nil } return merkle.SimpleHashFromMap(map[string]interface{}{ @@ -151,7 +151,8 @@ func (h *Header) Hash() []byte { "LastBlockParts": h.LastBlockParts, "LastValidation": h.LastValidationHash, "Data": h.DataHash, - "State": h.StateHash, + "Validators": h.ValidatorsHash, + "App": h.AppHash, }) } @@ -165,9 +166,12 @@ func (h *Header) StringIndented(indent string) string { %s Time: %v %s Fees: %v %s NumTxs: %v -%s LastBlockHash: %X +%s LastBlock: %X %s LastBlockParts: %v -%s StateHash: %X +%s LastValidation: %X +%s Data: %X +%s Validators: %X +%s App: %X %s}#%X`, indent, h.ChainID, indent, h.Height, @@ -176,7 +180,10 @@ func (h *Header) StringIndented(indent string) string { indent, h.NumTxs, indent, h.LastBlockHash, indent, h.LastBlockParts, - indent, h.StateHash, + indent, h.LastValidationHash, + indent, h.DataHash, + indent, h.ValidatorsHash, + indent, h.AppHash, indent, h.Hash()) } diff --git a/types/events.go b/types/events.go index be442f8ba..3b1598414 100644 --- a/types/events.go +++ b/types/events.go @@ -1,37 +1,31 @@ package types import ( - "fmt" "time" - . "github.com/tendermint/go-common" "github.com/tendermint/go-wire" ) // Functions to generate eventId strings -func EventStringAccInput(addr []byte) string { return fmt.Sprintf("Acc/%X/Input", addr) } -func EventStringAccOutput(addr []byte) string { return fmt.Sprintf("Acc/%X/Output", addr) } -func EventStringAccCall(addr []byte) string { return fmt.Sprintf("Acc/%X/Call", addr) } -func EventStringLogEvent(addr []byte) string { return fmt.Sprintf("Log/%X", addr) } -func EventStringPermissions(name string) string { return fmt.Sprintf("Permissions/%s", name) } -func EventStringNameReg(name string) string { return fmt.Sprintf("NameReg/%s", name) } -func EventStringBond() string { return "Bond" } -func EventStringUnbond() string { return "Unbond" } -func EventStringRebond() string { return "Rebond" } -func EventStringDupeout() string { return "Dupeout" } -func EventStringNewBlock() string { return "NewBlock" } -func EventStringFork() string { return "Fork" } - -func EventStringNewRound() string { return fmt.Sprintf("NewRound") } -func EventStringTimeoutPropose() string { return fmt.Sprintf("TimeoutPropose") } -func EventStringCompleteProposal() string { return fmt.Sprintf("CompleteProposal") } -func EventStringPolka() string { return fmt.Sprintf("Polka") } -func EventStringUnlock() string { return fmt.Sprintf("Unlock") } -func EventStringLock() string { return fmt.Sprintf("Lock") } -func EventStringRelock() string { return fmt.Sprintf("Relock") } -func EventStringTimeoutWait() string { return fmt.Sprintf("TimeoutWait") } -func EventStringVote() string { return fmt.Sprintf("Vote") } +// Reserved +func EventStringBond() string { return "Bond" } +func EventStringUnbond() string { return "Unbond" } +func EventStringRebond() string { return "Rebond" } +func EventStringDupeout() string { return "Dupeout" } +func EventStringFork() string { return "Fork" } + +func EventStringNewBlock() string { return "NewBlock" } +func EventStringNewRound() string { return "NewRound" } +func EventStringTimeoutPropose() string { return "TimeoutPropose" } +func EventStringCompleteProposal() string { return "CompleteProposal" } +func EventStringPolka() string { return "Polka" } +func EventStringUnlock() string { return "Unlock" } +func EventStringLock() string { return "Lock" } +func EventStringRelock() string { return "Relock" } +func EventStringTimeoutWait() 
string { return "TimeoutWait" } +func EventStringVote() string { return "Vote" } +func EventStringApp() string { return "App" } //---------------------------------------- @@ -39,8 +33,7 @@ const ( EventDataTypeNewBlock = byte(0x01) EventDataTypeFork = byte(0x02) EventDataTypeTx = byte(0x03) - EventDataTypeCall = byte(0x04) - EventDataTypeLog = byte(0x05) + EventDataTypeApp = byte(0x04) // Custom app event EventDataTypeRoundState = byte(0x11) EventDataTypeVote = byte(0x12) @@ -55,8 +48,7 @@ var _ = wire.RegisterInterface( wire.ConcreteType{EventDataNewBlock{}, EventDataTypeNewBlock}, // wire.ConcreteType{EventDataFork{}, EventDataTypeFork }, wire.ConcreteType{EventDataTx{}, EventDataTypeTx}, - wire.ConcreteType{EventDataCall{}, EventDataTypeCall}, - wire.ConcreteType{EventDataLog{}, EventDataTypeLog}, + wire.ConcreteType{EventDataApp{}, EventDataTypeApp}, wire.ConcreteType{EventDataRoundState{}, EventDataTypeRoundState}, wire.ConcreteType{EventDataVote{}, EventDataTypeVote}, ) @@ -68,36 +60,16 @@ type EventDataNewBlock struct { Block *Block `json:"block"` } -// All txs fire EventDataTx, but only CallTx might have Return or Exception +// All txs fire EventDataTx type EventDataTx struct { Tx Tx `json:"tx"` Return []byte `json:"return"` Exception string `json:"exception"` } -// EventDataCall fires when we call a contract, and when a contract calls another contract -type EventDataCall struct { - CallData *CallData `json:"call_data"` - Origin []byte `json:"origin"` - TxID []byte `json:"tx_id"` - Return []byte `json:"return"` - Exception string `json:"exception"` -} - -type CallData struct { - Caller []byte `json:"caller"` - Callee []byte `json:"callee"` - Data []byte `json:"data"` - Value int64 `json:"value"` - Gas int64 `json:"gas"` -} - -// EventDataLog fires when a contract executes the LOG opcode -type EventDataLog struct { - Address Word256 `json:"address"` - Topics []Word256 `json:"topics"` - Data []byte `json:"data"` - Height int64 `json:"height"` +type EventDataApp struct { + Key string `json:"key"` + Data []byte `json:"bytes"` } // We fire the most recent round state that led to the event @@ -125,7 +97,6 @@ type EventDataVote struct { func (_ EventDataNewBlock) AssertIsEventData() {} func (_ EventDataTx) AssertIsEventData() {} -func (_ EventDataCall) AssertIsEventData() {} -func (_ EventDataLog) AssertIsEventData() {} +func (_ EventDataApp) AssertIsEventData() {} func (_ EventDataRoundState) AssertIsEventData() {} func (_ EventDataVote) AssertIsEventData() {} diff --git a/types/genesis.go b/types/genesis.go index 96bdf5052..6f0eaa9f3 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -1,7 +1,6 @@ package types import ( - "sort" "time" . 
"github.com/tendermint/go-common" @@ -27,6 +26,7 @@ type GenesisDoc struct { GenesisTime time.Time `json:"genesis_time"` ChainID string `json:"chain_id"` Validators []GenesisValidator `json:"validators"` + AppHash []byte `json:"app_hash"` } //------------------------------------------------------------ @@ -40,26 +40,3 @@ func GenesisDocFromJSON(jsonBlob []byte) (genState *GenesisDoc) { } return } - -//------------------------------------------------------------ -// Make random genesis state - -func RandGenesisDoc(numValidators int, randPower bool, minPower int64) (*GenesisDoc, []*PrivValidator) { - validators := make([]GenesisValidator, numValidators) - privValidators := make([]*PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privVal := RandValidator(randPower, minPower) - validators[i] = GenesisValidator{ - PubKey: val.PubKey, - Amount: val.VotingPower, - } - privValidators[i] = privVal - } - sort.Sort(PrivValidatorsByAddress(privValidators)) - return &GenesisDoc{ - GenesisTime: time.Now(), - ChainID: "tendermint_test", - Validators: validators, - }, privValidators - -} diff --git a/types/proposal.go b/types/proposal.go index bb212a47d..d69b60649 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -23,6 +23,7 @@ type Proposal struct { Signature crypto.SignatureEd25519 `json:"signature"` } +// polRound: -1 if no polRound. func NewProposal(height int, round int, blockPartsHeader PartSetHeader, polRound int) *Proposal { return &Proposal{ Height: height,