
Merge pull request #119 from tendermint/sdk2

Sdk2
Ethan Buchman, 7 years ago (committed via GitHub)
Parent commit: 1afc034006
32 changed files with 2093 additions and 897 deletions
  1. .gitignore (+1, -1)
  2. Makefile (+89, -29)
  3. circle.yml (+1, -1)
  4. common/bytes.go (+53, -0)
  5. common/bytes_test.go (+65, -0)
  6. common/errors.go (+4, -0)
  7. common/heap.go (+39, -17)
  8. common/http.go (+0, -153)
  9. common/http_test.go (+0, -250)
  10. common/kvpair.go (+67, -0)
  11. common/types.pb.go (+101, -0)
  12. common/types.proto (+24, -0)
  13. db/backend_test.go (+151, -0)
  14. db/c_level_db.go (+151, -24)
  15. db/c_level_db_test.go (+14, -4)
  16. db/common_test.go (+155, -0)
  17. db/db.go (+6, -36)
  18. db/fsdb.go (+254, -0)
  19. db/go_level_db.go (+147, -44)
  20. db/go_level_db_test.go (+4, -4)
  21. db/mem_batch.go (+50, -0)
  22. db/mem_db.go (+138, -75)
  23. db/mem_db_test.go (+0, -48)
  24. db/types.go (+133, -0)
  25. db/util.go (+60, -0)
  26. db/util_test.go (+93, -0)
  27. glide.lock (+26, -26)
  28. glide.yaml (+0, -1)
  29. merkle/simple_map.go (+86, -0)
  30. merkle/simple_map_test.go (+47, -0)
  31. merkle/simple_proof.go (+131, -0)
  32. merkle/simple_tree.go (+3, -184)

.gitignore (+1, -1)

@ -1,4 +1,4 @@
*.swp
*.sw[opqr]
vendor
.glide


Makefile (+89, -29)

@ -1,57 +1,117 @@
.PHONY: all test get_vendor_deps ensure_tools
GOTOOLS = \
github.com/Masterminds/glide \
github.com/alecthomas/gometalinter
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/gogo/protobuf/gogoproto
# github.com/alecthomas/gometalinter.v2 \
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
REPO:=github.com/tendermint/tmlibs
GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
all: test
all: check get_vendor_deps protoc build test install metalinter
test:
@echo "--> Running linter"
@make metalinter_test
@echo "--> Running go test"
@go test $(PACKAGES)
check: check_tools
########################################
### Build
protoc:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
protoc $(INCLUDE) --gogo_out=plugins=grpc:. common/*.proto
@echo "--> adding nolint declarations to protobuf generated files"
@awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new
@mv common/types.pb.go.new common/types.pb.go
build:
# Nothing to build!
install:
# Nothing to install!
########################################
### Tools & dependencies
check_tools:
@# https://stackoverflow.com/a/25668869
@echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\
$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"
get_tools:
@echo "--> Installing tools"
go get -u -v $(GOTOOLS)
# @gometalinter.v2 --install
get_vendor_deps: ensure_tools
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
cd protobuf-3.4.1 && \
DIST_LANG=cpp ./configure && \
make && \
make install && \
cd .. && \
rm -rf protobuf-3.4.1
update_tools:
@echo "--> Updating tools"
@go get -u $(GOTOOLS)
get_vendor_deps:
@rm -rf vendor/
@echo "--> Running glide install"
@glide install
ensure_tools:
go get $(GOTOOLS)
@gometalinter --install
########################################
### Testing
test:
go test -tags gcc `glide novendor`
metalinter:
gometalinter --vendor --deadline=600s --enable-all --disable=lll ./...
########################################
### Formatting, linting, and vetting
metalinter_test:
gometalinter --vendor --deadline=600s --disable-all \
fmt:
@go fmt ./...
metalinter:
@echo "==> Running linter"
gometalinter.v2 --vendor --deadline=600s --disable-all \
--enable=deadcode \
--enable=goconst \
--enable=goimports \
--enable=gosimple \
--enable=ineffassign \
--enable=interfacer \
--enable=ineffassign \
--enable=megacheck \
--enable=misspell \
--enable=staticcheck \
--enable=misspell \
--enable=staticcheck \
--enable=safesql \
--enable=structcheck \
--enable=unconvert \
--enable=structcheck \
--enable=unconvert \
--enable=unused \
--enable=varcheck \
--enable=varcheck \
--enable=vetshadow \
--enable=vet \
./...
#--enable=maligned \
#--enable=gas \
#--enable=aligncheck \
#--enable=dupl \
#--enable=errcheck \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=gotype \
#--enable=unparam \
#--enable=interfacer \
#--enable=unparam \
#--enable=vet \
metalinter_all:
protoc $(INCLUDE) --lint_out=. types/*.proto
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all
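In practice (mirroring the circle.yml change below), a fresh checkout is built and tested with `make get_tools && make get_vendor_deps && bash ./test.sh`; `make metalinter` runs the enabled linters and `make protoc` regenerates common/types.pb.go from common/types.proto.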

circle.yml (+1, -1)

@ -15,7 +15,7 @@ dependencies:
test:
override:
- cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh
- cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh
post:
- cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt
- cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}"

common/bytes.go (+53, -0)

@ -0,0 +1,53 @@
package common
import (
"encoding/hex"
"fmt"
"strings"
)
// The main purpose of HexBytes is to enable HEX-encoding for json/encoding.
type HexBytes []byte
// Marshal needed for protobuf compatibility
func (bz HexBytes) Marshal() ([]byte, error) {
return bz, nil
}
// Unmarshal needed for protobuf compatibility
func (bz *HexBytes) Unmarshal(data []byte) error {
*bz = data
return nil
}
// This is the point of HexBytes.
func (bz HexBytes) MarshalJSON() ([]byte, error) {
s := strings.ToUpper(hex.EncodeToString(bz))
jbz := make([]byte, len(s)+2)
jbz[0] = '"'
copy(jbz[1:], []byte(s))
jbz[len(jbz)-1] = '"'
return jbz, nil
}
// This is the point of HexBytes.
func (bz *HexBytes) UnmarshalJSON(data []byte) error {
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return fmt.Errorf("Invalid hex string: %s", data)
}
bz2, err := hex.DecodeString(string(data[1 : len(data)-1]))
if err != nil {
return err
}
*bz = bz2
return nil
}
// Allow it to fulfill various interfaces in light-client, etc...
func (bz HexBytes) Bytes() []byte {
return bz
}
func (bz HexBytes) String() string {
return strings.ToUpper(hex.EncodeToString(bz))
}
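Illustrative only, not part of this changeset: a minimal sketch of how the new HexBytes type differs from a plain []byte under encoding/json, matching the expectations in common/bytes_test.go below.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tmlibs/common"
)

func main() {
	type doc struct {
		Plain []byte          // encoding/json encodes []byte as base64
		Hex   common.HexBytes // HexBytes encodes as upper-case hex
	}
	bz, err := json.Marshal(doc{Plain: []byte("abc"), Hex: common.HexBytes("abc")})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // {"Plain":"YWJj","Hex":"616263"}
}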

common/bytes_test.go (+65, -0)

@ -0,0 +1,65 @@
package common
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// This is a trivial test for protobuf compatibility.
func TestMarshal(t *testing.T) {
bz := []byte("hello world")
dataB := HexBytes(bz)
bz2, err := dataB.Marshal()
assert.Nil(t, err)
assert.Equal(t, bz, bz2)
var dataB2 HexBytes
err = (&dataB2).Unmarshal(bz)
assert.Nil(t, err)
assert.Equal(t, dataB, dataB2)
}
// Test that the hex encoding works.
func TestJSONMarshal(t *testing.T) {
type TestStruct struct {
B1 []byte
B2 HexBytes
}
cases := []struct {
input []byte
expected string
}{
{[]byte(``), `{"B1":"","B2":""}`},
{[]byte(`a`), `{"B1":"YQ==","B2":"61"}`},
{[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`},
}
for i, tc := range cases {
t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) {
ts := TestStruct{B1: tc.input, B2: tc.input}
// Test that it marshals correctly to JSON.
jsonBytes, err := json.Marshal(ts)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, string(jsonBytes), tc.expected)
// TODO do fuzz testing to ensure that unmarshal fails
// Test that unmarshaling works correctly.
ts2 := TestStruct{}
err = json.Unmarshal(jsonBytes, &ts2)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, ts2.B1, tc.input)
assert.Equal(t, ts2.B2, HexBytes(tc.input))
})
}
}

common/errors.go (+4, -0)

@ -22,6 +22,7 @@ func (se StackError) Error() string {
// A panic resulting from a sanity check means there is a programmer error
// and some guarantee is not satisfied.
// XXX DEPRECATED
func PanicSanity(v interface{}) {
panic(Fmt("Panicked on a Sanity Check: %v", v))
}
@ -29,17 +30,20 @@ func PanicSanity(v interface{}) {
// A panic here means something has gone horribly wrong, in the form of data corruption or
// failure of the operating system. In a correct/healthy system, these should never fire.
// If they do, it's indicative of a much more serious problem.
// XXX DEPRECATED
func PanicCrisis(v interface{}) {
panic(Fmt("Panicked on a Crisis: %v", v))
}
// Indicates a failure of consensus. Someone was malicious or something has
// gone horribly wrong. These should really boot us into an "emergency-recover" mode
// XXX DEPRECATED
func PanicConsensus(v interface{}) {
panic(Fmt("Panicked on a Consensus Failure: %v", v))
}
// For those times when we're not sure if we should panic
// XXX DEPRECATED
func PanicQ(v interface{}) {
panic(Fmt("Panicked questionably: %v", v))
}

common/heap.go (+39, -17)

@ -1,28 +1,25 @@
package common
import (
"bytes"
"container/heap"
)
type Comparable interface {
Less(o interface{}) bool
}
//-----------------------------------------------------------------------------
/*
Example usage:
Example usage:
```
h := NewHeap()
h.Push(String("msg1"), 1)
h.Push(String("msg3"), 3)
h.Push(String("msg2"), 2)
h.Push("msg1", 1)
h.Push("msg3", 3)
h.Push("msg2", 2)
fmt.Println(h.Pop())
fmt.Println(h.Pop())
fmt.Println(h.Pop())
fmt.Println(h.Pop()) // msg1
fmt.Println(h.Pop()) // msg2
fmt.Println(h.Pop()) // msg3
```
*/
type Heap struct {
pq priorityQueue
}
@ -35,7 +32,15 @@ func (h *Heap) Len() int64 {
return int64(len(h.pq))
}
func (h *Heap) Push(value interface{}, priority Comparable) {
func (h *Heap) Push(value interface{}, priority int) {
heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)})
}
func (h *Heap) PushBytes(value interface{}, priority []byte) {
heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)})
}
func (h *Heap) PushComparable(value interface{}, priority Comparable) {
heap.Push(&h.pq, &pqItem{value: value, priority: priority})
}
@ -56,8 +61,6 @@ func (h *Heap) Pop() interface{} {
}
//-----------------------------------------------------------------------------
///////////////////////
// From: http://golang.org/pkg/container/heap/#example__priorityQueue
type pqItem struct {
@ -101,3 +104,22 @@ func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Compar
item.priority = priority
heap.Fix(pq, item.index)
}
//--------------------------------------------------------------------------------
// Comparable
type Comparable interface {
Less(o interface{}) bool
}
type cmpInt int
func (i cmpInt) Less(o interface{}) bool {
return int(i) < int(o.(cmpInt))
}
type cmpBytes []byte
func (bz cmpBytes) Less(o interface{}) bool {
return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0
}
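A small usage sketch (illustrative, not part of the diff) of the reworked Heap API, which now takes int priorities directly via Push — with PushBytes and PushComparable available for byte-slice and custom priorities — per the example comment above:
h := common.NewHeap()
h.Push("msg3", 3)
h.Push("msg1", 1)
h.Push("msg2", 2)
for h.Len() > 0 {
	fmt.Println(h.Pop()) // prints msg1, msg2, msg3 in priority order
}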

common/http.go (+0, -153)

@ -1,153 +0,0 @@
package common
import (
"encoding/json"
"io"
"net/http"
"gopkg.in/go-playground/validator.v9"
"github.com/pkg/errors"
)
type ErrorResponse struct {
Success bool `json:"success,omitempty"`
// Err is the error message if Success is false
Err string `json:"error,omitempty"`
// Code is set if Success is false
Code int `json:"code,omitempty"`
}
// ErrorWithCode makes an ErrorResponse with the
// provided err's Error() content, and status code.
// It panics if err is nil.
func ErrorWithCode(err error, code int) *ErrorResponse {
return &ErrorResponse{
Err: err.Error(),
Code: code,
}
}
// Ensure that ErrorResponse implements error
var _ error = (*ErrorResponse)(nil)
func (er *ErrorResponse) Error() string {
return er.Err
}
// Ensure that ErrorResponse implements httpCoder
var _ httpCoder = (*ErrorResponse)(nil)
func (er *ErrorResponse) HTTPCode() int {
return er.Code
}
var errNilBody = errors.Errorf("expecting a non-nil body")
// FparseJSON unmarshals into save, the body of the provided reader.
// Since it uses json.Unmarshal, save must be of a pointer type
// or compatible with json.Unmarshal.
func FparseJSON(r io.Reader, save interface{}) error {
if r == nil {
return errors.Wrap(errNilBody, "Reader")
}
dec := json.NewDecoder(r)
if err := dec.Decode(save); err != nil {
return errors.Wrap(err, "Decode/Unmarshal")
}
return nil
}
// ParseRequestJSON unmarshals into save, the body of the
// request. It closes the body of the request after parsing.
// Since it uses json.Unmarshal, save must be of a pointer type
// or compatible with json.Unmarshal.
func ParseRequestJSON(r *http.Request, save interface{}) error {
if r == nil || r.Body == nil {
return errNilBody
}
defer r.Body.Close()
return FparseJSON(r.Body, save)
}
// ParseRequestAndValidateJSON unmarshals into save, the body of the
// request and invokes a validator on the saved content. To ensure
// validation, make sure to set tags "validate" on your struct as
// per https://godoc.org/gopkg.in/go-playground/validator.v9.
// It closes the body of the request after parsing.
// Since it uses json.Unmarshal, save must be of a pointer type
// or compatible with json.Unmarshal.
func ParseRequestAndValidateJSON(r *http.Request, save interface{}) error {
if r == nil || r.Body == nil {
return errNilBody
}
defer r.Body.Close()
return FparseAndValidateJSON(r.Body, save)
}
// FparseAndValidateJSON like FparseJSON unmarshals into save,
// the body of the provided reader. However, it invokes the validator
// to check the set validators on your struct fields as per
// per https://godoc.org/gopkg.in/go-playground/validator.v9.
// Since it uses json.Unmarshal, save must be of a pointer type
// or compatible with json.Unmarshal.
func FparseAndValidateJSON(r io.Reader, save interface{}) error {
if err := FparseJSON(r, save); err != nil {
return err
}
return validate(save)
}
var theValidator = validator.New()
func validate(obj interface{}) error {
return errors.Wrap(theValidator.Struct(obj), "Validate")
}
// WriteSuccess JSON marshals the content provided, to an HTTP
// response, setting the provided status code and setting header
// "Content-Type" to "application/json".
func WriteSuccess(w http.ResponseWriter, data interface{}) {
WriteCode(w, data, 200)
}
// WriteCode JSON marshals content, to an HTTP response,
// setting the provided status code, and setting header
// "Content-Type" to "application/json". If JSON marshalling fails
// with an error, WriteCode instead writes out the error invoking
// WriteError.
func WriteCode(w http.ResponseWriter, out interface{}, code int) {
blob, err := json.MarshalIndent(out, "", " ")
if err != nil {
WriteError(w, err)
} else {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(blob)
}
}
type httpCoder interface {
HTTPCode() int
}
// WriteError is a convenience function to write out an
// error to an http.ResponseWriter, to send out an error
// that's structured as JSON i.e the form
// {"error": sss, "code": ddd}
// If err implements the interface HTTPCode() int,
// it will use that status code otherwise, it will
// set code to be http.StatusBadRequest
func WriteError(w http.ResponseWriter, err error) {
code := http.StatusBadRequest
if httpC, ok := err.(httpCoder); ok {
code = httpC.HTTPCode()
}
WriteCode(w, ErrorWithCode(err, code), code)
}

common/http_test.go (+0, -250)

@ -1,250 +0,0 @@
package common_test
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tmlibs/common"
)
func TestWriteSuccess(t *testing.T) {
w := httptest.NewRecorder()
common.WriteSuccess(w, "foo")
assert.Equal(t, w.Code, 200, "should get a 200")
}
var blankErrResponse = new(common.ErrorResponse)
func TestWriteError(t *testing.T) {
tests := [...]struct {
msg string
code int
}{
0: {
msg: "this is a message",
code: 419,
},
}
for i, tt := range tests {
w := httptest.NewRecorder()
msg := tt.msg
// First check without a defined code, should send back a 400
common.WriteError(w, errors.New(msg))
assert.Equal(t, w.Code, http.StatusBadRequest, "#%d: should get a 400", i)
blob, err := ioutil.ReadAll(w.Body)
if err != nil {
assert.Fail(t, "expecting a successful ioutil.ReadAll", "#%d", i)
continue
}
recv := new(common.ErrorResponse)
if err := json.Unmarshal(blob, recv); err != nil {
assert.Fail(t, "expecting a successful json.Unmarshal", "#%d", i)
continue
}
assert.Equal(t, reflect.DeepEqual(recv, blankErrResponse), false, "expecting a non-blank error response")
// Now test with an error that's .HTTPCode() int conforming
// Reset w
w = httptest.NewRecorder()
common.WriteError(w, common.ErrorWithCode(errors.New("foo"), tt.code))
assert.Equal(t, w.Code, tt.code, "case #%d", i)
}
}
type marshalFailer struct{}
var errFooFailed = errors.New("foo failed here")
func (mf *marshalFailer) MarshalJSON() ([]byte, error) {
return nil, errFooFailed
}
func TestWriteCode(t *testing.T) {
codes := [...]int{
0: http.StatusOK,
1: http.StatusBadRequest,
2: http.StatusUnauthorized,
3: http.StatusInternalServerError,
}
for i, code := range codes {
w := httptest.NewRecorder()
common.WriteCode(w, "foo", code)
assert.Equal(t, w.Code, code, "#%d", i)
// Then for the failed JSON marshaling
w = httptest.NewRecorder()
common.WriteCode(w, &marshalFailer{}, code)
wantCode := http.StatusBadRequest
assert.Equal(t, w.Code, wantCode, "#%d", i)
assert.True(t, strings.Contains(w.Body.String(), errFooFailed.Error()),
"#%d: expected %q in the error message", i, errFooFailed)
}
}
type saver struct {
Foo int `json:"foo" validate:"min=10"`
Bar string `json:"bar"`
}
type rcloser struct {
closeOnce sync.Once
body *bytes.Buffer
closeChan chan bool
}
var errAlreadyClosed = errors.New("already closed")
func (rc *rcloser) Close() error {
var err = errAlreadyClosed
rc.closeOnce.Do(func() {
err = nil
rc.closeChan <- true
close(rc.closeChan)
})
return err
}
func (rc *rcloser) Read(b []byte) (int, error) {
return rc.body.Read(b)
}
var _ io.ReadCloser = (*rcloser)(nil)
func makeReq(strBody string) (*http.Request, <-chan bool) {
closeChan := make(chan bool, 1)
buf := new(bytes.Buffer)
buf.Write([]byte(strBody))
req := &http.Request{
Header: make(http.Header),
Body: &rcloser{body: buf, closeChan: closeChan},
}
return req, closeChan
}
func TestParseRequestJSON(t *testing.T) {
tests := [...]struct {
body string
wantErr bool
useNil bool
}{
0: {wantErr: true, body: ``},
1: {body: `{}`},
2: {body: `{"foo": 2}`}, // Note that the validate tags don't matter here since we are just parsing
3: {body: `{"foo": "abcd"}`, wantErr: true},
4: {useNil: true, wantErr: true},
}
for i, tt := range tests {
req, closeChan := makeReq(tt.body)
if tt.useNil {
req.Body = nil
}
sav := new(saver)
err := common.ParseRequestJSON(req, sav)
if tt.wantErr {
assert.NotEqual(t, err, nil, "#%d: want non-nil error", i)
continue
}
assert.Equal(t, err, nil, "#%d: want nil error", i)
wasClosed := <-closeChan
assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i)
}
}
func TestFparseJSON(t *testing.T) {
r1 := strings.NewReader(`{"foo": 1}`)
sav := new(saver)
require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing")
r2 := strings.NewReader(`{"bar": "blockchain"}`)
require.Equal(t, common.FparseJSON(r2, sav), nil, "expecting successful parsing")
require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 1, Bar: "blockchain"}), true, "should have parsed both")
// Now with a nil body
require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report")
}
func TestFparseAndValidateJSON(t *testing.T) {
r1 := strings.NewReader(`{"foo": 1}`)
sav := new(saver)
require.NotEqual(t, common.FparseAndValidateJSON(r1, sav), nil, "expecting validation to fail")
r1 = strings.NewReader(`{"foo": 100}`)
require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing")
r2 := strings.NewReader(`{"bar": "blockchain"}`)
require.Equal(t, common.FparseAndValidateJSON(r2, sav), nil, "expecting successful parsing")
require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 100, Bar: "blockchain"}), true, "should have parsed both")
// Now with a nil body
require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report")
}
var blankSaver = new(saver)
func TestParseAndValidateRequestJSON(t *testing.T) {
tests := [...]struct {
body string
wantErr bool
useNil bool
}{
0: {wantErr: true, body: ``},
1: {body: `{}`, wantErr: true}, // Here it should fail since Foo doesn't meet the minimum value
2: {body: `{"foo": 2}`, wantErr: true}, // Here validation should fail
3: {body: `{"foo": "abcd"}`, wantErr: true},
4: {useNil: true, wantErr: true},
5: {body: `{"foo": 100}`}, // Must succeed
}
for i, tt := range tests {
req, closeChan := makeReq(tt.body)
if tt.useNil {
req.Body = nil
}
sav := new(saver)
err := common.ParseRequestAndValidateJSON(req, sav)
if tt.wantErr {
assert.NotEqual(t, err, nil, "#%d: want non-nil error", i)
continue
}
assert.Equal(t, err, nil, "#%d: want nil error", i)
assert.False(t, reflect.DeepEqual(blankSaver, sav), "#%d: expecting a set saver", i)
wasClosed := <-closeChan
assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i)
}
}
func TestErrorWithCode(t *testing.T) {
tests := [...]struct {
code int
err error
}{
0: {code: 500, err: errors.New("funky")},
1: {code: 406, err: errors.New("purist")},
}
for i, tt := range tests {
errRes := common.ErrorWithCode(tt.err, tt.code)
assert.Equal(t, errRes.Error(), tt.err.Error(), "#%d: expecting the error values to be equal", i)
assert.Equal(t, errRes.Code, tt.code, "expecting the same status code", i)
assert.Equal(t, errRes.HTTPCode(), tt.code, "expecting the same status code", i)
}
}

common/kvpair.go (+67, -0)

@ -0,0 +1,67 @@
package common
import (
"bytes"
"sort"
)
//----------------------------------------
// KVPair
/*
Defined in types.proto
type KVPair struct {
Key []byte
Value []byte
}
*/
type KVPairs []KVPair
// Sorting
func (kvs KVPairs) Len() int { return len(kvs) }
func (kvs KVPairs) Less(i, j int) bool {
switch bytes.Compare(kvs[i].Key, kvs[j].Key) {
case -1:
return true
case 0:
return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0
case 1:
return false
default:
panic("invalid comparison result")
}
}
func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
func (kvs KVPairs) Sort() { sort.Sort(kvs) }
//----------------------------------------
// KI64Pair
/*
Defined in types.proto
type KI64Pair struct {
Key []byte
Value int64
}
*/
type KI64Pairs []KI64Pair
// Sorting
func (kvs KI64Pairs) Len() int { return len(kvs) }
func (kvs KI64Pairs) Less(i, j int) bool {
switch bytes.Compare(kvs[i].Key, kvs[j].Key) {
case -1:
return true
case 0:
return kvs[i].Value < kvs[j].Value
case 1:
return false
default:
panic("invalid comparison result")
}
}
func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] }
func (kvs KI64Pairs) Sort() { sort.Sort(kvs) }
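For illustration (not part of the diff): sorting a KVPairs slice orders entries by key and, on equal keys, by value, as implemented in Less above.
kvs := common.KVPairs{
	{Key: []byte("b"), Value: []byte("2")},
	{Key: []byte("a"), Value: []byte("1")},
	{Key: []byte("a"), Value: []byte("0")},
}
kvs.Sort()
// Result order: (a,0), (a,1), (b,2)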

common/types.pb.go (+101, -0)

@ -0,0 +1,101 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: common/types.proto
/*
Package common is a generated protocol buffer package.
It is generated from these files:
common/types.proto
It has these top-level messages:
KVPair
KI64Pair
*/
//nolint: gas
package common
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Define these here for compatibility but use tmlibs/common.KVPair.
type KVPair struct {
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *KVPair) Reset() { *m = KVPair{} }
func (m *KVPair) String() string { return proto.CompactTextString(m) }
func (*KVPair) ProtoMessage() {}
func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
func (m *KVPair) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *KVPair) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
// Define these here for compatibility but use tmlibs/common.KI64Pair.
type KI64Pair struct {
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *KI64Pair) Reset() { *m = KI64Pair{} }
func (m *KI64Pair) String() string { return proto.CompactTextString(m) }
func (*KI64Pair) ProtoMessage() {}
func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
func (m *KI64Pair) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *KI64Pair) GetValue() int64 {
if m != nil {
return m.Value
}
return 0
}
func init() {
proto.RegisterType((*KVPair)(nil), "common.KVPair")
proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair")
}
func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) }
var fileDescriptorTypes = []byte{
// 137 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd,
0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7,
0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68,
0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e,
0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12,
0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99,
0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff,
0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00,
}

common/types.proto (+24, -0)

@ -0,0 +1,24 @@
syntax = "proto3";
package common;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
// NOTE: Try really hard not to use custom types,
// it's often complicated, broken, or not worth it.
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
//----------------------------------------
// Abstract types
// Define these here for compatibility but use tmlibs/common.KVPair.
message KVPair {
bytes key = 1;
bytes value = 2;
}
// Define these here for compatibility but use tmlibs/common.KI64Pair.
message KI64Pair {
bytes key = 1;
int64 value = 2;
}
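The generated Go types are ordinary gogo/protobuf messages, so they can be serialized with proto.Marshal; a minimal sketch (illustrative, not part of the diff):
import (
	"github.com/gogo/protobuf/proto"
	"github.com/tendermint/tmlibs/common"
)

func encodePair() ([]byte, error) {
	kv := &common.KVPair{Key: []byte("k"), Value: []byte("v")}
	return proto.Marshal(kv) // wire format of the KVPair message above
}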

db/backend_test.go (+151, -0)

@ -0,0 +1,151 @@
package db
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tmlibs/common"
)
func cleanupDBDir(dir, name string) {
os.RemoveAll(filepath.Join(dir, name) + ".db")
}
func testBackendGetSetDelete(t *testing.T, backend string) {
// Default
dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend))
defer dir.Close()
db := NewDB("testdb", backend, dirname)
// A nonexistent key should return nil, even if the key is empty
require.Nil(t, db.Get([]byte("")))
// A nonexistent key should return nil, even if the key is nil
require.Nil(t, db.Get(nil))
// A nonexistent key should return nil.
key := []byte("abc")
require.Nil(t, db.Get(key))
// Set empty value.
db.Set(key, []byte(""))
require.NotNil(t, db.Get(key))
require.Empty(t, db.Get(key))
// Set nil value.
db.Set(key, nil)
require.NotNil(t, db.Get(key))
require.Empty(t, db.Get(key))
// Delete.
db.Delete(key)
require.Nil(t, db.Get(key))
}
func TestBackendsGetSetDelete(t *testing.T) {
for dbType, _ := range backends {
testBackendGetSetDelete(t, dbType)
}
}
func withDB(t *testing.T, creator dbCreator, fn func(DB)) {
name := cmn.Fmt("test_%x", cmn.RandStr(12))
db, err := creator(name, "")
defer cleanupDBDir("", name)
assert.Nil(t, err)
fn(db)
db.Close()
}
func TestBackendsNilKeys(t *testing.T) {
// Test all backends.
for dbType, creator := range backends {
withDB(t, creator, func(db DB) {
t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) {
// Nil keys are treated as the empty key for most operations.
expect := func(key, value []byte) {
if len(key) == 0 { // nil or empty
assert.Equal(t, db.Get(nil), db.Get([]byte("")))
assert.Equal(t, db.Has(nil), db.Has([]byte("")))
}
assert.Equal(t, db.Get(key), value)
assert.Equal(t, db.Has(key), value != nil)
}
// Not set
expect(nil, nil)
// Set nil value
db.Set(nil, nil)
expect(nil, []byte(""))
// Set empty value
db.Set(nil, []byte(""))
expect(nil, []byte(""))
// Set nil, Delete nil
db.Set(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.Delete(nil)
expect(nil, nil)
// Set nil, Delete empty
db.Set(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.Delete([]byte(""))
expect(nil, nil)
// Set empty, Delete nil
db.Set([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.Delete(nil)
expect(nil, nil)
// Set empty, Delete empty
db.Set([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.Delete([]byte(""))
expect(nil, nil)
// SetSync nil, DeleteSync nil
db.SetSync(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync(nil)
expect(nil, nil)
// SetSync nil, DeleteSync empty
db.SetSync(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync([]byte(""))
expect(nil, nil)
// SetSync empty, DeleteSync nil
db.SetSync([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync(nil)
expect(nil, nil)
// SetSync empty, DeleteSync empty
db.SetSync([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync([]byte(""))
expect(nil, nil)
})
})
}
}
func TestGoLevelDBBackendStr(t *testing.T) {
name := cmn.Fmt("test_%x", cmn.RandStr(12))
db := NewDB(name, GoLevelDBBackendStr, "")
defer cleanupDBDir("", name)
_, ok := db.(*GoLevelDB)
assert.True(t, ok)
}
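To make the nil/empty-key semantics exercised above concrete, a short sketch against the in-memory backend (illustrative, not part of the diff; dbm is assumed to alias github.com/tendermint/tmlibs/db). Nil keys and values are normalized to empty bytes:
db := dbm.NewMemDB()
db.Set(nil, nil)                // stored as empty key -> empty (non-nil) value
fmt.Println(db.Has([]byte(""))) // true
fmt.Println(db.Get(nil) != nil) // true: empty, but not nil
db.Delete([]byte(""))           // deletes the same entry
fmt.Println(db.Has(nil))        // false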

db/c_level_db.go (+151, -24)

@ -3,12 +3,11 @@
package db
import (
"bytes"
"fmt"
"path"
"path/filepath"
"github.com/jmhodges/levigo"
. "github.com/tendermint/tmlibs/common"
)
func init() {
@ -19,6 +18,8 @@ func init() {
registerDBCreator(CLevelDBBackendStr, dbCreator, false)
}
var _ DB = (*CLevelDB)(nil)
type CLevelDB struct {
db *levigo.DB
ro *levigo.ReadOptions
@ -27,7 +28,7 @@ type CLevelDB struct {
}
func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
dbPath := path.Join(dir, name+".db")
dbPath := filepath.Join(dir, name+".db")
opts := levigo.NewOptions()
opts.SetCache(levigo.NewLRUCache(1 << 30))
@ -49,39 +50,56 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
return database, nil
}
// Implements DB.
func (db *CLevelDB) Get(key []byte) []byte {
key = nonNilBytes(key)
res, err := db.db.Get(db.ro, key)
if err != nil {
PanicCrisis(err)
panic(err)
}
return res
}
// Implements DB.
func (db *CLevelDB) Has(key []byte) bool {
return db.Get(key) != nil
}
// Implements DB.
func (db *CLevelDB) Set(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(db.wo, key, value)
if err != nil {
PanicCrisis(err)
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) SetSync(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(db.woSync, key, value)
if err != nil {
PanicCrisis(err)
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) Delete(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(db.wo, key)
if err != nil {
PanicCrisis(err)
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) DeleteSync(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(db.woSync, key)
if err != nil {
PanicCrisis(err)
panic(err)
}
}
@ -89,6 +107,7 @@ func (db *CLevelDB) DB() *levigo.DB {
return db.db
}
// Implements DB.
func (db *CLevelDB) Close() {
db.db.Close()
db.ro.Close()
@ -96,57 +115,165 @@ func (db *CLevelDB) Close() {
db.woSync.Close()
}
// Implements DB.
func (db *CLevelDB) Print() {
iter := db.db.NewIterator(db.ro)
defer iter.Close()
for iter.Seek(nil); iter.Valid(); iter.Next() {
key := iter.Key()
value := iter.Value()
itr := db.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
key := itr.Key()
value := itr.Value()
fmt.Printf("[%X]:\t[%X]\n", key, value)
}
}
// Implements DB.
func (db *CLevelDB) Stats() map[string]string {
// TODO: Find the available properties for the C LevelDB implementation
keys := []string{}
stats := make(map[string]string)
for _, key := range keys {
str, err := db.db.GetProperty(key)
if err == nil {
stats[key] = str
}
str := db.db.PropertyValue(key)
stats[key] = str
}
return stats
}
func (db *CLevelDB) Iterator() Iterator {
return db.db.NewIterator(nil, nil)
}
//----------------------------------------
// Batch
// Implements DB.
func (db *CLevelDB) NewBatch() Batch {
batch := levigo.NewWriteBatch()
return &cLevelDBBatch{db, batch}
}
//--------------------------------------------------------------------------------
type cLevelDBBatch struct {
db *CLevelDB
batch *levigo.WriteBatch
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Set(key, value []byte) {
mBatch.batch.Put(key, value)
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Delete(key []byte) {
mBatch.batch.Delete(key)
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Write() {
err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch)
if err != nil {
PanicCrisis(err)
panic(err)
}
}
//----------------------------------------
// Iterator
// NOTE This is almost identical to db/go_level_db.Iterator
// Before creating a third version, refactor.
func (db *CLevelDB) Iterator(start, end []byte) Iterator {
itr := db.db.NewIterator(db.ro)
return newCLevelDBIterator(itr, start, end, false)
}
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
panic("not implemented yet") // XXX
}
var _ Iterator = (*cLevelDBIterator)(nil)
type cLevelDBIterator struct {
source *levigo.Iterator
start, end []byte
isReverse bool
isInvalid bool
}
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
if isReverse {
panic("not implemented yet") // XXX
}
if start != nil {
source.Seek(start)
} else {
source.SeekToFirst()
}
return &cLevelDBIterator{
source: source,
start: start,
end: end,
isReverse: isReverse,
isInvalid: false,
}
}
func (itr cLevelDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
func (itr cLevelDBIterator) Valid() bool {
// Once invalid, forever invalid.
if itr.isInvalid {
return false
}
// Panic on DB error. No way to recover.
itr.assertNoError()
// If source is invalid, invalid.
if !itr.source.Valid() {
itr.isInvalid = true
return false
}
// If key is end or past it, invalid.
var end = itr.end
var key = itr.source.Key()
if end != nil && bytes.Compare(end, key) <= 0 {
itr.isInvalid = true
return false
}
// It's valid.
return true
}
func (itr cLevelDBIterator) Key() []byte {
itr.assertNoError()
itr.assertIsValid()
return itr.source.Key()
}
func (itr cLevelDBIterator) Value() []byte {
itr.assertNoError()
itr.assertIsValid()
return itr.source.Value()
}
func (itr cLevelDBIterator) Next() {
itr.assertNoError()
itr.assertIsValid()
itr.source.Next()
}
func (itr cLevelDBIterator) Close() {
itr.source.Close()
}
func (itr cLevelDBIterator) assertNoError() {
if err := itr.source.GetError(); err != nil {
panic(err)
}
}
func (itr cLevelDBIterator) assertIsValid() {
if !itr.Valid() {
panic("cLevelDBIterator is invalid")
}
}
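The new Iterator(start, end) API on both LevelDB backends yields keys in the half-open range [start, end), with nil meaning unbounded; a typical loop (illustrative, and the same pattern Print uses above):
itr := db.Iterator(nil, nil) // nil, nil iterates the whole key space
defer itr.Close()
for ; itr.Valid(); itr.Next() {
	fmt.Printf("%X -> %X\n", itr.Key(), itr.Value())
}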

db/c_level_db_test.go (+14, -4)

@ -7,7 +7,8 @@ import (
"fmt"
"testing"
. "github.com/tendermint/tmlibs/common"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tmlibs/common"
)
func BenchmarkRandomReadsWrites2(b *testing.B) {
@ -18,7 +19,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) {
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "")
db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "")
if err != nil {
b.Fatal(err.Error())
return
@ -30,7 +31,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) {
for i := 0; i < b.N; i++ {
// Write something
{
idx := (int64(RandInt()) % numItems)
idx := (int64(cmn.RandInt()) % numItems)
internal[idx] += 1
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
@ -43,7 +44,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) {
}
// Read something
{
idx := (int64(RandInt()) % numItems)
idx := (int64(cmn.RandInt()) % numItems)
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)
@ -84,3 +85,12 @@ func bytes2Int64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}
*/
func TestCLevelDBBackendStr(t *testing.T) {
name := cmn.Fmt("test_%x", cmn.RandStr(12))
db := NewDB(name, LevelDBBackendStr, "")
defer cleanupDBDir("", name)
_, ok := db.(*CLevelDB)
assert.True(t, ok)
}

db/common_test.go (+155, -0)

@ -0,0 +1,155 @@
package db
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tmlibs/common"
)
func checkValid(t *testing.T, itr Iterator, expected bool) {
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkNext(t *testing.T, itr Iterator, expected bool) {
itr.Next()
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkNextPanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
}
func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
k, v := itr.Key(), itr.Value()
assert.Exactly(t, key, k)
assert.Exactly(t, value, v)
}
func checkInvalid(t *testing.T, itr Iterator) {
checkValid(t, itr, false)
checkKeyPanics(t, itr)
checkValuePanics(t, itr)
checkNextPanics(t, itr)
}
func checkKeyPanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}
func checkValuePanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
}
func newTempDB(t *testing.T, backend string) (db DB) {
dir, dirname := cmn.Tempdir("test_go_iterator")
db = NewDB("testdb", backend, dirname)
dir.Close()
return db
}
func TestDBIteratorSingleKey(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
db.SetSync(bz("1"), bz("value_1"))
itr := db.Iterator(nil, nil)
checkValid(t, itr, true)
checkNext(t, itr, false)
checkValid(t, itr, false)
checkNextPanics(t, itr)
// Once invalid...
checkInvalid(t, itr)
})
}
}
func TestDBIteratorTwoKeys(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
db.SetSync(bz("1"), bz("value_1"))
db.SetSync(bz("2"), bz("value_1"))
{ // Fail by calling Next too much
itr := db.Iterator(nil, nil)
checkValid(t, itr, true)
checkNext(t, itr, true)
checkValid(t, itr, true)
checkNext(t, itr, false)
checkValid(t, itr, false)
checkNextPanics(t, itr)
// Once invalid...
checkInvalid(t, itr)
}
})
}
}
func TestDBIteratorMany(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
keys := make([][]byte, 100)
for i := 0; i < 100; i++ {
keys[i] = []byte{byte(i)}
}
value := []byte{5}
for _, k := range keys {
db.Set(k, value)
}
itr := db.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
assert.Equal(t, db.Get(itr.Key()), itr.Value())
}
})
}
}
func TestDBIteratorEmpty(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
itr := db.Iterator(nil, nil)
checkInvalid(t, itr)
})
}
}
func TestDBIteratorEmptyBeginAfter(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
itr := db.Iterator(bz("1"), nil)
checkInvalid(t, itr)
})
}
}
func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
db.SetSync(bz("1"), bz("value_1"))
itr := db.Iterator(bz("2"), nil)
checkInvalid(t, itr)
})
}
}

db/db.go (+6, -36)

@ -1,46 +1,16 @@
package db
import . "github.com/tendermint/tmlibs/common"
import "fmt"
type DB interface {
Get([]byte) []byte
Set([]byte, []byte)
SetSync([]byte, []byte)
Delete([]byte)
DeleteSync([]byte)
Close()
NewBatch() Batch
Iterator() Iterator
IteratorPrefix([]byte) Iterator
// For debugging
Print()
Stats() map[string]string
}
type Batch interface {
Set(key, value []byte)
Delete(key []byte)
Write()
}
type Iterator interface {
Next() bool
Key() []byte
Value() []byte
Release()
Error() error
}
//-----------------------------------------------------------------------------
//----------------------------------------
// Main entry
const (
LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb.
LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc
CLevelDBBackendStr = "cleveldb"
GoLevelDBBackendStr = "goleveldb"
MemDBBackendStr = "memdb"
FSDBBackendStr = "fsdb" // using the filesystem naively
)
type dbCreator func(name string, dir string) (DB, error)
@ -58,7 +28,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) {
func NewDB(name string, backend string, dir string) DB {
db, err := backends[backend](name, dir)
if err != nil {
PanicSanity(Fmt("Error initializing DB: %v", err))
panic(fmt.Sprintf("Error initializing DB: %v", err))
}
return db
}
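For reference (illustrative, not part of the diff; dbm aliases github.com/tendermint/tmlibs/db, and the name and dir are placeholders), opening a database through the backend registry now looks like this, with any of the backend strings above:
db := dbm.NewDB("blockstore", dbm.MemDBBackendStr, "/tmp/data") // dir is ignored by memdb
db.SetSync([]byte("height"), []byte("1"))
fmt.Printf("%s\n", db.Get([]byte("height"))) // 1
db.Close()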

db/fsdb.go (+254, -0)

@ -0,0 +1,254 @@
package db
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"sort"
"sync"
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
)
const (
keyPerm = os.FileMode(0600)
dirPerm = os.FileMode(0700)
)
func init() {
registerDBCreator(FSDBBackendStr, func(name string, dir string) (DB, error) {
dbPath := filepath.Join(dir, name+".db")
return NewFSDB(dbPath), nil
}, false)
}
var _ DB = (*FSDB)(nil)
// It's slow.
type FSDB struct {
mtx sync.Mutex
dir string
}
func NewFSDB(dir string) *FSDB {
err := os.MkdirAll(dir, dirPerm)
if err != nil {
panic(errors.Wrap(err, "Creating FSDB dir "+dir))
}
database := &FSDB{
dir: dir,
}
return database
}
func (db *FSDB) Get(key []byte) []byte {
db.mtx.Lock()
defer db.mtx.Unlock()
key = escapeKey(key)
path := db.nameToPath(key)
value, err := read(path)
if os.IsNotExist(err) {
return nil
} else if err != nil {
panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key))
}
return value
}
func (db *FSDB) Has(key []byte) bool {
db.mtx.Lock()
defer db.mtx.Unlock()
key = escapeKey(key)
path := db.nameToPath(key)
return cmn.FileExists(path)
}
func (db *FSDB) Set(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
func (db *FSDB) SetSync(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
// NOTE: Implements atomicSetDeleter.
func (db *FSDB) SetNoLock(key []byte, value []byte) {
key = escapeKey(key)
value = nonNilBytes(value)
path := db.nameToPath(key)
err := write(path, value)
if err != nil {
panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key))
}
}
func (db *FSDB) Delete(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
func (db *FSDB) DeleteSync(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
// NOTE: Implements atomicSetDeleter.
func (db *FSDB) DeleteNoLock(key []byte) {
key = escapeKey(key)
path := db.nameToPath(key)
err := remove(path)
if os.IsNotExist(err) {
return
} else if err != nil {
panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key))
}
}
func (db *FSDB) Close() {
// Nothing to do.
}
func (db *FSDB) Print() {
db.mtx.Lock()
defer db.mtx.Unlock()
panic("FSDB.Print not yet implemented")
}
func (db *FSDB) Stats() map[string]string {
db.mtx.Lock()
defer db.mtx.Unlock()
panic("FSDB.Stats not yet implemented")
}
func (db *FSDB) NewBatch() Batch {
db.mtx.Lock()
defer db.mtx.Unlock()
// Not sure we would ever want to try...
// It doesn't seem easy for general filesystems.
panic("FSDB.NewBatch not yet implemented")
}
func (db *FSDB) Mutex() *sync.Mutex {
return &(db.mtx)
}
func (db *FSDB) Iterator(start, end []byte) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck depending.
keys, err := list(db.dir, start, end)
if err != nil {
panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
}
sort.Strings(keys)
return newMemDBIterator(db, keys, start, end)
}
func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
panic("not implemented yet") // XXX
}
func (db *FSDB) nameToPath(name []byte) string {
n := url.PathEscape(string(name))
return filepath.Join(db.dir, n)
}
// Read some bytes from a file.
// CONTRACT: returns os errors directly without wrapping.
func read(path string) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
d, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return d, nil
}
// Write some bytes to a file.
// CONTRACT: returns os errors directly without wrapping.
func write(path string, d []byte) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write(d)
if err != nil {
return err
}
err = f.Sync()
return err
}
// Remove a file.
// CONTRACT: returns os errors directly without wrapping.
func remove(path string) error {
return os.Remove(path)
}
// List keys in a directory, stripping off escape sequences and dir portions.
// CONTRACT: returns os errors directly without wrapping.
func list(dirPath string, start, end []byte) ([]string, error) {
dir, err := os.Open(dirPath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(0)
if err != nil {
return nil, err
}
var keys []string
for _, name := range names {
n, err := url.PathUnescape(name)
if err != nil {
return nil, fmt.Errorf("Failed to unescape %s while listing", name)
}
key := unescapeKey([]byte(n))
if IsKeyInDomain(key, start, end, false) {
keys = append(keys, string(key))
}
}
return keys, nil
}
// To support empty or nil keys, since the file system doesn't allow empty
// filenames.
func escapeKey(key []byte) []byte {
return []byte("k_" + string(key))
}
func unescapeKey(escKey []byte) []byte {
if len(escKey) < 2 {
panic(fmt.Sprintf("Invalid esc key: %x", escKey))
}
if string(escKey[:2]) != "k_" {
panic(fmt.Sprintf("Invalid esc key: %x", escKey))
}
return escKey[2:]
}
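A small usage sketch of the new filesystem-backed DB (illustrative, not part of the diff; the directory path is a placeholder, and dbm aliases the db package). Each key becomes one file under the directory, prefixed with "k_" and URL path-escaped:
db := dbm.NewFSDB("/tmp/example.db")        // creates the directory if needed
db.Set([]byte("color"), []byte("blue"))     // written to a file named k_color
fmt.Printf("%s\n", db.Get([]byte("color"))) // blue
db.Close()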

db/go_level_db.go (+147, -44)

@ -1,14 +1,14 @@
package db
import (
"bytes"
"fmt"
"path"
"path/filepath"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
. "github.com/tendermint/tmlibs/common"
)
@ -21,54 +21,75 @@ func init() {
registerDBCreator(GoLevelDBBackendStr, dbCreator, false)
}
var _ DB = (*GoLevelDB)(nil)
type GoLevelDB struct {
db *leveldb.DB
}
func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
dbPath := path.Join(dir, name+".db")
dbPath := filepath.Join(dir, name+".db")
db, err := leveldb.OpenFile(dbPath, nil)
if err != nil {
return nil, err
}
database := &GoLevelDB{db: db}
database := &GoLevelDB{
db: db,
}
return database, nil
}
// Implements DB.
func (db *GoLevelDB) Get(key []byte) []byte {
key = nonNilBytes(key)
res, err := db.db.Get(key, nil)
if err != nil {
if err == errors.ErrNotFound {
return nil
} else {
PanicCrisis(err)
panic(err)
}
}
return res
}
// Implements DB.
func (db *GoLevelDB) Has(key []byte) bool {
return db.Get(key) != nil
}
// Implements DB.
func (db *GoLevelDB) Set(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(key, value, nil)
if err != nil {
PanicCrisis(err)
}
}
// Implements DB.
func (db *GoLevelDB) SetSync(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
if err != nil {
PanicCrisis(err)
}
}
// Implements DB.
func (db *GoLevelDB) Delete(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, nil)
if err != nil {
PanicCrisis(err)
}
}
// Implements DB.
func (db *GoLevelDB) DeleteSync(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
if err != nil {
PanicCrisis(err)
@ -79,10 +100,12 @@ func (db *GoLevelDB) DB() *leveldb.DB {
return db.db
}
// Implements DB.
func (db *GoLevelDB) Close() {
db.db.Close()
}
// Implements DB.
func (db *GoLevelDB) Print() {
str, _ := db.db.GetProperty("leveldb.stats")
fmt.Printf("%v\n", str)
@ -95,6 +118,7 @@ func (db *GoLevelDB) Print() {
}
}
// Implements DB.
func (db *GoLevelDB) Stats() map[string]string {
keys := []string{
"leveldb.num-files-at-level{n}",
@ -117,71 +141,150 @@ func (db *GoLevelDB) Stats() map[string]string {
return stats
}
type goLevelDBIterator struct {
source iterator.Iterator
//----------------------------------------
// Batch
// Implements DB.
func (db *GoLevelDB) NewBatch() Batch {
batch := new(leveldb.Batch)
return &goLevelDBBatch{db, batch}
}
// Key returns a copy of the current key.
func (it *goLevelDBIterator) Key() []byte {
key := it.source.Key()
k := make([]byte, len(key))
copy(k, key)
type goLevelDBBatch struct {
db *GoLevelDB
batch *leveldb.Batch
}
return k
// Implements Batch.
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
mBatch.batch.Put(key, value)
}
// Value returns a copy of the current value.
func (it *goLevelDBIterator) Value() []byte {
val := it.source.Value()
v := make([]byte, len(val))
copy(v, val)
// Implements Batch.
func (mBatch *goLevelDBBatch) Delete(key []byte) {
mBatch.batch.Delete(key)
}
return v
// Implements Batch.
func (mBatch *goLevelDBBatch) Write() {
err := mBatch.db.db.Write(mBatch.batch, nil)
if err != nil {
panic(err)
}
}
func (it *goLevelDBIterator) Error() error {
return it.source.Error()
//----------------------------------------
// Iterator
// NOTE This is almost identical to db/c_level_db.Iterator
// Before creating a third version, refactor.
// Implements DB.
func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
itr := db.db.NewIterator(nil, nil)
return newGoLevelDBIterator(itr, start, end, false)
}
func (it *goLevelDBIterator) Next() bool {
return it.source.Next()
// Implements DB.
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
panic("not implemented yet") // XXX
}
func (it *goLevelDBIterator) Release() {
it.source.Release()
type goLevelDBIterator struct {
source iterator.Iterator
start []byte
end []byte
isReverse bool
isInvalid bool
}
func (db *GoLevelDB) Iterator() Iterator {
return &goLevelDBIterator{db.db.NewIterator(nil, nil)}
var _ Iterator = (*goLevelDBIterator)(nil)
func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
if isReverse {
panic("not implemented yet") // XXX
}
source.Seek(start)
return &goLevelDBIterator{
source: source,
start: start,
end: end,
isReverse: isReverse,
isInvalid: false,
}
}
func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator {
return &goLevelDBIterator{db.db.NewIterator(util.BytesPrefix(prefix), nil)}
// Implements Iterator.
func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
func (db *GoLevelDB) NewBatch() Batch {
batch := new(leveldb.Batch)
return &goLevelDBBatch{db, batch}
// Implements Iterator.
func (itr *goLevelDBIterator) Valid() bool {
// Once invalid, forever invalid.
if itr.isInvalid {
return false
}
// Panic on DB error. No way to recover.
itr.assertNoError()
// If source is invalid, invalid.
if !itr.source.Valid() {
itr.isInvalid = true
return false
}
// If key is end or past it, invalid.
var end = itr.end
var key = itr.source.Key()
if end != nil && bytes.Compare(end, key) <= 0 {
itr.isInvalid = true
return false
}
// Valid
return true
}
//--------------------------------------------------------------------------------
// Implements Iterator.
func (itr *goLevelDBIterator) Key() []byte {
// Key returns a copy of the current key.
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
itr.assertNoError()
itr.assertIsValid()
return cp(itr.source.Key())
}
type goLevelDBBatch struct {
db *GoLevelDB
batch *leveldb.Batch
// Implements Iterator.
func (itr *goLevelDBIterator) Value() []byte {
// Value returns a copy of the current value.
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
itr.assertNoError()
itr.assertIsValid()
return cp(itr.source.Value())
}
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
mBatch.batch.Put(key, value)
// Implements Iterator.
func (itr *goLevelDBIterator) Next() {
itr.assertNoError()
itr.assertIsValid()
itr.source.Next()
}
func (mBatch *goLevelDBBatch) Delete(key []byte) {
mBatch.batch.Delete(key)
// Implements Iterator.
func (itr *goLevelDBIterator) Close() {
itr.source.Release()
}
func (mBatch *goLevelDBBatch) Write() {
err := mBatch.db.db.Write(mBatch.batch, nil)
if err != nil {
PanicCrisis(err)
func (itr *goLevelDBIterator) assertNoError() {
if err := itr.source.Error(); err != nil {
panic(err)
}
}
func (itr goLevelDBIterator) assertIsValid() {
if !itr.Valid() {
panic("goLevelDBIterator is invalid")
}
}

db/go_level_db_test.go (+4, -4)

@ -6,7 +6,7 @@ import (
"fmt"
"testing"
. "github.com/tendermint/tmlibs/common"
cmn "github.com/tendermint/tmlibs/common"
)
func BenchmarkRandomReadsWrites(b *testing.B) {
@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) {
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "")
db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "")
if err != nil {
b.Fatal(err.Error())
return
@ -29,7 +29,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) {
for i := 0; i < b.N; i++ {
// Write something
{
idx := (int64(RandInt()) % numItems)
idx := (int64(cmn.RandInt()) % numItems)
internal[idx] += 1
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
@ -42,7 +42,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) {
}
// Read something
{
idx := (int64(RandInt()) % numItems)
idx := (int64(cmn.RandInt()) % numItems)
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)


db/mem_batch.go (+50, -0)

@ -0,0 +1,50 @@
package db
import "sync"
type atomicSetDeleter interface {
Mutex() *sync.Mutex
SetNoLock(key, value []byte)
DeleteNoLock(key []byte)
}
type memBatch struct {
db atomicSetDeleter
ops []operation
}
type opType int
const (
opTypeSet opType = 1
opTypeDelete opType = 2
)
type operation struct {
opType
key []byte
value []byte
}
func (mBatch *memBatch) Set(key, value []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
}
func (mBatch *memBatch) Delete(key []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
}
func (mBatch *memBatch) Write() {
mtx := mBatch.db.Mutex()
mtx.Lock()
defer mtx.Unlock()
for _, op := range mBatch.ops {
switch op.opType {
case opTypeSet:
mBatch.db.SetNoLock(op.key, op.value)
case opTypeDelete:
mBatch.db.DeleteNoLock(op.key)
}
}
}
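A usage sketch of the Batch interface shared by the memory and LevelDB backends (illustrative, not part of the diff; db is any DB obtained as above). Operations are queued and then applied together when Write is called:
batch := db.NewBatch()
batch.Set([]byte("k1"), []byte("v1"))
batch.Set([]byte("k2"), []byte("v2"))
batch.Delete([]byte("old"))
batch.Write() // applies all queued ops against the underlying store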

db/mem_db.go (+138, -75)

@ -3,7 +3,6 @@ package db
import (
"fmt"
"sort"
"strings"
"sync"
)
@ -13,46 +12,87 @@ func init() {
}, false)
}
var _ DB = (*MemDB)(nil)
type MemDB struct {
mtx sync.Mutex
db map[string][]byte
}
func NewMemDB() *MemDB {
database := &MemDB{db: make(map[string][]byte)}
database := &MemDB{
db: make(map[string][]byte),
}
return database
}
// Implements DB.
func (db *MemDB) Get(key []byte) []byte {
db.mtx.Lock()
defer db.mtx.Unlock()
key = nonNilBytes(key)
return db.db[string(key)]
}
// Implements DB.
func (db *MemDB) Has(key []byte) bool {
db.mtx.Lock()
defer db.mtx.Unlock()
key = nonNilBytes(key)
_, ok := db.db[string(key)]
return ok
}
// Implements DB.
func (db *MemDB) Set(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.db[string(key)] = value
db.SetNoLock(key, value)
}
// Implements DB.
func (db *MemDB) SetSync(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
// Implements atomicSetDeleter.
func (db *MemDB) SetNoLock(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
db.db[string(key)] = value
}
// Implements DB.
func (db *MemDB) Delete(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
delete(db.db, string(key))
db.DeleteNoLock(key)
}
// Implements DB.
func (db *MemDB) DeleteSync(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
// Implements atomicSetDeleter.
func (db *MemDB) DeleteNoLock(key []byte) {
key = nonNilBytes(key)
delete(db.db, string(key))
}
// Implements DB.
func (db *MemDB) Close() {
// Close is a noop since for an in-memory
// database, we don't have a destination
@ -61,120 +101,143 @@ func (db *MemDB) Close() {
// See the discussion in https://github.com/tendermint/tmlibs/pull/56
}
// Implements DB.
func (db *MemDB) Print() {
db.mtx.Lock()
defer db.mtx.Unlock()
for key, value := range db.db {
fmt.Printf("[%X]:\t[%X]\n", []byte(key), value)
}
}
// Implements DB.
func (db *MemDB) Stats() map[string]string {
db.mtx.Lock()
defer db.mtx.Unlock()
stats := make(map[string]string)
stats["database.type"] = "memDB"
stats["database.size"] = fmt.Sprintf("%d", len(db.db))
return stats
}
type memDBIterator struct {
last int
keys []string
db *MemDB
}
//----------------------------------------
// Batch
func newMemDBIterator() *memDBIterator {
return &memDBIterator{}
}
func (it *memDBIterator) Next() bool {
if it.last >= len(it.keys)-1 {
return false
}
it.last++
return true
}
// Implements DB.
func (db *MemDB) NewBatch() Batch {
db.mtx.Lock()
defer db.mtx.Unlock()
func (it *memDBIterator) Key() []byte {
return []byte(it.keys[it.last])
return &memBatch{db, nil}
}
func (it *memDBIterator) Value() []byte {
return it.db.Get(it.Key())
func (db *MemDB) Mutex() *sync.Mutex {
return &(db.mtx)
}
func (it *memDBIterator) Release() {
it.db = nil
it.keys = nil
}
//----------------------------------------
// Iterator
func (it *memDBIterator) Error() error {
return nil
}
// Implements DB.
func (db *MemDB) Iterator(start, end []byte) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
func (db *MemDB) Iterator() Iterator {
return db.IteratorPrefix([]byte{})
keys := db.getSortedKeys(start, end, false)
return newMemDBIterator(db, keys, start, end)
}
func (db *MemDB) IteratorPrefix(prefix []byte) Iterator {
it := newMemDBIterator()
it.db = db
it.last = -1
// Implements DB.
func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
// unfortunately we need a copy of all of the keys
for key, _ := range db.db {
if strings.HasPrefix(key, string(prefix)) {
it.keys = append(it.keys, key)
}
}
// and we need to sort them
sort.Strings(it.keys)
return it
keys := db.getSortedKeys(end, start, true)
return newMemDBIterator(db, keys, start, end)
}
func (db *MemDB) NewBatch() Batch {
return &memDBBatch{db, nil}
// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck depending on the use case.
type memDBIterator struct {
db DB
cur int
keys []string
start []byte
end []byte
}
var _ Iterator = (*memDBIterator)(nil)
// Keys is expected to be in reverse order for reverse iterators.
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
return &memDBIterator{
db: db,
cur: 0,
keys: keys,
start: start,
end: end,
}
}
//--------------------------------------------------------------------------------
// Implements Iterator.
func (itr *memDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
type memDBBatch struct {
db *MemDB
ops []operation
// Implements Iterator.
func (itr *memDBIterator) Valid() bool {
return 0 <= itr.cur && itr.cur < len(itr.keys)
}
type opType int
// Implements Iterator.
func (itr *memDBIterator) Next() {
itr.assertIsValid()
itr.cur++
}
const (
opTypeSet = 1
opTypeDelete = 2
)
// Implements Iterator.
func (itr *memDBIterator) Key() []byte {
itr.assertIsValid()
return []byte(itr.keys[itr.cur])
}
type operation struct {
opType
key []byte
value []byte
// Implements Iterator.
func (itr *memDBIterator) Value() []byte {
itr.assertIsValid()
key := []byte(itr.keys[itr.cur])
return itr.db.Get(key)
}
func (mBatch *memDBBatch) Set(key, value []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
// Implements Iterator.
func (itr *memDBIterator) Close() {
itr.keys = nil
itr.db = nil
}
func (mBatch *memDBBatch) Delete(key []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
func (itr *memDBIterator) assertIsValid() {
if !itr.Valid() {
panic("memDBIterator is invalid")
}
}
func (mBatch *memDBBatch) Write() {
mBatch.db.mtx.Lock()
defer mBatch.db.mtx.Unlock()
//----------------------------------------
// Misc.
for _, op := range mBatch.ops {
if op.opType == opTypeSet {
mBatch.db.db[string(op.key)] = op.value
} else if op.opType == opTypeDelete {
delete(mBatch.db.db, string(op.key))
func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string {
keys := []string{}
for key, _ := range db.db {
if IsKeyInDomain([]byte(key), start, end, false) {
keys = append(keys, key)
}
}
sort.Strings(keys)
if reverse {
nkeys := len(keys)
for i := 0; i < nkeys/2; i++ {
keys[i], keys[nkeys-i-1] = keys[nkeys-i-1], keys[i] // swap to reverse in place
}
}
return keys
}
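getSortedKeys snapshots and sorts the matching keys up front; the iterator then walks that key list in order while reading values back from the map. A minimal sketch (outside the diff) of forward iteration over the half-open domain [start, end), assuming the github.com/tendermint/tmlibs/db import path:
// Sketch only: ascending iteration over the half-open domain [start, end).
package main
import (
	"fmt"
	dbm "github.com/tendermint/tmlibs/db"
)
func main() {
	db := dbm.NewMemDB()
	for _, k := range []string{"a", "b", "c", "d"} {
		db.Set([]byte(k), []byte("v_"+k))
	}
	itr := db.Iterator([]byte("b"), []byte("d")) // yields "b" then "c"; "d" is excluded
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%s=%s ", itr.Key(), itr.Value())
	}
	fmt.Println()
}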

+ 0
- 48
db/mem_db_test.go View File

@ -1,48 +0,0 @@
package db
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMemDbIterator(t *testing.T) {
db := NewMemDB()
keys := make([][]byte, 100)
for i := 0; i < 100; i++ {
keys[i] = []byte{byte(i)}
}
value := []byte{5}
for _, k := range keys {
db.Set(k, value)
}
iter := db.Iterator()
i := 0
for iter.Next() {
assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key")
i += 1
}
assert.Equal(t, i, len(db.db), "iterator didnt cover whole db")
}
func TestMemDBClose(t *testing.T) {
db := NewMemDB()
copyDB := func(orig map[string][]byte) map[string][]byte {
copy := make(map[string][]byte)
for k, v := range orig {
copy[k] = v
}
return copy
}
k, v := []byte("foo"), []byte("bar")
db.Set(k, v)
require.Equal(t, db.Get(k), v, "expecting a successful get")
copyBefore := copyDB(db.db)
db.Close()
require.Equal(t, db.Get(k), v, "Close is a noop, expecting a successful get")
copyAfter := copyDB(db.db)
require.Equal(t, copyBefore, copyAfter, "Close is a noop and shouldn't modify any internal data")
}

+ 133
- 0
db/types.go View File

@ -0,0 +1,133 @@
package db
type DB interface {
// Get returns nil iff key doesn't exist.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Get([]byte) []byte
// Has checks if a key exists.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Has(key []byte) bool
// Set sets the key.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Set([]byte, []byte)
SetSync([]byte, []byte)
// Delete deletes the key.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key readonly []byte
Delete([]byte)
DeleteSync([]byte)
// Iterate over a domain of keys in ascending order. End is exclusive.
// Start must be less than end, or the Iterator is invalid.
// A nil start is interpreted as an empty byteslice.
// If end is nil, iterates up to the last item (inclusive).
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
// CONTRACT: start, end readonly []byte
Iterator(start, end []byte) Iterator
// Iterate over a domain of keys in descending order. End is exclusive.
// Start must be greater than end, or the Iterator is invalid.
// If start is nil, iterates from the last/greatest item (inclusive).
// If end is nil, iterates up to the first/least item (inclusive).
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
// CONTRACT: start, end readonly []byte
ReverseIterator(start, end []byte) Iterator
// Closes the connection.
Close()
// Creates a batch for atomic updates.
NewBatch() Batch
// For debugging
Print()
// Stats returns a map of property values for all keys and the size of the cache.
Stats() map[string]string
}
//----------------------------------------
// Batch
type Batch interface {
SetDeleter
Write()
}
type SetDeleter interface {
Set(key, value []byte) // CONTRACT: key, value readonly []byte
Delete(key []byte) // CONTRACT: key readonly []byte
}
//----------------------------------------
// Iterator
/*
Usage:
var itr Iterator = ...
defer itr.Close()
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(); itr.Value()
// ...
}
*/
type Iterator interface {
// The start & end (exclusive) limits to iterate over.
// If end < start, then the Iterator goes in reverse order.
//
// A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate
// over anything with the prefix []byte{12, 13}.
//
// The smallest key is the empty byte array []byte{} - see BeginningKey().
// The largest key is the nil byte array []byte(nil) - see EndingKey().
// CONTRACT: start, end readonly []byte
Domain() (start []byte, end []byte)
// Valid returns whether the current position is valid.
// Once invalid, an Iterator is forever invalid.
Valid() bool
// Next moves the iterator to the next sequential key in the database, as
// defined by order of iteration.
//
// If Valid returns false, this method will panic.
Next()
// Key returns the key of the cursor.
// If Valid returns false, this method will panic.
// CONTRACT: key readonly []byte
Key() (key []byte)
// Value returns the value of the cursor.
// If Valid returns false, this method will panic.
// CONTRACT: value readonly []byte
Value() (value []byte)
// Close releases the Iterator.
Close()
}
// For testing convenience.
func bz(s string) []byte {
return []byte(s)
}
// We defensively turn nil keys or values into []byte{} for
// most operations.
func nonNilBytes(bz []byte) []byte {
if bz == nil {
return []byte{}
} else {
return bz
}
}
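The contracts above are easy to check against the in-memory backend: nil keys and values are normalized to empty byteslices, Get returns nil iff the key is absent, and Has reports plain existence. A minimal sketch (outside the diff), assuming the github.com/tendermint/tmlibs/db import path:
// Sketch only: nil keys are normalized, and Get returns nil iff the key is absent.
package main
import (
	"bytes"
	"fmt"
	dbm "github.com/tendermint/tmlibs/db"
)
func main() {
	var db dbm.DB = dbm.NewMemDB()
	defer db.Close()
	db.Set(nil, []byte("v")) // a nil key is stored under []byte{}
	fmt.Println(db.Has([]byte{}))                      // true
	fmt.Println(bytes.Equal(db.Get(nil), []byte("v"))) // true: reads back through the nil key
	fmt.Println(db.Get([]byte("missing")) == nil)      // true: nil iff absent
}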

+ 60
- 0
db/util.go View File

@ -0,0 +1,60 @@
package db
import (
"bytes"
)
func IteratePrefix(db DB, prefix []byte) Iterator {
var start, end []byte
if len(prefix) == 0 {
start = nil
end = nil
} else {
start = cp(prefix)
end = cpIncr(prefix)
}
return db.Iterator(start, end)
}
//----------------------------------------
func cp(bz []byte) (ret []byte) {
ret = make([]byte, len(bz))
copy(ret, bz)
return ret
}
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) (ret []byte) {
ret = cp(bz)
for i := len(bz) - 1; i >= 0; i-- {
if ret[i] < byte(0xFF) {
ret[i] += 1
return
} else {
ret[i] = byte(0x00)
}
}
return nil
}
// See DB interface documentation for more information.
func IsKeyInDomain(key, start, end []byte, isReverse bool) bool {
if !isReverse {
if bytes.Compare(key, start) < 0 {
return false
}
if end != nil && bytes.Compare(end, key) <= 0 {
return false
}
return true
} else {
if start != nil && bytes.Compare(start, key) < 0 {
return false
}
if end != nil && bytes.Compare(key, end) <= 0 {
return false
}
return true
}
}
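IteratePrefix turns a prefix into the half-open domain [prefix, cpIncr(prefix)): cpIncr copies the prefix and increments its last byte below 0xFF, so the prefix "a/" (0x61 0x2f) gets the exclusive end key "a0" (0x61 0x30). A usage sketch (outside the diff), assuming the github.com/tendermint/tmlibs/db import path:
// Sketch only: IteratePrefix("a/") iterates the domain ["a/", "a0").
package main
import (
	"fmt"
	dbm "github.com/tendermint/tmlibs/db"
)
func main() {
	db := dbm.NewMemDB()
	db.Set([]byte("a/1"), []byte("v1"))
	db.Set([]byte("a/2"), []byte("v2"))
	db.Set([]byte("a0"), []byte("outside"))  // equals cpIncr("a/"), so it is excluded
	db.Set([]byte("b/1"), []byte("outside")) // past the prefix domain
	itr := dbm.IteratePrefix(db, []byte("a/"))
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%s ", itr.Key()) // prints: a/1 a/2
	}
	fmt.Println()
}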

+ 93
- 0
db/util_test.go View File

@ -0,0 +1,93 @@
package db
import (
"fmt"
"testing"
)
// Empty iterator for empty db.
func TestPrefixIteratorNoMatchNil(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
itr := IteratePrefix(db, []byte("2"))
checkInvalid(t, itr)
})
}
}
// Empty iterator for db populated after iterator created.
func TestPrefixIteratorNoMatch1(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
itr := IteratePrefix(db, []byte("2"))
db.SetSync(bz("1"), bz("value_1"))
checkInvalid(t, itr)
})
}
}
// Empty iterator for prefix starting after db entry.
func TestPrefixIteratorNoMatch2(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
db.SetSync(bz("3"), bz("value_3"))
itr := IteratePrefix(db, []byte("4"))
checkInvalid(t, itr)
})
}
}
// Iterator with single val for db with single val, starting from that val.
func TestPrefixIteratorMatch1(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
db.SetSync(bz("2"), bz("value_2"))
itr := IteratePrefix(db, bz("2"))
checkValid(t, itr, true)
checkItem(t, itr, bz("2"), bz("value_2"))
checkNext(t, itr, false)
// Once invalid...
checkInvalid(t, itr)
})
}
}
// Iterator with prefix iterates over everything with same prefix.
func TestPrefixIteratorMatches1N(t *testing.T) {
for backend, _ := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db := newTempDB(t, backend)
// prefixed
db.SetSync(bz("a/1"), bz("value_1"))
db.SetSync(bz("a/3"), bz("value_3"))
// not
db.SetSync(bz("b/3"), bz("value_3"))
db.SetSync(bz("a-3"), bz("value_3"))
db.SetSync(bz("a.3"), bz("value_3"))
db.SetSync(bz("abcdefg"), bz("value_3"))
itr := IteratePrefix(db, bz("a/"))
checkValid(t, itr, true)
checkItem(t, itr, bz("a/1"), bz("value_1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("a/3"), bz("value_3"))
// Bad!
checkNext(t, itr, false)
// Once invalid...
checkInvalid(t, itr)
})
}
}

+ 26
- 26
glide.lock View File

@ -1,24 +1,24 @@
hash: 1f3d3426e823e4a8e6d4473615fcc86c767bbea6da9114ea1e7e0a9f0ccfa129
updated: 2017-12-05T23:47:13.202024407Z
hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2
updated: 2017-12-28T18:27:21.247160207-08:00
imports:
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/go-kit/kit
version: 53f10af5d5c7375d4655a3d6852457ed17ab5cc7
version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c
subpackages:
- log
- log/level
- log/term
- name: github.com/go-logfmt/logfmt
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-playground/locales
version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6
subpackages:
- currency
- name: github.com/go-playground/universal-translator
version: 71201497bace774495daed26a3874fd339e0b538
- name: github.com/go-stack/stack
version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc
version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf
- name: github.com/gogo/protobuf
version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
subpackages:
- gogoproto
- proto
- protoc-gen-gogo/descriptor
- name: github.com/golang/snappy
version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/hashicorp/hcl
@ -39,33 +39,35 @@ imports:
- name: github.com/kr/logfmt
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
- name: github.com/magiconair/properties
version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934
version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a
- name: github.com/mattn/go-colorable
version: 6fcc0c1fd9b620311d821b106a400b35dc95c497
- name: github.com/mattn/go-isatty
version: 6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c
version: a5cdd64afdee435007ee3e9f6ed4684af949d568
- name: github.com/mitchellh/mapstructure
version: 06020f85339e21b2478f756a78e295255ffa4d6a
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd
version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a
- name: github.com/pkg/errors
version: f15c970de5b76fac0b59abb32d62c17cc7bed265
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/spf13/afero
version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536
version: 5660eeed305fe5f69c8fc6cf899132a459a97064
subpackages:
- mem
- name: github.com/spf13/cast
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
- name: github.com/spf13/cobra
version: de2d9c4eca8f3c1de17d48b096b6504e0296f003
version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b
- name: github.com/spf13/jwalterweatherman
version: 12bd96e66386c1960ab0f74ced1362f66f552f7b
- name: github.com/spf13/pflag
version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea
version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f
- name: github.com/spf13/viper
version: 4dddf7c62e16bce5807744018f5b753bfe21bbd2
version: 8ef37cbca71638bf32f3d5e194117d4cb46da163
- name: github.com/syndtr/goleveldb
version: adf24ef3f94bd13ec4163060b21a5678f22b429b
version: b89cc31ef7977104127d34c1bd31ebd1a9db2199
subpackages:
- leveldb
- leveldb/cache
@ -80,7 +82,7 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/go-wire
version: 2baffcb6b690057568bc90ef1d457efb150b979a
version: 27be46e25124ddf775e23317a83647ce62a93f6b
subpackages:
- data
- data/base58
@ -89,22 +91,20 @@ imports:
subpackages:
- term
- name: golang.org/x/crypto
version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122
version: edd5e9b0879d13ee6970a50153d85b8fec9f7686
subpackages:
- ripemd160
- name: golang.org/x/sys
version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840
version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86
subpackages:
- unix
- name: golang.org/x/text
version: 57961680700a5336d15015c8c50686ca5ba362a4
version: c01e4764d870b77f8abe5096ee19ad20d80e8075
subpackages:
- transform
- unicode/norm
- name: gopkg.in/go-playground/validator.v9
version: 61caf9d3038e1af346dbf5c2e16f6678e1548364
- name: gopkg.in/yaml.v2
version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5
version: eb3733d160e74a9c7e442f435eb3bea458e1d19f
testImports:
- name: github.com/davecgh/go-spew
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9


+ 0
- 1
glide.yaml View File

@ -23,7 +23,6 @@ import:
- package: golang.org/x/crypto
subpackages:
- ripemd160
- package: gopkg.in/go-playground/validator.v9
testImport:
- package: github.com/stretchr/testify
subpackages:


+ 86
- 0
merkle/simple_map.go View File

@ -0,0 +1,86 @@
package merkle
import (
"github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"golang.org/x/crypto/ripemd160"
)
type SimpleMap struct {
kvs cmn.KVPairs
sorted bool
}
func NewSimpleMap() *SimpleMap {
return &SimpleMap{
kvs: nil,
sorted: false,
}
}
func (sm *SimpleMap) Set(key string, value interface{}) {
sm.sorted = false
// Is value Hashable?
var vBytes []byte
if hashable, ok := value.(Hashable); ok {
vBytes = hashable.Hash()
} else {
vBytes = wire.BinaryBytes(value)
}
sm.kvs = append(sm.kvs, cmn.KVPair{
Key: []byte(key),
Value: vBytes,
})
}
// Merkle root hash of items sorted by key.
// NOTE: Behavior is undefined when keys are duplicated.
func (sm *SimpleMap) Hash() []byte {
sm.Sort()
return hashKVPairs(sm.kvs)
}
func (sm *SimpleMap) Sort() {
if sm.sorted {
return
}
sm.kvs.Sort()
sm.sorted = true
}
// Returns a copy of sorted KVPairs.
// CONTRACT: The returned slice must not be mutated.
func (sm *SimpleMap) KVPairs() cmn.KVPairs {
sm.Sort()
kvs := make(cmn.KVPairs, len(sm.kvs))
copy(kvs, sm.kvs)
return kvs
}
//----------------------------------------
// A local extension to KVPair that can be hashed.
type kvPair cmn.KVPair
func (kv kvPair) Hash() []byte {
hasher, n, err := ripemd160.New(), new(int), new(error)
wire.WriteByteSlice(kv.Key, hasher, n, err)
if *err != nil {
panic(*err)
}
wire.WriteByteSlice(kv.Value, hasher, n, err)
if *err != nil {
panic(*err)
}
return hasher.Sum(nil)
}
func hashKVPairs(kvs cmn.KVPairs) []byte {
kvsH := make([]Hashable, 0, len(kvs))
for _, kvp := range kvs {
kvsH = append(kvsH, kvPair(kvp))
}
return SimpleHashFromHashables(kvsH)
}
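SimpleMap hashes a value either through its own Hash() if it implements Hashable, or through go-wire binary encoding otherwise, and the root is computed over key-sorted pairs, so insertion order does not affect the result (as the tests below confirm). A short sketch (outside the diff), assuming the github.com/tendermint/tmlibs/merkle import path:
// Sketch only: the SimpleMap root hash is independent of insertion order.
package main
import (
	"fmt"
	"github.com/tendermint/tmlibs/merkle"
)
func main() {
	m1 := merkle.NewSimpleMap()
	m1.Set("key1", "value1")
	m1.Set("key2", "value2")
	m2 := merkle.NewSimpleMap()
	m2.Set("key2", "value2") // reversed insertion order
	m2.Set("key1", "value1")
	fmt.Printf("%x\n%x\n", m1.Hash(), m2.Hash()) // identical root hashes
}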

+ 47
- 0
merkle/simple_map_test.go View File

@ -0,0 +1,47 @@
package merkle
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSimpleMap(t *testing.T) {
{
db := NewSimpleMap()
db.Set("key1", "value1")
assert.Equal(t, "3bb53f017d2f5b4f144692aa829a5c245ac2b123", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
{
db := NewSimpleMap()
db.Set("key1", "value2")
assert.Equal(t, "14a68db29e3f930ffaafeff5e07c17a439384f39", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
{
db := NewSimpleMap()
db.Set("key1", "value1")
db.Set("key2", "value2")
assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
{
db := NewSimpleMap()
db.Set("key2", "value2") // NOTE: out of order
db.Set("key1", "value1")
assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
{
db := NewSimpleMap()
db.Set("key1", "value1")
db.Set("key2", "value2")
db.Set("key3", "value3")
assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
{
db := NewSimpleMap()
db.Set("key2", "value2") // NOTE: out of order
db.Set("key1", "value1")
db.Set("key3", "value3")
assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match")
}
}

+ 131
- 0
merkle/simple_proof.go View File

@ -0,0 +1,131 @@
package merkle
import (
"bytes"
"fmt"
)
type SimpleProof struct {
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
}
// proofs[0] is the proof for items[0].
func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) {
trails, rootSPN := trailsFromHashables(items)
rootHash = rootSPN.Hash
proofs = make([]*SimpleProof, len(items))
for i, trail := range trails {
proofs[i] = &SimpleProof{
Aunts: trail.FlattenAunts(),
}
}
return
}
// Verify that leafHash is a leaf hash of the simple-merkle-tree
// which hashes to rootHash.
func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool {
computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts)
return computedHash != nil && bytes.Equal(computedHash, rootHash)
}
func (sp *SimpleProof) String() string {
return sp.StringIndented("")
}
func (sp *SimpleProof) StringIndented(indent string) string {
return fmt.Sprintf(`SimpleProof{
%s Aunts: %X
%s}`,
indent, sp.Aunts,
indent)
}
// Use the leafHash and innerHashes to get the root merkle hash.
// If the length of the innerHashes slice isn't exactly correct, the result is nil.
func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte {
// Recursive impl.
if index >= total {
return nil
}
switch total {
case 0:
panic("Cannot call computeHashFromAunts() with 0 total")
case 1:
if len(innerHashes) != 0 {
return nil
}
return leafHash
default:
if len(innerHashes) == 0 {
return nil
}
numLeft := (total + 1) / 2
if index < numLeft {
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
if leftHash == nil {
return nil
}
return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
} else {
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
if rightHash == nil {
return nil
}
return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
}
}
}
// Helper structure to construct merkle proof.
// The node and the tree are thrown away afterwards.
// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
type SimpleProofNode struct {
Hash []byte
Parent *SimpleProofNode
Left *SimpleProofNode // Left sibling (only one of Left,Right is set)
Right *SimpleProofNode // Right sibling (only one of Left,Right is set)
}
// Starting from a leaf SimpleProofNode, FlattenAunts() will return
// the inner hashes for the item corresponding to the leaf.
func (spn *SimpleProofNode) FlattenAunts() [][]byte {
// Nonrecursive impl.
innerHashes := [][]byte{}
for spn != nil {
if spn.Left != nil {
innerHashes = append(innerHashes, spn.Left.Hash)
} else if spn.Right != nil {
innerHashes = append(innerHashes, spn.Right.Hash)
} else {
break
}
spn = spn.Parent
}
return innerHashes
}
// trails[0].Hash is the leaf hash for items[0].
// trails[i].Parent.Parent....Parent == root for all i.
func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) {
// Recursive impl.
switch len(items) {
case 0:
return nil, nil
case 1:
trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil}
return []*SimpleProofNode{trail}, trail
default:
lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2])
rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:])
rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
root := &SimpleProofNode{rootHash, nil, nil, nil}
leftRoot.Parent = root
leftRoot.Right = rightRoot
rightRoot.Parent = root
rightRoot.Left = leftRoot
return append(lefts, rights...), root
}
}
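End to end, a caller builds the tree once with SimpleProofsFromHashables, hands each item its SimpleProof, and later re-verifies a leaf hash against the root with Verify(index, total, leafHash, rootHash). A minimal sketch (outside the diff), assuming the github.com/tendermint/tmlibs/merkle import path and the package's single-method Hashable interface (Hash() []byte); the SHA-256 leaf hashing here is only for illustration:
// Sketch only: build proofs for a list of Hashable items and verify one leaf.
package main
import (
	"crypto/sha256"
	"fmt"
	"github.com/tendermint/tmlibs/merkle"
)
// item is a toy Hashable; any fixed-length hash works for the leaf here.
type item []byte
func (i item) Hash() []byte {
	h := sha256.Sum256(i)
	return h[:]
}
func main() {
	items := []merkle.Hashable{item("a"), item("b"), item("c"), item("d")}
	rootHash, proofs := merkle.SimpleProofsFromHashables(items)
	leafHash := items[2].Hash()
	fmt.Println(proofs[2].Verify(2, len(items), leafHash, rootHash)) // true
}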

+ 3
- 184
merkle/simple_tree.go View File

@ -25,10 +25,6 @@ For larger datasets, use IAVLTree.
package merkle
import (
"bytes"
"fmt"
"sort"
"golang.org/x/crypto/ripemd160"
"github.com/tendermint/go-wire"
@ -92,186 +88,9 @@ func SimpleHashFromHashables(items []Hashable) []byte {
// Convenience for SimpleHashFromHashes.
func SimpleHashFromMap(m map[string]interface{}) []byte {
kpPairsH := MakeSortedKVPairs(m)
return SimpleHashFromHashables(kpPairsH)
}
//--------------------------------------------------------------------------------
/* Convenience struct for key-value pairs.
A list of KVPairs is hashed via `SimpleHashFromHashables`.
NOTE: Each `Value` is encoded for hashing without extra type information,
so the user is presumed to be aware of the Value types.
*/
type KVPair struct {
Key string
Value interface{}
}
func (kv KVPair) Hash() []byte {
hasher, n, err := ripemd160.New(), new(int), new(error)
wire.WriteString(kv.Key, hasher, n, err)
if kvH, ok := kv.Value.(Hashable); ok {
wire.WriteByteSlice(kvH.Hash(), hasher, n, err)
} else {
wire.WriteBinary(kv.Value, hasher, n, err)
}
if *err != nil {
PanicSanity(*err)
}
return hasher.Sum(nil)
}
type KVPairs []KVPair
func (kvps KVPairs) Len() int { return len(kvps) }
func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key }
func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] }
func (kvps KVPairs) Sort() { sort.Sort(kvps) }
func MakeSortedKVPairs(m map[string]interface{}) []Hashable {
kvPairs := []KVPair{}
sm := NewSimpleMap()
for k, v := range m {
kvPairs = append(kvPairs, KVPair{k, v})
}
KVPairs(kvPairs).Sort()
kvPairsH := []Hashable{}
for _, kvp := range kvPairs {
kvPairsH = append(kvPairsH, kvp)
}
return kvPairsH
}
//--------------------------------------------------------------------------------
type SimpleProof struct {
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
}
// proofs[0] is the proof for items[0].
func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) {
trails, rootSPN := trailsFromHashables(items)
rootHash = rootSPN.Hash
proofs = make([]*SimpleProof, len(items))
for i, trail := range trails {
proofs[i] = &SimpleProof{
Aunts: trail.FlattenAunts(),
}
}
return
}
// Verify that leafHash is a leaf hash of the simple-merkle-tree
// which hashes to rootHash.
func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool {
computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts)
if computedHash == nil {
return false
}
if !bytes.Equal(computedHash, rootHash) {
return false
}
return true
}
func (sp *SimpleProof) String() string {
return sp.StringIndented("")
}
func (sp *SimpleProof) StringIndented(indent string) string {
return fmt.Sprintf(`SimpleProof{
%s Aunts: %X
%s}`,
indent, sp.Aunts,
indent)
}
// Use the leafHash and innerHashes to get the root merkle hash.
// If the length of the innerHashes slice isn't exactly correct, the result is nil.
func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte {
// Recursive impl.
if index >= total {
return nil
}
switch total {
case 0:
PanicSanity("Cannot call computeHashFromAunts() with 0 total")
return nil
case 1:
if len(innerHashes) != 0 {
return nil
}
return leafHash
default:
if len(innerHashes) == 0 {
return nil
}
numLeft := (total + 1) / 2
if index < numLeft {
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
if leftHash == nil {
return nil
}
return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
} else {
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
if rightHash == nil {
return nil
}
return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
}
}
}
// Helper structure to construct merkle proof.
// The node and the tree is thrown away afterwards.
// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil.
// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or
// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child.
type SimpleProofNode struct {
Hash []byte
Parent *SimpleProofNode
Left *SimpleProofNode // Left sibling (only one of Left,Right is set)
Right *SimpleProofNode // Right sibling (only one of Left,Right is set)
}
// Starting from a leaf SimpleProofNode, FlattenAunts() will return
// the inner hashes for the item corresponding to the leaf.
func (spn *SimpleProofNode) FlattenAunts() [][]byte {
// Nonrecursive impl.
innerHashes := [][]byte{}
for spn != nil {
if spn.Left != nil {
innerHashes = append(innerHashes, spn.Left.Hash)
} else if spn.Right != nil {
innerHashes = append(innerHashes, spn.Right.Hash)
} else {
break
}
spn = spn.Parent
}
return innerHashes
}
// trails[0].Hash is the leaf hash for items[0].
// trails[i].Parent.Parent....Parent == root for all i.
func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) {
// Recursive impl.
switch len(items) {
case 0:
return nil, nil
case 1:
trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil}
return []*SimpleProofNode{trail}, trail
default:
lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2])
rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:])
rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
root := &SimpleProofNode{rootHash, nil, nil, nil}
leftRoot.Parent = root
leftRoot.Right = rightRoot
rightRoot.Parent = root
rightRoot.Left = leftRoot
return append(lefts, rights...), root
sm.Set(k, v)
}
return sm.Hash()
}
