diff --git a/.circleci/config.yml b/.circleci/config.yml index 9c51bc48f..7ad793549 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2 defaults: &defaults working_directory: /go/src/github.com/tendermint/tendermint docker: - - image: circleci/golang:1.12.0 + - image: circleci/golang environment: GOBIN: /tmp/workspace/bin @@ -14,6 +14,9 @@ docs_update_config: &docs_update_config environment: AWS_REGION: us-east-1 +release_management_docker: &release_management_docker + machine: true + jobs: setup_dependencies: <<: *defaults @@ -192,7 +195,7 @@ jobs: name: run localnet and exit on failure command: | set -x - docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux make localnet-start & ./scripts/localnet-blocks-test.sh 40 5 10 localhost @@ -256,6 +259,105 @@ jobs: echo "Website build started" fi + prepare_build: + <<: *defaults + steps: + - checkout + - run: + name: Get next release number + command: | + export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`" + echo "Last tag: ${LAST_TAG}" + if [ -z "${LAST_TAG}" ]; then + export LAST_TAG="${CIRCLE_BRANCH}" + echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag." + fi + export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`" + echo "Next tag: ${NEXT_TAG}" + echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source + - run: + name: Build dependencies + command: | + make get_tools get_vendor_deps + - persist_to_workspace: + root: . 
+ paths: + - "release-version.source" + - save_cache: + key: v1-release-deps-{{ .Branch }}-{{ .Revision }} + paths: + - "vendor" + + build_artifacts: + <<: *defaults + parallelism: 4 + steps: + - checkout + - restore_cache: + keys: + - v1-release-deps-{{ .Branch }}-{{ .Revision }} + - attach_workspace: + at: /tmp/workspace + - run: + name: Build artifact + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. + source /tmp/workspace/release-version.source + if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + - persist_to_workspace: + root: build + paths: + - "*.zip" + - "tendermint_linux_amd64" + + release_artifacts: + <<: *defaults + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: Deploy to GitHub + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. 
+ source /tmp/workspace/release-version.source + echo "---" + ls -la /tmp/workspace/*.zip + echo "---" + python -u scripts/release_management/sha-files.py + echo "---" + cat /tmp/workspace/SHA256SUMS + echo "---" + export RELEASE_ID="`python -u scripts/release_management/github-draft.py`" + echo "Release ID: ${RELEASE_ID}" + #Todo: Parallelize uploads + export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}" + python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}" + + release_docker: + <<: *release_management_docker + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: Deploy to Docker Hub + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. 
+ source /tmp/workspace/release-version.source + cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint + docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER" + docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}" + docker push "tendermint/tendermint" + docker logout + workflows: version: 2 test-suite: @@ -292,3 +394,25 @@ workflows: - upload_coverage: requires: - test_cover + release: + jobs: + - prepare_build + - build_artifacts: + requires: + - prepare_build + - release_artifacts: + requires: + - prepare_build + - build_artifacts + filters: + branches: + only: + - /v[0-9]+\.[0-9]+/ + - release_docker: + requires: + - prepare_build + - build_artifacts + filters: + branches: + only: + - /v[0-9]+\.[0-9]+/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 31ee14c51..52a926aed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,56 @@ # Changelog +## v0.31.3 + +*April 1st, 2019* + +This release includes two security sensitive fixes: it ensures generated private +keys are valid, and it prevents certain DNS lookups that would cause the node to +panic if the lookup failed. + +### BUG FIXES: + +- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439) + Ensure generated private keys are valid by randomly sampling until a valid key is found. + Previously, it was possible (though rare!) to generate keys that exceeded the curve order. + Such keys would lead to invalid signatures. +- [p2p] [\#3522](https://github.com/tendermint/tendermint/issues/3522) Memoize + socket address in peer connections to avoid DNS lookups. Previously, failed + DNS lookups could cause the node to panic. + +## v0.31.2 + +*March 30th, 2019* + +This release fixes a regression from v0.31.1 where Tendermint panics under +mempool load for external ABCI apps. 
+ +Special thanks to external contributors on this release: +@guagualvcha + +### BREAKING CHANGES: + +* CLI/RPC/Config + +* Apps + +* Go API +- [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package. Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha) + +* Blockchain Protocol + +* P2P Protocol + +### FEATURES: + +### IMPROVEMENTS: + +- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI + +### BUG FIXES: + +- [mempool] [\#3512](https://github.com/tendermint/tendermint/issues/3512) Fix panic from concurrent access to txsMap, a regression for external ABCI apps introduced in v0.31.1 + ## v0.31.1 *March 27th, 2019* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 470282aa9..045247937 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,4 +1,4 @@ -## v0.32.0 +## v0.31.2 ** diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 4a855f425..6a7f289f5 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,5 +1,5 @@ -FROM alpine:3.7 -MAINTAINER Greg Szabo +FROM alpine:3.9 +LABEL maintainer="hello@tendermint.com" # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json # (unless you change `genesis_file` in config.toml). You can put your config.toml and diff --git a/Makefile b/Makefile index 79ae6aaba..7c2ce1d9c 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ GOTOOLS = \ github.com/square/certstrap GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) +OUTPUT?=build/tendermint INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' @@ -19,13 +20,13 @@ check: check_tools get_vendor_deps ### Build Tendermint build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ + CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ build_c: - CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/ + CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/ build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint + CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint install: CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint @@ -109,7 +110,7 @@ draw_deps: get_deps_bin_size: @# Copy of build recipe with additional flags to perform binary size analysis - $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1)) + $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1)) @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" @@ -261,7 +262,7 @@ check_dep: ### Docker image build-docker: - cp build/tendermint DOCKER/tendermint + cp $(OUTPUT) DOCKER/tendermint docker build --label=tendermint --tag="tendermint/tendermint" DOCKER rm -rf DOCKER/tendermint diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 562676605..3b401bd3c 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -26,16 +26,17 @@ var _ Client = (*socketClient)(nil) type socketClient struct { cmn.BaseService - reqQueue chan *ReqRes - flushTimer *cmn.ThrottleTimer + addr string mustConnect bool + conn 
net.Conn + + reqQueue chan *ReqRes + flushTimer *cmn.ThrottleTimer mtx sync.Mutex - addr string - conn net.Conn err error - reqSent *list.List - resCb func(*types.Request, *types.Response) // listens to all callbacks + reqSent *list.List // list of requests sent, waiting for response + resCb func(*types.Request, *types.Response) // called on all requests, if set. } @@ -86,6 +87,7 @@ func (cli *socketClient) OnStop() { cli.mtx.Lock() defer cli.mtx.Unlock() if cli.conn != nil { + // does this really need a mutex? cli.conn.Close() } @@ -207,12 +209,15 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { reqres.Done() // Release waiters cli.reqSent.Remove(next) // Pop first item from linked list - // Notify reqRes listener if set + // Notify reqRes listener if set (request specific callback). + // NOTE: it is possible this callback isn't set on the reqres object. + // at this point, in which case it will be called after, when it is set. + // TODO: should we move this after the resCb call so the order is always consistent? if cb := reqres.GetCallback(); cb != nil { cb(res) } - // Notify client listener if set + // Notify client listener if set (global callback). 
if cli.resCb != nil { cli.resCb(reqres.Request, res) } diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index e24f6131e..01d7dba20 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -42,7 +42,9 @@ func (p testPeer) runInputRoutine() { func (p testPeer) simulateInput(input inputData) { block := &types.Block{Header: types.Header{Height: input.request.Height}} input.pool.AddBlock(input.request.PeerID, block, 123) - input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) + // TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 + // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 + // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) } type testPeers map[p2p.ID]testPeer diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 78857c45c..fc64a0b0f 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -6,6 +6,7 @@ import ( "crypto/subtle" "fmt" "io" + "math/big" "golang.org/x/crypto/ripemd160" @@ -65,32 +66,61 @@ func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool { } // GenPrivKey generates a new ECDSA private key on curve secp256k1 private key. -// It uses OS randomness in conjunction with the current global random seed -// in tendermint/libs/common to generate the private key. +// It uses OS randomness to generate the private key. func GenPrivKey() PrivKeySecp256k1 { return genPrivKey(crypto.CReader()) } // genPrivKey generates a new secp256k1 private key using the provided reader. 
func genPrivKey(rand io.Reader) PrivKeySecp256k1 { - privKeyBytes := [32]byte{} - _, err := io.ReadFull(rand, privKeyBytes[:]) - if err != nil { - panic(err) + var privKeyBytes [32]byte + d := new(big.Int) + for { + privKeyBytes = [32]byte{} + _, err := io.ReadFull(rand, privKeyBytes[:]) + if err != nil { + panic(err) + } + + d.SetBytes(privKeyBytes[:]) + // break if we found a valid point (i.e. > 0 and < N == curverOrder) + isValidFieldElement := 0 < d.Sign() && d.Cmp(secp256k1.S256().N) < 0 + if isValidFieldElement { + break + } } - // crypto.CRandBytes is guaranteed to be 32 bytes long, so it can be - // casted to PrivKeySecp256k1. + return PrivKeySecp256k1(privKeyBytes) } +var one = new(big.Int).SetInt64(1) + // GenPrivKeySecp256k1 hashes the secret with SHA2, and uses // that 32 byte output to create the private key. +// +// It makes sure the private key is a valid field element by setting: +// +// c = sha256(secret) +// k = (c mod (n − 1)) + 1, where n = curve order. +// // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 { - privKey32 := sha256.Sum256(secret) - // sha256.Sum256() is guaranteed to be 32 bytes long, so it can be - // casted to PrivKeySecp256k1. 
+ secHash := sha256.Sum256(secret) + // to guarantee that we have a valid field element, we use the approach of: + // "Suite B Implementer’s Guide to FIPS 186-3", A.2.1 + // https://apps.nsa.gov/iaarchive/library/ia-guidance/ia-solutions-for-classified/algorithm-guidance/suite-b-implementers-guide-to-fips-186-3-ecdsa.cfm + // see also https://github.com/golang/go/blob/0380c9ad38843d523d9c9804fe300cb7edd7cd3c/src/crypto/ecdsa/ecdsa.go#L89-L101 + fe := new(big.Int).SetBytes(secHash[:]) + n := new(big.Int).Sub(secp256k1.S256().N, one) + fe.Mod(fe, n) + fe.Add(fe, one) + + feB := fe.Bytes() + var privKey32 [32]byte + // copy feB over to fixed 32 byte privKey32 and pad (if necessary) + copy(privKey32[32-len(feB):32], feB) + return PrivKeySecp256k1(privKey32) } diff --git a/crypto/secp256k1/secp256k1_cgo_test.go b/crypto/secp256k1/secp256k1_cgo_test.go new file mode 100644 index 000000000..edb207b53 --- /dev/null +++ b/crypto/secp256k1/secp256k1_cgo_test.go @@ -0,0 +1,39 @@ +// +build libsecp256k1 + +package secp256k1 + +import ( + "github.com/magiconair/properties/assert" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPrivKeySecp256k1SignVerify(t *testing.T) { + msg := []byte("A.1.2 ECC Key Pair Generation by Testing Candidates") + priv := GenPrivKey() + tests := []struct { + name string + privKey PrivKeySecp256k1 + wantSignErr bool + wantVerifyPasses bool + }{ + {name: "valid sign-verify round", privKey: priv, wantSignErr: false, wantVerifyPasses: true}, + {name: "invalid private key", privKey: [32]byte{}, wantSignErr: true, wantVerifyPasses: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.privKey.Sign(msg) + if tt.wantSignErr { + require.Error(t, err) + t.Logf("Got error: %s", err) + return + } + require.NoError(t, err) + require.NotNil(t, got) + + pub := tt.privKey.PubKey() + assert.Equal(t, tt.wantVerifyPasses, pub.VerifyBytes(msg, got)) + }) + } +} diff --git 
a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go new file mode 100644 index 000000000..305f12020 --- /dev/null +++ b/crypto/secp256k1/secp256k1_internal_test.go @@ -0,0 +1,45 @@ +package secp256k1 + +import ( + "bytes" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" +) + +func Test_genPrivKey(t *testing.T) { + + empty := make([]byte, 32) + oneB := big.NewInt(1).Bytes() + onePadded := make([]byte, 32) + copy(onePadded[32-len(oneB):32], oneB) + t.Logf("one padded: %v, len=%v", onePadded, len(onePadded)) + + validOne := append(empty, onePadded...) + tests := []struct { + name string + notSoRand []byte + shouldPanic bool + }{ + {"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true}, + {"curve order: N", underlyingSecp256k1.S256().N.Bytes(), true}, + {"valid because 0 < 1 < N", validOne, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.shouldPanic { + require.Panics(t, func() { + genPrivKey(bytes.NewReader(tt.notSoRand)) + }) + return + } + got := genPrivKey(bytes.NewReader(tt.notSoRand)) + fe := new(big.Int).SetBytes(got[:]) + require.True(t, fe.Cmp(underlyingSecp256k1.S256().N) < 0) + require.True(t, fe.Sign() > 0) + }) + } +} diff --git a/crypto/secp256k1/secp256k1_nocgo_test.go b/crypto/secp256k1/secp256k1_nocgo_test.go index a06a0e3d1..17cb75815 100644 --- a/crypto/secp256k1/secp256k1_nocgo_test.go +++ b/crypto/secp256k1/secp256k1_nocgo_test.go @@ -6,7 +6,6 @@ import ( "testing" secp256k1 "github.com/btcsuite/btcd/btcec" - "github.com/stretchr/testify/require" ) diff --git a/crypto/secp256k1/secpk256k1_test.go b/crypto/secp256k1/secp256k1_test.go similarity index 74% rename from crypto/secp256k1/secpk256k1_test.go rename to crypto/secp256k1/secp256k1_test.go index 0f0b5adce..2488b5399 100644 --- a/crypto/secp256k1/secpk256k1_test.go +++ 
b/crypto/secp256k1/secp256k1_test.go @@ -2,6 +2,7 @@ package secp256k1_test import ( "encoding/hex" + "math/big" "testing" "github.com/btcsuite/btcutil/base58" @@ -84,3 +85,28 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { require.Equal(t, privKeyBytes[:], serializedBytes) } } + +func TestGenPrivKeySecp256k1(t *testing.T) { + // curve oder N + N := underlyingSecp256k1.S256().N + tests := []struct { + name string + secret []byte + }{ + {"empty secret", []byte{}}, + {"some long secret", []byte("We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.")}, + {"another seed used in cosmos tests #1", []byte{0}}, + {"another seed used in cosmos tests #2", []byte("mySecret")}, + {"another seed used in cosmos tests #3", []byte("")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret) + require.NotNil(t, gotPrivKey) + // interpret as a big.Int and make sure it is a valid field element: + fe := new(big.Int).SetBytes(gotPrivKey[:]) + require.True(t, fe.Cmp(N) < 0) + require.True(t, fe.Sign() > 0) + }) + } +} diff --git a/libs/autofile/group.go b/libs/autofile/group.go index d1ea0de75..ce73466e4 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -331,172 +331,6 @@ func (g *Group) NewReader(index int) (*GroupReader, error) { return r, nil } -// Returns -1 if line comes after, 0 if found, 1 if line comes before. -type SearchFunc func(line string) (int, error) - -// Searches for the right file in Group, then returns a GroupReader to start -// streaming lines. -// Returns true if an exact match was found, otherwise returns the next greater -// line that starts with prefix. 
-// CONTRACT: Caller must close the returned GroupReader -func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - // Now minIndex/maxIndex may change meanwhile, - // but it shouldn't be a big deal - // (maybe we'll want to limit scanUntil though) - - for { - curIndex := (minIndex + maxIndex + 1) / 2 - - // Base case, when there's only 1 choice left. - if minIndex == maxIndex { - r, err := g.NewReader(maxIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if err != nil { - r.Close() - return nil, false, err - } - return r, match, err - } - - // Read starting roughly at the middle file, - // until we find line that has prefix. - r, err := g.NewReader(curIndex) - if err != nil { - return nil, false, err - } - foundIndex, line, err := scanNext(r, prefix) - r.Close() - if err != nil { - return nil, false, err - } - - // Compare this line to our search query. - val, err := cmp(line) - if err != nil { - return nil, false, err - } - if val < 0 { - // Line will come later - minIndex = foundIndex - } else if val == 0 { - // Stroke of luck, found the line - r, err := g.NewReader(foundIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if !match { - panic("Expected match to be true") - } - if err != nil { - r.Close() - return nil, false, err - } - return r, true, err - } else { - // We passed it - maxIndex = curIndex - 1 - } - } - -} - -// Scans and returns the first line that starts with 'prefix' -// Consumes line and returns it. -func scanNext(r *GroupReader, prefix string) (int, string, error) { - for { - line, err := r.ReadLine() - if err != nil { - return 0, "", err - } - if !strings.HasPrefix(line, prefix) { - continue - } - index := r.CurIndex() - return index, line, nil - } -} - -// Returns true iff an exact match was found. -// Pushes line, does not consume it. 
-func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { - for { - line, err := r.ReadLine() - if err != nil { - return false, err - } - if !strings.HasPrefix(line, prefix) { - continue - } - val, err := cmp(line) - if err != nil { - return false, err - } - if val < 0 { - continue - } else if val == 0 { - r.PushLine(line) - return true, nil - } else { - r.PushLine(line) - return false, nil - } - } -} - -// Searches backwards for the last line in Group with prefix. -// Scans each file forward until the end to find the last match. -func (g *Group) FindLast(prefix string) (match string, found bool, err error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - - r, err := g.NewReader(maxIndex) - if err != nil { - return "", false, err - } - defer r.Close() - - // Open files from the back and read -GROUP_LOOP: - for i := maxIndex; i >= minIndex; i-- { - err := r.SetIndex(i) - if err != nil { - return "", false, err - } - // Scan each line and test whether line matches - for { - line, err := r.ReadLine() - if err == io.EOF { - if found { - return match, found, nil - } - continue GROUP_LOOP - } else if err != nil { - return "", false, err - } - if strings.HasPrefix(line, prefix) { - match = line - found = true - } - if r.CurIndex() > i { - if found { - return match, found, nil - } - continue GROUP_LOOP - } - } - } - - return -} - // GroupInfo holds information about the group. type GroupInfo struct { MinIndex int // index of the first file in the group, including head @@ -654,48 +488,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } } -// ReadLine reads a line (without delimiter). -// just return io.EOF if no new lines found. 
-func (gr *GroupReader) ReadLine() (string, error) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - // From PushLine - if gr.curLine != nil { - line := string(gr.curLine) - gr.curLine = nil - return line, nil - } - - // Open file if not open yet - if gr.curReader == nil { - err := gr.openFile(gr.curIndex) - if err != nil { - return "", err - } - } - - // Iterate over files until line is found - var linePrefix string - for { - bytesRead, err := gr.curReader.ReadBytes('\n') - if err == io.EOF { - // Open the next file - if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { - return "", err1 - } - if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } - linePrefix += string(bytesRead) - continue - } else if err != nil { - return "", err - } - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } -} - // IF index > gr.Group.maxIndex, returns io.EOF // CONTRACT: caller should hold gr.mtx func (gr *GroupReader) openFile(index int) error { @@ -725,20 +517,6 @@ func (gr *GroupReader) openFile(index int) error { return nil } -// PushLine makes the given line the current one, so the next time somebody -// calls ReadLine, this line will be returned. -// panics if called twice without calling ReadLine. -func (gr *GroupReader) PushLine(line string) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - if gr.curLine == nil { - gr.curLine = []byte(line) - } else { - panic("PushLine failed, already have line") - } -} - // CurIndex returns cursor's file index. func (gr *GroupReader) CurIndex() int { gr.mtx.Lock() @@ -753,32 +531,3 @@ func (gr *GroupReader) SetIndex(index int) error { defer gr.mtx.Unlock() return gr.openFile(index) } - -//-------------------------------------------------------------------------------- - -// A simple SearchFunc that assumes that the marker is of form -// . -// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form: -// -// #HEIGHT:1 -// ... 
-// #HEIGHT:2 -// ... -func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { - return func(line string) (int, error) { - if !strings.HasPrefix(line, prefix) { - return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) - } - i, err := strconv.Atoi(line[len(prefix):]) - if err != nil { - return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } -} diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index 68870df87..c300aba71 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -1,13 +1,9 @@ package autofile import ( - "errors" - "fmt" "io" "io/ioutil" "os" - "strconv" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -106,107 +102,6 @@ func TestCheckHeadSizeLimit(t *testing.T) { destroyTestGroup(t, g) } -func TestSearch(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 10*1000) - - // Create some files in the group that have several INFO lines in them. - // Try to put the INFO lines in various spots. - for i := 0; i < 100; i++ { - // The random junk at the end ensures that this INFO linen - // is equally likely to show up at the end. 
- _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123)))) - require.NoError(t, err, "Failed to write to head") - g.checkHeadSizeLimit() - for j := 0; j < 10; j++ { - _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n")) - require.NoError(t, err1, "Failed to write to head") - g.checkHeadSizeLimit() - } - } - - // Create a search func that searches for line - makeSearchFunc := func(target int) SearchFunc { - return func(line string) (int, error) { - parts := strings.Split(line, " ") - if len(parts) != 3 { - return -1, errors.New("Line did not have 3 parts") - } - i, err := strconv.Atoi(parts[1]) - if err != nil { - return -1, errors.New("Failed to parse INFO: " + err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } - } - - // Now search for each number - for i := 0; i < 100; i++ { - gr, match, err := g.Search("INFO", makeSearchFunc(i)) - require.NoError(t, err, "Failed to search for line, tc #%d", i) - assert.True(t, match, "Expected Search to return exact match, tc #%d", i) - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search, tc #%d", i) - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) { - t.Fatalf("Failed to get correct line, tc #%d", i) - } - // Make sure we can continue to read from there. - cur := i + 1 - for { - line, err := gr.ReadLine() - if err == io.EOF { - if cur == 99+1 { - // OK! - break - } else { - t.Fatalf("Got EOF after the wrong INFO #, tc #%d", i) - } - } else if err != nil { - t.Fatalf("Error reading line, tc #%d, err:\n%s", i, err) - } - if !strings.HasPrefix(line, "INFO ") { - continue - } - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { - t.Fatalf("Unexpected INFO #. Expected %v got:\n%v, tc #%d", cur, line, i) - } - cur++ - } - gr.Close() - } - - // Now search for something that is too small. - // We should get the first available line. 
- { - gr, match, err := g.Search("INFO", makeSearchFunc(-999)) - require.NoError(t, err, "Failed to search for line") - assert.False(t, match, "Expected Search to not return exact match") - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search") - if !strings.HasPrefix(line, "INFO 0 ") { - t.Error("Failed to fetch correct line, which is the earliest INFO") - } - err = gr.Close() - require.NoError(t, err, "Failed to close GroupReader") - } - - // Now search for something that is too large. - // We should get an EOF error. - { - gr, _, err := g.Search("INFO", makeSearchFunc(999)) - assert.Equal(t, io.EOF, err) - assert.Nil(t, gr) - } - - // Cleanup - destroyTestGroup(t, g) -} - func TestRotateFile(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) g.WriteLine("Line 1") @@ -237,100 +132,6 @@ func TestRotateFile(t *testing.T) { destroyTestGroup(t, g) } -func TestFindLast1(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("# a") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.WriteLine("# b") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast2(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("# a") - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("# b") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast3(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - 
g.WriteLine("# a") - g.WriteLine("Line 2") - g.WriteLine("# b") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast4(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.False(t, found) - assert.Empty(t, match) - - // Cleanup - destroyTestGroup(t, g) -} - func TestWrite(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) diff --git a/mempool/cache_test.go b/mempool/cache_test.go index 26e560b6e..ea9f63fd6 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -19,7 +19,7 @@ func TestCacheRemove(t *testing.T) { for i := 0; i < numTxs; i++ { // probability of collision is 2**-256 txBytes := make([]byte, 32) - rand.Read(txBytes) + rand.Read(txBytes) // nolint: gosec txs[i] = txBytes cache.Push(txBytes) // make sure its added to both the linked list and the map diff --git a/mempool/mempool.go b/mempool/mempool.go index bd3cbf7d9..a5b14466a 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -149,6 +149,11 @@ func TxID(tx []byte) string { return fmt.Sprintf("%X", types.Tx(tx).Hash()) } +// txKey is the fixed length array sha256 hash used as the key in maps. +func txKey(tx types.Tx) [sha256.Size]byte { + return sha256.Sum256(tx) +} + // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus // round. Transaction validity is checked using the CheckTx abci message before the transaction is // added to the pool. 
The Mempool uses a concurrent list structure for storing transactions that @@ -159,23 +164,27 @@ type Mempool struct { proxyMtx sync.Mutex proxyAppConn proxy.AppConnMempool txs *clist.CList // concurrent linked-list of good txs - // map for quick access to txs - // Used in CheckTx to record the tx sender. - txsMap map[[sha256.Size]byte]*clist.CElement - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here + preCheck PreCheckFunc + postCheck PostCheckFunc + + // Track whether we're rechecking txs. + // These are not protected by a mutex and are expected to be mutated + // in serial (ie. by abci responses which are called in serial). + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // notify listeners (ie. consensus) when txs are available notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - preCheck PreCheckFunc - postCheck PostCheckFunc - // Atomic integers + // Map for quick access to txs to record sender in CheckTx. + // txsMap: txKey -> CElement + txsMap sync.Map - // Used to check if the mempool size is bigger than the allowed limit. - // See TxsBytes - txsBytes int64 + // Atomic integers + height int64 // the last block Update()'d to + rechecking int32 // for re-checking filtered txs on Update() + txsBytes int64 // total size of mempool, in bytes // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. 
@@ -203,7 +212,6 @@ func NewMempool( config: config, proxyAppConn: proxyAppConn, txs: clist.New(), - txsMap: make(map[[sha256.Size]byte]*clist.CElement), height: height, rechecking: 0, recheckCursor: nil, @@ -216,7 +224,7 @@ func NewMempool( } else { mempool.cache = nopTxCache{} } - proxyAppConn.SetResponseCallback(mempool.resCb) + proxyAppConn.SetResponseCallback(mempool.globalCb) for _, option := range options { option(mempool) } @@ -319,7 +327,7 @@ func (mem *Mempool) Flush() { e.DetachPrev() } - mem.txsMap = make(map[[sha256.Size]byte]*clist.CElement) + mem.txsMap = sync.Map{} _ = atomic.SwapInt64(&mem.txsBytes, 0) } @@ -380,13 +388,12 @@ func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo // CACHE if !mem.cache.Push(tx) { - // record the sender - e, ok := mem.txsMap[sha256.Sum256(tx)] - // The check is needed because tx may be in cache, but not in the mempool. - // E.g. after we've committed a block, txs are removed from the mempool, - // but not from the cache. - if ok { - memTx := e.Value.(*mempoolTx) + // Record a new sender for a tx we've already seen. + // Note it's possible a tx is still in the cache but no longer in the mempool + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. 
+ if e, ok := mem.txsMap.Load(txKey(tx)); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, @@ -416,25 +423,21 @@ func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo if err = mem.proxyAppConn.Error(); err != nil { return err } + reqRes := mem.proxyAppConn.CheckTxAsync(tx) - if cb != nil { - composedCallback := func(res *abci.Response) { - mem.reqResCb(tx, txInfo.PeerID)(res) - cb(res) - } - reqRes.SetCallback(composedCallback) - } else { - reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID)) - } + reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID, cb)) return nil } -// Global callback, which is called in the absence of the specific callback. -// -// In recheckTxs because no reqResCb (specific) callback is set, this callback -// will be called. -func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { +// Global callback that will be called after every ABCI response. +// Having a single global callback avoids needing to set a callback for each request. +// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), +// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that +// include this information. If we're not in the midst of a recheck, this function will just return, +// so the request specific callback can do the work. +// When rechecking, we don't need the peerID, so the recheck callback happens here. 
+func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { return } @@ -446,35 +449,50 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { mem.metrics.Size.Set(float64(mem.Size())) } -// Specific callback, which allows us to incorporate local information, like -// the peer that sent us this tx, so we can avoid sending it back to the same -// peer. +// Request specific callback that should be set on individual reqRes objects +// to incorporate local information when processing the response. +// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. +// NOTE: alternatively, we could include this information in the ABCI request itself. +// +// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called +// when all other response processing is complete. // // Used in CheckTxWithInfo to record PeerID who sent us the tx. -func (mem *Mempool) reqResCb(tx []byte, peerID uint16) func(res *abci.Response) { +func (mem *Mempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) { return func(res *abci.Response) { if mem.recheckCursor != nil { - return + // this should never happen + panic("recheck cursor is not nil in reqResCb") } mem.resCbFirstTime(tx, peerID, res) // update metrics mem.metrics.Size.Set(float64(mem.Size())) + + // passed in by the caller of CheckTx, eg. 
the RPC + if externalCb != nil { + externalCb(res) + } } } +// Called from: +// - resCbFirstTime (lock not held) if tx is valid func (mem *Mempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) - mem.txsMap[sha256.Sum256(memTx.tx)] = e + mem.txsMap.Store(txKey(memTx.tx), e) atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } +// Called from: +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() - delete(mem.txsMap, sha256.Sum256(tx)) + mem.txsMap.Delete(txKey(tx)) atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) if removeFromCache { @@ -733,7 +751,7 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { mem.recheckEnd = mem.txs.Back() // Push txs to proxyAppConn - // NOTE: reqResCb may be called concurrently. + // NOTE: globalCb may be called concurrently. for _, tx := range txs { mem.proxyAppConn.CheckTxAsync(tx) } @@ -746,8 +764,11 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require - senders sync.Map // ids of peers who've sent us this tx (as a map for quick lookups) tx types.Tx // + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> bool + senders sync.Map } // Height returns the height for this transaction @@ -798,7 +819,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { defer cache.mtx.Unlock() // Use the tx hash in the cache - txHash := sha256.Sum256(tx) + txHash := txKey(tx) if moved, exists := cache.map_[txHash]; exists { cache.list.MoveToBack(moved) return false @@ -820,7 +841,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { // Remove removes the given tx from the cache. 
func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - txHash := sha256.Sum256(tx) + txHash := txKey(tx) popped := cache.map_[txHash] delete(cache.map_, txHash) if popped != nil { diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index dc7d595af..d5f25396d 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "fmt" "io/ioutil" + mrand "math/rand" "os" "path/filepath" "testing" @@ -18,6 +19,7 @@ import ( "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" + abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -510,6 +512,54 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 0, mempool.TxsBytes()) } +// This will non-deterministically catch some concurrency failures like +// https://github.com/tendermint/tendermint/issues/3509 +// TODO: all of the tests should probably also run using the remote proxy app +// since otherwise we're not actually testing the concurrency of the mempool here! +func TestMempoolRemoteAppConcurrency(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + app := kvstore.NewKVStoreApplication() + cc, server := newRemoteApp(t, sockPath, app) + defer server.Stop() + config := cfg.ResetTestRoot("mempool_test") + mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + defer cleanup() + + // generate small number of txs + nTxs := 10 + txLen := 200 + txs := make([]types.Tx, nTxs) + for i := 0; i < nTxs; i++ { + txs[i] = cmn.RandBytes(txLen) + } + + // simulate a group of peers sending them over and over + N := config.Mempool.Size + maxPeers := 5 + for i := 0; i < N; i++ { + peerID := mrand.Intn(maxPeers) + txNum := mrand.Intn(nTxs) + tx := txs[int(txNum)] + + // this will err with ErrTxInCache many times ... 
+ mempool.CheckTxWithInfo(tx, nil, TxInfo{PeerID: uint16(peerID)}) + } + err := mempool.FlushAppConn() + require.NoError(t, err) +} + +// caller must close server +func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) { + clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) + + // Start server + server = abciserver.NewSocketServer(addr, app) + server.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := server.Start(); err != nil { + t.Fatalf("Error starting socket server: %v", err.Error()) + } + return clientCreator, server +} func checksumIt(data []byte) string { h := sha256.New() h.Write(data) diff --git a/node/node.go b/node/node.go index 3501b6a7a..e91d36357 100644 --- a/node/node.go +++ b/node/node.go @@ -489,7 +489,7 @@ func NewNode(config *cfg.Config, addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) // Add ourselves to addrbook to prevent dialing ourselves - addrBook.AddOurAddress(nodeInfo.NetAddress()) + addrBook.AddOurAddress(sw.NetAddress()) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) if config.P2P.PexReactor { diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index 5ee81f67e..bdcf012de 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -62,7 +62,7 @@ func (mp *Peer) Get(key string) interface{} { func (mp *Peer) Set(key string, value interface{}) { mp.kv[key] = value } -func (mp *Peer) RemoteIP() net.IP { return mp.ip } -func (mp *Peer) OriginalAddr() *p2p.NetAddress { return mp.addr } -func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *Peer) CloseConn() error { return nil } +func (mp *Peer) RemoteIP() net.IP { return mp.ip } +func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } +func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (mp *Peer) CloseConn() error { return nil } diff --git 
a/p2p/node_info.go b/p2p/node_info.go index 699fd7f1e..e80f1e1b7 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -23,14 +23,8 @@ func MaxNodeInfoSize() int { // NodeInfo exposes basic info of a node // and determines if we're compatible. type NodeInfo interface { - nodeInfoAddress - nodeInfoTransport -} - -// nodeInfoAddress exposes just the core info of a node. -type nodeInfoAddress interface { ID() ID - NetAddress() *NetAddress + nodeInfoTransport } // nodeInfoTransport validates a nodeInfo and checks @@ -221,7 +215,7 @@ func (info DefaultNodeInfo) NetAddress() *NetAddress { if err != nil { switch err.(type) { case ErrNetAddressLookup: - // XXX If the peer provided a host name and the lookup fails here + // XXX If the peer provided a host name and the lookup fails here // we're out of luck. // TODO: use a NetAddress in DefaultNodeInfo default: diff --git a/p2p/peer.go b/p2p/peer.go index 73332a2aa..fab3b42d4 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -29,7 +29,7 @@ type Peer interface { NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus - OriginalAddr() *NetAddress // original address for outbound peers + SocketAddr() *NetAddress // actual address of the socket Send(byte, []byte) bool TrySend(byte, []byte) bool @@ -46,7 +46,7 @@ type peerConn struct { persistent bool conn net.Conn // source connection - originalAddr *NetAddress // nil for inbound connections + socketAddr *NetAddress // cached RemoteIP() ip net.IP @@ -55,14 +55,14 @@ type peerConn struct { func newPeerConn( outbound, persistent bool, conn net.Conn, - originalAddr *NetAddress, + socketAddr *NetAddress, ) peerConn { return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - originalAddr: originalAddr, + outbound: outbound, + persistent: persistent, + conn: conn, + socketAddr: socketAddr, } } @@ -223,13 +223,12 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } -// OriginalAddr returns the original address, which was used to connect with -// 
the peer. Returns nil for inbound peers. -func (p *peer) OriginalAddr() *NetAddress { - if p.peerConn.outbound { - return p.peerConn.originalAddr - } - return nil +// SocketAddr returns the address of the socket. +// For outbound peers, it's the address dialed (after DNS resolution). +// For inbound peers, it's the address returned by the underlying connection +// (not what's reported in the peer's NodeInfo). +func (p *peer) SocketAddr() *NetAddress { + return p.peerConn.socketAddr } // Status returns the peer's ConnectionStatus. diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 1d2372fb0..4bacb07d0 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -29,7 +29,7 @@ func (mp *mockPeer) IsPersistent() bool { return true } func (mp *mockPeer) Get(s string) interface{} { return s } func (mp *mockPeer) Set(string, interface{}) {} func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) OriginalAddr() *NetAddress { return nil } +func (mp *mockPeer) SocketAddr() *NetAddress { return nil } func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } func (mp *mockPeer) CloseConn() error { return nil } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 90be31131..bf61beb4f 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -109,25 +109,27 @@ func testOutboundPeerConn( persistent bool, ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { + + var pc peerConn conn, err := testDial(addr, config) if err != nil { - return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") + return pc, cmn.ErrorWrap(err, "Error creating peer") } - pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) + pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + return pc, cmn.ErrorWrap(err, cerr.Error()) } - return peerConn{}, err + return pc, err } // ensure dialed 
ID matches connection ID if addr.ID != pc.ID() { if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + return pc, cmn.ErrorWrap(err, cerr.Error()) } - return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} + return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()} } return pc, nil diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 01d1d8db5..0ce116326 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -167,7 +167,7 @@ func (r *PEXReactor) AddPeer(p Peer) { } } else { // inbound peer is its own source - addr := p.NodeInfo().NetAddress() + addr := p.SocketAddr() src := addr // add to book. dont RequestAddrs right away because @@ -309,7 +309,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { } r.requestsSent.Delete(id) - srcAddr := src.NodeInfo().NetAddress() + srcAddr := src.SocketAddr() for _, netAddr := range addrs { // Validate netAddr. Disconnect from a peer if it sends us invalid data. 
if netAddr == nil { diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 9e23058a5..4a6118c63 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -96,7 +96,7 @@ func TestPEXReactorRunning(t *testing.T) { } addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NodeInfo().NetAddress() + addr := switches[otherSwitchIndex].NetAddress() books[switchIndex].AddAddress(addr, addr) } @@ -127,7 +127,7 @@ func TestPEXReactorReceive(t *testing.T) { r.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) @@ -184,7 +184,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) // receive some addrs. should clear the request @@ -229,7 +229,7 @@ func TestCheckSeeds(t *testing.T) { badPeerConfig = &PEXReactorConfig{ Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NodeInfo().NetAddress().String()}, + seed.NetAddress().String()}, } peer = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Nil(t, peer.Start()) @@ -263,12 +263,13 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { defer os.RemoveAll(dir) // nolint: errcheck // 1. create peer - peer := testCreateDefaultPeer(dir, 1) - require.Nil(t, peer.Start()) - defer peer.Stop() + peerSwitch := testCreateDefaultPeer(dir, 1) + require.Nil(t, peerSwitch.Start()) + defer peerSwitch.Stop() // 2. 
Create seed which knows about the peer - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peer.NodeInfo().NetAddress()}, []*p2p.NetAddress{peer.NodeInfo().NetAddress()}) + peerAddr := peerSwitch.NetAddress() + seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) require.Nil(t, seed.Start()) defer seed.Stop() @@ -295,7 +296,7 @@ func TestPEXReactorCrawlStatus(t *testing.T) { // Create a peer, add it to the peer set and the addrbook. peer := p2p.CreateRandomPeer(false) p2p.AddPeerToSwitch(pexR.Switch, peer) - addr1 := peer.NodeInfo().NetAddress() + addr1 := peer.SocketAddr() pexR.book.AddAddress(addr1, addr1) // Add a non-connected address to the book. @@ -359,7 +360,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { reactor := switches[0].Reactors()["pex"].(*PEXReactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NodeInfo().NetAddress(), false) + err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false) assert.NoError(t, err) // sleep up to a second while waiting for the peer to send us a message. 
@@ -397,7 +398,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { pexR.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) @@ -414,7 +415,7 @@ func TestPEXReactorDialPeer(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) - addr := peer.NodeInfo().NetAddress() + addr := peer.SocketAddr() assert.Equal(t, 0, pexR.AttemptsToDial(addr)) @@ -547,7 +548,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) // Starting and stopping the peer is left to the caller func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { conf := &PEXReactorConfig{ - Seeds: []string{seed.NodeInfo().NetAddress().String()}, + Seeds: []string{seed.NetAddress().String()}, } return testCreatePeerWithConfig(dir, id, conf) } diff --git a/p2p/switch.go b/p2p/switch.go index 9e04fe7ce..76da9ad0c 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -86,6 +86,12 @@ type Switch struct { metrics *Metrics } +// NetAddress returns the address the switch is listening on. +func (sw *Switch) NetAddress() *NetAddress { + addr := sw.transport.NetAddress() + return &addr +} + // SwitchOption sets an optional parameter on the Switch. type SwitchOption func(*Switch) @@ -289,13 +295,7 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - addr := peer.OriginalAddr() - if addr == nil { - // FIXME: persistent peers can't be inbound right now. - // self-reported address for inbound persistent peers - addr = peer.NodeInfo().NetAddress() - } - go sw.reconnectToPeer(addr) + go sw.reconnectToPeer(peer.SocketAddr()) } } @@ -383,7 +383,7 @@ func (sw *Switch) SetAddrBook(addrBook AddrBook) { // like contributed to consensus. 
func (sw *Switch) MarkPeerAsGood(peer Peer) { if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.NodeInfo().NetAddress()) + sw.addrBook.MarkGood(peer.SocketAddr()) } } @@ -400,7 +400,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b sw.Logger.Error("Error in peer's address", "err", err) } - ourAddr := sw.nodeInfo.NetAddress() + ourAddr := sw.NetAddress() // TODO: this code feels like it's in the wrong place. // The integration tests depend on the addrBook being saved @@ -536,7 +536,7 @@ func (sw *Switch) acceptRoutine() { if in >= sw.config.MaxNumInboundPeers { sw.Logger.Info( "Ignoring inbound connection: already have enough inbound peers", - "address", p.NodeInfo().NetAddress().String(), + "address", p.SocketAddr(), "have", in, "max", sw.config.MaxNumInboundPeers, ) @@ -653,7 +653,7 @@ func (sw *Switch) addPeer(p Peer) error { return err } - p.SetLogger(sw.Logger.With("peer", p.NodeInfo().NetAddress())) + p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) // Handle the shut down case where the switch has stopped but we're // concurrently trying to add a peer. 
diff --git a/p2p/switch_test.go b/p2p/switch_test.go index d5dd178b6..ab8ae9e9f 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -161,10 +161,6 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r func TestSwitchFiltersOutItself(t *testing.T) { s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) - // addr := s1.NodeInfo().NetAddress() - - // // add ourselves like we do in node.go#427 - // s1.addrBook.AddOurAddress(addr) // simulate s1 having a public IP by creating a remote peer with the same ID rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} @@ -498,7 +494,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} remotePeers = append(remotePeers, rp) rp.Start() - c, err := rp.Dial(sw.NodeInfo().NetAddress()) + c, err := rp.Dial(sw.NetAddress()) require.NoError(t, err) // spawn a reading routine to prevent connection from closing go func(c net.Conn) { @@ -517,7 +513,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { // 2. 
check we close new connections if we already have MaxNumInboundPeers peers rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() - conn, err := rp.Dial(sw.NodeInfo().NetAddress()) + conn, err := rp.Dial(sw.NetAddress()) require.NoError(t, err) // check conn is closed one := make([]byte, 1) @@ -537,6 +533,10 @@ type errorTransport struct { acceptErr error } +func (et errorTransport) NetAddress() NetAddress { + panic("not implemented") +} + func (et errorTransport) Accept(c peerConfig) (Peer, error) { return nil, et.acceptErr } diff --git a/p2p/test_util.go b/p2p/test_util.go index 2d320df85..df60539ba 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -35,7 +35,8 @@ func CreateRandomPeer(outbound bool) *peer { addr, netAddr := CreateRoutableAddr() p := &peer{ peerConn: peerConn{ - outbound: outbound, + outbound: outbound, + socketAddr: netAddr, }, nodeInfo: mockNodeInfo{netAddr}, mconn: &conn.MConnection{}, @@ -174,10 +175,15 @@ func MakeSwitch( PrivKey: ed25519.GenPrivKey(), } nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) + addr, err := NewNetAddressString( + IDAddressString(nodeKey.ID(), nodeInfo.(DefaultNodeInfo).ListenAddr), + ) + if err != nil { + panic(err) + } t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) - addr := nodeInfo.NetAddress() if err := t.Listen(*addr); err != nil { panic(err) } @@ -214,7 +220,7 @@ func testPeerConn( cfg *config.P2PConfig, outbound, persistent bool, ourNodePrivKey crypto.PrivKey, - originalAddr *NetAddress, + socketAddr *NetAddress, ) (pc peerConn, err error) { conn := rawConn @@ -231,12 +237,7 @@ func testPeerConn( } // Only the information we already have - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - originalAddr: originalAddr, - }, nil + return newPeerConn(outbound, persistent, conn, socketAddr), nil } //---------------------------------------------------------------- diff --git a/p2p/transport.go b/p2p/transport.go index 
d36065ab1..6717db483 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -24,6 +24,7 @@ type IPResolver interface { // accept is the container to carry the upgraded connection and NodeInfo from an // asynchronously running routine to the Accept method. type accept struct { + netAddr *NetAddress conn net.Conn nodeInfo NodeInfo err error @@ -47,6 +48,9 @@ type peerConfig struct { // the transport. Each transport is also responsible to filter establishing // peers specific to its domain. type Transport interface { + // Listening address. + NetAddress() NetAddress + // Accept returns a newly connected Peer. Accept(peerConfig) (Peer, error) @@ -115,6 +119,7 @@ func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { // MultiplexTransport accepts and dials tcp connections and upgrades them to // multiplexed peers. type MultiplexTransport struct { + netAddr NetAddress listener net.Listener acceptc chan accept @@ -161,6 +166,11 @@ func NewMultiplexTransport( } } +// NetAddress implements Transport. +func (mt *MultiplexTransport) NetAddress() NetAddress { + return mt.netAddr +} + // Accept implements Transport. 
func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { select { @@ -173,7 +183,7 @@ func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { cfg.outbound = false - return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil + return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil case <-mt.closec: return nil, ErrTransportClosed{} } @@ -224,6 +234,7 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error { return err } + mt.netAddr = addr mt.listener = ln go mt.acceptPeers() @@ -258,15 +269,21 @@ func (mt *MultiplexTransport) acceptPeers() { var ( nodeInfo NodeInfo secretConn *conn.SecretConnection + netAddr *NetAddress ) err := mt.filterConn(c) if err == nil { secretConn, nodeInfo, err = mt.upgrade(c, nil) + if err == nil { + addr := c.RemoteAddr() + id := PubKeyToID(secretConn.RemotePubKey()) + netAddr = NewNetAddress(id, addr) + } } select { - case mt.acceptc <- accept{secretConn, nodeInfo, err}: + case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}: // Make the upgraded peer available. case <-mt.closec: // Give up if the transport was closed. 
@@ -426,14 +443,14 @@ func (mt *MultiplexTransport) wrapPeer( c net.Conn, ni NodeInfo, cfg peerConfig, - dialedAddr *NetAddress, + socketAddr *NetAddress, ) Peer { peerConn := newPeerConn( cfg.outbound, cfg.persistent, c, - dialedAddr, + socketAddr, ) p := newPeer( diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 81f9d1b8e..35fd9c66b 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/p2p/conn" ) @@ -142,43 +144,23 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) + id, addr := mt.nodeKey.ID(), mt.listener.Addr().String() + laddr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, addr)) + require.NoError(t, err) var ( - seed = rand.New(rand.NewSource(time.Now().UnixNano())) - errc = make(chan error, seed.Intn(64)+64) + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + nDialers = seed.Intn(64) + 64 + errc = make(chan error, nDialers) ) // Setup dialers. - for i := 0; i < cap(errc); i++ { - go func() { - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), - NodeKey{ - PrivKey: pv, - }, - ) - ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } - - _, err = dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - // Signal that the connection was established. - errc <- nil - }() + for i := 0; i < nDialers; i++ { + go testDialer(*laddr, errc) } // Catch connection errors. 
- for i := 0; i < cap(errc); i++ { + for i := 0; i < nDialers; i++ { if err := <-errc; err != nil { t.Fatal(err) } @@ -216,6 +198,27 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) { } } +func testDialer(dialAddr NetAddress, errc chan error) { + var ( + pv = ed25519.GenPrivKey() + dialer = newMultiplexTransport( + testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), + NodeKey{ + PrivKey: pv, + }, + ) + ) + + _, err := dialer.Dial(dialAddr, peerConfig{}) + if err != nil { + errc <- err + return + } + + // Signal that the connection was established. + errc <- nil +} + func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { mt := testSetupMultiplexTransport(t) @@ -591,6 +594,7 @@ func TestTransportHandshake(t *testing.T) { } } +// create listener func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { var ( pv = ed25519.GenPrivKey() diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 3850999d3..ad23a461c 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -218,7 +218,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState } peerStates[i] = ctypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.NodeInfo().NetAddress().String(), + NodeAddress: peer.SocketAddr().String(), // Peer consensus state. PeerState: peerStateJSON, } diff --git a/scripts/release_management/README.md b/scripts/release_management/README.md new file mode 100644 index 000000000..e92f1ccf6 --- /dev/null +++ b/scripts/release_management/README.md @@ -0,0 +1,65 @@ +# Release management scripts + +## Overview +The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable using input parameters, +the default settings were modified to accommodate CircleCI execution. + +# Build scripts +These scripts help during the build process. They prepare the release files. + +## bump-semver.py +Bumps the semantic version of the input `--version`. 
Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format. + +In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`. + +In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`. + +If the PATCH number contains letters, it is considered a development version, in which case, the result is the non-development version of that number. +The patch number will not be bumped, only the "-dev" or similar additional text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`. + +## zip-file.py +Specialized ZIP command for release management. Special features: +1. Uses Python ZIP libraries, so the `zip` command does not need to be installed. +1. Can only zip one file. +1. Optionally gets file version, Go OS and architecture. +1. By default, all inputs and outputs are formatted exactly how CircleCI needs them. + +By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`. +This can be changed with the `--file` input parameter. + +By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. +This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively. + +## sha-files.py +Specialized `shasum` command for release management. Special features: +1. Reads all ZIP files in the given folder. +1. By default, all inputs and outputs are formatted exactly how CircleCI needs them. + +By default, the command will look up all ZIP files in the `build/` folder. + +By default, the command will output results into the `build/SHA256SUMS` file. + +# GitHub management +Uploading build results to GitHub requires at least these steps: +1. Create a new release on GitHub with content +2. Upload all binaries to the release +3. Publish the release +The below scripts help with these steps.
+ +## github-draft.py +Creates a GitHub release and fills the content with the CHANGELOG.md link. The version number can be changed by the `--version` parameter. + +By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters. + +By default, the command will get the version number from the `${CIRCLE_TAG}` variable. + +Returns the GitHub release ID. + +## github-upload.py +Upload a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. + +By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter. + +## github-publish.py +Publish a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. + diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py new file mode 100755 index 000000000..b13a10342 --- /dev/null +++ b/scripts/release_management/bump-semver.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# Bump the release number of a semantic version number and print it. --version is required. 
+# Version is +# - vA.B.C, in which case vA.B.C+1 will be returned +# - vA.B.C-devorwhatnot in which case vA.B.C will be returned +# - vA.B in which case vA.B.0 will be returned + +import re +import argparse + + +def semver(ver): + if re.match('v[0-9]+\.[0-9]+',ver) is None: + ver="v0.0" + #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers') + return ver + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver) + args = parser.parse_args() + + found = re.match('(v[0-9]+\.[0-9]+)(\.(.+))?', args.version) + majorminorprefix = found.group(1) + patch = found.group(3) + if patch is None: + patch = "0-new" + + if re.match('[0-9]+$',patch) is None: + patchfound = re.match('([0-9]+)',patch) + patch = int(patchfound.group(1)) + else: + patch = int(patch) + 1 + + print("{0}.{1}".format(majorminorprefix, patch)) diff --git a/scripts/release_management/github-draft.py b/scripts/release_management/github-draft.py new file mode 100755 index 000000000..1fccd38b9 --- /dev/null +++ b/scripts/release_management/github-draft.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Create a draft release on GitHub. By default in the tendermint/tendermint repo. 
+# Optimized for CircleCI + +import argparse +import httplib +import json +import os +from base64 import b64encode + +def request(org, repo, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/releases'.format(org,repo), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print("{0}: {1}".format(response.status, response.reason)) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +def create_draft(org,repo,branch,version): + draft = { + 'tag_name': version, + 'target_commitish': '{0}'.format(branch), + 'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version), + 'body': 'https://github.com/{0}/{1}/blob/master/CHANGELOG.md#{2}'.format(org,repo,version.replace('v','').replace('.','')), + 'draft': True, + 'prerelease': False + } + data=json.dumps(draft) + return request(org, repo, data) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization") + parser.add_argument("--repo", default="tendermint", help="GitHub repository") + parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0") + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('environment variable GITHUB_USERNAME is required') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('environment variable GITHUB_TOKEN is required') + + release = 
create_draft(args.org,args.repo,args.branch,args.version) + + print(release["id"]) + diff --git a/scripts/release_management/github-openpr.py b/scripts/release_management/github-openpr.py new file mode 100755 index 000000000..af0434f02 --- /dev/null +++ b/scripts/release_management/github-openpr.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# Open a PR against the develop branch. --branch required. +# Optimized for CircleCI + +import json +import os +import argparse +import httplib +from base64 import b64encode + + +def request(org, repo, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/pulls'.format(org,repo), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.") + parser.add_argument("--repo", default="tendermint", help="GitHub repository. 
Defaults to tendermint.") + parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True) + parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True) + parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + if os.environ.get('CIRCLE_TAG') is None: + raise parser.error('CIRCLE_TAG not set.') + + result = request(args.org, args.repo, data=json.dumps({'title':"{0}".format(args.title),'head':"{0}".format(args.head),'base':"{0}".format(args.base),'body':""})) + print(result['html_url']) diff --git a/scripts/release_management/github-public-newbranch.bash b/scripts/release_management/github-public-newbranch.bash new file mode 100644 index 000000000..ca2fa1314 --- /dev/null +++ b/scripts/release_management/github-public-newbranch.bash @@ -0,0 +1,28 @@ +#!/bin/sh + +# github-public-newbranch.bash - create public branch from the security repository + +set -euo pipefail + +# Create new branch +BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`" +# Check if the patch release exist already as a branch +if [ -n "`git branch | grep '${BRANCH}'`" ]; then + echo "WARNING: Branch ${BRANCH} already exists." +else + echo "Creating branch ${BRANCH}." + git branch "${BRANCH}" +fi + +# ... 
and check it out +git checkout "${BRANCH}" + +# Add entry to public repository +git remote add tendermint-origin git@github.com:tendermint/tendermint.git + +# Push branch and tag to public repository +git push tendermint-origin +git push tendermint-origin --tags + +# Create a PR from the public branch to the assumed release branch in public (release branch has to exist) +python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH:%.*}" diff --git a/scripts/release_management/github-publish.py b/scripts/release_management/github-publish.py new file mode 100755 index 000000000..31071aecd --- /dev/null +++ b/scripts/release_management/github-publish.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# Publish an existing GitHub draft release. --id required. +# Optimized for CircleCI + +import json +import os +import argparse +import httplib +from base64 import b64encode + + +def request(org, repo, id, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization") + parser.add_argument("--repo", default="tendermint", help="GitHub repository") + parser.add_argument("--id", help="GitHub release ID", required=True, type=int) + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for 
the release, e.g.: v1.0.0") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + try: + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)})) + except IOError as e: + print(e) + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)})) + + print(result['name']) diff --git a/scripts/release_management/github-upload.py b/scripts/release_management/github-upload.py new file mode 100755 index 000000000..77c76a755 --- /dev/null +++ b/scripts/release_management/github-upload.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python + +# Upload a file to a GitHub draft release. --id and --file are required. +# Optimized for CircleCI + +import json +import os +import re +import argparse +import mimetypes +import httplib +from base64 import b64encode + + +def request(baseurl, path, mimetype, mimeencoding, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3.raw+json', + 'Authorization': 'Basic %s' % user_and_pass, + 'Content-Type': mimetype, + 'Content-Encoding': mimeencoding + } + + conn = httplib.HTTPSConnection(baseurl, timeout=5) + conn.request('POST', path, data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--id", help="GitHub release ID", required=True, type=int) + parser.add_argument("--file", 
default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'),os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to upload") + parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true') + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + mimetypes.init() + filename = os.path.basename(args.file) + mimetype,mimeencoding = mimetypes.guess_type(filename, strict=False) + if mimetype is None: + mimetype = 'application/zip' + if mimeencoding is None: + mimeencoding = 'utf8' + + with open(args.file,'rb') as f: + asset = f.read() + + result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset) + + if args.return_id_only: + print(result['id']) + else: + print(result['browser_download_url']) + diff --git a/scripts/release_management/sha-files.py b/scripts/release_management/sha-files.py new file mode 100755 index 000000000..2a9ee0d59 --- /dev/null +++ b/scripts/release_management/sha-files.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python + +# Create SHA256 summaries from all ZIP files in a folder +# Optimized for CircleCI + +import re +import os +import argparse +import zipfile +import hashlib + + +BLOCKSIZE = 65536 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--folder", default="/tmp/workspace", help="Folder to look for, for ZIP files") + parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries File") + args = parser.parse_args() + + for filename in os.listdir(args.folder): + if re.search('\.zip$',filename) is None: + continue + if not os.path.isfile(os.path.join(args.folder, filename)): + continue + with open(args.shafile,'a+') as shafile: 
+ hasher = hashlib.sha256() + with open(os.path.join(args.folder, filename),'r') as f: + buf = f.read(BLOCKSIZE) + while len(buf) > 0: + hasher.update(buf) + buf = f.read(BLOCKSIZE) + shafile.write("{0} {1}\n".format(hasher.hexdigest(),filename)) + diff --git a/scripts/release_management/zip-file.py b/scripts/release_management/zip-file.py new file mode 100755 index 000000000..5d2f5b2c8 --- /dev/null +++ b/scripts/release_management/zip-file.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# ZIP one file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip +# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters. +# Optimized for CircleCI + +import os +import argparse +import zipfile +import hashlib + + +BLOCKSIZE = 65536 + + +def zip_asset(file,destination,arcname,version,goos,goarch): + filename = os.path.basename(file) + output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination,arcname,version,goos,goarch) + + with zipfile.ZipFile(output,'w') as f: + f.write(filename=file,arcname=arcname) + f.comment=filename + return output + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--file", default="build/tendermint_{0}_{1}".format(os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to zip") + parser.add_argument("--destination", default="build", help="Destination folder for files") + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") + parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter") + parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter") + args = parser.parse_args() + + if args.version is None: + raise parser.error("argument --version is required") + if args.goos is None: + raise parser.error("argument --goos is required") + if args.goarch is None: + raise parser.error("argument --goarch is required") + + file = 
zip_asset(args.file,args.destination,"tendermint",args.version,args.goos,args.goarch) + print(file) + diff --git a/version/version.go b/version/version.go index 9090fc7e6..a42a8f005 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.31.1" + TMCoreSemVer = "0.31.3" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.0"