diff --git a/.circleci/config.yml b/.circleci/config.yml index 9c51bc48f..7ad793549 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ version: 2 defaults: &defaults working_directory: /go/src/github.com/tendermint/tendermint docker: - - image: circleci/golang:1.12.0 + - image: circleci/golang environment: GOBIN: /tmp/workspace/bin @@ -14,6 +14,9 @@ docs_update_config: &docs_update_config environment: AWS_REGION: us-east-1 +release_management_docker: &release_management_docker + machine: true + jobs: setup_dependencies: <<: *defaults @@ -192,7 +195,7 @@ jobs: name: run localnet and exit on failure command: | set -x - docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux make localnet-start & ./scripts/localnet-blocks-test.sh 40 5 10 localhost @@ -256,6 +259,105 @@ jobs: echo "Website build started" fi + prepare_build: + <<: *defaults + steps: + - checkout + - run: + name: Get next release number + command: | + export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`" + echo "Last tag: ${LAST_TAG}" + if [ -z "${LAST_TAG}" ]; then + export LAST_TAG="${CIRCLE_BRANCH}" + echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag." + fi + export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`" + echo "Next tag: ${NEXT_TAG}" + echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source + - run: + name: Build dependencies + command: | + make get_tools get_vendor_deps + - persist_to_workspace: + root: . + paths: + - "release-version.source" + - save_cache: + key: v1-release-deps-{{ .Branch }}-{{ .Revision }} + paths: + - "vendor" + + build_artifacts: + <<: *defaults + parallelism: 4 + steps: + - checkout + - restore_cache: + keys: + - v1-release-deps-{{ .Branch }}-{{ .Revision }} + - attach_workspace: + at: /tmp/workspace + - run: + name: Build artifact + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. + source /tmp/workspace/release-version.source + if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi + - persist_to_workspace: + root: build + paths: + - "*.zip" + - "tendermint_linux_amd64" + + release_artifacts: + <<: *defaults + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: Deploy to GitHub + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. 
+ source /tmp/workspace/release-version.source + echo "---" + ls -la /tmp/workspace/*.zip + echo "---" + python -u scripts/release_management/sha-files.py + echo "---" + cat /tmp/workspace/SHA256SUMS + echo "---" + export RELEASE_ID="`python -u scripts/release_management/github-draft.py`" + echo "Release ID: ${RELEASE_ID}" + #Todo: Parallelize uploads + export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}" + python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}" + python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}" + + release_docker: + <<: *release_management_docker + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: Deploy to Docker Hub + command: | + # Setting CIRCLE_TAG because we do not tag the release ourselves. + source /tmp/workspace/release-version.source + cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint + docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER" + docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}" + docker push "tendermint/tendermint" + docker logout + workflows: version: 2 test-suite: @@ -292,3 +394,25 @@ workflows: - upload_coverage: requires: - test_cover + release: + jobs: + - prepare_build + - build_artifacts: + requires: + - prepare_build + - release_artifacts: + requires: + - prepare_build + - build_artifacts + filters: + branches: + only: + - /v[0-9]+\.[0-9]+/ + - release_docker: + requires: + - prepare_build + - build_artifacts + filters: + branches: + only: + - /v[0-9]+\.[0-9]+/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 31ee14c51..e5ac75e9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,38 @@ # Changelog +## v0.31.2 + +*March 30th, 2019* + +This release fixes a regression from v0.31.1 where Tendermint panics under +mempool load for external ABCI apps. + +Special thanks to external contributors on this release: +@guagualvcha + +### BREAKING CHANGES: + +* CLI/RPC/Config + +* Apps + +* Go API +- [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package. 
Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha) + +* Blockchain Protocol + +* P2P Protocol + +### FEATURES: + +### IMPROVEMENTS: + +- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI + +### BUG FIXES: + +- [mempool] [\#3512](https://github.com/tendermint/tendermint/issues/3512) Fix panic from concurrent access to txsMap, a regression for external ABCI apps introduced in v0.31.1 + ## v0.31.1 *March 27th, 2019* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 470282aa9..045247937 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,4 +1,4 @@ -## v0.32.0 +## v0.31.2 ** diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 4a855f425..6a7f289f5 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,5 +1,5 @@ -FROM alpine:3.7 -MAINTAINER Greg Szabo +FROM alpine:3.9 +LABEL maintainer="hello@tendermint.com" # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json # (unless you change `genesis_file` in config.toml). You can put your config.toml and diff --git a/Makefile b/Makefile index 79ae6aaba..7c2ce1d9c 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ GOTOOLS = \ github.com/square/certstrap GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) +OUTPUT?=build/tendermint INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' @@ -19,13 +20,13 @@ check: check_tools get_vendor_deps ### Build Tendermint build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ + CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ build_c: - CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/ + CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/ build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint + CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint install: CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint @@ -109,7 +110,7 @@ draw_deps: get_deps_bin_size: @# Copy of build recipe with additional flags to perform binary size analysis - $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1)) + $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1)) @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" @@ -261,7 +262,7 @@ check_dep: ### Docker image build-docker: - cp build/tendermint DOCKER/tendermint + cp $(OUTPUT) DOCKER/tendermint docker build --label=tendermint --tag="tendermint/tendermint" DOCKER rm -rf DOCKER/tendermint diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 562676605..3b401bd3c 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -26,16 +26,17 @@ var _ Client = (*socketClient)(nil) type socketClient struct { cmn.BaseService - reqQueue chan *ReqRes - flushTimer *cmn.ThrottleTimer + addr string mustConnect bool + conn net.Conn + + reqQueue chan *ReqRes + flushTimer *cmn.ThrottleTimer mtx sync.Mutex - addr string - conn net.Conn err error - reqSent 
*list.List - resCb func(*types.Request, *types.Response) // listens to all callbacks + reqSent *list.List // list of requests sent, waiting for response + resCb func(*types.Request, *types.Response) // called on all requests, if set. } @@ -86,6 +87,7 @@ func (cli *socketClient) OnStop() { cli.mtx.Lock() defer cli.mtx.Unlock() if cli.conn != nil { + // does this really need a mutex? cli.conn.Close() } @@ -207,12 +209,15 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { reqres.Done() // Release waiters cli.reqSent.Remove(next) // Pop first item from linked list - // Notify reqRes listener if set + // Notify reqRes listener if set (request specific callback). + // NOTE: it is possible this callback isn't set on the reqres object. + // at this point, in which case it will be called after, when it is set. + // TODO: should we move this after the resCb call so the order is always consistent? if cb := reqres.GetCallback(); cb != nil { cb(res) } - // Notify client listener if set + // Notify client listener if set (global callback). if cli.resCb != nil { cli.resCb(reqres.Request, res) } diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index e24f6131e..01d7dba20 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -42,7 +42,9 @@ func (p testPeer) runInputRoutine() { func (p testPeer) simulateInput(input inputData) { block := &types.Block{Header: types.Header{Height: input.request.Height}} input.pool.AddBlock(input.request.PeerID, block, 123) - input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) + // TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 + // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890 + // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) } type testPeers map[p2p.ID]testPeer diff --git a/libs/autofile/group.go b/libs/autofile/group.go index d1ea0de75..ce73466e4 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -331,172 +331,6 @@ func (g *Group) NewReader(index int) (*GroupReader, error) { return r, nil } -// Returns -1 if line comes after, 0 if found, 1 if line comes before. -type SearchFunc func(line string) (int, error) - -// Searches for the right file in Group, then returns a GroupReader to start -// streaming lines. -// Returns true if an exact match was found, otherwise returns the next greater -// line that starts with prefix. -// CONTRACT: Caller must close the returned GroupReader -func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - // Now minIndex/maxIndex may change meanwhile, - // but it shouldn't be a big deal - // (maybe we'll want to limit scanUntil though) - - for { - curIndex := (minIndex + maxIndex + 1) / 2 - - // Base case, when there's only 1 choice left. - if minIndex == maxIndex { - r, err := g.NewReader(maxIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if err != nil { - r.Close() - return nil, false, err - } - return r, match, err - } - - // Read starting roughly at the middle file, - // until we find line that has prefix. 
- r, err := g.NewReader(curIndex) - if err != nil { - return nil, false, err - } - foundIndex, line, err := scanNext(r, prefix) - r.Close() - if err != nil { - return nil, false, err - } - - // Compare this line to our search query. - val, err := cmp(line) - if err != nil { - return nil, false, err - } - if val < 0 { - // Line will come later - minIndex = foundIndex - } else if val == 0 { - // Stroke of luck, found the line - r, err := g.NewReader(foundIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if !match { - panic("Expected match to be true") - } - if err != nil { - r.Close() - return nil, false, err - } - return r, true, err - } else { - // We passed it - maxIndex = curIndex - 1 - } - } - -} - -// Scans and returns the first line that starts with 'prefix' -// Consumes line and returns it. -func scanNext(r *GroupReader, prefix string) (int, string, error) { - for { - line, err := r.ReadLine() - if err != nil { - return 0, "", err - } - if !strings.HasPrefix(line, prefix) { - continue - } - index := r.CurIndex() - return index, line, nil - } -} - -// Returns true iff an exact match was found. -// Pushes line, does not consume it. -func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { - for { - line, err := r.ReadLine() - if err != nil { - return false, err - } - if !strings.HasPrefix(line, prefix) { - continue - } - val, err := cmp(line) - if err != nil { - return false, err - } - if val < 0 { - continue - } else if val == 0 { - r.PushLine(line) - return true, nil - } else { - r.PushLine(line) - return false, nil - } - } -} - -// Searches backwards for the last line in Group with prefix. -// Scans each file forward until the end to find the last match. -func (g *Group) FindLast(prefix string) (match string, found bool, err error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - - r, err := g.NewReader(maxIndex) - if err != nil { - return "", false, err - } - defer r.Close() - - // Open files from the back and read -GROUP_LOOP: - for i := maxIndex; i >= minIndex; i-- { - err := r.SetIndex(i) - if err != nil { - return "", false, err - } - // Scan each line and test whether line matches - for { - line, err := r.ReadLine() - if err == io.EOF { - if found { - return match, found, nil - } - continue GROUP_LOOP - } else if err != nil { - return "", false, err - } - if strings.HasPrefix(line, prefix) { - match = line - found = true - } - if r.CurIndex() > i { - if found { - return match, found, nil - } - continue GROUP_LOOP - } - } - } - - return -} - // GroupInfo holds information about the group. type GroupInfo struct { MinIndex int // index of the first file in the group, including head @@ -654,48 +488,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } } -// ReadLine reads a line (without delimiter). -// just return io.EOF if no new lines found. 
-func (gr *GroupReader) ReadLine() (string, error) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - // From PushLine - if gr.curLine != nil { - line := string(gr.curLine) - gr.curLine = nil - return line, nil - } - - // Open file if not open yet - if gr.curReader == nil { - err := gr.openFile(gr.curIndex) - if err != nil { - return "", err - } - } - - // Iterate over files until line is found - var linePrefix string - for { - bytesRead, err := gr.curReader.ReadBytes('\n') - if err == io.EOF { - // Open the next file - if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { - return "", err1 - } - if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } - linePrefix += string(bytesRead) - continue - } else if err != nil { - return "", err - } - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } -} - // IF index > gr.Group.maxIndex, returns io.EOF // CONTRACT: caller should hold gr.mtx func (gr *GroupReader) openFile(index int) error { @@ -725,20 +517,6 @@ func (gr *GroupReader) openFile(index int) error { return nil } -// PushLine makes the given line the current one, so the next time somebody -// calls ReadLine, this line will be returned. -// panics if called twice without calling ReadLine. -func (gr *GroupReader) PushLine(line string) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - if gr.curLine == nil { - gr.curLine = []byte(line) - } else { - panic("PushLine failed, already have line") - } -} - // CurIndex returns cursor's file index. func (gr *GroupReader) CurIndex() int { gr.mtx.Lock() @@ -753,32 +531,3 @@ func (gr *GroupReader) SetIndex(index int) error { defer gr.mtx.Unlock() return gr.openFile(index) } - -//-------------------------------------------------------------------------------- - -// A simple SearchFunc that assumes that the marker is of form -// . -// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form: -// -// #HEIGHT:1 -// ... -// #HEIGHT:2 -// ... -func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { - return func(line string) (int, error) { - if !strings.HasPrefix(line, prefix) { - return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) - } - i, err := strconv.Atoi(line[len(prefix):]) - if err != nil { - return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } -} diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index 68870df87..c300aba71 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -1,13 +1,9 @@ package autofile import ( - "errors" - "fmt" "io" "io/ioutil" "os" - "strconv" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -106,107 +102,6 @@ func TestCheckHeadSizeLimit(t *testing.T) { destroyTestGroup(t, g) } -func TestSearch(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 10*1000) - - // Create some files in the group that have several INFO lines in them. - // Try to put the INFO lines in various spots. - for i := 0; i < 100; i++ { - // The random junk at the end ensures that this INFO linen - // is equally likely to show up at the end. 
- _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123)))) - require.NoError(t, err, "Failed to write to head") - g.checkHeadSizeLimit() - for j := 0; j < 10; j++ { - _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n")) - require.NoError(t, err1, "Failed to write to head") - g.checkHeadSizeLimit() - } - } - - // Create a search func that searches for line - makeSearchFunc := func(target int) SearchFunc { - return func(line string) (int, error) { - parts := strings.Split(line, " ") - if len(parts) != 3 { - return -1, errors.New("Line did not have 3 parts") - } - i, err := strconv.Atoi(parts[1]) - if err != nil { - return -1, errors.New("Failed to parse INFO: " + err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } - } - - // Now search for each number - for i := 0; i < 100; i++ { - gr, match, err := g.Search("INFO", makeSearchFunc(i)) - require.NoError(t, err, "Failed to search for line, tc #%d", i) - assert.True(t, match, "Expected Search to return exact match, tc #%d", i) - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search, tc #%d", i) - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) { - t.Fatalf("Failed to get correct line, tc #%d", i) - } - // Make sure we can continue to read from there. - cur := i + 1 - for { - line, err := gr.ReadLine() - if err == io.EOF { - if cur == 99+1 { - // OK! - break - } else { - t.Fatalf("Got EOF after the wrong INFO #, tc #%d", i) - } - } else if err != nil { - t.Fatalf("Error reading line, tc #%d, err:\n%s", i, err) - } - if !strings.HasPrefix(line, "INFO ") { - continue - } - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { - t.Fatalf("Unexpected INFO #. Expected %v got:\n%v, tc #%d", cur, line, i) - } - cur++ - } - gr.Close() - } - - // Now search for something that is too small. - // We should get the first available line. - { - gr, match, err := g.Search("INFO", makeSearchFunc(-999)) - require.NoError(t, err, "Failed to search for line") - assert.False(t, match, "Expected Search to not return exact match") - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search") - if !strings.HasPrefix(line, "INFO 0 ") { - t.Error("Failed to fetch correct line, which is the earliest INFO") - } - err = gr.Close() - require.NoError(t, err, "Failed to close GroupReader") - } - - // Now search for something that is too large. - // We should get an EOF error. 
- { - gr, _, err := g.Search("INFO", makeSearchFunc(999)) - assert.Equal(t, io.EOF, err) - assert.Nil(t, gr) - } - - // Cleanup - destroyTestGroup(t, g) -} - func TestRotateFile(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) g.WriteLine("Line 1") @@ -237,100 +132,6 @@ func TestRotateFile(t *testing.T) { destroyTestGroup(t, g) } -func TestFindLast1(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("# a") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.WriteLine("# b") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast2(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("# a") - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("# b") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast3(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("# a") - g.WriteLine("Line 2") - g.WriteLine("# b") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast4(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.False(t, found) - assert.Empty(t, match) - - // Cleanup - destroyTestGroup(t, g) -} - func TestWrite(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) diff --git a/mempool/cache_test.go b/mempool/cache_test.go index 26e560b6e..ea9f63fd6 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -19,7 +19,7 @@ func TestCacheRemove(t *testing.T) { for i := 0; i < numTxs; i++ { // probability of collision is 2**-256 txBytes := make([]byte, 32) - rand.Read(txBytes) + rand.Read(txBytes) // nolint: gosec txs[i] = txBytes cache.Push(txBytes) // make sure its added to both the linked list and the map diff --git a/mempool/mempool.go b/mempool/mempool.go index bd3cbf7d9..a5b14466a 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -149,6 +149,11 @@ func TxID(tx []byte) string { return fmt.Sprintf("%X", types.Tx(tx).Hash()) } +// txKey is the fixed length array sha256 hash used as the key in maps. +func txKey(tx types.Tx) [sha256.Size]byte { + return sha256.Sum256(tx) +} + // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus // round. Transaction validity is checked using the CheckTx abci message before the transaction is // added to the pool. 
The Mempool uses a concurrent list structure for storing transactions that @@ -159,23 +164,27 @@ type Mempool struct { proxyMtx sync.Mutex proxyAppConn proxy.AppConnMempool txs *clist.CList // concurrent linked-list of good txs - // map for quick access to txs - // Used in CheckTx to record the tx sender. - txsMap map[[sha256.Size]byte]*clist.CElement - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here + preCheck PreCheckFunc + postCheck PostCheckFunc + + // Track whether we're rechecking txs. + // These are not protected by a mutex and are expected to be mutated + // in serial (ie. by abci responses which are called in serial). + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // notify listeners (ie. consensus) when txs are available notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - preCheck PreCheckFunc - postCheck PostCheckFunc - // Atomic integers + // Map for quick access to txs to record sender in CheckTx. + // txsMap: txKey -> CElement + txsMap sync.Map - // Used to check if the mempool size is bigger than the allowed limit. - // See TxsBytes - txsBytes int64 + // Atomic integers + height int64 // the last block Update()'d to + rechecking int32 // for re-checking filtered txs on Update() + txsBytes int64 // total size of mempool, in bytes // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -203,7 +212,6 @@ func NewMempool( config: config, proxyAppConn: proxyAppConn, txs: clist.New(), - txsMap: make(map[[sha256.Size]byte]*clist.CElement), height: height, rechecking: 0, recheckCursor: nil, @@ -216,7 +224,7 @@ func NewMempool( } else { mempool.cache = nopTxCache{} } - proxyAppConn.SetResponseCallback(mempool.resCb) + proxyAppConn.SetResponseCallback(mempool.globalCb) for _, option := range options { option(mempool) } @@ -319,7 +327,7 @@ func (mem *Mempool) Flush() { e.DetachPrev() } - mem.txsMap = make(map[[sha256.Size]byte]*clist.CElement) + mem.txsMap = sync.Map{} _ = atomic.SwapInt64(&mem.txsBytes, 0) } @@ -380,13 +388,12 @@ func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo // CACHE if !mem.cache.Push(tx) { - // record the sender - e, ok := mem.txsMap[sha256.Sum256(tx)] - // The check is needed because tx may be in cache, but not in the mempool. - // E.g. after we've committed a block, txs are removed from the mempool, - // but not from the cache. - if ok { - memTx := e.Value.(*mempoolTx) + // Record a new sender for a tx we've already seen. + // Note it's possible a tx is still in the cache but no longer in the mempool + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. 
+ if e, ok := mem.txsMap.Load(txKey(tx)); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, @@ -416,25 +423,21 @@ func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo if err = mem.proxyAppConn.Error(); err != nil { return err } + reqRes := mem.proxyAppConn.CheckTxAsync(tx) - if cb != nil { - composedCallback := func(res *abci.Response) { - mem.reqResCb(tx, txInfo.PeerID)(res) - cb(res) - } - reqRes.SetCallback(composedCallback) - } else { - reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID)) - } + reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID, cb)) return nil } -// Global callback, which is called in the absence of the specific callback. -// -// In recheckTxs because no reqResCb (specific) callback is set, this callback -// will be called. -func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { +// Global callback that will be called after every ABCI response. +// Having a single global callback avoids needing to set a callback for each request. +// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), +// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that +// include this information. If we're not in the midst of a recheck, this function will just return, +// so the request specific callback can do the work. +// When rechecking, we don't need the peerID, so the recheck callback happens here. +func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { return } @@ -446,35 +449,50 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { mem.metrics.Size.Set(float64(mem.Size())) } -// Specific callback, which allows us to incorporate local information, like -// the peer that sent us this tx, so we can avoid sending it back to the same -// peer. +// Request specific callback that should be set on individual reqRes objects +// to incorporate local information when processing the response. +// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. +// NOTE: alternatively, we could include this information in the ABCI request itself. +// +// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called +// when all other response processing is complete. // // Used in CheckTxWithInfo to record PeerID who sent us the tx. -func (mem *Mempool) reqResCb(tx []byte, peerID uint16) func(res *abci.Response) { +func (mem *Mempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) { return func(res *abci.Response) { if mem.recheckCursor != nil { - return + // this should never happen + panic("recheck cursor is not nil in reqResCb") } mem.resCbFirstTime(tx, peerID, res) // update metrics mem.metrics.Size.Set(float64(mem.Size())) + + // passed in by the caller of CheckTx, eg. 
the RPC + if externalCb != nil { + externalCb(res) + } } } +// Called from: +// - resCbFirstTime (lock not held) if tx is valid func (mem *Mempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) - mem.txsMap[sha256.Sum256(memTx.tx)] = e + mem.txsMap.Store(txKey(memTx.tx), e) atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } +// Called from: +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() - delete(mem.txsMap, sha256.Sum256(tx)) + mem.txsMap.Delete(txKey(tx)) atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) if removeFromCache { @@ -733,7 +751,7 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { mem.recheckEnd = mem.txs.Back() // Push txs to proxyAppConn - // NOTE: reqResCb may be called concurrently. + // NOTE: globalCb may be called concurrently. for _, tx := range txs { mem.proxyAppConn.CheckTxAsync(tx) } @@ -746,8 +764,11 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require - senders sync.Map // ids of peers who've sent us this tx (as a map for quick lookups) tx types.Tx // + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> bool + senders sync.Map } // Height returns the height for this transaction @@ -798,7 +819,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { defer cache.mtx.Unlock() // Use the tx hash in the cache - txHash := sha256.Sum256(tx) + txHash := txKey(tx) if moved, exists := cache.map_[txHash]; exists { cache.list.MoveToBack(moved) return false @@ -820,7 +841,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { // Remove removes the given tx from the cache. func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - txHash := sha256.Sum256(tx) + txHash := txKey(tx) popped := cache.map_[txHash] delete(cache.map_, txHash) if popped != nil { diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index dc7d595af..d5f25396d 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "fmt" "io/ioutil" + mrand "math/rand" "os" "path/filepath" "testing" @@ -18,6 +19,7 @@ import ( "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" + abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -510,6 +512,54 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 0, mempool.TxsBytes()) } +// This will non-deterministically catch some concurrency failures like +// https://github.com/tendermint/tendermint/issues/3509 +// TODO: all of the tests should probably also run using the remote proxy app +// since otherwise we're not actually testing the concurrency of the mempool here! 
+func TestMempoolRemoteAppConcurrency(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + app := kvstore.NewKVStoreApplication() + cc, server := newRemoteApp(t, sockPath, app) + defer server.Stop() + config := cfg.ResetTestRoot("mempool_test") + mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + defer cleanup() + + // generate small number of txs + nTxs := 10 + txLen := 200 + txs := make([]types.Tx, nTxs) + for i := 0; i < nTxs; i++ { + txs[i] = cmn.RandBytes(txLen) + } + + // simulate a group of peers sending them over and over + N := config.Mempool.Size + maxPeers := 5 + for i := 0; i < N; i++ { + peerID := mrand.Intn(maxPeers) + txNum := mrand.Intn(nTxs) + tx := txs[int(txNum)] + + // this will err with ErrTxInCache many times ... + mempool.CheckTxWithInfo(tx, nil, TxInfo{PeerID: uint16(peerID)}) + } + err := mempool.FlushAppConn() + require.NoError(t, err) +} + +// caller must close server +func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) { + clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) + + // Start server + server = abciserver.NewSocketServer(addr, app) + server.SetLogger(log.TestingLogger().With("module", "abci-server")) + if err := server.Start(); err != nil { + t.Fatalf("Error starting socket server: %v", err.Error()) + } + return clientCreator, server +} func checksumIt(data []byte) string { h := sha256.New() h.Write(data) diff --git a/scripts/release_management/README.md b/scripts/release_management/README.md new file mode 100644 index 000000000..e92f1ccf6 --- /dev/null +++ b/scripts/release_management/README.md @@ -0,0 +1,65 @@ +# Release management scripts + +## Overview +The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable using input parameters, +the default settings were modified to accommodate CircleCI execution. + +# Build scripts +These scripts help during the build process. They prepare the release files. + +## bump-semver.py +Bumps the semantic version of the input `--version`. Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format. + +In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`. + +In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`. + +If the PATCH number contains letters, it is considered a development version, in which case, the result is the non-development version of that number. +The patch number will not be bumped, only the "-dev" or similar additional text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`. + +## zip-file.py +Specialized ZIP command for release management. Special features: +1. Uses Python ZIP libaries, so the `zip` command does not need to be installed. +1. Can only zip one file. +1. Optionally gets file version, Go OS and architecture. +1. By default all inputs and output is formatted exactly how CircleCI needs it. + +By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`. +This can be changed with the `--file` input parameter. + +By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. +This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively. + +## sha-files.py +Specialized `shasum` command for release management. Special features: +1. 
Reads all ZIP files in the given folder. +1. By default all inputs and output is formatted exactly how CircleCI needs it. + +By default, the command will look up all ZIP files in the `build/` folder. + +By default, the command will output results into the `build/SHA256SUMS` file. + +# GitHub management +Uploading build results to GitHub requires at least these steps: +1. Create a new release on GitHub with content +2. Upload all binaries to the release +3. Publish the release +The below scripts help with these steps. + +## github-draft.py +Creates a GitHub release and fills the content with the CHANGELOG.md link. The version number can be changed by the `--version` parameter. + +By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters. + +By default, the command will get the version number from the `${CIRCLE_TAG}` variable. + +Returns the GitHub release ID. + +## github-upload.py +Upload a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. + +By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter. + +## github-publish.py +Publish a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. + diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py new file mode 100755 index 000000000..b13a10342 --- /dev/null +++ b/scripts/release_management/bump-semver.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# Bump the release number of a semantic version number and print it. --version is required. +# Version is +# - vA.B.C, in which case vA.B.C+1 will be returned +# - vA.B.C-devorwhatnot in which case vA.B.C will be returned +# - vA.B in which case vA.B.0 will be returned + +import re +import argparse + + +def semver(ver): + if re.match('v[0-9]+\.[0-9]+',ver) is None: + ver="v0.0" + #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers') + return ver + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver) + args = parser.parse_args() + + found = re.match('(v[0-9]+\.[0-9]+)(\.(.+))?', args.version) + majorminorprefix = found.group(1) + patch = found.group(3) + if patch is None: + patch = "0-new" + + if re.match('[0-9]+$',patch) is None: + patchfound = re.match('([0-9]+)',patch) + patch = int(patchfound.group(1)) + else: + patch = int(patch) + 1 + + print("{0}.{1}".format(majorminorprefix, patch)) diff --git a/scripts/release_management/github-draft.py b/scripts/release_management/github-draft.py new file mode 100755 index 000000000..1fccd38b9 --- /dev/null +++ b/scripts/release_management/github-draft.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Create a draft release on GitHub. By default in the tendermint/tendermint repo. 
+# Optimized for CircleCI + +import argparse +import httplib +import json +import os +from base64 import b64encode + +def request(org, repo, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/releases'.format(org,repo), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print("{0}: {1}".format(response.status, response.reason)) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +def create_draft(org,repo,branch,version): + draft = { + 'tag_name': version, + 'target_commitish': '{0}'.format(branch), + 'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version), + 'body': 'https://github.com/{0}/{1}/blob/master/CHANGELOG.md#{2}'.format(org,repo,version.replace('v','').replace('.','')), + 'draft': True, + 'prerelease': False + } + data=json.dumps(draft) + return request(org, repo, data) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization") + parser.add_argument("--repo", default="tendermint", help="GitHub repository") + parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0") + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('environment variable GITHUB_USERNAME is required') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('environment variable GITHUB_TOKEN is required') + + release = create_draft(args.org,args.repo,args.branch,args.version) + + print(release["id"]) + diff --git a/scripts/release_management/github-openpr.py b/scripts/release_management/github-openpr.py new file mode 100755 index 000000000..af0434f02 --- /dev/null +++ b/scripts/release_management/github-openpr.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# Open a PR against the develop branch. --branch required. +# Optimized for CircleCI + +import json +import os +import argparse +import httplib +from base64 import b64encode + + +def request(org, repo, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/pulls'.format(org,repo), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.") + parser.add_argument("--repo", default="tendermint", help="GitHub repository. 
Defaults to tendermint.") + parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True) + parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True) + parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + if os.environ.get('CIRCLE_TAG') is None: + raise parser.error('CIRCLE_TAG not set.') + + result = request(args.org, args.repo, data=json.dumps({'title':"{0}".format(args.title),'head':"{0}".format(args.head),'base':"{0}".format(args.base),'body':""})) + print(result['html_url']) diff --git a/scripts/release_management/github-public-newbranch.bash b/scripts/release_management/github-public-newbranch.bash new file mode 100644 index 000000000..ca2fa1314 --- /dev/null +++ b/scripts/release_management/github-public-newbranch.bash @@ -0,0 +1,28 @@ +#!/bin/sh + +# github-public-newbranch.bash - create public branch from the security repository + +set -euo pipefail + +# Create new branch +BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`" +# Check if the patch release exist already as a branch +if [ -n "`git branch | grep '${BRANCH}'`" ]; then + echo "WARNING: Branch ${BRANCH} already exists." +else + echo "Creating branch ${BRANCH}." + git branch "${BRANCH}" +fi + +# ... and check it out +git checkout "${BRANCH}" + +# Add entry to public repository +git remote add tendermint-origin git@github.com:tendermint/tendermint.git + +# Push branch and tag to public repository +git push tendermint-origin +git push tendermint-origin --tags + +# Create a PR from the public branch to the assumed release branch in public (release branch has to exist) +python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH:%.*}" diff --git a/scripts/release_management/github-publish.py b/scripts/release_management/github-publish.py new file mode 100755 index 000000000..31071aecd --- /dev/null +++ b/scripts/release_management/github-publish.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# Publish an existing GitHub draft release. --id required. 
+# Optimized for CircleCI + +import json +import os +import argparse +import httplib +from base64 import b64encode + + +def request(org, repo, id, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization") + parser.add_argument("--repo", default="tendermint", help="GitHub repository") + parser.add_argument("--id", help="GitHub release ID", required=True, type=int) + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for the release, e.g.: v1.0.0") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + try: + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)})) + except IOError as e: + print(e) + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)})) + + print(result['name']) diff --git a/scripts/release_management/github-upload.py b/scripts/release_management/github-upload.py new file mode 100755 index 000000000..77c76a755 --- /dev/null +++ b/scripts/release_management/github-upload.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python + +# Upload a file to a GitHub draft release. --id and --file are required. 
+# Optimized for CircleCI + +import json +import os +import re +import argparse +import mimetypes +import httplib +from base64 import b64encode + + +def request(baseurl, path, mimetype, mimeencoding, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3.raw+json', + 'Authorization': 'Basic %s' % user_and_pass, + 'Content-Type': mimetype, + 'Content-Encoding': mimeencoding + } + + conn = httplib.HTTPSConnection(baseurl, timeout=5) + conn.request('POST', path, data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--id", help="GitHub release ID", required=True, type=int) + parser.add_argument("--file", default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'),os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to upload") + parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true') + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + mimetypes.init() + filename = os.path.basename(args.file) + mimetype,mimeencoding = mimetypes.guess_type(filename, strict=False) + if mimetype is None: + mimetype = 'application/zip' + if mimeencoding is None: + mimeencoding = 'utf8' + + with open(args.file,'rb') as f: + asset = f.read() + + result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset) + + if args.return_id_only: + print(result['id']) + else: + print(result['browser_download_url']) + diff --git a/scripts/release_management/sha-files.py b/scripts/release_management/sha-files.py new file mode 100755 index 000000000..2a9ee0d59 --- /dev/null +++ b/scripts/release_management/sha-files.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python + +# Create SHA256 summaries from all ZIP files in a folder +# Optimized for CircleCI + +import re +import os +import argparse +import zipfile +import hashlib + + +BLOCKSIZE = 65536 + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--folder", default="/tmp/workspace", help="Folder to look for, for ZIP files") + parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries File") + args = parser.parse_args() + + for filename in os.listdir(args.folder): + if re.search('\.zip$',filename) is None: + continue + if not os.path.isfile(os.path.join(args.folder, filename)): + continue + with open(args.shafile,'a+') as shafile: + hasher = hashlib.sha256() + with open(os.path.join(args.folder, filename),'r') as f: + buf = f.read(BLOCKSIZE) + while len(buf) > 0: + hasher.update(buf) + buf = f.read(BLOCKSIZE) + shafile.write("{0} {1}\n".format(hasher.hexdigest(),filename)) + diff --git a/scripts/release_management/zip-file.py b/scripts/release_management/zip-file.py new file mode 100755 index 000000000..5d2f5b2c8 --- /dev/null +++ b/scripts/release_management/zip-file.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# ZIP one 
file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip +# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters. +# Optimized for CircleCI + +import os +import argparse +import zipfile +import hashlib + + +BLOCKSIZE = 65536 + + +def zip_asset(file,destination,arcname,version,goos,goarch): + filename = os.path.basename(file) + output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination,arcname,version,goos,goarch) + + with zipfile.ZipFile(output,'w') as f: + f.write(filename=file,arcname=arcname) + f.comment=filename + return output + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--file", default="build/tendermint_{0}_{1}".format(os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to zip") + parser.add_argument("--destination", default="build", help="Destination folder for files") + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") + parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter") + parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter") + args = parser.parse_args() + + if args.version is None: + raise parser.error("argument --version is required") + if args.goos is None: + raise parser.error("argument --goos is required") + if args.goarch is None: + raise parser.error("argument --goarch is required") + + file = zip_asset(args.file,args.destination,"tendermint",args.version,args.goos,args.goarch) + print(file) + diff --git a/version/version.go b/version/version.go index 9090fc7e6..ac52c4ec1 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.31.1" + TMCoreSemVer = "0.31.2" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.0"