From e1a3f16fa48d9f5ef0a245d2949e6dca4fd22578 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 15 May 2018 08:48:27 -0700 Subject: [PATCH 01/18] Comment tweaks --- consensus/replay.go | 60 +++++++++++++++++++++++++-------------------- consensus/state.go | 25 +++++++++++-------- 2 files changed, 49 insertions(+), 36 deletions(-) diff --git a/consensus/replay.go b/consensus/replay.go index 5b5a48425..265ab5388 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -26,20 +26,24 @@ import ( var crc32c = crc32.MakeTable(crc32.Castagnoli) // Functionality to replay blocks and messages on recovery from a crash. -// There are two general failure scenarios: failure during consensus, and failure while applying the block. -// The former is handled by the WAL, the latter by the proxyApp Handshake on restart, -// which ultimately hands off the work to the WAL. +// There are two general failure scenarios: +// +// 1. failure during consensus +// 2. failure while applying the block +// +// The former is handled by the WAL, the latter by the proxyApp Handshake on +// restart, which ultimately hands off the work to the WAL. //----------------------------------------- -// recover from failure during consensus -// by replaying messages from the WAL +// 1. Recover from failure during consensus +// (by replaying messages from the WAL) +//----------------------------------------- -// Unmarshal and apply a single message to the consensus state -// as if it were received in receiveRoutine -// Lines that start with "#" are ignored. -// NOTE: receiveRoutine should not be running +// Unmarshal and apply a single message to the consensus state as if it were +// received in receiveRoutine. Lines that start with "#" are ignored. +// NOTE: receiveRoutine should not be running. func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error { - // skip meta messages + // Skip meta messages which exist for demarcating boundaries. 
if _, ok := msg.Msg.(EndHeightMessage); ok { return nil } @@ -89,17 +93,18 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan return nil } -// replay only those messages since the last block. -// timeoutRoutine should run concurrently to read off tickChan +// Replay only those messages since the last block. `timeoutRoutine` should +// run concurrently to read off tickChan. func (cs *ConsensusState) catchupReplay(csHeight int64) error { - // set replayMode + + // Set replayMode to true so we don't log signing errors. cs.replayMode = true defer func() { cs.replayMode = false }() - // Ensure that ENDHEIGHT for this height doesn't exist. + // Ensure that #ENDHEIGHT for this height doesn't exist. // NOTE: This is just a sanity check. As far as we know things work fine // without it, and Handshake could reuse ConsensusState if it weren't for - // this check (since we can crash after writing ENDHEIGHT). + // this check (since we can crash after writing #ENDHEIGHT). // // Ignore data corruption errors since this is a sanity check. gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) @@ -115,7 +120,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error { return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight) } - // Search for last height marker + // Search for last height marker. // // Ignore data corruption errors in previous heights because we only care about last height gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) @@ -182,10 +187,11 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { } }*/ -//---------------------------------------------- -// Recover from failure during block processing -// by handshaking with the app to figure out where -// we were last and using the WAL to recover there +//--------------------------------------------------- +// 2. 
Recover from failure while applying the block. +// (by handshaking with the app to figure out where +// we were last, and using the WAL to recover there.) +//--------------------------------------------------- type Handshaker struct { stateDB dbm.DB @@ -220,7 +226,8 @@ func (h *Handshaker) NBlocks() int { // TODO: retry the handshake/replay if it fails ? func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { - // handshake is done via info request on the query conn + + // Handshake is done via ABCI Info on the query conn. res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version}) if err != nil { return fmt.Errorf("Error calling Info: %v", err) @@ -234,15 +241,16 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) - // TODO: check version + // TODO: check app version. - // replay blocks up to the latest in the blockstore + // Replay blocks up to the latest in the blockstore. _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("Error on replay: %v", err) } - h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", + "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) // TODO: (on restart) replay mempool @@ -250,7 +258,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay all blocks since appBlockHeight and ensure the result matches the current state. -// Returns the final AppHash or an error +// Returns the final AppHash or an error. 
func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { storeBlockHeight := h.store.Height() @@ -314,7 +322,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight // We haven't run Commit (both the state and app are one block behind), // so replayBlock with the real app. // NOTE: We could instead use the cs.WAL on cs.Start, - // but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT + // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT h.logger.Info("Replay last block using real app") state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) return state.AppHash, err diff --git a/consensus/state.go b/consensus/state.go index bee7efa2e..7592269bf 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1210,23 +1210,28 @@ func (cs *ConsensusState) finalizeCommit(height int64) { fail.Fail() // XXX - // Finish writing to the WAL for this height. - // NOTE: If we fail before writing this, we'll never write it, - // and just recover by running ApplyBlock in the Handshake. - // If we moved it before persisting the block, we'd have to allow - // WAL replay for blocks with an #ENDHEIGHT - // As is, ConsensusState should not be started again - // until we successfully call ApplyBlock (ie. here or in Handshake after restart) + // Write EndHeightMessage{} for this height, implying that the blockstore + // has saved the block. + // + // If we crash before writing this EndHeightMessage{}, we will recover by + // running ApplyBlock during the ABCI handshake when we restart. If we + // didn't save the block to the blockstore before writing + // EndHeightMessage{}, we'd have to change WAL replay -- currently it + // complains about replaying for heights where an #ENDHEIGHT entry already + // exists. + // + // Either way, the ConsensusState should not be resumed until we + // successfully call ApplyBlock (ie. 
later here, or in Handshake after + // restart). cs.wal.Save(EndHeightMessage{height}) fail.Fail() // XXX - // Create a copy of the state for staging - // and an event cache for txs + // Create a copy of the state for staging and an event cache for txs. stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. - // NOTE: the block.AppHash wont reflect these txs until the next block + // NOTE The block.AppHash wont reflect these txs until the next block. var err error stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block) if err != nil { From 68a0b3f95b331e04efabff9817fd60b8c1ce3b28 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 15 May 2018 22:42:29 -0400 Subject: [PATCH 02/18] version bump. add roadmap back. minor fixes --- CHANGELOG.md | 2 ++ ROADMAP.md | 23 +++++++++++++++++++++++ p2p/pex/addrbook.go | 2 +- p2p/switch.go | 2 +- version/version.go | 4 ++-- 5 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 ROADMAP.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 9de1aa295..1c1b47b08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ # Changelog +## 0.19.4 (TBD) + ## 0.19.3 (May 14th, 2018) FEATURES diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 000000000..60c284333 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,23 @@ +# Roadmap + +BREAKING CHANGES: +- Better support for injecting randomness +- Upgrade consensus for more real-time use of evidence + +FEATURES: +- Use the chain as its own CA for nodes and validators +- Tooling to run multiple blockchains/apps, possibly in a single process +- State syncing (without transaction replay) +- Add authentication and rate-limitting to the RPC + +IMPROVEMENTS: +- Improve subtleties around mempool caching and logic +- Consensus optimizations: + - cache block parts for faster agreement after round changes + - propagate block parts rarest first +- Better testing of the 
consensus state machine (ie. use a DSL) +- Auto compiled serialization/deserialization code instead of go-wire reflection + +BUG FIXES: +- Graceful handling/recovery for apps that have non-determinism or fail to halt +- Graceful handling/recovery for violations of safety, or liveness diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 4408c3b91..dc51761fe 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -186,7 +186,7 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { if ka == nil { return } - a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID) + a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID()) a.removeFromAllBuckets(ka) } diff --git a/p2p/switch.go b/p2p/switch.go index bccae393b..f62e5f992 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -565,7 +565,7 @@ func (sw *Switch) addPeer(pc peerConn) error { if sw.nodeKey.ID() == peerID { addr := peerNodeInfo.NetAddress() - // remove the given address from the address book if we're added it earlier + // remove the given address from the address book if we added it earlier sw.addrBook.RemoveAddress(addr) // add the given address to the address book to avoid dialing ourselves diff --git a/version/version.go b/version/version.go index c7d1d03a0..67d46ea33 100644 --- a/version/version.go +++ b/version/version.go @@ -4,13 +4,13 @@ package version const ( Maj = "0" Min = "19" - Fix = "3" + Fix = "4" ) var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.19.3" + Version = "0.19.4-dev" // GitCommit is the current HEAD set using ldflags. 
GitCommit string From 775b015173a6b8f9fb11adeabe8f47d7e64ee70b Mon Sep 17 00:00:00 2001 From: Zach Date: Thu, 17 May 2018 01:57:28 -0400 Subject: [PATCH 03/18] docs: add diagram, closes #1565 (#1577) --- docs/app-architecture.rst | 40 +++++++++++++++---------- docs/assets/tm-application-example.png | Bin 0 -> 26691 bytes 2 files changed, 24 insertions(+), 16 deletions(-) create mode 100644 docs/assets/tm-application-example.png diff --git a/docs/app-architecture.rst b/docs/app-architecture.rst index e7a0d0e74..4a7c414ec 100644 --- a/docs/app-architecture.rst +++ b/docs/app-architecture.rst @@ -66,15 +66,14 @@ and possibly await a response). And one method to query app-specific data from the ABCI application. Pros: -* Server code already written -* Access to block headers to validate merkle proofs (nice for light clients) -* Basic read/write functionality is supported + +- Server code already written +- Access to block headers to validate merkle proofs (nice for light clients) +- Basic read/write functionality is supported Cons: -* Limited interface to app. All queries must be serialized into -[]byte (less expressive than JSON over HTTP) and there is no way to push -data from ABCI app to the client (eg. notify me if account X receives a -transaction) + +- Limited interface to app. All queries must be serialized into []byte (less expressive than JSON over HTTP) and there is no way to push data from ABCI app to the client (eg. notify me if account X receives a transaction) Custom ABCI server ~~~~~~~~~~~~~~~~~~ @@ -92,14 +91,19 @@ store. For "reads", we can do any queries we wish that are supported by our architecture, using any web technology that is useful. The general architecture is shown in the following diagram: -Pros: \* Separates application logic from blockchain logic \* Allows -much richer, more flexible client-facing API \* Allows pub-sub, watching -certain fields, etc. +.. 
figure:: assets/tm-application-example.png + +Pros: + +- Separates application logic from blockchain logic +- Allows much richer, more flexible client-facing API +- Allows pub-sub, watching certain fields, etc. -Cons: \* Access to ABCI app can be dangerous (be VERY careful not to -write unless it comes from the validator node) \* No direct access to -the blockchain headers to verify tx \* You must write your own API (but -maybe that's a pro...) +Cons: + +- Access to ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node) +- No direct access to the blockchain headers to verify tx +- You must write your own API (but maybe that's a pro...) Hybrid solutions ~~~~~~~~~~~~~~~~ @@ -108,9 +112,13 @@ Likely the least secure but most versatile. The client can access both the tendermint node for all blockchain info, as well as a custom app server, for complex queries and pub-sub on the abci app. -Pros: All from both above solutions +Pros: + +- All from both above solutions + +Cons: -Cons: Even more complexity; even more attack vectors (less +- Even more complexity; even more attack vectors (less security) Scalability diff --git a/docs/assets/tm-application-example.png b/docs/assets/tm-application-example.png new file mode 100644 index 0000000000000000000000000000000000000000..47d4e928cb7bc9c696814fa01f7b62cd37e1d7c0 GIT binary patch literal 26691 zcmeFZXHZn@)-~D)5=4>+l7k3{AW?#35kW;kMM02^0+MrvCZiH1BN7x86af(lO%5VS zf&w<7kt~uWHkmsY?!C`>&-eYgRqwr3U#Q|7x6-}Vvz{>Lm}88&9^X(`r8vTL1cgFT zT)C`#3xy)Uqfm$FNQvQJzR;A2p-{wEJ#XK2wsBW=G`6?6Zg>BIH41e}t0_OI?AY!z z{&ws9X5%ZOZ^(x3yH0f7xA=yQx`)4_;h3S$IymXr-$*66YTobMNEPD!-DLEwNuRI% z5=le&l5M%*W*urXOmR&?B}TkKB7cpl?#XMcL?h1|2R zZ)um$(E8GEnasA^CG~R$i(jHwJvJcr_^92ts=j5;I}#8oEie)8dzTzLnbyoD1x{sknUChgtIS)dm8l z2895lH>Hze4%vGT^;Vf&r*F$W^mxY7!h*wGr6AOzw3s?P?q9&5{(R_#Wzg&$xyiT1 zMDj1)Jl{v$v=O37Iy=rKOO2tOU>ScAvfNzDJbp@ba-;3R=k>$i9%=n1qOmj+XQO&a za6WIv=9E2I@!OmS&3dX?xDn>>qhwN?T6#IO@f+1D1|QEE?}ne~Sn)qA^4sk9^RmYp 
zJhGoSM{7A+zsy?kFje1Cxk#8ip+4D7uD$$J`t5yoPemgqpI7Nm!VNqsNL+dE_Fd%e z7d>m9`?$gDT5q>&0&{uGk$C)4J-(oTWz=0S(Gyp;^HZfl_-aXxjeJB*g=tTHhLkvt z$vd=1R@PF9^%>12?-u8Wz57IO$8QpA%bX^`xh|&e*7gwjgjKFS8J+(*#b(vW@oKg_ zgINPV`U9mTtNd(cYScl@37fJ~w)ECA(5ysrR7uv`^r0eCk%vL=62&NUK4p$c=@PlR zM|r|k&A4RtCs!MDl%7y{w32HwOJucATZsuauI5k8aDL>(8+^-bu`1Z&&ZE7{*E~}e zJw!FA_};X194krsjP8+2^HaBM!`j@3-aD(USy(oo-X33$k0ZLt%Bbt7Ic27${I8zO zUAR`aru#0-?1EC6jM!9}3|Hd$4CcFg%w_Y>So)ckUs)&+cM6P^3cQ({Go(d_s1FVj z*h@dC~Rdonxr3T&*TkRk1x?6LP+jZ#mpQI;~#HtB%O9bWWCH%C4;_IeRT& zCnS~W2(CDbIb~H#qhH6@VSxD}d%9-&Tr#~1-Mx>38Cbs|oW$)a7nw4t#9Qp*Beu;l z+UGu#$|p6rk492#>i4MQ9*vz{$USCX)bKX>vAE{isoK*?*O}4mjjhqyCxrrHIl3}t z((YS+=%5K5y8SUb78jw^av`oN@}7a*>4{1bGLZ{)7azY64GeC9Db*d6`MK>g~HN;{o0IC?eS!G*RRLt`)G+{321fJ&b18H-gDr# z+p(@JHWxiMEb?BK?+}kNM_unu!Tyay(&np8sb=fy>V(<9xu@Po2DXbHa}W^GkdLn6 zB*{*o)sZ-IUZnpJM>AgPb%k;$@o%r5P))-=aY8rxf(gI7{4B4Vc`zp~@RujHf9~{F zO;HY``Q5(O*3UcaZ!Y>uM6)l*tw3;Zcz<%{Kpyo)>Pyl1mfu?JCHvUTAw(YBi~Sa za0yd{eDl?ZmpC2xv+kM{5OUFm}MXPnI1lib4;Y16I(kgodhW2K!T@FpITU1T{JV}p%*gnZXXm$zIIwb zV9trLbTDGNJNsg8Z@p!)!Ac3L)~x74cK3^%98&Ynv|#n@!0zeF*Dk5&orterzmD?z z^hudT#&xcIfSwdt?4cjVu3j&Z9Z^kI`Yt#Er*`!!N06eYr}VRD&)`hFH*KOM<+Pvc z6Ja?TDg~eedVw5 zbi3pUiHOh)d#_NSMkZQfEi5d`@=Fc}9BjLO3L@?Yi8?FS%O0E+m{@SF5P8sg{7aHZ zW|PzO^mN~Q<3N^ypFho5q@76zOYKNdaLB%-E3hk($}Tr^o#x{sK;72XHeH|VZ*FZ3 zpgzPDLHz2~tDOA&5O`$jLq%ro0d14Z{jXhSzZFY;){CKWRPq1&^RY0%ohV5{op)Xm+ zEbB_8#|li3vNF+!qM}Jt&a+-Wzc6}ljD||NEwK3ID|{fMk^a=cJs`NlA|a0uEhEm8M%-TC%XSQ-w|NQTObTBS*Y#+eYmz}6L7u^=$Ixo z>(BK3P&yUdDE9H;!-tNcx9{9(F%An0Yv@RoF&1W+phhWrcwlz6o;*47>C>m$dDqMr zF9=}B@DmXcf$|$0Gl4yrbd6_Ix(mKC>FRD^XlZE~pptpv!UX~eTil~Jn%S+{g;`dn zYinzc78JyUCMG6EqRAP(mN#$TCd!!A-wZfRZY0bQotPL3qphEIepynY<$D%&?b*32 z)szwrW6Bd#Q<-ID3~=BaQ*gGDY#tLd_sz`KJy;H1`XAQwlv}ELC=Q2X>r9n)IpVcG zH!b#Yh@Xq=B4y}mN88F;TIT~Zv(|!GTX~%lYn><;_2*JFf{EmCp2t5&#A5l2W})S@@|8s`7U|dD6LuF<5`MiXjF`Tb2YN z!v(RAo>c~v!OrbJHh-Pl~p*U9e#ez%?0&ahiK(Ce$TsiQ%j4u${;2{;E;F|atghUu zkd~35IdLNNFgbngwDZ_VT_EiAip6S(VU1~WUMSSPQrlqaL#mN11YIuWe7wA>lxDuZ 
z)!t2Sov+U27F0vzIU!^)w<1OKGuw%hRX{m`-x_nf=)Zjb4;7%kDcES4h+0^ z7+1-9^@;!$$tE}BqN`GF`fWu&dusMN4=seyjIS2nzLBEb+=tZ?1g5{1+{xAA&f7^gmckm1cFspI1G%_4MUtd5Fd2>rUJ<7zzK6m)8P ze*5~BAoD9}N;`d*>zu1s?R@v_+H!GLNC*i(J#`MnQyQfgA{tNNheKX_cXioQ~-V=d8=;`^RO_ys$zu*ux-wwnx+ zI~&Bf)z}pKZVlV|_r?tq=;7bv%t`uMHFQl^_MbRv+iz|^I4qEGY$o?*pd%S)Gwsquq*6syel#j!6fEf)a2wYIet`fR(Ba=r(o z>-XJdZ^z@8vvk?eg0EG2j|!lyogMEziZ|@*A_tQt95VuOCzMa~wb74Ck0cS(5P|BM z8Otwea?Cn9I@C&vnO`$LemqVeb~u*jdO)2~bfSK-B^}^ApC?7z;W7}D0kZ(W{0w3L z$@Awd2pX1@90yzr&-4d%HB7c8-qhEpNL44{Br)SiIBhJzU zNJfcgM=w_4yZZaDU_wvKnI0^Y5vumzv7A3(8cj`FRIQOP07^jZ$B!R3n(2A-C@Lzn zd-~HwD!CxYkgdnC)6P=%BZ^d*S!Zb$d2a^wY+Ba}R!DSR7U%kE&^495042`eD;dFI zoT!zI+MkK%ynFYlTSFn@*Ds5tb~AuB0R3KOWT5Ei=;RZfq*#`d{R^tMjvRnK4oS%q zP%T8W3-R&!LrQ#kDeN@5-F2w%n>T6+JR!Lr5;UMdNL3k z_`}J^wv-vxzka4S*Q-)qLBY^Atb2OS1=A2p1vS>ooSf!dR;c!y#=n)!&dk7M`2RH- z;r|ol1S|LU?OXXt!4~(WvBpP_A723Mpn=&g|GA*HfO>B~sw7}gdDr)cwM3hkTVS2v z#)f;DUcN!1jL+6N6I0VFK`QUf$wXLR6#UI$qAVZL?Y!J^a@+C&k4b z7tr~T73!>=ryHD=b?Voer$@;gpJ{9h;B4JexQnM}xrT?AZ%CyOMh!aypbtt{0Yf&} znWqH>iM!SuanWR{IaWB9&b@e)thJ)Z-UxXGQ;z z4y7d?B@WcSo|1ErZm1eVwYIqOaU%2Ebr0mvY(Kbr`eFb>aMgpgoPq*Xp9hTJqfStd zOV-iXN(WK@Dkx>H#XFvet6&e>U3x02sjbZ$hVLsh-A!y%N>7Kos z70Dj8xx$N9D7${qK+cGNFes>Hc9PLWt>5$1wFSX>(Nm9z;ymP5X@WLL&*mu4Y_wdy z&PpZn)=Dm0vOSTc>*1`E@kQ=iwAJbD-Zcwb-6_$(cSSRK3REZY%`iq{%#~$tDRZR^j2H}U_PC;$&(U0JJ8JWRssfx}r&0Dvk6B0roB~@?;I?j}R^mAmBbY*szX!Q2bZZ2r7XAJ(9#O&_V9WteHA7pf9*-j>`kryfGyl6wTbBR zjE&;%Lzw?qx7#mYISQ$e`B%lCRrC=IqQ0B1J!(dGQ(FWhky={*ROd{ zesxBE)Q5m|IDPtbZO+9k)-IoA^cpOcg|G? 
zru@ZMi}&b#Uyog2iSCtH*8z@Y$)e%w=c8>=S`nIt| zq)&hE7BA)ZmuA`A>sO_IY4y6w?OAP3RO|i6i+PJbU8f^1)VqhXE3lD~ktMBTS#wI) z^cEwR4vkCRX)Sn%6JdlC*(YnZA1}jm@sxEylM?kw0uo8JHeU@j?lZHU#viDE-{oFc zr@wsHkp77n-l$j%S29S5f|wejlIh=aKjuND)0Dc-!XF|Bg@W_cCN?)eSjq)KJ~cHJ zcthp;_fHKB40tvVVa4&lS=B-Ynw4Y%ilx{JJN70!Eyi$oU;tqu>}q!D0U1E<@*kXE zaKS+SxdtQ#)cDIzKwR_{Tb+R1{L97c#UIYZ!rGc8JXwY_tHwW1t$uu}vuWpHw=jC| zT==m=-RoL=n)MP_7W7R%wtyC}y#Cs=t@q-BoO+v^SH4x1#5;Cw;r8*N3zLyc%SR1$ z^R0ajefn6nuXkyZhg5{Z(2i;Gtj|62nOP0IdqbgVW$i>&e4I?_tR(8ab^iyp=clX za3bFsRFyZ)uB@y~mPn;SMv{Ro1d;H`lP8q}aA-Yc1#K$w^CKO3PZ3?%{jo_sR$$Xh45|m^ z7c+zMB~kHdQEo#r!oE%C;klzZhBgg{*RG)0vnMb5+sD2$*=n?w0LWo7`#le#Q-Bx* zLIOCJJ9qA+J0{+*bYC(7l8i$^VK^J-qwlmR*avhesutMULip9DM^V2(Ck}|4vGMUy zM~tWb!s>MQqlk#La@pULiH2jf{)F(i#?gk*$tECurJQ(A1=YPKTGSlV7o97QtSYTO zN@gP4Dm%F#q9$}tfw6vkvzlmO7K5(NKgQ1O{v~oEW$lc}byANM40{AV>B4mt19DVf zB|H26O)Q1W8fN*rs}kw8J%d%wjhkf$&+`Dt8Gy>bU?#@$>j!pz`7(EB;hXs8#Zbx% zq+d;0kELOYd`7MQyTFRfUCACAr7h<4%qQv3_biTo+Nqq?2|jW5>{^Z;rZs!up6(69 zMN`Im-Qk8bU| zFG}#M{Ci%n<ps=@isV~t^xlJ6kOG&VM(%H{L3zglE;PXo~J zpLK-70z%ia69PNU&3Ui!(E9Gq8n#760m^D`Zx38M@1}GaJ??ZHbEYJ}qBq#DQ*4=2 z@qLsRDk$YXUAH7Qr4WK3Dd+DYx8XI_`&HlXX=)ONHjJ*7IXCV0Cd8(RrL@bj!6D8m z!0)QE0%{2&sx77zUVHE$=N*23)*?>j-!2wg;IqAcfz*FuV#0UdB-w}t`8>5ajcv@G zaOHf}Ab0>E)|ITRtk4+oUqlrOq@2pvVIX`opqRbo#hp^a&?=GMlpE#IX-%b8dNe~O z*>Y|9mZQv(#m2 z>=Gu3+F+kuKpq*j_lBRXB*V5^5Ba1zhW6#<5DMlH2p(KOt16KyD=Qmq{2pbHcK+pOiw|M8 z{k@qy3S~XTFW1208s*d*3$PKMoDvtOgTuadY_99Y#X{?TQQO4mTt$rLLeS}ig`!{n zX>y^nu8+4e4jwUV%Gy=fyl}Kc|E2uFV_xEFHzV<)XGZ2E;NaoTy?OJ7LG*reMyE-G zO@*@lpSye>wI88EZxBjLOPA>@?F1noU1Bz5r!jq#`$(EPPG(a5QH}8UOvr97+rfov zj6e5MY-`IMeBHMpJePUB!1G`bJ0XnPl5>V@2V>lz5X-mitNF7YOU!0^L+{bOgvex! 
zNq#9SH{O3sf-Jx%yCesfhP^vf2R9o2-|t38?|+t&mcC(V7}b_2MAat|=htgV;Y#Zn zY?}1fteItqR=dnDp_* zl6iY6(25)YMfw>?CfOg={A(Fk-9ERQi2)iYvF;yRD{C07)zpv+?#|X|bF{Oy{frdM zKt-Ya8cph;TmgoWD@yic=W1?lu87yV_5BAAE|3xcn)>$b7O0jNv%4Yk>J*vPO**bw zXgE5GYUS&ZRSZRfdSwDq-Lh4Q%^-1TgFgaIl}M2?91?_2$#}1};U#r-N~j|Y(7P^> z3Jmwx422nv!jJpdV_pCUfSH$1Fi$4y|LDKev}^&Dx2bB<{fi*HHLr9 z-)^b{lGs|k8Z;LW9ST4aqIr4#;ylH}1A?IG=JMpDM~^_*VFg$PHS@J=%7BkwtEWBz zvI3+7D{DR)rR|`o4u5oUr#HXU}dEUYYc#DjYY~H1?K`ldUIP_ z?654bb7M?O#wLH{myR5*g!hTw(bZ5;itMl zlBLhgc19qkGsw6K?5!y5(;}sYh$-<8uSGd1M@e|qpDV(-gfgeAs|y91bjECt#l?$+ z@DDd`-3kD9Z6r*nx)!M+!Cb(_$w{!k#J=wj^^XZ$)g4lT{qK=F-&C9Ey386eUtnH^ zXuLky3`)es#jWz;7vi5jB>+AP2wt7Yv;w@hpqQAZZ>n=_S~d<2+oaf$e=2!{PW?RA zG&{ZW0x@3RrdjcCAO2?FSfbifB)4%|cIKSRQBpAgO3`Oe2i-DDM2;TkP zMoFk%s(q^H$S1|Y(A85svn*uf>{zg z0~Ctblslc+`{w3tZE8hMo!FFXJUzX=5J?oXyX&T%*Dcf_Op1z$`Lyb1ceh}1w6us( z!TAUF##H8~2Eo`}H6R zkW5t5y^1iN7B z=)gJ&A*F0xs87Zpq@(>9mwi3u=gPgjo=+s~-hL1aNGceODM-+NAWp@R34^fg$%BRqCvod|T@T>X9hz9(jKIZ&a2G(&Z|w{@qeuR2 z$c=afRz^yQC_#kL?4a^k50>N&W7|yseyGQ|L_vL~I2$U7F#5A&C}+BJOznQVH>jhk z{(wMzZpU;f2a_8ma(z1sZZ+em9ulFNon^l-ecbMIe?Rt9FNk8dAl81^tup~#X9Jez zJZcSyiBGIAXRh*sB6e!X{7Vu|oW#*{fH_okb(v6(KSJ8Vn}p&yt2@Ko~+Cycg>iHUh{h0P}zBK~K{ z4;)F#7f#n~{!DrL?AejrUZyw+eIc_V#!ORkBaS&~ZjHwq?=*qK>& z{Vxbj*Q!4jA^D>g{`W{>Zz&4AP&kg#nE}KTg#d*V+wvBxk_9pnj^S_yk+E`xCSUP? 
zO+-S)Cm`Y=SdhS()i(6G|@so;gju5erYJX=r`*~I2`bbsA$KLpuZuuAR_W0zX_ z3yc6a_I-FjUcLE~`MI1I!jR?U=3WH0)>?uYvEaSHU>HHM%*n|?Lg^9|T1GIHVDmX_ z;4w*um^6~4UCyY4Gx($T@#rSgD7g@txLIf_hRT?8&Fc96T*VG49jmwvAE5m60lGGy1sq0hA!rwl8L(mb&O8|sF3M-{OZ*W~E{p*} z`vhs4l9LF*r8vM;P|99@pYqxhL`6SGZ47Q0z3)@dbcCFoGbkF(y#U^%1EJYy?9*d_ zewSQa#GR#CgQ%0lt?2ax6FVnSMo>GrPdrr>hE5F_8thZxbqP@b+!1FGDJNpxtE`ko z`2kY}`sO6#AfSo~NgCL_%T{I`&mTiUXXt`{r85FX273T`V~|cTk(rFr&aSRwo#;KW zG>@OjMv~kdlq^zy_iQnj7I@z%C~Qv9B4h!m&1m72?0<$2`EYs-uoGeg13RdoemM~* z5$rjHbBEf;_*>SM_x;xYaPAih+Au@5);}s4l}PX3zn}5`JvBHL#mv5dJ4qSJ8suQV zeR~EF@n1r+l;`T{lU!V&4+H?Bhk^=+(}VgCAcIj$3^x*dc&fyUZE6*%;9Hsk}No_l2jVXIvDq^)T3 zd+@FOv4g_UIOOHoKxY3JEAE7W^feZI!XO%JsHb-n>b%dNRsIW<4+JR()cg{=VZpYc zdDjLb;f%L$kEBT2sW>}-+zI9>E(Qld#>pD05@D z)r8>$A7z!ky(Z=+P8(jsTsk;ph0zm)ivzxbA1Knny0G!CIt_6TNN2gM7Z241I8wpE zjBoQ-XWe;8H!HB%Kd9HBP-EjPn98(bL;0NVttp@9J< z3@nyc1M1gI)hGxp882R3NbrIpl!=+yxLEgTdU_O8jDYPxJ!1w}?5|(Hp!B|AWyKFF z4Tw2kr}jLr%0JJ?bF6^t|kYJEVvP=5e)1}Dme-1T)=0CeE=#ppxb zWiOpdxK8Z%_vpc~0>yRL>>5&daRfyx*v6%%BGw7`3K$&_@9mWVI(_ieq&aHICMwxyX>RdT<^7CI$S$O8?| zFZcV+K9!$jV?)60HWbxW>UOFCd&i>`Lb|7k5Z82@TP-=~Z@{21zjXXs8Tt8?P+P-k zi;s`562$J_E{7Tca4X>3rPXeYwy2ubZ)zYpA~lVTO`f-8HT5B|$3lK3M!|V-FUA9= z_FXI>S}dVFyR%!dBx|$4w`9~N**BJ$wdgTWqYx=cW^>w&u5E}NG&zKsDIFw0Y31Ij zmKmCMZjHhhKVRJk32&9*ZoVGoB>EgsGK3N-*A3FYS_HwAE-^v0hTw!RQ>#{3PdrU! zxZCfeqW+Xb(7TgyuQyWiCVQM$Qk&dyveMl3PJlT=sP${mUBe%2EG_H7sqq*jI)W41 zzyg6g4$_|Vh-2c_Ex{wlJb|k8YIZ2zrDz&<(mtl0@`fr1$IQ$uV%c*V0)OKP!{(@a z0`v&_gp9gav+r9&j=e8B+9p(^tftoH*XC0nMCLx5SDNo7hjpwxA4?^7FvMRr=!P{s zE$Aw(&&sQv*i@r=LdwL_$I8gC7F_vmX2s0!`({ zSlH$3*MXFvQrHm&gULHBe);_!CxB;T8tARqombbu#tmkU18fDJQuxjnh&fi@-}zG? 
z;!=(L1O(WGM807S8}b)B^!B~Vl@6H$bF_NrK$%14&LwpaBqcyQah)FkMcuYyUcwqQ z_pV97_|<-om)?IKqq=5v%WL7P(w6^=x`c>q+#+U*4K}I~41W3H^7Ryn(PZ9|BK#Xm76eIFt;^z=k08F39d+qmN5S7y~*<`S$JOut5VLl5SEc zfkkZrE{r_2LH6V;tOB4hBq_bVmK;!4CIzX_df+3O66qWcqiKM(^Ic(voZ~Scx=QzT zf1^nOEX;m6Ij1XutAP;1AZ~g5hYwz#;b=%k%aCR9%goF`_7~&?2M-i2pUtdZGnIcI zXX|$30auIMnKMVgl3bS=CI19MYlBJQ$Y?10=Y0JNVe<}_tn6&ymPUcV@q^>ja$m{T zHh;%#1VX{1Cr^-i6e8%&d-uYga;c6%xsNciFclNY>p<@aJ5F5IE3+qX8}=#P*&4L1 zpa6SwP_N}B2ZzGSh2h-=w+#S$6YGG*LK(!&j(#yK+DURIGoBwPUN!||#&Sv&)b=A_ zFUR`4+nOa-=@^Lzb1o*7MJWioh{3x>05s)N39m&8Hpozw_#a--2!ri8f#qM{OYj12 zQNm^*Xn21Qzs$EeFrW*GiAzq7Rm%!H?a(?t7b4P}@dAF#=>Xcnv<5l(e>8b*!06^QxR+qoWIkbaoQBQoHKS*OgW9 zF`UO#Dn!ugw>YrQ3!8mmV3Bpd;kyg0fLC)>aWNf!$c?>vXIX8t5DIvYpZUs|dwJh; zNrH(E9}Kp9Q3%~xh3+e{VLrzcVc1pVoNa~O54$EQ_;K875 zKuncE)X=+g2?`aY4)Gus)EDanr%A76Bap;Z`IuqXK^rNTnd3;k0Sj#ctuq#umgO)= zD=UQJ+8H}hqX@_oOey`d;%*NykjB7%cfblRHJ*T-MaS=!(*BvT@(?Ih_~^{i-N{9a zp#t#3zGT%1Upvud=*TW^87EK!=MV`e%WI2M>1Z~HQu1(D5$M3!yw(Yu_mS5EhQz_a zLCR;#IZBN|#%%%iVZ2?yiflX@z#yArqHV@XTm>lk@HA#$k_b4<7u?y;J7x*kbc!-X zT)uocx^yMXa7Sqmw+D8^<9u$py#upCQZXYJKyu=2>RnH)nez2Ps{2mO)!p+}?5F$n z@*w0PlxS}8?7d;=Ex2*}_NnwUHSS9%;V%=D*nIskI2tFka_Jyf$}7}=*M=GHoYmce zrk%q3Eind(1z@j)eM5YIm}Jbow6xS|Jzw~r_)qMHb5Jdi?ALEI?~mtkGrV*6F7lY! 
zO>Z1?d!IzfZ#B<8^U5f8o!5mhUPr<{0+5d!d^3=%13sh^x>TmBPjO_&tsXu&QlljR z<)Hp|hrQw>pRI*4grfmXC`;%0h0dH&GBEhzlD7|T)%p()+I~a5(QFtArg#5?@(qfw zVIyDx1fiW|(;MZ8PslGA!*&`r?4|h1(ep^u0h6_y2^X< z0PJJ(ll(YHZxBT$;BOPpTRFo`RE8AV*iZtk3BbuxFDc;5d^&uf;Di_!jx|NfeNfdSfVYF~KrxFgNK9YZ z**{_E&AERP2ZTA{9$hKu<(v?Am7I5_Q&!FDv7XzWI75u~?b{=zHM?hner*+(!%5x5 zKsv-Zb;@mQcA_HzT{SD{7P&1NAgKU$&9DzXb!RJ--IoDq(Lf$I-wu5b0BX?{tF#HD zwMdTe23g|Z*|pQaF=;I^=+`C(&kr!64_u4ui^EZHZjgZhpAgtPutp=+*%_v`U-0%V z^rFDxUw}id62TNKIqcoNXz0+^-X33*w*bTm5(e+tRrSt#dU?UX)eP$qkHd!#q5}Hm z$4iG?Q?Qa#d3u;*Bvfp1!;hegqjh_Ba|`kR!0Gz6X6mzh*3I`~swj|{9BwGYCMdr# z5}09w7Ixp?#fv!o{DQD{zj-?YIo&vkeD_w;$EOtbF|uO55E#A!(+k!jlj(1-3>;yb z@^z%i@uM5`0I`GSU=PJP+wkeBS#2DYi9tv>fKoOVyB&qzA%rJi#$sI!?d%3uv=_b= z)Lf~#%)9$?2rAe@&o#?yDH82gI&V_H?WI8r7R3L!nxrY<9X3|d_XYOR9FyvW{IspW z$tB8YzJf-(oe>fRQdhD1Bk1LGT{TB|QxotzbC`3nau9f^Fy>}v$QY4`4u@(o4vs>-I*5L2LEL0_}kZdb{MMp6I(c*uW7&>LfLV7@J= zNf^Igm`l>Cf5}luK;ZWL^d@9I5OauuMh&j#Jc`!zxd4`m?$d#Zj>B2;iMZzx90oVy zNk6t&{R492=c(}_^KX#epQ4L)KRu?Ea-Cy{WLFT}KRyqIqj9mTJ~0%^(36N9S)0kH!mP|K5wo}RoMo~0Csm>!11NGD!9(wg=ksfy(tPG-P-9bw>tlWGBofOl?Y5z zE9)=mJJNvtai9W%8cFWzv~yBOhzbCk(B96Jf|D?r= zDRayAZ!ZAN>TtGDR%;z}eUdab(BM%)1ZH zyI$451eZAFrUBg{yu+yD?CenxJ88l9+3PoSc-9f~8)WxUU~rYay=8-_-2wAQCnulh z805lsDo(o^yiok>d zZD9;26DbuSmw`#ae)=0X;6d1E}k#!qdd<9TS5(qCUPs96^gns|?K9Bc!m2 z*EXmt>XZ+I@&s5R#oXR8bA@e2Tg)W=&Wv_V{n=7nK>F^SIq-}~vx1%;3lM&cNd9bY zZmtDg*#Ss&ki;>>#QdG#T(kO3!N>+ao2|9Psd~)7;2h^|fX4wJmNu^Fd zT7s+KYWXqkTyIg7CNSDWZD|Lr+glI$}obrGOr2@O2%A z;(M5M$`P|u?%LsS2EYbbpS)k5!?>v)OP)G9thr@LQ}a#)U|49&{tEV$39IpM0yylRo)AlRJt0gJ?SKi{%U@RDj6X9c%-`CHn*9(J~NO3hjogjU=Lu{&6E|oOW9iw(b)~%n87f zL8mqlK;WBb0-IFe&^b}Xf3AJe((=qfvs{oK@Z^nv>jM7E?;9y3aU8|#Uq0(U{(ehSQ{;X#b;m#h3YiSwF^?+IYpr=7=Jkq` z2oq|~-;h>92C+oGUQoVNicThU$xPUL1q(99J7);3l(dR+yJ`TdKWTt?3GbHh^V~{= zn>|hkg%RpM)Y_^L;*?SVmlV8c0yQ&jcyJpK9H3K^pr^YAaZBw4?dHN;Cl2LgqxoP@ zY>;hhL)#|N4UPR<$h~Rd-Y;-u8EOAV-S}o8E(Ka*CtK;jHj>`y1{5aH*3jNwB}gj( zM99qSEEs5x^|+qKpa~J4>v8(&JhY_U4}^zCOg|!jI_;pXo#bOaR4ZK2P%N0(RxpVW 
z_h8&ZI>QKTR9k|c;gA*taBiTG{{I5>t`#`AbfLP&*_KxBK%whaFcnM<6=%v$<$)95 z3%{^7&_OO#=?AT7Thm9tE45%YS4F#qKY!-lwsW^#{|3@`f)eZ3O!z(9*}!fP%N8hUPyr7!Ro;eS5cXK2?Wg5?mZ-Z-SFfx-6n z8Uq2h9R4;dOWD^499>Z24e#$7?sV?&M9Dfq$37y99K6&56hREu72c5mmix!h0S9gl z2x5dNp!v-@(;~inxdsX~{CE(T0_y;?WD2!0^oH#1YyyZ4bV%CaIX+qKIC)hkgM^ah zx2JFK+dF+#UXn=P{8x+U*w}7UR#+s(KMetY9CL_s16)??ed=~LcdI;$v8D&jR^Xt! z-C=5M`~oWq=?LKy9zNs?q6YHHpc8o{kWUQkM2X4&%hQ9YY~xpE6yneO(=h4PqA$%Y z4K=-9r8^CDRQ_?7rOH|j0v?i$jp;zW(=9#%d^Gaf5ZAdr0uYUyoF?Z5^~$Kp=>_~i z=ztbM$oDA4gmCEA(9vN;+;f~nP)AG@9figOVC%c4oed7%JOmLP1#kXfgRG5MmcShZ zo|2ZfY3Nkr8A^rM7a&8Dkf=8j9zk4I=D&G0(&h?T4Q&t~9ON}Sv{L%0<_TrLK!q#!zJRl{s*M#D2z*P~K%GZYM;UFX#hq-c<%^bOj!$4VwoxrHHL9 zPY;!a*}G~d3vzOxiAE7#*8xQIx9;x0x-+w~vXGJmHViU%vulXHiE1`CDc1&>*zZRH zMgvr1kj2w|))_xXFi+vD*aH1;ZQ&2x$YZ=$XNQ3i^`P#ZU4xc9&?%v@0Fk0#ZoF`( z5~0dOrU9Oa{v669x>z?)p2YN_7W!A$3%1THSPO7v0ow@;f0ZSrCXGiB4-ht#pt*iY zJmo0;)x?Tk%V6-;>@Rw>?*d7+JPtUZB?(OEiMW)RxOF$*yn{qY`)aKq8<0B4i3udbQ22v$T7xaf zKRU);JIRgpaY}^sJx(x z6`FUl=`5txCv;H5D>e?)Fm`a8B_7T=9w&%LUS(4kaCmt?Ha7NYa`I!?zoeW-=a{Mz zkeh(9&3<$F0pj-R@YH~{eSq<7W%a`{V<2jPN(TiI{6M(EPz-7Gt7UJ!VxIBVcS=Oo z8qDE#=Lkl?@o?arWo9N+xPg8AZLgz%?jTLS{J1wS==QLLJ4g|h72Z<;%s+7D1SrGZ z{q+SQiMK0?4I7CzEyN%4ZnyBSncR9Dv=N}+|14hX@|(UVPwp!PC0*q!rsK1EtLzfr zXY*3Rp=bWEWqeoJIlng(J30(AN`=fb{>}|KVYf=Hn~g$0kv?KgITJfJQs^*WXC(c( zaCf26osv!1jI>GKNnW8~wc=6T__)(CHZR%G%C&@Wku6|5i{Z^0XAP_M2nh*i78kXJ z8G6cHX3xpX=L7Jaoz71G4y)WdJX`?9$b4VnMPx&W{CqYz47JZ~1A_(GSLwS;wSe1G zhlhK5vf#+UD_yRFThS9#x;40d=j>+r*CTQZAYq&l6#NbW^7-P+d-v{%OGvy1p2uQ$ zYYlR7H{4|n$oMl1N2LvqR;~|Nb)2<;{L zkK5vpR9d&`R=0al0LEkyl2A^_23LbneI>a@_>`ASiiipX_8ag*9UdMYXJE#r!h+`5 zKR{Z11Hs?S`v_FFcl;FDAEa z!beSN0k2r<=w8-Fo6GxfadExP&+j;20u0ZO*ujyk`t8M?qR28@w}h!3Pamk>vk(*R z6xG(;*4CDpx7Rw>_NN_r+ujAlVmf6oGY8t?^Tj>*rQ@Dsypqm~Cb^30YI*M1mf1aB z4UJA0G|mBvskPPBob2rEk-d;|uw|@B_F$yO)Nwj#<69loaPnIjVpqoU=ODQ}T@k}8 zJvSWd_~}}rXZX)&;1x5s5u$$O_R^}0vvW>W)lgt&EBaIFmGyrQAX35TA6;iufw?plTSyt+c@0!n){WB>YcGVU zT(?#q!s`vb%r+RMJHV8QNsI#=;4=+R5YTz~N-fdTN6I>_6v8p9tb5nLeF#eIj!w=K 
za3LVE+?()GDmmtqB8_7<%pgV@qa$k^Y8ggjGY%OvGz`Xf&wlOq{qz0*{eIu~pWic& z=Y8JiectE3?(4qp`~ByT_`x9Z1Z2NAYLUvyP=Gx7i;3?&Q$1LpltpWF=1EbJ27=EF z?L^}7_$g2UF3f)YT?L0f#1#|UDpCFXk^@sbx48kEyFFs0yg*%wi4S3!R5q+xcBM*E zRB%#b&k%U2`K-}p1X7lA@sA<=#FfZi-l$lWEi*3sapji;!7%={=(p0UFUl@%nc;+O zt>{Ga{c_p~thY=BH?a;0{QJ*OPQOp0Nb-uyK6?xZc>xwkS!UzpE2F-?%lgg~=MycM%n z(1UQo_Dbxm3|scif#91wohsFpc#7oq5fk0D4|ktAC7O?jL$hLV^e?P$@vV|f1&%sX zupYUS8*4`?d8JNqoYiE@hQ$w-NvprxKsd@2m&18@o*V5qU6jX3SUiPYrA zc2L^O_T$cs1kluGmB{UH+xo+!z0M)nvXqqb?{WJ$m%R$zPNh;LgF`RC!x9X#gb#t~ z_(ml~GdZ4Ub=#5CGewN|N0)5*!?c(!t^A-lY5omIbEqn0_x;|0<_%BwmHfE7V)3fw zJo*@I6jkgZ#!;w;%f5c~>Xjd{!4@v+42YH9=4wnuyybOGGt7yVW zhxX{;Ydl=4(gXec3te4$pX`|8S^0=aR|En9nOe7ieYygb{i0XqI#iUDfGfNMmh_n% zVPqu^`A?$;_6dNCn)x;Kv8;VAJ8$38Y6kw(h&~QE6L+*Sva|q?!k#W9{@qnuKlG{mwDjci-t@BH+IC}D`|Ja&4KhfH_GyeH1s8NX5~G|WO(<>C zu5idGdZTDv~hFtJoleRO3~rueu6<|S#WyM;_&Y9kpj?q6yey{(6>PNG#ON20p# z-fHu3*RoH_jNsJhY2r$DwVPZo9j2bh|NBAZy5sR=iMZGs2?_aRKy()%*yP@Lg~a;; zT#$|mSj+*aR|2T6G%0B#XBA{ceM3X~_PTm{p;iX=z?Sv{+iz@UHonT?3K~9P-YZQz zph%f2IU(qmory$}oBM^Ms1#jad(Om?Ak69nsz&92{OX(KcQ%6=D4hdXxufZ>0b>&@ zo$1BJlMZ9JG<{>EhgNfb?x^~5hLVfT_b0J(dIe~`jKr5puwBx37D?BYrX|Oxpv~?V z|JJ1)FYi=bj42HyIv?GuQL<&ghZyLyai&YJ0v;jRp|ZUE7;FJ+YwONIu!yygY~|wW zItg@9TCs?~6M$sw{@MMHAgmA#=*6wYx`NaV^j&Dnk+4rS9kq}i4!P1+z$U+gi~@|O zX9Yao@U@1nXAQ&~a98<(BtvPtJ84jcwj^0+K;Fc*rZ}BTe35-?XHB+GP4<^y-3&ux zRPms^k#Q&fBZ}^aCJpr12N~HuthKs*{^sHD?K!^J`PzA*`H8MC(rtUUn&{?#d9sGt zS>$&(KS|JwAN_+rNsRYHneH*Ux;Gb75EevlzudngZH{#m9`2=%KZ?pDzV!h zZZkSMs=H@TH+X${lmMzKGGhLbSH#3Fc}2z7N~0^Dt4TTq$M-8?PM^*Zr7pR+xtTkC zh!l$M2TjxUM_KC{mn1v59Ckm4QIUO_t&h?*<#wOP)SSXZKERbQrlqs`y~BSvQ}*#y z*Z4{MtMxW@B}Tj};qAktZUq}d*xNHnX7|pU@vlcuU{35Qk>99%oos`-Kx{inEeOv) zq77rAYhbuAA9v=-;jc$U(+?v3{zFj!6yTz3Xy_BjO9F$mj|5IXlok=R8xV?oJE|;x z)oKQLm0-dtMt5SM^keAQ0>@I3W0VuF!b}gwwMaVAjgV{mhPHIMo}$NZEi4P;3`^&P zVD3f;nk@_Rq_L+S9^6^Nb|YjO?D6vTtFJXT7n4-)5J(PhO{RB2q?@jw5_HJ8|ieqXlKA4 z3L2=j2ioRcYiq351PCeypg3oJ@X#a;v8*QiIkCyesVKnPM#mmi_o!E?ucMmzXMYR% 
z^1zvO=lmlImU5Qo$z&}?8Y8(c-7zxq#~OGi;wPO+|gaa+M zc6JM^GciccHS%UvlX0-VX-3tV-luTXq>_`UlAl`25gDk7$)5)U3~GtSMa2)YgJsDr=Zc^?HN;$zBmF`Ons0bC znmM(mz(@fUeJ&lWvy7Ukd64~6M`XSmLQI zQ@qf9oZOzGwdRyt7^BMx0t@X2W0g(@7COU%aXLO^@DMQ!2o)bfEceEs6yXBdzfvSE z>u+KChkNFbf;Oanw<0y9)Ny-|=kFW|5q62Xq`^|am(|%H%VcW!61kq26Z#SJ>Sg88 zwoP8ck4*2@hY_M;vtUNPGE4Oo&4g>?H@zt*>Dk+E&MewxAfl+84yiBzE|%ou*wy6K zDo1YtLou|CusnUU7S^{wU3J8mExX9HYSynZ{QR@9VfBkw*ikLNWO!ur#%;yzK ztkn^wxXfPKAls{Q*&v!5x_0mAP0Q-(T*a{|+1M#~F5f7ygK*fjnV2c>EY^dY%?Svk z!oA<&?)}}UtukNc1T4c5TbQJgFEP7WlRGO3h=WKO1mJ<(0q`KP2^@w0r^Yw0DW(Sx zzEC>wps}&hgRlZLZKVg<5mK}EZ42Up+(0rTJ^g%CR8&6;4?+3Ova*fBIi4CWL0z<_ zUi4-v)f?y)@E90SwE+?USASHGp1%!DUOHMEyEF$x`a)fGbdV$4(!!zw%8V`FpkhI~ zb{Oo*lsc029ez<9C*|q7+v#--$DGv-^~>^OfG>7-a7ZBSe0n_&h-J_Y%gBXGvfJHGw>`P}7>H zu0eKy2Uq}iDxn!hUFdVm=a*Mj_R(nZLP)|o57LK-57!mJix4bK86P}Y40>Yoj}o0w`{g*9%$*k!T2f)o~DiVw|0CgUAEw1nu%A_E()L z2MmXR6tGS_hbmmC2A~OQfnFwauP+f0C{QGX=N&TZBm^+8a*!V%iX%Wd(XE@-L&s{u zg5D}rQOz>61#Yw#a=`8Zu9%y{0j2`v0`7kYFcs44e;I(HLzXME)|H?zhs2ixgt61t ziE7Xhs!Y*!(~4_q3_)RX0%BrrZf;osKYLj$1N%uX7gz^SUEPAJadm)AIEJbKY6AOo zuU@-0JUa*8eqQPs4CYT71!Ul<7M~M&gk8IK1qKCyq0jpH**)MlpnzLCkH-hyJxBQ$ zAjSZnM)E?|2WI?gO5OF;Od$77B&J)1%urt?y$n*cM|zu2n&~rwQ09Pb`P~aD~Ffp63qCxP~79 zl*{`trKF_fH>{fn_trKI#5ZFr70VGg^=;dFfjz8)8qu3KZF&UT``Ov!P!Oi4wY9Y( zH7k1U(`bV-AMRJQ_kb4Uh3>3Y`TLf)@F7tvFLF>$-7=52H3BkKi_uY~tl`WQ-t6pS zK*n_t7T`piTzY>GMsPr*-w470RRUR+Jva9kyTpfQ4P}V)Sf;D|A0aeyr)4e$d)~*V zExdgN)E#3&t_ufZNG-8~JNTE91zK|7xVz+)si}(cBV!Bj7)F_@8LZFz`=;UvF&ChB>vaZSo}=(mxHZNgg){dXiYLeHlIuBxZlq(Wc7k H?9cxINgZ4^ literal 0 HcmV?d00001 From 754be1887c47d04fbd4fc29bf9bb5793e14cca3e Mon Sep 17 00:00:00 2001 From: Zach Date: Thu, 17 May 2018 01:58:15 -0400 Subject: [PATCH 04/18] spec: move to final location (#1576) --- docs/conf.py | 2 +- docs/{specification/new-spec => spec}/README.md | 0 docs/{specification/new-spec => spec}/abci.md | 0 docs/{specification/new-spec => spec}/bft-time.md | 0 docs/{specification/new-spec => spec}/blockchain.md | 0 
docs/{specification/new-spec => spec}/encoding.md | 0 docs/{specification/new-spec => spec}/light-client.md | 0 docs/{specification/new-spec => spec}/p2p/config.md | 0 docs/{specification/new-spec => spec}/p2p/connection.md | 0 docs/{specification/new-spec => spec}/p2p/node.md | 0 docs/{specification/new-spec => spec}/p2p/peer.md | 0 docs/{specification/new-spec => spec}/pre-amino.md | 0 .../new-spec => spec}/reactors/block_sync/impl.md | 0 .../new-spec => spec}/reactors/block_sync/reactor.md | 0 .../new-spec => spec}/reactors/consensus/consensus-reactor.md | 0 .../new-spec => spec}/reactors/consensus/consensus.md | 0 .../new-spec => spec}/reactors/consensus/proposer-selection.md | 0 .../{specification/new-spec => spec}/reactors/mempool/README.md | 0 .../new-spec => spec}/reactors/mempool/concurrency.md | 0 .../{specification/new-spec => spec}/reactors/mempool/config.md | 0 .../new-spec => spec}/reactors/mempool/functionality.md | 0 .../new-spec => spec}/reactors/mempool/messages.md | 0 docs/{specification/new-spec => spec}/reactors/pex/pex.md | 0 docs/{specification/new-spec => spec}/scripts/crypto.go | 0 docs/{specification/new-spec => spec}/state.md | 0 25 files changed, 1 insertion(+), 1 deletion(-) rename docs/{specification/new-spec => spec}/README.md (100%) rename docs/{specification/new-spec => spec}/abci.md (100%) rename docs/{specification/new-spec => spec}/bft-time.md (100%) rename docs/{specification/new-spec => spec}/blockchain.md (100%) rename docs/{specification/new-spec => spec}/encoding.md (100%) rename docs/{specification/new-spec => spec}/light-client.md (100%) rename docs/{specification/new-spec => spec}/p2p/config.md (100%) rename docs/{specification/new-spec => spec}/p2p/connection.md (100%) rename docs/{specification/new-spec => spec}/p2p/node.md (100%) rename docs/{specification/new-spec => spec}/p2p/peer.md (100%) rename docs/{specification/new-spec => spec}/pre-amino.md (100%) rename docs/{specification/new-spec => 
spec}/reactors/block_sync/impl.md (100%) rename docs/{specification/new-spec => spec}/reactors/block_sync/reactor.md (100%) rename docs/{specification/new-spec => spec}/reactors/consensus/consensus-reactor.md (100%) rename docs/{specification/new-spec => spec}/reactors/consensus/consensus.md (100%) rename docs/{specification/new-spec => spec}/reactors/consensus/proposer-selection.md (100%) rename docs/{specification/new-spec => spec}/reactors/mempool/README.md (100%) rename docs/{specification/new-spec => spec}/reactors/mempool/concurrency.md (100%) rename docs/{specification/new-spec => spec}/reactors/mempool/config.md (100%) rename docs/{specification/new-spec => spec}/reactors/mempool/functionality.md (100%) rename docs/{specification/new-spec => spec}/reactors/mempool/messages.md (100%) rename docs/{specification/new-spec => spec}/reactors/pex/pex.md (100%) rename docs/{specification/new-spec => spec}/scripts/crypto.go (100%) rename docs/{specification/new-spec => spec}/state.md (100%) diff --git a/docs/conf.py b/docs/conf.py index 9d4f6b017..96e6ca7ff 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -71,7 +71,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'specification/new-spec', 'examples'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples'] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' diff --git a/docs/specification/new-spec/README.md b/docs/spec/README.md similarity index 100% rename from docs/specification/new-spec/README.md rename to docs/spec/README.md diff --git a/docs/specification/new-spec/abci.md b/docs/spec/abci.md similarity index 100% rename from docs/specification/new-spec/abci.md rename to docs/spec/abci.md diff --git a/docs/specification/new-spec/bft-time.md b/docs/spec/bft-time.md similarity index 100% rename from docs/specification/new-spec/bft-time.md rename to docs/spec/bft-time.md diff --git a/docs/specification/new-spec/blockchain.md b/docs/spec/blockchain.md similarity index 100% rename from docs/specification/new-spec/blockchain.md rename to docs/spec/blockchain.md diff --git a/docs/specification/new-spec/encoding.md b/docs/spec/encoding.md similarity index 100% rename from docs/specification/new-spec/encoding.md rename to docs/spec/encoding.md diff --git a/docs/specification/new-spec/light-client.md b/docs/spec/light-client.md similarity index 100% rename from docs/specification/new-spec/light-client.md rename to docs/spec/light-client.md diff --git a/docs/specification/new-spec/p2p/config.md b/docs/spec/p2p/config.md similarity index 100% rename from docs/specification/new-spec/p2p/config.md rename to docs/spec/p2p/config.md diff --git a/docs/specification/new-spec/p2p/connection.md b/docs/spec/p2p/connection.md similarity index 100% rename from docs/specification/new-spec/p2p/connection.md rename to docs/spec/p2p/connection.md diff --git a/docs/specification/new-spec/p2p/node.md b/docs/spec/p2p/node.md similarity index 100% rename from docs/specification/new-spec/p2p/node.md rename to docs/spec/p2p/node.md diff --git a/docs/specification/new-spec/p2p/peer.md b/docs/spec/p2p/peer.md similarity index 100% rename from docs/specification/new-spec/p2p/peer.md rename to docs/spec/p2p/peer.md diff --git a/docs/specification/new-spec/pre-amino.md b/docs/spec/pre-amino.md similarity index 100% rename 
from docs/specification/new-spec/pre-amino.md rename to docs/spec/pre-amino.md diff --git a/docs/specification/new-spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md similarity index 100% rename from docs/specification/new-spec/reactors/block_sync/impl.md rename to docs/spec/reactors/block_sync/impl.md diff --git a/docs/specification/new-spec/reactors/block_sync/reactor.md b/docs/spec/reactors/block_sync/reactor.md similarity index 100% rename from docs/specification/new-spec/reactors/block_sync/reactor.md rename to docs/spec/reactors/block_sync/reactor.md diff --git a/docs/specification/new-spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md similarity index 100% rename from docs/specification/new-spec/reactors/consensus/consensus-reactor.md rename to docs/spec/reactors/consensus/consensus-reactor.md diff --git a/docs/specification/new-spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md similarity index 100% rename from docs/specification/new-spec/reactors/consensus/consensus.md rename to docs/spec/reactors/consensus/consensus.md diff --git a/docs/specification/new-spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md similarity index 100% rename from docs/specification/new-spec/reactors/consensus/proposer-selection.md rename to docs/spec/reactors/consensus/proposer-selection.md diff --git a/docs/specification/new-spec/reactors/mempool/README.md b/docs/spec/reactors/mempool/README.md similarity index 100% rename from docs/specification/new-spec/reactors/mempool/README.md rename to docs/spec/reactors/mempool/README.md diff --git a/docs/specification/new-spec/reactors/mempool/concurrency.md b/docs/spec/reactors/mempool/concurrency.md similarity index 100% rename from docs/specification/new-spec/reactors/mempool/concurrency.md rename to docs/spec/reactors/mempool/concurrency.md diff --git 
a/docs/specification/new-spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md similarity index 100% rename from docs/specification/new-spec/reactors/mempool/config.md rename to docs/spec/reactors/mempool/config.md diff --git a/docs/specification/new-spec/reactors/mempool/functionality.md b/docs/spec/reactors/mempool/functionality.md similarity index 100% rename from docs/specification/new-spec/reactors/mempool/functionality.md rename to docs/spec/reactors/mempool/functionality.md diff --git a/docs/specification/new-spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md similarity index 100% rename from docs/specification/new-spec/reactors/mempool/messages.md rename to docs/spec/reactors/mempool/messages.md diff --git a/docs/specification/new-spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md similarity index 100% rename from docs/specification/new-spec/reactors/pex/pex.md rename to docs/spec/reactors/pex/pex.md diff --git a/docs/specification/new-spec/scripts/crypto.go b/docs/spec/scripts/crypto.go similarity index 100% rename from docs/specification/new-spec/scripts/crypto.go rename to docs/spec/scripts/crypto.go diff --git a/docs/specification/new-spec/state.md b/docs/spec/state.md similarity index 100% rename from docs/specification/new-spec/state.md rename to docs/spec/state.md From efc01cf5822d009c251b70749598cadf4a8593a6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 10 May 2018 14:02:31 +0400 Subject: [PATCH 05/18] stop localnet before starting in order to avoid having to stop it manually --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4601c970c..05d1889f0 100755 --- a/Makefile +++ b/Makefile @@ -194,7 +194,7 @@ build-linux: GOOS=linux GOARCH=amd64 $(MAKE) build # Run a 4-node testnet locally -localnet-start: +localnet-start: localnet-stop @if ! 
[ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi docker-compose up From 0d93424c6a186e017c5268dc651753c3fb52ffe2 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 10 May 2018 14:03:05 +0400 Subject: [PATCH 06/18] disable indexer by default --- config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index b76f5ed19..b5bd87cee 100644 --- a/config/config.go +++ b/config/config.go @@ -515,7 +515,7 @@ type TxIndexConfig struct { // DefaultTxIndexConfig returns a default configuration for the transaction indexer. func DefaultTxIndexConfig() *TxIndexConfig { return &TxIndexConfig{ - Indexer: "kv", + Indexer: "null", IndexTags: "", IndexAllTags: false, } From 7c14fa820d0817560592c7e476d2c9cd9044a236 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 10 May 2018 14:04:27 +0400 Subject: [PATCH 07/18] do not log txs at info level BEFORE: ``` ./tm-bench -c 5 -r 1000 127.0.0.1:46657 Stats Avg StdDev Max Txs/sec 1826 843 2744 Blocks/sec 1.100 0.300 2 ``` AFTER: ``` ./tm-bench -T 30 -c 5 -r 1000 127.0.0.1:46657 Stats Avg StdDev Max Txs/sec 6120 1970 9776 Blocks/sec 1.000 0.000 1 ``` --- mempool/mempool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index ec4f98478..6bffd42b4 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -255,7 +255,7 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", tx, "res", r) + mem.logger.Debug("Added good transaction", "tx", tx, "res", r) mem.notifyTxsAvailable() } else { // ignore bad transaction From bbe135595713764c0f129f3d481519627b3445ea Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 11 May 2018 12:09:16 +0400 Subject: [PATCH 08/18] log 
only hash, not tx itself --- mempool/mempool.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mempool/mempool.go b/mempool/mempool.go index 6bffd42b4..aa2aa4f41 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -3,6 +3,7 @@ package mempool import ( "bytes" "container/list" + "fmt" "sync" "sync/atomic" "time" @@ -255,11 +256,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Debug("Added good transaction", "tx", tx, "res", r) + mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) mem.notifyTxsAvailable() } else { // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", tx, "res", r) + mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) // remove from cache (it might be good later) mem.cache.Remove(tx) From 58e3246ffc7d49ce76312278882a4e84ab417311 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 11 May 2018 12:09:41 +0400 Subject: [PATCH 09/18] batch index txs --- config/config.go | 2 +- state/execution.go | 14 ++++-------- state/txindex/indexer.go | 2 +- state/txindex/indexer_service.go | 37 ++++++++++++++++++++++++++------ state/txindex/kv/kv_test.go | 4 ++-- 5 files changed, 39 insertions(+), 20 deletions(-) diff --git a/config/config.go b/config/config.go index b5bd87cee..b76f5ed19 100644 --- a/config/config.go +++ b/config/config.go @@ -515,7 +515,7 @@ type TxIndexConfig struct { // DefaultTxIndexConfig returns a default configuration for the transaction indexer. 
func DefaultTxIndexConfig() *TxIndexConfig { return &TxIndexConfig{ - Indexer: "null", + Indexer: "kv", IndexTags: "", IndexAllTags: false, } diff --git a/state/execution.go b/state/execution.go index 0ce5e44f1..3fe35e2fa 100644 --- a/state/execution.go +++ b/state/execution.go @@ -341,23 +341,17 @@ func updateState(s State, blockID types.BlockID, header *types.Header, // Fire TxEvent for every tx. // NOTE: if Tendermint crashes before commit, some or all of these events may be published again. func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) { - // NOTE: do we still need this buffer ? - txEventBuffer := types.NewTxEventBuffer(eventBus, int(block.NumTxs)) + eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) + eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) + for i, tx := range block.Data.Txs { - txEventBuffer.PublishEventTx(types.EventDataTx{types.TxResult{ + eventBus.PublishEventTx(types.EventDataTx{types.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, Result: *(abciResponses.DeliverTx[i]), }}) } - - eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) - eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) - err := txEventBuffer.Flush() - if err != nil { - logger.Error("Failed to flush event buffer", "err", err) - } } //---------------------------------------------------------------------------------------------------- diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index bd51fbb29..e23840f14 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -34,7 +34,7 @@ type Batch struct { } // NewBatch creates a new Batch. 
-func NewBatch(n int) *Batch { +func NewBatch(n int64) *Batch { return &Batch{ Ops: make([]*types.TxResult, n), } diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index f5420f631..dd12bdf9d 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -11,6 +11,8 @@ const ( subscriber = "IndexerService" ) +// IndexerService connects event bus and transaction indexer together in order +// to index transactions coming from event bus. type IndexerService struct { cmn.BaseService @@ -18,6 +20,7 @@ type IndexerService struct { eventBus *types.EventBus } +// NewIndexerService returns a new service instance. func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService { is := &IndexerService{idr: idr, eventBus: eventBus} is.BaseService = *cmn.NewBaseService(nil, "IndexerService", is) @@ -27,15 +30,37 @@ func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService // OnStart implements cmn.Service by subscribing for all transactions // and indexing them by tags. 
func (is *IndexerService) OnStart() error { - ch := make(chan interface{}) - if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, ch); err != nil { + blockHeadersCh := make(chan interface{}) + if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryNewBlockHeader, blockHeadersCh); err != nil { return err } + + txsCh := make(chan interface{}) + if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, txsCh); err != nil { + return err + } + go func() { - for event := range ch { - // TODO: may be not perfomant to write one event at a time - txResult := event.(types.EventDataTx).TxResult - is.idr.Index(&txResult) + var numTxs, got int64 + var batch *Batch + for { + select { + case e := <-blockHeadersCh: + numTxs = e.(types.EventDataNewBlockHeader).Header.NumTxs + batch = NewBatch(numTxs) + case e := <-txsCh: + if batch == nil { + panic("Expected pubsub to send block header first, but got tx event") + } + txResult := e.(types.EventDataTx).TxResult + batch.Add(&txResult) + got++ + if numTxs == got { + is.idr.AddBatch(batch) + batch = nil + got = 0 + } + } } }() return nil diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 74a2dd7cb..a8537219d 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -190,7 +190,7 @@ func txResultWithTags(tags []cmn.KVPair) *types.TxResult { } } -func benchmarkTxIndex(txsCount int, b *testing.B) { +func benchmarkTxIndex(txsCount int64, b *testing.B) { tx := types.Tx("HELLO WORLD") txResult := &types.TxResult{ Height: 1, @@ -215,7 +215,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) { indexer := NewTxIndex(store) batch := txindex.NewBatch(txsCount) - for i := 0; i < txsCount; i++ { + for i := int64(0); i < txsCount; i++ { if err := batch.Add(txResult); err != nil { b.Fatal(err) } From 6f7333fd5f764e3664cb146303c447c8fdf1ced6 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 11 May 2018 20:26:24 
+0400 Subject: [PATCH 10/18] fix tests --- state/txindex/indexer_service.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index dd12bdf9d..edcb362e6 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -45,10 +45,16 @@ func (is *IndexerService) OnStart() error { var batch *Batch for { select { - case e := <-blockHeadersCh: + case e, ok := <-blockHeadersCh: + if !ok { + return + } numTxs = e.(types.EventDataNewBlockHeader).Header.NumTxs batch = NewBatch(numTxs) - case e := <-txsCh: + case e, ok := <-txsCh: + if !ok { + return + } if batch == nil { panic("Expected pubsub to send block header first, but got tx event") } From 5e3a23df6d9d8e886970e3e17d65fad523cde30b Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 14 May 2018 11:10:59 +0400 Subject: [PATCH 11/18] simplify indexer service main loop --- node/node.go | 1 + state/txindex/indexer_service.go | 30 +++++++++++------------------- 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/node/node.go b/node/node.go index fdc466695..1bd382eb8 100644 --- a/node/node.go +++ b/node/node.go @@ -343,6 +343,7 @@ func NewNode(config *cfg.Config, } indexerService := txindex.NewIndexerService(txIndexer, eventBus) + indexerService.SetLogger(logger.With("module", "txindex")) // run the profile server profileHost := config.ProfListenAddress diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index edcb362e6..93e6269e8 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -41,32 +41,24 @@ func (is *IndexerService) OnStart() error { } go func() { - var numTxs, got int64 - var batch *Batch for { - select { - case e, ok := <-blockHeadersCh: - if !ok { - return - } - numTxs = e.(types.EventDataNewBlockHeader).Header.NumTxs - batch = NewBatch(numTxs) - case e, ok := <-txsCh: + e, ok := <-blockHeadersCh + if !ok { + return + 
} + header := e.(types.EventDataNewBlockHeader).Header + batch := NewBatch(header.NumTxs) + for i := int64(0); i < header.NumTxs; i++ { + e, ok := <-txsCh if !ok { + is.Logger.Error("Failed to index all transactions due to closed transactions channel", "height", header.Height, "numTxs", header.NumTxs, "numProcessed", i) return } - if batch == nil { - panic("Expected pubsub to send block header first, but got tx event") - } txResult := e.(types.EventDataTx).TxResult batch.Add(&txResult) - got++ - if numTxs == got { - is.idr.AddBatch(batch) - batch = nil - got = 0 - } } + is.idr.AddBatch(batch) + is.Logger.Info("Indexed block", "height", header.Height) } }() return nil From d832bde280106dc911ba74b618a95caec81ed728 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 17 May 2018 10:43:38 +0400 Subject: [PATCH 12/18] update Vagrantfile --- Vagrantfile | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index ac8da0cc1..095a6b061 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -10,31 +10,37 @@ Vagrant.configure("2") do |config| end config.vm.provision "shell", inline: <<-SHELL - # add docker repo - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable" - - # and golang 1.9 support - # official repo doesn't have race detection runtime... 
- # add-apt-repository ppa:gophers/archive - add-apt-repository ppa:longsleep/golang-backports + apt-get update # install base requirements - apt-get update apt-get install -y --no-install-recommends wget curl jq zip \ make shellcheck bsdmainutils psmisc - apt-get install -y docker-ce golang-1.9-go apt-get install -y language-pack-en + # install docker + apt-get install -y --no-install-recommends apt-transport-https \ + ca-certificates curl software-properties-common + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - + add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + apt-get install -y docker-ce + usermod -a -G docker vagrant + + # install go + wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz + tar -xvf go1.10.1.linux-amd64.tar.gz + mv go /usr/local + rm -f go1.10.1.linux-amd64.tar.gz + # cleanup apt-get autoremove -y - # needed for docker - usermod -a -G docker vagrant - # set env variables - echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile + echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile + echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile From b5c4098c533a85360456adf1789459aa9a95df62 Mon Sep 17 00:00:00 2001 From: Zach Date: Thu, 17 May 2018 10:05:59 -0400 Subject: [PATCH 13/18] update docs/examples & terraform/ansible (#1534) * update docs/examples * ansible: add node IDs from docs/examples * better monikers * ansible: clearer paths * upgrade version * examples: updates * docs: consolidate terraform & ansible * remove deprecated info, small reorgs * docs build fix * docs: t&a critical commit * s/dummy/kvstore/g * terraform/DO region unavailable, 
persistent error can't be bothered to debug rn * terraform: need vars * networks: t&a standalone integration script for DO * t&a more updates * examples: add script that shows what the testnet command does * use AMS3, since AMS2 is not available --- .../ansible => docs}/assets/a_plus_t.png | Bin docs/conf.py | 4 - docs/deploy-testnets.rst | 62 +++---- docs/examples/getting-started.md | 16 +- docs/examples/init_testnet.sh | 69 +++++++ docs/examples/install_tendermint.sh | 2 +- docs/examples/node0/config/config.toml | 169 ++++++++++++++++++ docs/examples/node0/config/genesis.json | 39 ++++ docs/examples/node0/config/node_key.json | 1 + .../examples/node0/config/priv_validator.json | 14 ++ docs/examples/node1/config.toml | 15 -- docs/examples/node1/config/config.toml | 169 ++++++++++++++++++ docs/examples/node1/config/genesis.json | 39 ++++ docs/examples/node1/config/node_key.json | 1 + .../examples/node1/config/priv_validator.json | 14 ++ docs/examples/node1/genesis.json | 42 ----- docs/examples/node1/node_key.json | 6 - docs/examples/node1/priv_validator.json | 15 -- docs/examples/node2/config.toml | 15 -- docs/examples/node2/config/config.toml | 169 ++++++++++++++++++ docs/examples/node2/config/genesis.json | 39 ++++ docs/examples/node2/config/node_key.json | 1 + .../examples/node2/config/priv_validator.json | 14 ++ docs/examples/node2/genesis.json | 42 ----- docs/examples/node2/node_key.json | 6 - docs/examples/node2/priv_validator.json | 15 -- docs/examples/node3/config.toml | 15 -- docs/examples/node3/config/config.toml | 169 ++++++++++++++++++ docs/examples/node3/config/genesis.json | 39 ++++ docs/examples/node3/config/node_key.json | 1 + .../examples/node3/config/priv_validator.json | 14 ++ docs/examples/node3/genesis.json | 42 ----- docs/examples/node3/node_key.json | 6 - docs/examples/node3/priv_validator.json | 15 -- docs/examples/node4/config.toml | 15 -- docs/examples/node4/genesis.json | 42 ----- docs/examples/node4/node_key.json | 6 - 
docs/examples/node4/priv_validator.json | 15 -- docs/index.rst | 3 +- docs/specification/corruption.rst | 1 + docs/terraform-and-ansible.rst | 138 ++++++++++++++ networks/remote/ansible/README.rst | 52 ------ .../install/templates/systemd.service.j2 | 2 +- networks/remote/integration.sh | 132 ++++++++++++++ networks/remote/terraform/README.rst | 33 ---- .../remote/terraform/cluster/variables.tf | 2 +- 46 files changed, 1264 insertions(+), 456 deletions(-) rename {networks/remote/ansible => docs}/assets/a_plus_t.png (100%) create mode 100644 docs/examples/init_testnet.sh create mode 100644 docs/examples/node0/config/config.toml create mode 100644 docs/examples/node0/config/genesis.json create mode 100644 docs/examples/node0/config/node_key.json create mode 100644 docs/examples/node0/config/priv_validator.json delete mode 100644 docs/examples/node1/config.toml create mode 100644 docs/examples/node1/config/config.toml create mode 100644 docs/examples/node1/config/genesis.json create mode 100644 docs/examples/node1/config/node_key.json create mode 100644 docs/examples/node1/config/priv_validator.json delete mode 100644 docs/examples/node1/genesis.json delete mode 100644 docs/examples/node1/node_key.json delete mode 100644 docs/examples/node1/priv_validator.json delete mode 100644 docs/examples/node2/config.toml create mode 100644 docs/examples/node2/config/config.toml create mode 100644 docs/examples/node2/config/genesis.json create mode 100644 docs/examples/node2/config/node_key.json create mode 100644 docs/examples/node2/config/priv_validator.json delete mode 100644 docs/examples/node2/genesis.json delete mode 100644 docs/examples/node2/node_key.json delete mode 100644 docs/examples/node2/priv_validator.json delete mode 100644 docs/examples/node3/config.toml create mode 100644 docs/examples/node3/config/config.toml create mode 100644 docs/examples/node3/config/genesis.json create mode 100644 docs/examples/node3/config/node_key.json create mode 100644 
docs/examples/node3/config/priv_validator.json delete mode 100644 docs/examples/node3/genesis.json delete mode 100644 docs/examples/node3/node_key.json delete mode 100644 docs/examples/node3/priv_validator.json delete mode 100644 docs/examples/node4/config.toml delete mode 100644 docs/examples/node4/genesis.json delete mode 100644 docs/examples/node4/node_key.json delete mode 100644 docs/examples/node4/priv_validator.json create mode 100644 docs/terraform-and-ansible.rst delete mode 100644 networks/remote/ansible/README.rst create mode 100644 networks/remote/integration.sh delete mode 100644 networks/remote/terraform/README.rst diff --git a/networks/remote/ansible/assets/a_plus_t.png b/docs/assets/a_plus_t.png similarity index 100% rename from networks/remote/ansible/assets/a_plus_t.png rename to docs/assets/a_plus_t.png diff --git a/docs/conf.py b/docs/conf.py index 96e6ca7ff..8fefec4da 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -184,9 +184,6 @@ if os.path.isdir(tools_dir) != True: if os.path.isdir(assets_dir) != True: os.mkdir(assets_dir) -urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst') -urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png') - urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst') urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst') @@ -195,7 +192,6 @@ urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png' urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png') urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png') -urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst') 
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst') urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst') diff --git a/docs/deploy-testnets.rst b/docs/deploy-testnets.rst index 32355e4ae..d7ea97b36 100644 --- a/docs/deploy-testnets.rst +++ b/docs/deploy-testnets.rst @@ -3,8 +3,7 @@ Deploy a Testnet Now that we've seen how ABCI works, and even played with a few applications on a single validator node, it's time to deploy a test -network to four validator nodes. For this deployment, we'll use the -``basecoin`` application. +network to four validator nodes. Manual Deployments ------------------ @@ -24,67 +23,46 @@ Here are the steps to setting up a testnet manually: ``tendermint init`` 4) Compile a list of public keys for each validator into a ``genesis.json`` file and replace the existing file with it. -5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node, +5) Run ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`` on each node, where ``< peer addresses >`` is a comma separated list of the IP:PORT combination for each node. The default port for Tendermint is ``46656``. Thus, if the IP addresses of your nodes were ``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command would look like: - ``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``. + ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``. 
After a few seconds, all the nodes should connect to each other and start making blocks! For more information, see the Tendermint Networks section of `the guide to using Tendermint `__. -Automated Deployments ---------------------- - While the manual deployment is easy enough, an automated deployment is usually quicker. The below examples show different tools that can be used for automated deployments. -Automated Deployment using Kubernetes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The `mintnet-kubernetes tool `__ -allows automating the deployment of a Tendermint network on an already -provisioned Kubernetes cluster. For simple provisioning of a Kubernetes -cluster, check out the `Google Cloud Platform `__. - -Automated Deployment using Terraform and Ansible -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The `terraform-digitalocean tool `__ -allows creating a set of servers on the DigitalOcean cloud. - -The `ansible playbooks `__ -allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers. +Automated Deployments +--------------------- -Package Deployment on Linux for developers -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Local +^^^^^ -The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on -Linux machines for development purposes. The packages are configured to be validators on the -one-node network that the machine represents. The services are not started after installation, -this way giving an opportunity to reconfigure the applications before starting. +With ``docker`` installed, run the command: -The Ansible playbooks in the previous section use this repository to install ``basecoin``. -After installation, additional steps are executed to make sure that the multi-node testnet has -the right configuration before start. +:: -Install from the CentOS/RedHat repository: + make localnet-start -:: +from the root of the tendermint repository. This will spin up a 4-node local testnet. 
- rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint - wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo - yum install basecoin +Cloud Deployment using Kubernetes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Install from the Debian/Ubuntu repository: +The `mintnet-kubernetes tool `__ +allows automating the deployment of a Tendermint network on an already +provisioned Kubernetes cluster. For simple provisioning of a Kubernetes +cluster, check out the `Google Cloud Platform `__. -:: +Cloud Deployment using Terraform and Ansible +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add - - wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list - apt-get update && apt-get install basecoin +See the `next section <./terraform-and-ansible.html>`__ for details. diff --git a/docs/examples/getting-started.md b/docs/examples/getting-started.md index 59015b79f..28841e164 100644 --- a/docs/examples/getting-started.md +++ b/docs/examples/getting-started.md @@ -10,10 +10,10 @@ documentation](http://tendermint.readthedocs.io/en/master/). ### Quick Install -On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so: +On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vpgEI), like so: ``` -curl -L https://git.io/vxWlX | bash +curl -L https://git.io/vpgEI | bash source ~/.profile ``` @@ -24,7 +24,7 @@ The script is also used to facilitate cluster deployment below. ### Manual Install Requires: -- `go` minimum version 1.9 +- `go` minimum version 1.10 - `$GOPATH` environment variable must be set - `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH) @@ -125,7 +125,7 @@ addresses below as IP1, IP2, IP3, IP4. 
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY): ``` -curl -L https://git.io/vNLfY | bash +curl -L https://git.io/vpgEI | bash source ~/.profile ``` @@ -134,10 +134,10 @@ This will install `go` and other dependencies, get the Tendermint source code, t Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence: ``` -tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656" -tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656" -tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656" -tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656" +tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656" +tendermint node --home ./node1 --proxy_app=kvstore 
--p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656" +tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656" +tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656" ``` Note that after the third node is started, blocks will start to stream in diff --git a/docs/examples/init_testnet.sh b/docs/examples/init_testnet.sh new file mode 100644 index 000000000..cd83751ec --- /dev/null +++ b/docs/examples/init_testnet.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# make all the files +tendermint init --home ./tester/node0 +tendermint init --home ./tester/node1 +tendermint init --home ./tester/node2 +tendermint init --home ./tester/node3 + +file0=./tester/node0/config/genesis.json +file1=./tester/node1/config/genesis.json +file2=./tester/node2/config/genesis.json +file3=./tester/node3/config/genesis.json + +genesis_time=`cat $file0 | jq '.genesis_time'` +chain_id=`cat $file0 | jq '.chain_id'` + +value0=`cat $file0 | jq '.validators[0].pub_key.value'` +value1=`cat $file1 | jq '.validators[0].pub_key.value'` +value2=`cat $file2 | jq '.validators[0].pub_key.value'` +value3=`cat $file3 | jq '.validators[0].pub_key.value'` + +rm $file0 +rm $file1 +rm $file2 +rm $file3 + +echo "{ + \"genesis_time\": $genesis_time, + \"chain_id\": $chain_id, + \"validators\": [ + { + \"pub_key\": { + \"type\": \"AC26791624DE60\", + \"value\": $value0 + }, + \"power\": 10, + \"name\":
 \"\" + }, + { + \"pub_key\": { + \"type\": \"AC26791624DE60\", + \"value\": $value1 + }, + \"power\": 10, + \"name\": \"\" + }, + { + \"pub_key\": { + \"type\": \"AC26791624DE60\", + \"value\": $value2 + }, + \"power\": 10, + \"name\": \"\" + }, + { + \"pub_key\": { + \"type\": \"AC26791624DE60\", + \"value\": $value3 + }, + \"power\": 10, + \"name\": \"\" + } + ], + \"app_hash\": \"\" +}" >> $file0 + +cp $file0 $file1 +cp $file0 $file2 +cp $file2 $file3 \ No newline at end of file diff --git a/docs/examples/install_tendermint.sh b/docs/examples/install_tendermint.sh index aeb87db5f..5a9c49d78 100644 --- a/docs/examples/install_tendermint.sh +++ b/docs/examples/install_tendermint.sh @@ -26,7 +26,7 @@ go get $REPO cd $GOPATH/src/$REPO ## build -git checkout v0.18.0 +git checkout master make get_tools make get_vendor_deps make install diff --git a/docs/examples/node0/config/config.toml b/docs/examples/node0/config/config.toml new file mode 100644 index 000000000..a1893c65d --- /dev/null +++ b/docs/examples/node0/config/config.toml @@ -0,0 +1,169 @@ +# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:46658" + +# A custom human readable name for this node +moniker = "alpha" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb +db_backend = "leveldb" + +# Database directory +db_path = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:46657" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +##### peer to peer configuration options 
##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:46656" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +# Do not add private peers to this list if you don't want them advertised +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +addr_book_strict = true + +# Time to wait before flushing messages out on the connection, in ms +flush_throttle_timeout = 100 + +# Maximum number of peers to connect to +max_num_peers = 50 + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 512000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 512000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. 
+seed_mode = false + +# Authenticated encryption +auth_enc = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +##### mempool configuration options ##### +[mempool] + +recheck = true +recheck_empty = true +broadcast = true +wal_dir = "data/mempool.wal" + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +# All timeouts are in milliseconds +timeout_propose = 3000 +timeout_propose_delta = 500 +timeout_prevote = 1000 +timeout_prevote_delta = 500 +timeout_precommit = 1000 +timeout_precommit_delta = 500 +timeout_commit = 1000 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# BlockSize +max_block_size_txs = 10000 +max_block_size_bytes = 1 + +# EmptyBlocks mode and possible interval between empty blocks in seconds +create_empty_blocks = true +create_empty_blocks_interval = 0 + +# Reactor sleep duration parameters are in milliseconds +peer_gossip_sleep_duration = 100 +peer_query_maj23_sleep_duration = 2000 + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags. Note this may be not +# desirable (see the comment above). IndexTags has a precedence over +# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
+index_all_tags = false diff --git a/docs/examples/node0/config/genesis.json b/docs/examples/node0/config/genesis.json new file mode 100644 index 000000000..b9c12e31b --- /dev/null +++ b/docs/examples/node0/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "0001-01-01T00:00:00Z", + "chain_id": "test-chain-A2i3OZ", + "validators": [ + { + "pub_key": { + "type": "AC26791624DE60", + "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" + }, + "power": 10, + "name": "" + } + ], + "app_hash": "" +} diff --git a/docs/examples/node0/config/node_key.json b/docs/examples/node0/config/node_key.json new file mode 100644 index 000000000..f4cdd093f --- /dev/null +++ b/docs/examples/node0/config/node_key.json @@ -0,0 +1 @@ +{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}} \ No newline at end of file diff --git a/docs/examples/node0/config/priv_validator.json b/docs/examples/node0/config/priv_validator.json new file mode 100644 index 000000000..e758b75b8 --- /dev/null +++ b/docs/examples/node0/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "122A9414774A2FCAD026201DA477EF3F41970EF0", + "pub_key": { + "type": "AC26791624DE60", + "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" + }, + "last_height": 0, + "last_round": 0, + "last_step": 0, + "priv_key": { + "type": "954568A3288910", + "value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ==" + } +} \ No newline at end of file diff --git 
a/docs/examples/node1/config.toml b/docs/examples/node1/config.toml deleted file mode 100644 index 10bbf7105..000000000 --- a/docs/examples/node1/config.toml +++ /dev/null @@ -1,15 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -proxy_app = "tcp://127.0.0.1:46658" -moniker = "penguin" -fast_sync = true -db_backend = "leveldb" -log_level = "state:info,*:error" - -[rpc] -laddr = "tcp://0.0.0.0:46657" - -[p2p] -laddr = "tcp://0.0.0.0:46656" -seeds = "" diff --git a/docs/examples/node1/config/config.toml b/docs/examples/node1/config/config.toml new file mode 100644 index 000000000..c298be9ee --- /dev/null +++ b/docs/examples/node1/config/config.toml @@ -0,0 +1,169 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:46658" + +# A custom human readable name for this node +moniker = "bravo" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb +db_backend = "leveldb" + +# Database directory +db_path = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to 
the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:46657" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:46656" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +# Do not add private peers to this list if you don't want them advertised +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +addr_book_strict = true + +# Time to wait before flushing messages out on the connection, in ms +flush_throttle_timeout = 100 + +# Maximum number of peers to connect to +max_num_peers = 50 + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 512000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 512000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. 
+seed_mode = false + +# Authenticated encryption +auth_enc = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +##### mempool configuration options ##### +[mempool] + +recheck = true +recheck_empty = true +broadcast = true +wal_dir = "data/mempool.wal" + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +# All timeouts are in milliseconds +timeout_propose = 3000 +timeout_propose_delta = 500 +timeout_prevote = 1000 +timeout_prevote_delta = 500 +timeout_precommit = 1000 +timeout_precommit_delta = 500 +timeout_commit = 1000 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# BlockSize +max_block_size_txs = 10000 +max_block_size_bytes = 1 + +# EmptyBlocks mode and possible interval between empty blocks in seconds +create_empty_blocks = true +create_empty_blocks_interval = 0 + +# Reactor sleep duration parameters are in milliseconds +peer_gossip_sleep_duration = 100 +peer_query_maj23_sleep_duration = 2000 + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags. Note this may be not +# desirable (see the comment above). IndexTags has a precedence over +# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
+index_all_tags = false diff --git a/docs/examples/node1/config/genesis.json b/docs/examples/node1/config/genesis.json new file mode 100644 index 000000000..b9c12e31b --- /dev/null +++ b/docs/examples/node1/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "0001-01-01T00:00:00Z", + "chain_id": "test-chain-A2i3OZ", + "validators": [ + { + "pub_key": { + "type": "AC26791624DE60", + "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" + }, + "power": 10, + "name": "" + } + ], + "app_hash": "" +} diff --git a/docs/examples/node1/config/node_key.json b/docs/examples/node1/config/node_key.json new file mode 100644 index 000000000..374efe63b --- /dev/null +++ b/docs/examples/node1/config/node_key.json @@ -0,0 +1 @@ +{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}} \ No newline at end of file diff --git a/docs/examples/node1/config/priv_validator.json b/docs/examples/node1/config/priv_validator.json new file mode 100644 index 000000000..caf3dbc59 --- /dev/null +++ b/docs/examples/node1/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1", + "pub_key": { + "type": "AC26791624DE60", + "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" + }, + "last_height": 0, + "last_round": 0, + "last_step": 0, + "priv_key": { + "type": "954568A3288910", + "value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ==" + } +} \ No newline at end of file diff --git 
a/docs/examples/node1/genesis.json b/docs/examples/node1/genesis.json deleted file mode 100644 index 78ff6ab3b..000000000 --- a/docs/examples/node1/genesis.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "genesis_time":"0001-01-01T00:00:00Z", - "chain_id":"test-chain-wt7apy", - "validators":[ - { - "pub_key":{ - "type":"ed25519", - "data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - }, - "power":10, - "name":"node1" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - }, - "power":10, - "name":"node2" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - }, - "power":10, - "name":"node3" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - }, - "power":10, - "name":"node4" - } - ], - "app_hash":"" -} diff --git a/docs/examples/node1/node_key.json b/docs/examples/node1/node_key.json deleted file mode 100644 index de1c41718..000000000 --- a/docs/examples/node1/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key" : { - "data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC", - "type" : "ed25519" - } -} diff --git a/docs/examples/node1/priv_validator.json b/docs/examples/node1/priv_validator.json deleted file mode 100644 index f6c5634a1..000000000 --- a/docs/examples/node1/priv_validator.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F", - "pub_key":{ - "type":"ed25519", - "data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - }, - "last_height":0, - "last_round":0, - "last_step":0, - "last_signature":null, - "priv_key":{ - "type":"ed25519", - "data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - } -} 
diff --git a/docs/examples/node2/config.toml b/docs/examples/node2/config.toml deleted file mode 100644 index 10bbf7105..000000000 --- a/docs/examples/node2/config.toml +++ /dev/null @@ -1,15 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -proxy_app = "tcp://127.0.0.1:46658" -moniker = "penguin" -fast_sync = true -db_backend = "leveldb" -log_level = "state:info,*:error" - -[rpc] -laddr = "tcp://0.0.0.0:46657" - -[p2p] -laddr = "tcp://0.0.0.0:46656" -seeds = "" diff --git a/docs/examples/node2/config/config.toml b/docs/examples/node2/config/config.toml new file mode 100644 index 000000000..cedd91b53 --- /dev/null +++ b/docs/examples/node2/config/config.toml @@ -0,0 +1,169 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:46658" + +# A custom human readable name for this node +moniker = "charlie" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb +db_backend = "leveldb" + +# Database directory +db_path = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to 
connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:46657" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:46656" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +# Do not add private peers to this list if you don't want them advertised +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +addr_book_strict = true + +# Time to wait before flushing messages out on the connection, in ms +flush_throttle_timeout = 100 + +# Maximum number of peers to connect to +max_num_peers = 50 + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 512000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 512000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. 
+seed_mode = false + +# Authenticated encryption +auth_enc = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +##### mempool configuration options ##### +[mempool] + +recheck = true +recheck_empty = true +broadcast = true +wal_dir = "data/mempool.wal" + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +# All timeouts are in milliseconds +timeout_propose = 3000 +timeout_propose_delta = 500 +timeout_prevote = 1000 +timeout_prevote_delta = 500 +timeout_precommit = 1000 +timeout_precommit_delta = 500 +timeout_commit = 1000 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# BlockSize +max_block_size_txs = 10000 +max_block_size_bytes = 1 + +# EmptyBlocks mode and possible interval between empty blocks in seconds +create_empty_blocks = true +create_empty_blocks_interval = 0 + +# Reactor sleep duration parameters are in milliseconds +peer_gossip_sleep_duration = 100 +peer_query_maj23_sleep_duration = 2000 + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags. Note this may be not +# desirable (see the comment above). IndexTags has a precedence over +# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
+index_all_tags = false diff --git a/docs/examples/node2/config/genesis.json b/docs/examples/node2/config/genesis.json new file mode 100644 index 000000000..b9c12e31b --- /dev/null +++ b/docs/examples/node2/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "0001-01-01T00:00:00Z", + "chain_id": "test-chain-A2i3OZ", + "validators": [ + { + "pub_key": { + "type": "AC26791624DE60", + "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" + }, + "power": 10, + "name": "" + } + ], + "app_hash": "" +} diff --git a/docs/examples/node2/config/node_key.json b/docs/examples/node2/config/node_key.json new file mode 100644 index 000000000..52a978bb0 --- /dev/null +++ b/docs/examples/node2/config/node_key.json @@ -0,0 +1 @@ +{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}} \ No newline at end of file diff --git a/docs/examples/node2/config/priv_validator.json b/docs/examples/node2/config/priv_validator.json new file mode 100644 index 000000000..65fa30484 --- /dev/null +++ b/docs/examples/node2/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1", + "pub_key": { + "type": "AC26791624DE60", + "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" + }, + "last_height": 0, + "last_round": 0, + "last_step": 0, + "priv_key": { + "type": "954568A3288910", + "value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ==" + } +} \ No newline at end of file diff --git 
a/docs/examples/node2/genesis.json b/docs/examples/node2/genesis.json deleted file mode 100644 index 78ff6ab3b..000000000 --- a/docs/examples/node2/genesis.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "genesis_time":"0001-01-01T00:00:00Z", - "chain_id":"test-chain-wt7apy", - "validators":[ - { - "pub_key":{ - "type":"ed25519", - "data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - }, - "power":10, - "name":"node1" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - }, - "power":10, - "name":"node2" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - }, - "power":10, - "name":"node3" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - }, - "power":10, - "name":"node4" - } - ], - "app_hash":"" -} diff --git a/docs/examples/node2/node_key.json b/docs/examples/node2/node_key.json deleted file mode 100644 index 4e8b0b100..000000000 --- a/docs/examples/node2/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key" : { - "data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95", - "type" : "ed25519" - } -} diff --git a/docs/examples/node2/priv_validator.json b/docs/examples/node2/priv_validator.json deleted file mode 100644 index 7733196e6..000000000 --- a/docs/examples/node2/priv_validator.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "address": "DD6C63A762608A9DDD4A845657743777F63121D6", - "pub_key": { - "type": "ed25519", - "data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "last_signature": null, - "priv_key": { - "type": "ed25519", - "data": 
"7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - } -} diff --git a/docs/examples/node3/config.toml b/docs/examples/node3/config.toml deleted file mode 100644 index 10bbf7105..000000000 --- a/docs/examples/node3/config.toml +++ /dev/null @@ -1,15 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -proxy_app = "tcp://127.0.0.1:46658" -moniker = "penguin" -fast_sync = true -db_backend = "leveldb" -log_level = "state:info,*:error" - -[rpc] -laddr = "tcp://0.0.0.0:46657" - -[p2p] -laddr = "tcp://0.0.0.0:46656" -seeds = "" diff --git a/docs/examples/node3/config/config.toml b/docs/examples/node3/config/config.toml new file mode 100644 index 000000000..7e04b0c39 --- /dev/null +++ b/docs/examples/node3/config/config.toml @@ -0,0 +1,169 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:46658" + +# A custom human readable name for this node +moniker = "delta" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb +db_backend = "leveldb" + +# Database directory +db_path = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# Path to the JSON file 
containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:46657" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:46656" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +# Do not add private peers to this list if you don't want them advertised +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +addr_book_strict = true + +# Time to wait before flushing messages out on the connection, in ms +flush_throttle_timeout = 100 + +# Maximum number of peers to connect to +max_num_peers = 50 + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 512000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 512000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. 
If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Authenticated encryption +auth_enc = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +##### mempool configuration options ##### +[mempool] + +recheck = true +recheck_empty = true +broadcast = true +wal_dir = "data/mempool.wal" + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +# All timeouts are in milliseconds +timeout_propose = 3000 +timeout_propose_delta = 500 +timeout_prevote = 1000 +timeout_prevote_delta = 500 +timeout_precommit = 1000 +timeout_precommit_delta = 500 +timeout_commit = 1000 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# BlockSize +max_block_size_txs = 10000 +max_block_size_bytes = 1 + +# EmptyBlocks mode and possible interval between empty blocks in seconds +create_empty_blocks = true +create_empty_blocks_interval = 0 + +# Reactor sleep duration parameters are in milliseconds +peer_gossip_sleep_duration = 100 +peer_query_maj23_sleep_duration = 2000 + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is tx hash) +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags. Note this may be not +# desirable (see the comment above). IndexTags has a precedence over +# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
+index_all_tags = false diff --git a/docs/examples/node3/config/genesis.json b/docs/examples/node3/config/genesis.json new file mode 100644 index 000000000..b9c12e31b --- /dev/null +++ b/docs/examples/node3/config/genesis.json @@ -0,0 +1,39 @@ +{ + "genesis_time": "0001-01-01T00:00:00Z", + "chain_id": "test-chain-A2i3OZ", + "validators": [ + { + "pub_key": { + "type": "AC26791624DE60", + "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" + }, + "power": 10, + "name": "" + }, + { + "pub_key": { + "type": "AC26791624DE60", + "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" + }, + "power": 10, + "name": "" + } + ], + "app_hash": "" +} diff --git a/docs/examples/node3/config/node_key.json b/docs/examples/node3/config/node_key.json new file mode 100644 index 000000000..bde4e0ede --- /dev/null +++ b/docs/examples/node3/config/node_key.json @@ -0,0 +1 @@ +{"priv_key":{"type":"954568A3288910","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}} \ No newline at end of file diff --git a/docs/examples/node3/config/priv_validator.json b/docs/examples/node3/config/priv_validator.json new file mode 100644 index 000000000..1d985a00b --- /dev/null +++ b/docs/examples/node3/config/priv_validator.json @@ -0,0 +1,14 @@ +{ + "address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9", + "pub_key": { + "type": "AC26791624DE60", + "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" + }, + "last_height": 0, + "last_round": 0, + "last_step": 0, + "priv_key": { + "type": "954568A3288910", + "value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ==" + } +} \ No newline at end of file diff --git 
a/docs/examples/node3/genesis.json b/docs/examples/node3/genesis.json deleted file mode 100644 index 78ff6ab3b..000000000 --- a/docs/examples/node3/genesis.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "genesis_time":"0001-01-01T00:00:00Z", - "chain_id":"test-chain-wt7apy", - "validators":[ - { - "pub_key":{ - "type":"ed25519", - "data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - }, - "power":10, - "name":"node1" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - }, - "power":10, - "name":"node2" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - }, - "power":10, - "name":"node3" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - }, - "power":10, - "name":"node4" - } - ], - "app_hash":"" -} diff --git a/docs/examples/node3/node_key.json b/docs/examples/node3/node_key.json deleted file mode 100644 index 32fdeee9d..000000000 --- a/docs/examples/node3/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key" : { - "data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7", - "type" : "ed25519" - } -} diff --git a/docs/examples/node3/priv_validator.json b/docs/examples/node3/priv_validator.json deleted file mode 100644 index d570b1279..000000000 --- a/docs/examples/node3/priv_validator.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "address": "6D6A1E313B407B5474106CA8759C976B777AB659", - "pub_key": { - "type": "ed25519", - "data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "last_signature": null, - "priv_key": { - "type": "ed25519", - "data": 
"622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - } -} diff --git a/docs/examples/node4/config.toml b/docs/examples/node4/config.toml deleted file mode 100644 index 10bbf7105..000000000 --- a/docs/examples/node4/config.toml +++ /dev/null @@ -1,15 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -proxy_app = "tcp://127.0.0.1:46658" -moniker = "penguin" -fast_sync = true -db_backend = "leveldb" -log_level = "state:info,*:error" - -[rpc] -laddr = "tcp://0.0.0.0:46657" - -[p2p] -laddr = "tcp://0.0.0.0:46656" -seeds = "" diff --git a/docs/examples/node4/genesis.json b/docs/examples/node4/genesis.json deleted file mode 100644 index 78ff6ab3b..000000000 --- a/docs/examples/node4/genesis.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "genesis_time":"0001-01-01T00:00:00Z", - "chain_id":"test-chain-wt7apy", - "validators":[ - { - "pub_key":{ - "type":"ed25519", - "data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30" - }, - "power":10, - "name":"node1" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F" - }, - "power":10, - "name":"node2" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A" - }, - "power":10, - "name":"node3" - } - , - { - "pub_key":{ - "type":"ed25519", - "data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - }, - "power":10, - "name":"node4" - } - ], - "app_hash":"" -} diff --git a/docs/examples/node4/node_key.json b/docs/examples/node4/node_key.json deleted file mode 100644 index 955fc989a..000000000 --- a/docs/examples/node4/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key" : { - "data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57", - "type" : 
"ed25519" - } -} diff --git a/docs/examples/node4/priv_validator.json b/docs/examples/node4/priv_validator.json deleted file mode 100644 index 1ea7831bb..000000000 --- a/docs/examples/node4/priv_validator.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "address": "829A9663611D3DD88A3D84EA0249679D650A0755", - "pub_key": { - "type": "ed25519", - "data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "last_signature": null, - "priv_key": { - "type": "ed25519", - "data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07" - } -} diff --git a/docs/index.rst b/docs/index.rst index a89adb296..99ca11e73 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -40,10 +40,9 @@ Tendermint Tools :maxdepth: 2 deploy-testnets.rst - tools/ansible.rst + terraform-and-ansible.rst tools/docker.rst tools/mintnet-kubernetes.rst - tools/terraform-digitalocean.rst tools/benchmarking.rst tools/monitoring.rst diff --git a/docs/specification/corruption.rst b/docs/specification/corruption.rst index 33e5ba0b0..6ae19fb18 100644 --- a/docs/specification/corruption.rst +++ b/docs/specification/corruption.rst @@ -38,6 +38,7 @@ Recovering from data corruption can be hard and time-consuming. Here are two app 1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers. 2) Try to repair the WAL file manually: + 1. Create a backup of the corrupted WAL file: .. code:: bash diff --git a/docs/terraform-and-ansible.rst b/docs/terraform-and-ansible.rst new file mode 100644 index 000000000..f11b67d5f --- /dev/null +++ b/docs/terraform-and-ansible.rst @@ -0,0 +1,138 @@ +Terraform & Ansible +=================== + +Automated deployments are done using `Terraform `__ to create servers on Digital Ocean then +`Ansible `__ to create and manage testnets on those servers. 
+ +Install +------- + +NOTE: see the `integration bash script `__ that can be run on a fresh DO droplet and will automatically spin up a 4 node testnet. The script more or less does everything described below. + +- Install `Terraform `__ and `Ansible `__ on a Linux machine. +- Create a `DigitalOcean API token `__ with read and write capability. +- Install the python dopy package (``pip install dopy``) +- Create SSH keys (``ssh-keygen``) +- Set environment variables: + +:: + + export DO_API_TOKEN="abcdef01234567890abcdef01234567890" + export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" + +These will be used by both ``terraform`` and ``ansible``. + +Terraform +--------- + +This step will create four Digital Ocean droplets. First, go to the correct directory: + +:: + + cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform + +then: + +:: + + terraform init + terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" + +and you will get a list of IP addresses that belong to your droplets. + +With the droplets created and running, let's setup Ansible. + +Using Ansible +------------- + +The playbooks in `the ansible directory `__ +run ansible roles to configure the sentry node architecture. You must switch to this directory to run ansible (``cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible``). + +There are several roles that are self-explanatory: + +First, we configure our droplets by specifying the paths for tendermint (``BINARY``) and the node files (``CONFIGDIR``). The latter expects any number of directories named ``node0, node1, ...`` and so on (equal to the number of droplets created). For this example, we use pre-created files from `this directory `__. To create your own files, use either the ``tendermint testnet`` command or review `manual deployments <./deploy-testnets.html>`__. 
+ +Here's the command to run: + +:: + + ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples + +Voila! All your droplets now have the ``tendermint`` binary and required configuration files to run a testnet. + +Next, we run the install role: + +:: + + ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml + +which as you'll see below, executes ``tendermint node --proxy_app=kvstore`` on all droplets. Although we'll soon be modifying this role and running it again, this first execution allows us to get each ``node_info.id`` that corresponds to each ``node_info.listen_addr``. (This part will be automated in the future). In your browser (or using ``curl``), for every droplet, go to IP:46657/status and note the two just mentioned ``node_info`` fields. Notice that blocks aren't being created (``latest_block_height`` should be zero and not increasing). + +Next, open ``roles/install/templates/systemd.service.j2`` and look for the line ``ExecStart`` which should look something like: + +:: + + ExecStart=/usr/bin/tendermint node --proxy_app=kvstore + +and add the ``--p2p.persistent_peers`` flag with the relevant information for each node. 
The resulting file should look something like: + +:: + + [Unit] + Description={{service}} + Requires=network-online.target + After=network-online.target + + [Service] + Restart=on-failure + User={{service}} + Group={{service}} + PermissionsStartOnly=true + ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=167b80242c300bf0ccfb3ced3dec60dc2a81776e@165.227.41.206:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@165.227.43.146:46656,303a1a4312c30525c99ba66522dd81cca56a361a@159.89.115.32:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@159.89.119.125:46656 + ExecReload=/bin/kill -HUP $MAINPID + KillSignal=SIGTERM + + [Install] + WantedBy=multi-user.target + +Then, stop the nodes: + +:: + + ansible-playbook -i inventory/digital_ocean.py -l sentrynet stop.yml + +Finally, we run the install role again: + +:: + + ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml + +to re-run ``tendermint node`` with the new flag, on all droplets. The ``latest_block_hash`` should now be changing and ``latest_block_height`` increasing. Your testnet is now up and running :) + +Peek at the logs with the status role: + +:: + + ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml + +Logging +------- + +The crudest way is the status role described above. You can also ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on `this page `__, then: + +:: + + yum install systemd-devel || echo "This will only work on RHEL-based systems." + apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." 
+ + go get github.com/mheese/journalbeat + ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 + +Cleanup +------- + +To remove your droplets, run: + +:: + + terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" diff --git a/networks/remote/ansible/README.rst b/networks/remote/ansible/README.rst deleted file mode 100644 index 713e124e9..000000000 --- a/networks/remote/ansible/README.rst +++ /dev/null @@ -1,52 +0,0 @@ -Using Ansible -============= - -.. figure:: assets/a_plus_t.png - :alt: Ansible plus Tendermint - - Ansible plus Tendermint - -The playbooks in `the ansible directory `__ -run ansible `roles `__ to configure the sentry node architecture. - -Prerequisites -------------- - -- Install `Ansible 2.0 or higher `__ on a linux machine. -- Create a `DigitalOcean API token `__ with read and write capability. -- Create SSH keys -- Install the python dopy package (for the digital_ocean.py script) - -Build ------ - -:: - - export DO_API_TOKEN="abcdef01234567890abcdef01234567890" - export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" - - - ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml - - # The scripts assume that you have your validator set up already. - # You can create the folder structure for the sentry nodes using `tendermint testnet`. - # For example: tendermint testnet --v 0 --n 4 --o build/ - # Then copy your genesis.json and modify the config.toml as you see fit. - - # Reconfig the sentry nodes with a new BINARY and the configuration files from the build folder: - ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=`pwd`/build/tendermint -e CONFIGDIR=`pwd`/build - -Shipping logs to logz.io ------------------------- - -Logz.io is an Elastic stack (Elastic search, Logstash and Kibana) service provider. You can set up your nodes to log there automatically. 
Create an account and get your API key from the notes on `this page `__. - -:: - - yum install systemd-devel || echo "This will only work on RHEL-based systems." - apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." - - go get github.com/mheese/journalbeat - ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 - - diff --git a/networks/remote/ansible/roles/install/templates/systemd.service.j2 b/networks/remote/ansible/roles/install/templates/systemd.service.j2 index 34ba3ecf5..17b3de4d1 100644 --- a/networks/remote/ansible/roles/install/templates/systemd.service.j2 +++ b/networks/remote/ansible/roles/install/templates/systemd.service.j2 @@ -8,7 +8,7 @@ Restart=on-failure User={{service}} Group={{service}} PermissionsStartOnly=true -ExecStart=/usr/bin/tendermint node --proxy_app=dummy +ExecStart=/usr/bin/tendermint node --proxy_app=kvstore ExecReload=/bin/kill -HUP $MAINPID KillSignal=SIGTERM diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh new file mode 100644 index 000000000..c576e807b --- /dev/null +++ b/networks/remote/integration.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash + +# XXX: this script is intended to be run from a fresh Digital Ocean droplet + +# NOTE: you must set this manually now +echo "export DO_API_TOKEN=\"yourToken\"" >> ~/.profile + +sudo apt-get update -y +sudo apt-get upgrade -y +sudo apt-get install -y jq unzip python-pip software-properties-common make + +# get and unpack golang +curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz +tar -xvf go1.10.linux-amd64.tar.gz + +## move binary and add to path +mv go /usr/local +echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile + +## create the goApps directory, set GOPATH, and put it on PATH +mkdir goApps +echo "export GOPATH=/root/goApps" >> ~/.profile +echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile + +source ~/.profile + +## get the code and 
move into repo +REPO=github.com/tendermint/tendermint +go get $REPO +cd $GOPATH/src/$REPO + +## build +git checkout zach/ansible +make get_tools +make get_vendor_deps +make build + +# generate an ssh key +ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N '' +echo "export SSH_KEY_FILE=\"\$HOME/.ssh/id_rsa.pub\"" >> ~/.profile +source ~/.profile + +# install terraform +wget https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip +unzip terraform_0.11.7_linux_amd64.zip -d /usr/bin/ + +# install ansible +sudo apt-get update -y +sudo apt-add-repository ppa:ansible/ansible -y +sudo apt-get update -y +sudo apt-get install ansible -y + +# required by ansible +pip install dopy + +# the next two commands are directory sensitive +cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform + +terraform init +terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" -auto-approve + +# let the droplets boot +sleep 60 + +# get the IPs +ip0=`terraform output -json public_ips | jq '.value[0]'` +ip1=`terraform output -json public_ips | jq '.value[1]'` +ip2=`terraform output -json public_ips | jq '.value[2]'` +ip3=`terraform output -json public_ips | jq '.value[3]'` + +# to remove quotes +strip() { + opt=$1 + temp="${opt%\"}" + temp="${temp#\"}" + echo $temp +} + +ip0=$(strip $ip0) +ip1=$(strip $ip1) +ip2=$(strip $ip2) +ip3=$(strip $ip3) + +# all the ansible commands are also directory specific +cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible + +ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml +ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples + +sleep 10 + +# get each nodes ID then populate the ansible file +id0=`curl $ip0:46657/status | jq .result.node_info.id` +id1=`curl $ip1:46657/status | jq .result.node_info.id` 
+id2=`curl $ip2:46657/status | jq .result.node_info.id` +id3=`curl $ip3:46657/status | jq .result.node_info.id` + +id0=$(strip $id0) +id1=$(strip $id1) +id2=$(strip $id2) +id3=$(strip $id3) + +# remove file we'll re-write to with new info +old_ansible_file=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/roles/install/templates/systemd.service.j2 +rm $old_ansible_file + +# need to populate the `--p2p.persistent_peers` flag +echo "[Unit] +Description={{service}} +Requires=network-online.target +After=network-online.target + +[Service] +Restart=on-failure +User={{service}} +Group={{service}} +PermissionsStartOnly=true +ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=$id0@$ip0:46656,$id1@$ip1:46656,$id2@$ip2:46656,$id3@$ip3:46656 +ExecReload=/bin/kill -HUP \$MAINPID +KillSignal=SIGTERM + +[Install] +WantedBy=multi-user.target +" >> $old_ansible_file + +# now, we can re-run the install command +ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml + +# and finally restart it all +ansible-playbook -i inventory/digital_ocean.py -l sentrynet restart.yml + +echo "congratulations, your testnet is now running :)" diff --git a/networks/remote/terraform/README.rst b/networks/remote/terraform/README.rst deleted file mode 100644 index 243d1c148..000000000 --- a/networks/remote/terraform/README.rst +++ /dev/null @@ -1,33 +0,0 @@ -Using Terraform -=============== - -This is a `Terraform `__ configuration that sets up DigitalOcean droplets. - -Prerequisites -------------- - -- Install `HashiCorp Terraform `__ on a linux machine. -- Create a `DigitalOcean API token `__ with read and write capability. 
-- Create SSH keys - -Build ------ - -:: - - export DO_API_TOKEN="abcdef01234567890abcdef01234567890" - export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" - - terraform init - terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" - -At the end you will get a list of IP addresses that belongs to your new droplets. - -Destroy -------- - -Run the below: - -:: - - terraform destroy diff --git a/networks/remote/terraform/cluster/variables.tf b/networks/remote/terraform/cluster/variables.tf index 3aa837a27..1b6a70072 100644 --- a/networks/remote/terraform/cluster/variables.tf +++ b/networks/remote/terraform/cluster/variables.tf @@ -5,7 +5,7 @@ variable "name" { variable "regions" { description = "Regions to launch in" type = "list" - default = ["AMS2", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] + default = ["AMS3", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] } variable "ssh_key" { From e5220360c5305164fa24e6e7ee9d812f3e1582eb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 17 May 2018 13:17:50 -0400 Subject: [PATCH 14/18] AddPart always verifies --- consensus/state.go | 6 +++--- types/part_set.go | 8 +++----- types/part_set_test.go | 6 +++--- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 7592269bf..191993356 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -584,7 +584,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { err = cs.setProposal(msg.Proposal) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - _, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "") + _, err = cs.addProposalBlockPart(msg.Height, msg.Part) if err != nil && msg.Round != cs.Round { err = nil } @@ -1298,7 +1298,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { // NOTE: block is not necessarily valid. 
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. -func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) { +func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) { // Blocks might be reused, so round mismatch is OK if cs.Height != height { return false, nil @@ -1309,7 +1309,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v return false, nil // TODO: bad peer? Return error? } - added, err = cs.ProposalBlockParts.AddPart(part, verify) + added, err = cs.ProposalBlockParts.AddPart(part) if err != nil { return added, err } diff --git a/types/part_set.go b/types/part_set.go index cad3a03fe..18cfe802c 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -176,7 +176,7 @@ func (ps *PartSet) Total() int { return ps.total } -func (ps *PartSet) AddPart(part *Part, verify bool) (bool, error) { +func (ps *PartSet) AddPart(part *Part) (bool, error) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -191,10 +191,8 @@ func (ps *PartSet) AddPart(part *Part, verify bool) (bool, error) { } // Check hash proof - if verify { - if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { - return false, ErrPartSetInvalidProof - } + if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { + return false, ErrPartSetInvalidProof } // Add part diff --git a/types/part_set_test.go b/types/part_set_test.go index 76b538c1f..545b4d42b 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -34,7 +34,7 @@ func TestBasicPartSet(t *testing.T) { for i := 0; i < partSet.Total(); i++ { part := partSet.GetPart(i) //t.Logf("\n%v", part) - added, err := partSet2.AddPart(part, true) + added, err := partSet2.AddPart(part) if !added || err != nil { t.Errorf("Failed to add part %v, error: %v", i, err) } @@ -74,7 +74,7 @@ func TestWrongProof(t *testing.T) { // 
Test adding a part with wrong trail. part := partSet.GetPart(0) part.Proof.Aunts[0][0] += byte(0x01) - added, err := partSet2.AddPart(part, true) + added, err := partSet2.AddPart(part) if added || err == nil { t.Errorf("Expected to fail adding a part with bad trail.") } @@ -82,7 +82,7 @@ func TestWrongProof(t *testing.T) { // Test adding a part with wrong bytes. part = partSet.GetPart(1) part.Bytes[0] += byte(0x01) - added, err = partSet2.AddPart(part, true) + added, err = partSet2.AddPart(part) if added || err == nil { t.Errorf("Expected to fail adding a part with bad bytes.") } From 8e46df14e7d37d7e176e90e4d3f3c51f88b7af6c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 17 May 2018 13:59:41 -0400 Subject: [PATCH 15/18] improve consensus logger --- consensus/state.go | 77 ++++++++++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 27 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 191993356..d84d6d67c 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -586,6 +586,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit _, err = cs.addProposalBlockPart(msg.Height, msg.Part) if err != nil && msg.Round != cs.Round { + cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round) err = nil } case *VoteMessage: @@ -610,7 +611,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg)) } if err != nil { - cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg) + cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg) } } @@ -667,16 +668,18 @@ func (cs *ConsensusState) handleTxsAvailable(height int64) { // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) // NOTE: 
cs.StartTime was already set for height. func (cs *ConsensusState) enterNewRound(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if now := time.Now(); cs.StartTime.After(now) { - cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) + logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) } - cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Increment validators if necessary validators := cs.Validators @@ -695,6 +698,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { // and meanwhile we might have received a proposal // for round 0. 
} else { + logger.Info("Resetting Proposal info") cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil @@ -757,11 +761,13 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool func (cs *ConsensusState) enterPropose(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterPropose(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPropose: @@ -781,22 +787,22 @@ func (cs *ConsensusState) enterPropose(height int64, round int) { // Nothing more to do if we're not a validator if cs.privValidator == nil { - cs.Logger.Debug("This node is not a validator") + logger.Debug("This node is not a validator") return } // if not a validator, we're done if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { - cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators) + logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators) return } - cs.Logger.Debug("This node is a validator") + logger.Debug("This node is a validator") if cs.isProposer() { - cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) + logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) cs.decideProposal(height, round) } else { - cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) + logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) } } @@ -959,14 +965,16 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { // Enter: any +2/3 prevotes at next round. func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. 
Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) } - cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrevoteWait: @@ -985,12 +993,14 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. func (cs *ConsensusState) enterPrecommit(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommit: @@ -1004,9 +1014,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // If we don't have a polka, we must precommit nil. if !ok { if cs.LockedBlock != nil { - cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil") + logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil") } else { - cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. 
Precommitting nil.") + logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.") } cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) return @@ -1024,9 +1034,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // +2/3 prevoted nil. Unlock and precommit nil. if len(blockID.Hash) == 0 { if cs.LockedBlock == nil { - cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.") + logger.Info("enterPrecommit: +2/3 prevoted for nil.") } else { - cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking") + logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking") cs.LockedRound = 0 cs.LockedBlock = nil cs.LockedBlockParts = nil @@ -1040,7 +1050,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // If we're already locked on that block, precommit it, and update the LockedRound if cs.LockedBlock.HashesTo(blockID.Hash) { - cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") + logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") cs.LockedRound = round cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) @@ -1049,7 +1059,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // If +2/3 prevoted for proposal block, stage and precommit it if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) + logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) @@ -1079,14 +1089,16 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // Enter: any +2/3 precommits for next round. 
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) } - cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommitWait: @@ -1101,11 +1113,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { // Enter: +2/3 precommits for block func (cs *ConsensusState) enterCommit(height int64, commitRound int) { + logger := cs.Logger.With("height", height, "round", round) + if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { - cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) return } - cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Info(cmn.Fmt("enterCommit(%v/%v). 
Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterCommit: @@ -1128,6 +1142,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // Move them over to ProposalBlock if they match the commit hash, // otherwise they'll be cleared in updateToState. if cs.LockedBlock.HashesTo(blockID.Hash) { + l.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash) cs.ProposalBlock = cs.LockedBlock cs.ProposalBlockParts = cs.LockedBlockParts } @@ -1135,6 +1150,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // If we don't have the block being committed, set up to get it. if !cs.ProposalBlock.HashesTo(blockID.Hash) { if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + l.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash) // We're getting the wrong block. // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil @@ -1147,19 +1163,21 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // If we have the block AND +2/3 commits for it, finalize. func (cs *ConsensusState) tryFinalizeCommit(height int64) { + logger := cs.Logger.With("height", height) + if cs.Height != height { cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() if !ok || len(blockID.Hash) == 0 { - cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for .", "height", height) + logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for .") return } if !cs.ProposalBlock.HashesTo(blockID.Hash) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? - cs.Logger.Info("Attempt to finalize failed. 
We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) + logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) return } @@ -1301,11 +1319,13 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) { // Blocks might be reused, so round mismatch is OK if cs.Height != height { + cs.Logger.Debug("Received block part from wrong height", "height", height) return false, nil } // We're not expecting a block part. if cs.ProposalBlockParts == nil { + cs.Logger.Info("Received a block part when we're not expecting any", "height", height) return false, nil // TODO: bad peer? Return error? } @@ -1327,6 +1347,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) ( blockID, hasTwoThirds := prevotes.TwoThirdsMajority() if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info("Updating valid block to new proposal block", + "valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash()) cs.ValidRound = cs.Round cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts @@ -1458,6 +1480,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, (vote.Round <= cs.Round) && cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) cs.ValidRound = vote.Round cs.ValidBlock = cs.ProposalBlock cs.ValidBlockParts = cs.ProposalBlockParts From 547e8223b98229548fc9f3887ddf9803ebba87c4 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 17 May 2018 14:06:58 -0400 Subject: [PATCH 16/18] fix --- blockchain/store_test.go | 2 +- consensus/replay_test.go | 2 +- 
consensus/state.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/blockchain/store_test.go b/blockchain/store_test.go index a0d53e0cf..a1bd0fd5d 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) - uncontiguousPartSet.AddPart(part2, false) + uncontiguousPartSet.AddPart(part2) header1 := types.Header{ Height: 1, diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c706cef0c..ff0eee1ce 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -538,7 +538,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { case *types.PartSetHeader: thisBlockParts = types.NewPartSetFromHeader(*p) case *types.Part: - _, err := thisBlockParts.AddPart(p, false) + _, err := thisBlockParts.AddPart(p) if err != nil { return nil, nil, err } diff --git a/consensus/state.go b/consensus/state.go index d84d6d67c..e4477a9b4 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1113,7 +1113,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { // Enter: +2/3 precommits for block func (cs *ConsensusState) enterCommit(height int64, commitRound int) { - logger := cs.Logger.With("height", height, "round", round) + logger := cs.Logger.With("height", height, "commitRound", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) @@ -1142,7 +1142,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // Move them over to ProposalBlock if they match the commit hash, // otherwise they'll be cleared in updateToState. 
if cs.LockedBlock.HashesTo(blockID.Hash) { - l.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash) + logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash) cs.ProposalBlock = cs.LockedBlock cs.ProposalBlockParts = cs.LockedBlockParts } @@ -1150,7 +1150,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // If we don't have the block being committed, set up to get it. if !cs.ProposalBlock.HashesTo(blockID.Hash) { if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { - l.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash) + logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash) // We're getting the wrong block. // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil From b4d6bf7697be11f1105e4f83a98fe3d1adf3409b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 17 May 2018 15:39:49 -0400 Subject: [PATCH 17/18] add back new-spec/README to tell people it moved --- docs/specification/new-spec/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/specification/new-spec/README.md diff --git a/docs/specification/new-spec/README.md b/docs/specification/new-spec/README.md new file mode 100644 index 000000000..20e8e89d7 --- /dev/null +++ b/docs/specification/new-spec/README.md @@ -0,0 +1 @@ +Spec moved to [docs/spec](./docs/spec). 
From 19ccd1842fe5efffcc2ff32e6cfc127ca0cd1f9b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 17 May 2018 15:47:07 -0400 Subject: [PATCH 18/18] version and changelog --- CHANGELOG.md | 13 ++++++++++++- version/version.go | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c1b47b08..cd3193235 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,17 @@ # Changelog -## 0.19.4 (TBD) +## 0.19.4 (May 17th, 2018) + +IMPROVEMENTS + +- [state] Improve tx indexing by using batches +- [consensus, state] Improve logging (more consensus logs, fewer tx logs) +- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...) + + +BUG FIXES + +- [consensus] Fix issue #1575 where a late proposer can get stuck ## 0.19.3 (May 14th, 2018) diff --git a/version/version.go b/version/version.go index 67d46ea33..6bf9bdd40 100644 --- a/version/version.go +++ b/version/version.go @@ -10,7 +10,7 @@ const ( var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.19.4-dev" + Version = "0.19.4" // GitCommit is the current HEAD set using ldflags. GitCommit string