
fewer structs. remove viper from consensus

Ethan Buchman, 8 years ago
commit 75b6c5215f (pull/484/head)

17 changed files with 372 additions and 300 deletions
  1. blockchain/reactor.go (+1, -1)
  2. cmd/tendermint/commands/init.go (+5, -5)
  3. cmd/tendermint/commands/replay.go (+7, -17)
  4. cmd/tendermint/commands/reset_priv_validator.go (+4, -2)
  5. cmd/tendermint/commands/root.go (+26, -7)
  6. cmd/tendermint/commands/run_node.go (+44, -38)
  7. cmd/tendermint/commands/show_validator.go (+2, -2)
  8. config/config.go (+73, -0)
  9. config/tendermint/types.go (+0, -89)
  10. consensus/common_test.go (+1, -0)
  11. consensus/replay_file.go (+10, -14)
  12. consensus/state.go (+105, -60)
  13. mempool/mempool.go (+2, -2)
  14. mempool/reactor.go (+15, -6)
  15. node/node.go (+58, -55)
  16. p2p/config.go (+17, -0)
  17. types/block.go (+2, -2)

blockchain/reactor.go (+1, -1)

@ -222,7 +222,7 @@ FOR_LOOP:
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet(types.DefaultPartSetSize)
firstParts := first.MakePartSet(types.DefaultBlockPartSize)
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling


cmd/tendermint/commands/init.go (+5, -5)

@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
var initFilesCmd = &cobra.Command{
@ -20,13 +20,13 @@ func init() {
}
func initFiles(cmd *cobra.Command, args []string) {
privValFile := config.GetString("priv_validator_file")
privValFile := config.PrivValidatorFile
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
privValidator := types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator.Save()
genFile := config.GetString("genesis_file")
genFile := config.GenesisFile
if _, err := os.Stat(genFile); os.IsNotExist(err) {
genDoc := types.GenesisDoc{
@ -40,8 +40,8 @@ func initFiles(cmd *cobra.Command, args []string) {
genDoc.SaveAs(genFile)
}
log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
log.Notice("Initialized tendermint", "genesis", config.GenesisFile, "priv_validator", config.PrivValidatorFile)
} else {
log.Notice("Already initialized", "priv_validator", config.GetString("priv_validator_file"))
log.Notice("Already initialized", "priv_validator", config.PrivValidatorFile)
}
}

cmd/tendermint/commands/replay.go (+7, -17)

@ -1,36 +1,26 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/consensus"
"github.com/spf13/cobra"
)
var replayCmd = &cobra.Command{
Use: "replay [walfile]",
Use: "replay",
Short: "Replay messages from WAL",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], false)
} else {
fmt.Println("replay requires an argument (walfile)")
}
config := getConfig()
consensus.RunReplayFile(config.Config, config.Consensus, false)
},
}
var replayConsoleCmd = &cobra.Command{
Use: "replay_console [walfile]",
Use: "replay_console",
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], true)
} else {
fmt.Println("replay_console requires an argument (walfile)")
}
config := getConfig()
consensus.RunReplayFile(config.Config, config.Consensus, true)
},
}


cmd/tendermint/commands/reset_priv_validator.go (+4, -2)

@ -29,13 +29,15 @@ func init() {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) {
ResetAll(config.GetString("db_dir"), config.GetString("priv_validator_file"), log)
config := getConfig()
ResetAll(config.DBDir, config.PrivValidatorFile, log)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorLocal(config.GetString("priv_validator_file"), log)
config := getConfig()
resetPrivValidatorLocal(config.PrivValidatorFile, log)
}
// Exported so other CLI tools can use it


cmd/tendermint/commands/root.go (+26, -7)

@ -2,16 +2,34 @@ package commands
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tmlibs/logger"
)
var (
config = tmcfg.GetConfig("")
log = logger.New("module", "main")
viperConfig *viper.Viper
config *node.Config
log = logger.New("module", "main")
)
func init() {
// Set config to be used as defaults by flags.
// This will be overwritten by whatever is unmarshalled from viper
config = node.NewDefaultConfig("")
}
// unmarshal viper into the Tendermint config
func getConfig() *node.Config {
config := new(node.Config)
if err := viperConfig.Unmarshal(config); err != nil {
panic(err)
}
return config
}
//global flag
var logLevel string
@ -19,13 +37,14 @@ var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
// set the log level in the config and logger
config.Set("node.log_level", logLevel)
logger.SetLogLevel(logLevel)
// set the log level
config := getConfig()
logger.SetLogLevel(config.LogLevel)
},
}
func init() {
//parse flag and set config
RootCmd.PersistentFlags().StringVar(&logLevel, "log_level", config.GetString("node.log_level"), "Log level")
RootCmd.PersistentFlags().StringVar(&logLevel, "log_level", config.LogLevel, "Log level")
viperConfig.BindPFlag("log_level", RootCmd.Flags().Lookup("log_level"))
}
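
With this change, viper lives only at the command layer: flags and config files are merged by viper and then unmarshalled once into the typed config via getConfig(). Below is a minimal standalone sketch of that round trip; it uses a cut-down stand-in for node.Config and a locally created viper instance rather than the real Tendermint wiring.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// Cut-down stand-in for node.Config; the mapstructure tags are what
// viper.Unmarshal keys on.
type Config struct {
	Moniker  string `mapstructure:"moniker"`
	LogLevel string `mapstructure:"log_level"`
	FastSync bool   `mapstructure:"fast_sync"`
}

func main() {
	v := viper.New()
	// Defaults stand in for NewDefaultConfig; a config file or bound
	// flags would override them before getConfig() is called.
	v.SetDefault("moniker", "anonymous")
	v.SetDefault("log_level", "info")
	v.SetDefault("fast_sync", true)

	// The body of getConfig(): unmarshal viper into the typed struct.
	cfg := new(Config)
	if err := v.Unmarshal(cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *cfg)
}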

cmd/tendermint/commands/run_node.go (+44, -38)

@ -14,10 +14,9 @@ import (
)
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
PreRun: setConfigFlags,
RunE: runNode,
Use: "node",
Short: "Run the tendermint node",
RunE: runNode,
}
//flags
@ -35,49 +34,55 @@ var (
)
func init() {
// bind flags
// configuration options
runNodeCmd.Flags().StringVar(&moniker, "moniker", config.GetString("node.moniker"),
// node flags
runNodeCmd.Flags().StringVar(&moniker, "moniker", config.Moniker,
"Node Name")
runNodeCmd.Flags().StringVar(&nodeLaddr, "node_laddr", config.GetString("node.listen_addr"),
"Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().StringVar(&seeds, "seeds", config.GetString("network.seeds"),
"Comma delimited host:port seed nodes")
runNodeCmd.Flags().BoolVar(&fastSync, "fast_sync", config.GetBool("blockchain.fast_sync"),
viperConfig.BindPFlag("moniker", runNodeCmd.Flags().Lookup("moniker"))
runNodeCmd.Flags().BoolVar(&fastSync, "fast_sync", config.FastSync,
"Fast blockchain syncing")
runNodeCmd.Flags().BoolVar(&skipUPNP, "skip_upnp", config.GetBool("network.skip_upnp"),
"Skip UPNP configuration")
runNodeCmd.Flags().StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc.listen_addr"),
"RPC listen address. Port required")
runNodeCmd.Flags().StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc.listen_addr"),
"GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().StringVar(&proxyApp, "proxy_app", config.GetString("abci.proxy_app"),
viperConfig.BindPFlag("fast_sync", runNodeCmd.Flags().Lookup("fast_sync"))
// abci flags
runNodeCmd.Flags().StringVar(&proxyApp, "proxy_app", config.ProxyApp,
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().StringVar(&abciTransport, "abci", config.GetString("abci.mode"),
viperConfig.BindPFlag("proxy_app", runNodeCmd.Flags().Lookup("proxy_app"))
runNodeCmd.Flags().StringVar(&abciTransport, "abci", config.ABCI,
"Specify abci transport (socket | grpc)")
viperConfig.BindPFlag("abci", runNodeCmd.Flags().Lookup("abci"))
// rpc flags
runNodeCmd.Flags().StringVar(&rpcLaddr, "rpc_laddr", config.RPCListenAddress,
"RPC listen address. Port required")
viperConfig.BindPFlag("rpc_laddr", runNodeCmd.Flags().Lookup("rpc_laddr"))
runNodeCmd.Flags().StringVar(&grpcLaddr, "grpc_laddr", config.GRPCListenAddress,
"GRPC listen address (BroadcastTx only). Port required")
viperConfig.BindPFlag("grpc_laddr", runNodeCmd.Flags().Lookup("grpc_laddr"))
// p2p flags
runNodeCmd.Flags().StringVar(&nodeLaddr, "p2p.laddr", config.P2P.ListenAddress,
"Node listen address. (0.0.0.0:0 means any interface, any port)")
viperConfig.BindPFlag("p2p.laddr", runNodeCmd.Flags().Lookup("p2p.laddr"))
runNodeCmd.Flags().StringVar(&seeds, "p2p.seeds", config.P2P.Seeds,
"Comma delimited host:port seed nodes")
viperConfig.BindPFlag("p2p.seeds", runNodeCmd.Flags().Lookup("p2p.seeds"))
runNodeCmd.Flags().BoolVar(&skipUPNP, "p2p.skip_upnp", config.P2P.SkipUPNP,
"Skip UPNP configuration")
viperConfig.BindPFlag("p2p.skip_upnp", runNodeCmd.Flags().Lookup("p2p.skip_upnp"))
// feature flags
runNodeCmd.Flags().BoolVar(&pex, "pex", config.GetBool("pex_reactor"),
runNodeCmd.Flags().BoolVar(&pex, "p2p.pex", config.P2P.PexReactor,
"Enable Peer-Exchange (dev feature)")
RootCmd.AddCommand(runNodeCmd)
}
func setConfigFlags(cmd *cobra.Command, args []string) {
// Merge parsed flag values onto config
config.Set("node.moniker", moniker)
config.Set("node.listen_addr", nodeLaddr)
config.Set("network.seeds", seeds)
config.Set("network.skip_upnp", skipUPNP)
config.Set("network.pex_reactor", pex)
config.Set("blockchain.fast_sync", fastSync)
config.Set("rpc.listen_addr", rpcLaddr)
config.Set("rpc.grpc_listen_addr", grpcLaddr)
config.Set("abci.proxy_app", proxyApp)
config.Set("abci.mode", abciTransport)
}
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
@ -90,7 +95,7 @@ func runNode(cmd *cobra.Command, args []string) error {
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
genDocFile := config.GenesisFile
if !cmn.FileExists(genDocFile) {
log.Notice(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
for {
@ -109,12 +114,13 @@ func runNode(cmd *cobra.Command, args []string) error {
if genDoc.ChainID == "" {
return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
}
config.Set("chain_id", genDoc.ChainID)
// config.SetChainID("chain_id", genDoc.ChainID) TODO
}
}
// Create & start node
n := node.NewNodeDefault(config) //tmConfig)
n := node.NewNodeDefault(getConfig())
if _, err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err)
} else {
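
Each flag above is registered under its (possibly nested) config key and then bound with viperConfig.BindPFlag, so a value passed on the command line overrides file values and defaults when the config is unmarshalled. The following is a small self-contained sketch of that binding, using a p2p.laddr flag and a locally created viper instance instead of the real command wiring.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Default for the nested key, standing in for the config defaults.
	v.SetDefault("p2p.laddr", "tcp://0.0.0.0:46656")

	cmd := &cobra.Command{
		Use: "node",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Because the flag is bound to the viper key "p2p.laddr",
			// a value set on the command line wins over the default.
			fmt.Println("p2p.laddr =", v.GetString("p2p.laddr"))
			return nil
		},
	}
	cmd.Flags().String("p2p.laddr", "tcp://0.0.0.0:46656", "Node listen address")
	if err := v.BindPFlag("p2p.laddr", cmd.Flags().Lookup("p2p.laddr")); err != nil {
		panic(err)
	}

	// Simulate: node --p2p.laddr tcp://127.0.0.1:36656
	cmd.SetArgs([]string{"--p2p.laddr", "tcp://127.0.0.1:36656"})
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}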


cmd/tendermint/commands/show_validator.go (+2, -2)

@ -20,8 +20,8 @@ func init() {
}
func showValidator(cmd *cobra.Command, args []string) {
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
config := getConfig()
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile)
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
fmt.Println(string(pubKeyJSONBytes))
}

config/config.go (+73, -0)

@ -0,0 +1,73 @@
package config
// Config struct for a Tendermint node
type Config struct {
// The ID of the chain to join (should be signed with every transaction and vote)
ChainID string `mapstructure:"chain_id"`
// A JSON file containing the initial validator set and other meta data
GenesisFile string `mapstructure:"genesis_file"`
// A JSON file containing the private key to use as a validator in the consensus protocol
PrivValidatorFile string `mapstructure:"priv_validator_file"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy_app"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
// What indexer to use for transactions
TxIndex string `mapstructure:"tx_index"`
// Database backend: leveldb | memdb
DBBackend string `mapstructure:"db_backend"`
// Database directory
DBDir string `mapstructure:"db_dir"`
// TCP or UNIX socket address for the RPC server to listen on
RPCListenAddress string `mapstructure:"rpc_laddr"`
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
GRPCListenAddress string `mapstructure:"grpc_laddr"`
}
func NewDefaultConfig(rootDir string) *Config {
return &Config{
GenesisFile: rootDir + "/genesis.json",
PrivValidatorFile: rootDir + "/priv_validator.json",
Moniker: "anonymous",
ProxyApp: "tcp://127.0.0.1:46658",
ABCI: "socket",
LogLevel: "info",
ProfListenAddress: "",
FastSync: true,
FilterPeers: false,
TxIndex: "kv",
DBBackend: "leveldb",
DBDir: rootDir + "/data",
RPCListenAddress: "tcp://0.0.0.0:46657",
GRPCListenAddress: "",
}
}
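
Callers now read typed fields off this flat Config instead of looking up string keys with GetString/GetBool. A short usage sketch, assuming the config package as it is defined at this commit and an arbitrary example root directory:

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// The root dir here is just an example path.
	c := cfg.NewDefaultConfig("/home/user/.tendermint")

	// Typed fields replace the old config.GetString("...") lookups.
	fmt.Println("genesis:", c.GenesisFile)   // .../genesis.json
	fmt.Println("db:", c.DBBackend, c.DBDir) // leveldb .../data
	fmt.Println("rpc:", c.RPCListenAddress)  // tcp://0.0.0.0:46657
}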

config/tendermint/types.go (+0, -89)

@ -1,89 +0,0 @@
package tendermint
type Config struct {
Node NodeConfig `mapstructure:"node"`
Chain ChainConfig `mapstructure:"chain"`
ABCI ABCIConfig `mapstructure:"abci"`
Network NetworkConfig `mapstructure:"network"`
Blockchain BlockchainConfig `mapstructure:"blockchain"`
Consensus ConsensusConfig `mapstructure:"consensus"`
Block BlockConfig `mapstructure:"block"`
Mempool MempoolConfig `mapstructure:"mempool"`
RPC RPCConfig `mapstructure:"rpc"`
DB DBConfig `mapstructure:"db"`
}
type NodeConfig struct {
Moniker string `mapstructure:"moniker"` // "anonymous"
PrivValidatorFile string `mapstructure:"priv_validator_file"` // rootDir+"/priv_validator.json")
LogLevel string `mapstructure:"log_level"` // info
ProfListenAddr string `mapstructure:"prof_laddr"` // ""
}
type ChainConfig struct {
ChainID string `mapstructure:"chain_id"`
GenesisFile string `mapstructure:"genesis_file"` // rootDir/genesis.json
}
type ABCIConfig struct {
ProxyApp string `mapstructure:"proxy_app"` // tcp://0.0.0.0:46658
Mode string `mapstructure:"mode"` // socket
FilterPeers bool `mapstructure:"filter_peers"` // false
}
type NetworkConfig struct {
ListenAddr string `mapstructure:"listen_adddr"` // "tcp://0.0.0.0:46656")
Seeds string `mapstructure:"seeds"` // []string ...
SkipUPNP bool `mapstructure:"skip_upnp"`
AddrBookFile string `mapstructure:"addr_book_file"` // rootDir+"/addrbook.json")
AddrBookString bool `mapstructure:"addr_book_string"` // true
PexReactor bool `mapstructure:"pex_reactor"` // false
}
type BlockchainConfig struct {
FastSync bool `mapstructure:"fast_sync"` // true
}
type ConsensusConfig struct {
WalFile string `mapstructure:"wal_file"` //rootDir+"/data/cs.wal/wal")
WalLight bool `mapstructure:"wal_light"` // false
// all timeouts are in ms
TimeoutPropose int `mapstructure:"timeout_propose"` // 3000
TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` // 500
TimeoutPrevote int `mapstructure:"timeout_prevote"` // 1000
TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` // 500
TimeoutPrecommit int `mapstructure:"timeout_precommit"` // 1000
TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` // 500
TimeoutCommit int `mapstructure:"timeout_commit"` // 1000
// make progress asap (no `timeout_commit`) on full precommit votes
SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` // false
}
type BlockConfig struct {
MaxTxs int `mapstructure:"max_txs"` // 10000
PartSize int `mapstructure:"part_size"` // 65536
DisableDataHash bool `mapstructure:"disable_data_hash"` // false
}
type MempoolConfig struct {
Recheck bool `mapstructure:"recheck"` // true
RecheckEmpty bool `mapstructure:"recheck_empty"` // true
Broadcast bool `mapstructure:"broadcast"` // true
WalDir string `mapstructure:"wal_dir"` // rootDir+"/data/mempool.wal")
}
type RPCConfig struct {
RPCListenAddress string `mapstructure:"rpc_listen_addr"` // "tcp://0.0.0.0:46657")
GRPCListenAddress string `mapstructure:"grpc_listen_addr"` // ""
}
type DBConfig struct {
Backend string `mapstructure:"backend"` // leveldb
Dir string `mapstructure:"dir"` // rootDir/data
TxIndex string `mapstructure:"tx_index"` // "kv"
}

consensus/common_test.go (+1, -0)

@ -28,6 +28,7 @@ import (
"github.com/tendermint/abci/example/dummy"
)
// genesis, chain_id, priv_val
var config *viper.Viper // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Duration(2)


consensus/replay_file.go (+10, -14)

@ -8,9 +8,8 @@ import (
"strconv"
"strings"
"github.com/spf13/viper"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -21,10 +20,10 @@ import (
//--------------------------------------------------------
// replay messages interactively or all at once
func RunReplayFile(config *viper.Viper, walFile string, console bool) {
consensusState := newConsensusStateForReplay(config)
func RunReplayFile(config *cfg.Config, csConfig *Config, console bool) {
consensusState := newConsensusStateForReplay(config, csConfig)
if err := consensusState.ReplayFile(walFile, console); err != nil {
if err := consensusState.ReplayFile(csConfig.WalFile, console); err != nil {
cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
}
}
@ -236,33 +235,30 @@ func (pb *playback) replayConsoleLoop() int {
//--------------------------------------------------------------------------------
// convenience for replay mode
func newConsensusStateForReplay(config *viper.Viper) *ConsensusState {
func newConsensusStateForReplay(config *cfg.Config, csConfig *Config) *ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir)
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir)
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile)
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator := proxy.DefaultClientCreator(config.GetString("proxy_app"), config.GetString("abci"), config.GetString("db_dir"))
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir)
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore))
_, err := proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
cmn.Exit(cmn.Fmt("Failed to start event switch: %v", err))
}
consensusState := NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, types.MockMempool{})
consensusState := NewConsensusState(csConfig, state.Copy(), proxyApp.Consensus(), blockStore, types.MockMempool{})
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}

consensus/state.go (+105, -60)

@ -10,7 +10,6 @@ import (
"time"
"github.com/ebuchman/fail-test"
"github.com/spf13/viper"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
@ -20,53 +19,90 @@ import (
)
//-----------------------------------------------------------------------------
// Timeout Parameters
// TimeoutParams holds timeouts and deltas for each round step.
// All timeouts and deltas in milliseconds.
type TimeoutParams struct {
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
SkipTimeoutCommit bool
// Config
// Config holds timeouts and details about the WAL, the block structure,
// and timeouts in the consensus protocol.
type Config struct {
WalFile string `mapstructure:"wal_file"`
WalLight bool `mapstructure:"wal_light"`
// All timeouts are in ms
TimeoutPropose int `mapstructure:"timeout_propose"`
TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"`
TimeoutPrevote int `mapstructure:"timeout_prevote"`
TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"`
TimeoutPrecommit int `mapstructure:"timeout_precommit"`
TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"`
TimeoutCommit int `mapstructure:"timeout_commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
// BlockSize
MaxBlockSizeTxs int `mapstructure:"block_size_txs"`
MaxBlockSizeBytes int `mapstructure:"block_size_bytes"`
// TODO: This probably shouldn't be exposed but it makes it
// easy to write tests for the wal/replay
BlockPartSize int `mapstructure:"block_part_size"`
chainID string
}
func NewDefaultConfig(rootDir string) *Config {
return &Config{
WalFile: rootDir + "/data/cs.wal/wal",
WalLight: false,
TimeoutPropose: 3000,
TimeoutProposeDelta: 500,
TimeoutPrevote: 1000,
TimeoutPrevoteDelta: 500,
TimeoutPrecommit: 1000,
TimeoutPrecommitDelta: 500,
TimeoutCommit: 1000,
SkipTimeoutCommit: false,
MaxBlockSizeTxs: 10000,
MaxBlockSizeBytes: 1, // TODO
BlockPartSize: types.DefaultBlockPartSize,
}
}
func NewTestConfig(rootDir string) *Config {
config := NewDefaultConfig(rootDir)
config.TimeoutPropose = 2000
config.TimeoutProposeDelta = 1
config.TimeoutPrevote = 10
config.TimeoutPrevoteDelta = 1
config.TimeoutPrecommit = 10
config.TimeoutPrecommitDelta = 1
config.TimeoutCommit = 10
config.SkipTimeoutCommit = true
return config
}
func (cfg *Config) SetChainID(chainID string) {
cfg.chainID = chainID
}
// Wait this long for a proposal
func (tp *TimeoutParams) Propose(round int) time.Duration {
return time.Duration(tp.Propose0+tp.ProposeDelta*round) * time.Millisecond
func (cfg *Config) Propose(round int) time.Duration {
return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
}
// After receiving any +2/3 prevote, wait this long for stragglers
func (tp *TimeoutParams) Prevote(round int) time.Duration {
return time.Duration(tp.Prevote0+tp.PrevoteDelta*round) * time.Millisecond
func (cfg *Config) Prevote(round int) time.Duration {
return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
}
// After receiving any +2/3 precommits, wait this long for stragglers
func (tp *TimeoutParams) Precommit(round int) time.Duration {
return time.Duration(tp.Precommit0+tp.PrecommitDelta*round) * time.Millisecond
func (cfg *Config) Precommit(round int) time.Duration {
return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
}
// After receiving +2/3 precommits for a single block (a commit), wait this long for stragglers in the next height's RoundStepNewHeight
func (tp *TimeoutParams) Commit(t time.Time) time.Time {
return t.Add(time.Duration(tp.Commit0) * time.Millisecond)
}
// InitTimeoutParamsFromConfig initializes parameters from config
func InitTimeoutParamsFromConfig(config *viper.Viper) *TimeoutParams {
return &TimeoutParams{
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
SkipTimeoutCommit: config.GetBool("skip_timeout_commit"),
}
func (cfg *Config) Commit(t time.Time) time.Time {
return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
}
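
The timeout helpers that used to live on TimeoutParams are now methods on Config, and each round adds a fixed delta to the base timeout. A tiny standalone sketch of the arithmetic, reproduced locally so it runs without the tendermint tree, using the defaults from NewDefaultConfig:

package main

import (
	"fmt"
	"time"
)

// Same arithmetic as Config.Propose above, reproduced standalone.
func propose(timeoutPropose, timeoutProposeDelta, round int) time.Duration {
	return time.Duration(timeoutPropose+timeoutProposeDelta*round) * time.Millisecond
}

func main() {
	// Defaults from NewDefaultConfig: TimeoutPropose=3000, TimeoutProposeDelta=500.
	for round := 0; round <= 3; round++ {
		fmt.Printf("round %d: wait %v for a proposal\n", round, propose(3000, 500, round))
	}
	// round 0: 3s, round 1: 3.5s, round 2: 4s, round 3: 4.5s
}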
//-----------------------------------------------------------------------------
@ -224,38 +260,48 @@ type PrivValidator interface {
type ConsensusState struct {
cmn.BaseService
config *viper.Viper
// config details
config *Config
privValidator PrivValidator // for signing votes
// services for creating and executing blocks
proxyAppConn proxy.AppConnConsensus
blockStore types.BlockStore
mempool types.Mempool
privValidator PrivValidator // for signing votes
// internal state
mtx sync.Mutex
RoundState
state *sm.State // State until height-1.
peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker TimeoutTicker // ticker for timeouts
timeoutParams *TimeoutParams // parameters and functions for timeout intervals
// state changes may be triggered by msgs from peers,
// msgs from ourself, or by timeouts
peerMsgQueue chan msgInfo
internalMsgQueue chan msgInfo
timeoutTicker TimeoutTicker
// we use PubSub to trigger msg broadcasts in the reactor,
// and to notify external subscribers, eg. through a websocket
evsw types.EventSwitch
// a Write-Ahead Log ensures we can recover from any kind of crash
// and helps us avoid signing conflicting votes
wal *WAL
replayMode bool // so we don't log signing errors during replay
nSteps int // used for testing to limit the number of transitions the state makes
// for tests where we want to limit the number of transitions the state makes
nSteps int
// allow certain function to be overwritten for testing
// some functions can be overwritten for testing
decideProposal func(height, round int)
doPrevote func(height, round int)
setProposal func(proposal *types.Proposal) error
// closed when we finish shutting down
done chan struct{}
}
func NewConsensusState(config *viper.Viper, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
func NewConsensusState(config *Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
cs := &ConsensusState{
config: config,
proxyAppConn: proxyAppConn,
@ -264,7 +310,6 @@ func NewConsensusState(config *viper.Viper, state *sm.State, proxyAppConn proxy.
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
timeoutTicker: NewTimeoutTicker(),
timeoutParams: InitTimeoutParamsFromConfig(config),
done: make(chan struct{}),
}
// set function defaults (may be overwritten before calling Start)
@ -341,7 +386,7 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
func (cs *ConsensusState) OnStart() error {
walFile := cs.config.GetString("cs_wal_file")
walFile := cs.config.WalFile
if err := cs.OpenWAL(walFile); err != nil {
log.Error("Error loading ConsensusState wal", "error", err.Error())
return err
@ -406,7 +451,7 @@ func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
wal, err := NewWAL(walFile, cs.config.GetBool("cs_wal_light"))
wal, err := NewWAL(walFile, cs.config.WalLight)
if err != nil {
return err
}
@ -512,7 +557,7 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) {
return
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(cs.config.GetString("chain_id"), state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
lastPrecommits := types.NewVoteSet(cs.config.chainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
@ -572,9 +617,9 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
// to be gathered for the first block.
// And alternative solution that relies on clocks:
// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
cs.StartTime = cs.timeoutParams.Commit(time.Now())
cs.StartTime = cs.config.Commit(time.Now())
} else {
cs.StartTime = cs.timeoutParams.Commit(cs.CommitTime)
cs.StartTime = cs.config.Commit(cs.CommitTime)
}
cs.Validators = validators
cs.Proposal = nil
@ -583,7 +628,7 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.Votes = NewHeightVoteSet(cs.config.GetString("chain_id"), height, validators)
cs.Votes = NewHeightVoteSet(cs.config.chainID, height, validators)
cs.CommitRound = -1
cs.LastCommit = lastPrecommits
cs.LastValidators = state.LastValidators
@ -799,7 +844,7 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
}()
// If we don't get the proposal and all block parts quick enough, enterPrevote
cs.scheduleTimeout(cs.timeoutParams.Propose(round), height, round, RoundStepPropose)
cs.scheduleTimeout(cs.config.Propose(round), height, round, RoundStepPropose)
// Nothing more to do if we're not a validator
if cs.privValidator == nil {
@ -893,10 +938,10 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
}
// Mempool validated transactions
txs := cs.mempool.Reap(cs.config.GetInt("block_size"))
txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.GetInt("block_part_size"))
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.BlockPartSize)
}
// Enter: `timeoutPropose` after entering Propose.
@ -982,7 +1027,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
}()
// Wait for some more prevotes; enterPrecommit
cs.scheduleTimeout(cs.timeoutParams.Prevote(round), height, round, RoundStepPrevoteWait)
cs.scheduleTimeout(cs.config.Prevote(round), height, round, RoundStepPrevoteWait)
}
// Enter: +2/3 precomits for block or nil.
@ -1102,7 +1147,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int, round int) {
}()
// Wait for some more precommits; enterNewRound
cs.scheduleTimeout(cs.timeoutParams.Precommit(round), height, round, RoundStepPrecommitWait)
cs.scheduleTimeout(cs.config.Precommit(round), height, round, RoundStepPrecommitWait)
}
@ -1397,7 +1442,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.timeoutParams.SkipTimeoutCommit && cs.LastCommit.HasAll() {
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
@ -1460,7 +1505,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)
if cs.timeoutParams.SkipTimeoutCommit && precommits.HasAll() {
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)


mempool/mempool.go (+2, -2)

@ -47,7 +47,7 @@ TODO: Better handle abci client errors. (make it automatically handle connection
const cacheSize = 100000
type Mempool struct {
config Config
config *Config
proxyMtx sync.Mutex
proxyAppConn proxy.AppConnMempool
@ -66,7 +66,7 @@ type Mempool struct {
wal *auto.AutoFile
}
func NewMempool(config Config, proxyAppConn proxy.AppConnMempool) *Mempool {
func NewMempool(config *Config, proxyAppConn proxy.AppConnMempool) *Mempool {
mempool := &Mempool{
config: config,
proxyAppConn: proxyAppConn,


mempool/reactor.go (+15, -6)

@ -22,21 +22,30 @@ const (
)
type Config struct {
Recheck bool // true
RecheckEmpty bool // true
Broadcast bool // true
WalDir string // rootDir+"/data/mempool.wal")
Recheck bool `mapstructure:"recheck"` // true
RecheckEmpty bool `mapstructure:"recheck_empty"` // true
Broadcast bool `mapstructure:"broadcast"` // true
WalDir string `mapstructure:"wal_dir"` //
}
func NewDefaultConfig(rootDir string) *Config {
return &Config{
Recheck: true,
RecheckEmpty: true,
Broadcast: true,
WalDir: rootDir + "/data/mempool.wal",
}
}
// MempoolReactor handles mempool tx broadcasting amongst peers.
type MempoolReactor struct {
p2p.BaseReactor
config Config
config *Config
Mempool *Mempool
evsw types.EventSwitch
}
func NewMempoolReactor(config Config, mempool *Mempool) *MempoolReactor {
func NewMempoolReactor(config *Config, mempool *Mempool) *MempoolReactor {
memR := &MempoolReactor{
config: config,
Mempool: mempool,


node/node.go (+58, -55)

@ -13,7 +13,8 @@ import (
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
cfg "github.com/tendermint/tendermint/config"
// tmcfg "github.com/tendermint/tendermint/config/tendermint"
"github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool"
p2p "github.com/tendermint/tendermint/p2p"
@ -34,11 +35,30 @@ import (
_ "net/http/pprof"
)
type Config struct {
// Top level options use an anonymous struct
*cfg.Config `mapstructure:",squash"`
// Options for services
P2P *p2p.NetworkConfig `mapstructure:"p2p"`
Mempool *mempl.Config `mapstructure:"mempool"`
Consensus *consensus.Config `mapstructure:"consensus"`
}
func NewDefaultConfig(rootDir string) *Config {
return &Config{
Config: cfg.NewDefaultConfig(rootDir),
P2P: p2p.NewDefaultConfig(rootDir),
Mempool: mempl.NewDefaultConfig(rootDir),
Consensus: consensus.NewDefaultConfig(rootDir),
}
}
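
The mapstructure ",squash" tag flattens the embedded top-level config so its keys (moniker, log_level, ...) stay at the root, while the p2p, mempool and consensus configs are namespaced under their own keys. Below is a minimal sketch of that decoding with simplified stand-in structs; the real node.Config embeds a pointer and uses the package types shown in this diff.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type BaseConfig struct {
	Moniker  string `mapstructure:"moniker"`
	LogLevel string `mapstructure:"log_level"`
}

type P2PConfig struct {
	ListenAddress string `mapstructure:"laddr"`
}

type NodeConfig struct {
	BaseConfig `mapstructure:",squash"`  // top-level keys land here
	P2P        P2PConfig `mapstructure:"p2p"` // namespaced under "p2p"
}

func main() {
	// Roughly what viper hands to Unmarshal after merging defaults, file and flags.
	input := map[string]interface{}{
		"moniker":   "anonymous",
		"log_level": "info",
		"p2p": map[string]interface{}{
			"laddr": "tcp://0.0.0.0:46656",
		},
	}

	var cfg NodeConfig
	if err := mapstructure.Decode(input, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Moniker, cfg.LogLevel, cfg.P2P.ListenAddress)
	// Output: anonymous info tcp://0.0.0.0:46656
}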
type Node struct {
cmn.BaseService
// config
config *viper.Viper // user config
config *Config
genesisDoc *types.GenesisDoc // initial validator set
privValidator *types.PrivValidator // local node's validator key
@ -59,35 +79,27 @@ type Node struct {
txIndexer txindex.TxIndexer
}
func NewNodeDefault(config *viper.Viper) *Node {
func NewNodeDefault(config *Config) *Node {
// Get PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
return NewNode(config, privValidator, proxy.DefaultClientCreator(
config.GetString("proxy_app"),
config.GetString("abci"),
config.GetString("db_dir"),
))
// config.ABCI.ProxyApp, config.ABCI.Mode, config.DB.Dir))
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile)
return NewNode(config, privValidator,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir))
}
func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
tmConfig := new(tmcfg.Config)
if err := config.Unmarshal(tmConfig); err != nil {
panic(err)
}
func NewNode(config *Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", tmConfig.DB.Backend, tmConfig.DB.Dir)
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir)
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", tmConfig.DB.Backend, tmConfig.DB.Dir)
state := sm.GetState(stateDB, tmConfig.Chain.GenesisFile)
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir)
state := sm.GetState(stateDB, config.GenesisFile)
// add the chainid and number of validators to the global config
config.Set("chain_id", state.ChainID)
// TODO: Set ChainID. eg:
// config.Consensus.SetChainID(state.ChainID) // ...
// but actually consensus doesnt need it since the cs has the state ...
// Create the proxyApp, which manages connections (consensus, mempool, query)
// and sync tendermint and the app by replaying any necessary blocks
@ -101,9 +113,9 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
// Transaction indexing
var txIndexer txindex.TxIndexer
switch tmConfig.DB.TxIndex {
switch config.TxIndex {
case "kv":
store := dbm.NewDB("tx_index", tmConfig.DB.Backend, tmConfig.DB.Dir)
store := dbm.NewDB("tx_index", config.DBBackend, config.DBDir)
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
@ -122,7 +134,7 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
// Decide whether to fast-sync or not
// We don't fast-sync when the only validator is us.
fastSync := config.GetBool("fast_sync")
fastSync := config.FastSync
if state.Validators.Size() == 1 {
addr, _ := state.Validators.GetByIndex(0)
if bytes.Equal(privValidator.Address, addr) {
@ -134,21 +146,20 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
// Make MempoolReactor
mempool := mempl.NewMempool(mempoolConfig(config), proxyApp.Mempool())
mempoolReactor := mempl.NewMempoolReactor(mempoolConfig(config), mempool)
mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool())
mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
// Make ConsensusReactor
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState := consensus.NewConsensusState(config.Consensus, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
if privValidator != nil {
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)
// Make p2p network switch
// TODO : p2pConfig := config.P2P
p2pConfig := viper.New()
if config.IsSet("p2p") { //TODO verify this necessary, where is this ever set?
p2pConfig = config.Get("p2p").(*viper.Viper)
}
sw := p2p.NewSwitch(p2pConfig)
sw.AddReactor("MEMPOOL", mempoolReactor)
sw.AddReactor("BLOCKCHAIN", bcReactor)
@ -156,8 +167,8 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
// Optionally, start the pex reactor
var addrBook *p2p.AddrBook
if config.GetBool("pex_reactor") {
addrBook = p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
if config.P2P.PexReactor {
addrBook = p2p.NewAddrBook(config.P2P.AddrBookFile, config.P2P.AddrBookStrict)
pexReactor := p2p.NewPEXReactor(addrBook)
sw.AddReactor("PEX", pexReactor)
}
@ -165,7 +176,7 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
// Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer.
// XXX: Query format subject to change
if config.GetBool("filter_peers") {
if config.FilterPeers {
// NOTE: addr is ip:port
sw.SetAddrFilter(func(addr net.Addr) error {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())})
@ -194,7 +205,7 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
SetEventSwitch(eventSwitch, bcReactor, mempoolReactor, consensusReactor)
// run the profile server
profileHost := config.GetString("prof_laddr")
profileHost := config.ProfListenAddress
if profileHost != "" {
go func() {
@ -227,8 +238,8 @@ func NewNode(config *viper.Viper, privValidator *types.PrivValidator, clientCrea
func (n *Node) OnStart() error {
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, n.config.GetBool("skip_upnp"))
protocol, address := ProtocolAndAddress(n.config.P2P.ListenAddress)
l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP)
n.sw.AddListener(l)
// Start the switch
@ -240,16 +251,16 @@ func (n *Node) OnStart() error {
}
// If seeds exist, add them to the address book and dial out
if n.config.GetString("seeds") != "" {
if n.config.P2P.Seeds != "" {
// dial out
seeds := strings.Split(n.config.GetString("seeds"), ",")
seeds := strings.Split(n.config.P2P.Seeds, ",")
if err := n.DialSeeds(seeds); err != nil {
return err
}
}
// Run the RPC server
if n.config.GetString("rpc_laddr") != "" {
if n.config.RPCListenAddress != "" {
listeners, err := n.startRPC()
if err != nil {
return err
@ -313,7 +324,7 @@ func (n *Node) ConfigureRPC() {
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := strings.Split(n.config.GetString("rpc_laddr"), ",")
listenAddrs := strings.Split(n.config.RPCListenAddress, ",")
// we may expose the rpc over both a unix and tcp socket
listeners := make([]net.Listener, len(listenAddrs))
@ -330,7 +341,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
}
// we expose a simplified api over grpc for convenience to app devs
grpcListenAddr := n.config.GetString("grpc_laddr")
grpcListenAddr := n.config.GRPCListenAddress
if grpcListenAddr != "" {
listener, err := grpccore.StartGRPCServer(grpcListenAddr)
if err != nil {
@ -387,8 +398,8 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
nodeInfo := &p2p.NodeInfo{
PubKey: n.privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
Moniker: n.config.GetString("moniker"),
Network: n.config.GetString("chain_id"),
Moniker: n.config.Moniker,
Network: n.consensusState.GetState().ChainID,
Version: version.Version,
Other: []string{
cmn.Fmt("wire_version=%v", wire.Version),
@ -400,9 +411,10 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
}
// include git hash in the nodeInfo if available
if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
// TODO: use ld-flags
/*if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}
}*/
if !n.sw.IsListening() {
return nodeInfo
@ -411,7 +423,7 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
p2pListener := n.sw.Listeners()[0]
p2pHost := p2pListener.ExternalAddress().IP.String()
p2pPort := p2pListener.ExternalAddress().Port
rpcListenAddr := n.config.GetString("rpc_laddr")
rpcListenAddr := n.config.RPCListenAddress
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
@ -442,12 +454,3 @@ func ProtocolAndAddress(listenAddr string) (string, string) {
}
//------------------------------------------------------------------------------
func mempoolConfig(config *viper.Viper) mempl.Config {
return mempl.Config{
Recheck: config.GetBool("mempool_recheck"),
RecheckEmpty: config.GetBool("mempool_recheck_empty"),
Broadcast: config.GetBool("mempool_broadcast"),
WalDir: config.GetString("mempool_wal_dir"),
}
}

p2p/config.go (+17, -0)

@ -4,6 +4,23 @@ import (
"github.com/spf13/viper"
)
// for node.Config
type NetworkConfig struct {
ListenAddress string `mapstructure:"laddr"`
Seeds string `mapstructure:"seeds"`
SkipUPNP bool `mapstructure:"skip_upnp"`
AddrBookFile string `mapstructure:"addr_book_file"`
AddrBookStrict bool `mapstructure:"addr_book_strict"`
PexReactor bool `mapstructure:"pex_reactor"`
}
func NewDefaultConfig(rootDir string) *NetworkConfig {
return &NetworkConfig{
AddrBookFile: rootDir + "/addrbook.json",
AddrBookStrict: true,
}
}
const (
// Switch config keys
configKeyDialTimeoutSeconds = "dial_timeout_seconds"


types/block.go (+2, -2)

@ -15,8 +15,8 @@ import (
)
const (
MaxBlockSize = 22020096 // 21MB TODO make it configurable
DefaultPartSetSize = 65536 // 64kB TODO: put part size in parts header?
MaxBlockSize = 22020096 // 21MB TODO make it configurable
DefaultBlockPartSize = 65536 // 64kB TODO: put part size in parts header?
)
type Block struct {

