You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

334 lines
10 KiB

  1. package p2p
  2. import (
  3. "errors"
  4. "sort"
  5. "github.com/gogo/protobuf/proto"
  6. "github.com/tendermint/tendermint/libs/log"
  7. )
  8. // ============================================================================
  9. // TODO: Types and business logic below are temporary and will be removed once
  10. // the legacy p2p stack is removed in favor of the new model.
  11. //
  12. // ref: https://github.com/tendermint/tendermint/issues/5670
  13. // ============================================================================
// Compile-time assertion that ReactorShim satisfies the legacy Reactor interface.
var _ Reactor = (*ReactorShim)(nil)

type (
	// messageValidator is implemented by proto messages that can perform
	// self-validation after decoding (see Receive, which calls Validate and
	// stops the peer on failure).
	messageValidator interface {
		Validate() error
	}

	// ReactorShim defines a generic shim wrapper around a BaseReactor. It is
	// responsible for wiring up legacy p2p behavior to the new p2p semantics
	// (e.g. proxying Envelope messages to legacy peers).
	ReactorShim struct {
		BaseReactor

		Name        string
		PeerUpdates *PeerUpdates
		Channels    map[ChannelID]*ChannelShim
	}

	// ChannelShim defines a generic shim wrapper around a legacy p2p channel
	// and the new p2p Channel. It also includes the raw bi-directional Go channels
	// so we can proxy message delivery.
	ChannelShim struct {
		Descriptor *ChannelDescriptor
		Channel    *Channel
		inCh       chan<- Envelope   // inbound: messages decoded in Receive are sent here
		outCh      <-chan Envelope   // outbound: drained by proxyPeerEnvelopes
		errCh      <-chan PeerError  // peer errors: drained by handlePeerErrors
	}

	// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel
	// and the proto.Message the new p2p Channel is responsible for handling.
	// A ChannelDescriptorShim is not contained in ReactorShim, but is rather
	// used to construct a ReactorShim.
	ChannelDescriptorShim struct {
		MsgType    proto.Message
		Descriptor *ChannelDescriptor
	}
)
  47. func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim {
  48. channels := make(map[ChannelID]*ChannelShim)
  49. for _, cds := range descriptors {
  50. chShim := NewChannelShim(cds, 0)
  51. channels[chShim.Channel.ID] = chShim
  52. }
  53. rs := &ReactorShim{
  54. Name: name,
  55. PeerUpdates: NewPeerUpdates(make(chan PeerUpdate), 0),
  56. Channels: channels,
  57. }
  58. rs.BaseReactor = *NewBaseReactor(name, rs)
  59. rs.SetLogger(logger)
  60. return rs
  61. }
  62. func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim {
  63. inCh := make(chan Envelope, buf)
  64. outCh := make(chan Envelope, buf)
  65. errCh := make(chan PeerError, buf)
  66. return &ChannelShim{
  67. Descriptor: cds.Descriptor,
  68. Channel: NewChannel(
  69. ChannelID(cds.Descriptor.ID),
  70. cds.MsgType,
  71. inCh,
  72. outCh,
  73. errCh,
  74. ),
  75. inCh: inCh,
  76. outCh: outCh,
  77. errCh: errCh,
  78. }
  79. }
// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate
// go-routine where we listen for outbound envelopes sent during Receive
// executions (or anything else that may send on the Channel) and proxy them to
// the corresponding Peer using the To field from the envelope.
//
// Each goroutine runs until the channel's outCh is closed. Failures to wrap,
// encode, or deliver a single envelope are logged and that envelope is
// dropped; the loop continues with the next one.
func (rs *ReactorShim) proxyPeerEnvelopes() {
	for _, cs := range rs.Channels {
		go func(cs *ChannelShim) {
			for e := range cs.outCh {
				// Clone the channel's registered message type so we have a
				// fresh, zeroed proto.Message to work with.
				msg := proto.Clone(cs.Channel.messageType)
				msg.Reset()

				// If the channel's message type is a Wrapper, the payload must
				// be wrapped before encoding; otherwise encode the payload
				// directly.
				wrapper, ok := msg.(Wrapper)
				if ok {
					if err := wrapper.Wrap(e.Message); err != nil {
						rs.Logger.Error(
							"failed to proxy envelope; failed to wrap message",
							"ch_id", cs.Descriptor.ID,
							"err", err,
						)
						continue
					}
				} else {
					msg = e.Message
				}

				bz, err := proto.Marshal(msg)
				if err != nil {
					rs.Logger.Error(
						"failed to proxy envelope; failed to encode message",
						"ch_id", cs.Descriptor.ID,
						"err", err,
					)
					continue
				}

				switch {
				case e.Broadcast:
					// Broadcast takes precedence over a direct recipient.
					rs.Switch.Broadcast(cs.Descriptor.ID, bz)

				case e.To != "":
					// Direct delivery: resolve the recipient via the legacy
					// Switch's peer set.
					src := rs.Switch.peers.Get(e.To)
					if src == nil {
						rs.Logger.Debug(
							"failed to proxy envelope; failed to find peer",
							"ch_id", cs.Descriptor.ID,
							"peer", e.To,
						)
						continue
					}

					if !src.Send(cs.Descriptor.ID, bz) {
						// This usually happens when we try to send across a channel
						// that the peer doesn't have open. To avoid bloating the
						// logs we set this to be Debug
						rs.Logger.Debug(
							"failed to proxy message to peer",
							"ch_id", cs.Descriptor.ID,
							"peer", e.To,
						)
					}

				default:
					// Neither Broadcast nor To was set — there is no recipient.
					rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID)
				}
			}
		}(cs)
	}
}
  142. // handlePeerErrors iterates over each p2p Channel and starts a separate go-routine
  143. // where we listen for peer errors. For each peer error, we find the peer from
  144. // the legacy p2p Switch and execute a StopPeerForError call with the corresponding
  145. // peer error.
  146. func (rs *ReactorShim) handlePeerErrors() {
  147. for _, cs := range rs.Channels {
  148. go func(cs *ChannelShim) {
  149. for pErr := range cs.errCh {
  150. if pErr.NodeID != "" {
  151. peer := rs.Switch.peers.Get(pErr.NodeID)
  152. if peer == nil {
  153. rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID)
  154. continue
  155. }
  156. rs.Switch.StopPeerForError(peer, pErr.Err)
  157. }
  158. }
  159. }(cs)
  160. }
  161. }
  162. // OnStart executes the reactor shim's OnStart hook where we start all the
  163. // necessary go-routines in order to proxy peer envelopes and errors per p2p
  164. // Channel.
  165. func (rs *ReactorShim) OnStart() error {
  166. if rs.Switch == nil {
  167. return errors.New("proxyPeerEnvelopes: reactor shim switch is nil")
  168. }
  169. // start envelope proxying and peer error handling in separate go routines
  170. rs.proxyPeerEnvelopes()
  171. rs.handlePeerErrors()
  172. return nil
  173. }
  174. // GetChannel returns a p2p Channel reference for a given ChannelID. If no
  175. // Channel exists, nil is returned.
  176. func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel {
  177. channelShim, ok := rs.Channels[cID]
  178. if ok {
  179. return channelShim.Channel
  180. }
  181. return nil
  182. }
  183. // GetChannels implements the legacy Reactor interface for getting a slice of all
  184. // the supported ChannelDescriptors.
  185. func (rs *ReactorShim) GetChannels() []*ChannelDescriptor {
  186. sortedChIDs := make([]ChannelID, 0, len(rs.Channels))
  187. for cID := range rs.Channels {
  188. sortedChIDs = append(sortedChIDs, cID)
  189. }
  190. sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] })
  191. descriptors := make([]*ChannelDescriptor, len(rs.Channels))
  192. for i, cID := range sortedChIDs {
  193. descriptors[i] = rs.Channels[cID].Descriptor
  194. }
  195. return descriptors
  196. }
// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh.
// The embedding reactor must be sure to listen for messages on this channel to
// handle adding a peer. If the PeerUpdates subscription has been closed
// (Done fires), the update is silently dropped instead of blocking forever.
func (rs *ReactorShim) AddPeer(peer Peer) {
	select {
	case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}:
		rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp)

	case <-rs.PeerUpdates.Done():
		// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
		// This is because there may be numerous spawned goroutines that are
		// attempting to send on the updateCh go channel and when the reactor stops
		// we do not want to preemptively close the channel as that could result in
		// panics sending on a closed channel. This also means that reactors MUST
		// be certain there are NO listeners on the updateCh channel when closing or
		// stopping.
	}
}
// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh.
// The embedding reactor must be sure to listen for messages on this channel to
// handle removing a peer. The reason is only logged, not forwarded in the
// PeerUpdate. If the PeerUpdates subscription has been closed (Done fires),
// the update is silently dropped instead of blocking forever.
func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) {
	select {
	case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}:
		rs.Logger.Debug(
			"sent peer update",
			"reactor", rs.Name,
			"peer", peer.ID(),
			"reason", reason,
			"status", PeerStatusDown,
		)

	case <-rs.PeerUpdates.Done():
		// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
		// This is because there may be numerous spawned goroutines that are
		// attempting to send on the updateCh go channel and when the reactor stops
		// we do not want to preemptively close the channel as that could result in
		// panics sending on a closed channel. This also means that reactors MUST
		// be certain there are NO listeners on the updateCh channel when closing or
		// stopping.
	}
}
// Receive implements a generic wrapper around implementing the Receive method
// on the legacy Reactor p2p interface. If the reactor is running, Receive will
// find the corresponding new p2p Channel, create and decode the appropriate
// proto.Message from the msgBytes, execute any validation and finally construct
// and send a p2p Envelope on the appropriate p2p Channel.
//
// Decode and validation failures stop the offending peer via the Switch;
// unwrap failures only log and drop the message.
func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) {
	if !rs.IsRunning() {
		return
	}

	cID := ChannelID(chID)
	channelShim, ok := rs.Channels[cID]
	if !ok {
		rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID)
		return
	}

	// Clone the channel's registered message type so we decode into a fresh,
	// zeroed proto.Message.
	msg := proto.Clone(channelShim.Channel.messageType)
	msg.Reset()

	if err := proto.Unmarshal(msgBytes, msg); err != nil {
		rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err)
		rs.Switch.StopPeerForError(src, err)
		return
	}

	// If the decoded message supports self-validation, a validation failure is
	// treated as peer misbehavior.
	validator, ok := msg.(messageValidator)
	if ok {
		if err := validator.Validate(); err != nil {
			rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err)
			rs.Switch.StopPeerForError(src, err)
			return
		}
	}

	// If the message is a Wrapper, unwrap it so the envelope carries the inner
	// payload rather than the wrapper type.
	wrapper, ok := msg.(Wrapper)
	if ok {
		var err error

		msg, err = wrapper.Unwrap()
		if err != nil {
			rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err)
			return
		}
	}

	// Proxy the decoded message onto the new p2p Channel, unless the Channel
	// has already been shut down (Done fires), in which case it is dropped.
	select {
	case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}:
		rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID())

	case <-channelShim.Channel.Done():
		// NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel.
		// This is because there may be numerous spawned goroutines that are
		// attempting to send on the inbound channel and when the reactor stops we
		// do not want to preemptively close the channel as that could result in
		// panics sending on a closed channel. This also means that reactors MUST
		// be certain there are NO listeners on the inbound channel when closing or
		// stopping.
	}
}