package main

import (
	"bufio"
	"container/heap"
	"fmt"
	"math/rand"
	"os"
)

const seed = 0
const numNodes = 50000 // Total number of nodes to simulate
const numNodes8 = (numNodes + 7) / 8
const minNumPeers = 8          // Each node should be connected to at least this many peers
const maxNumPeers = 12         // ... and at most this many
const latencyMS = uint16(500)  // One-way packet latency
const partTxMS = uint16(3)     // Transmission time per peer of 100B of data.
const sendQueueCapacity = 3200 // Number of messages to queue between peers.
const maxAllowableRank = 2     // After this, the data is considered waste.
const tryUnsolicited = 0.02    // Chance of sending an unsolicited piece of data.
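
// Note on units: all simulation timestamps below are uint16 milliseconds, so
// clocks wrap after 65535 ms. The constants above are assumed to keep each
// round well within that bound.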

var log *bufio.Writer

func init() {
	rand.Seed(seed)
	openFile()
}

//-----------------------------------------------------------------------------

func openFile() {
	// Open the output file.
	fo, err := os.Create("output.txt")
	if err != nil {
		panic(err)
	}
	// Make a write buffer.
	log = bufio.NewWriter(fo)
}

// logWrite buffers s. Note that log is never flushed explicitly, so call
// log.Flush() before exiting if the logged output matters.
func logWrite(s string) {
	log.Write([]byte(s))
}

//-----------------------------------------------------------------------------

type Peer struct {
	node   *Node  // Pointer to node
	sent   uint16 // Time of last packet send, including transmit time.
	remote uint8  // SomeNode.peers[x].node.peers[remote].node is SomeNode for all x.
	wanted []byte // Bitarray of wanted pieces.
	given  []byte // Bitarray of given pieces.
}

func newPeer(pNode *Node, remote uint8) *Peer {
	peer := &Peer{
		node:   pNode,
		remote: remote,
		wanted: make([]byte, numNodes8),
		given:  make([]byte, numNodes8),
	}
	for i := 0; i < numNodes8; i++ {
		peer.wanted[i] = byte(0xff)
	}
	return peer
}

// Send a data event to the peer, or return false if the queue is "full".
// Depending on how many event packets are "queued" for the peer,
// the actual recvTime may be adjusted to be later.
func (p *Peer) sendEventData(event EventData) bool {
	desiredRecvTime := event.RecvTime()
	minRecvTime := p.sent + partTxMS + latencyMS
	if desiredRecvTime >= minRecvTime {
		p.node.sendEvent(event)
		// Now p.sent + latencyMS == desiredRecvTime; when
		// desiredRecvTime == minRecvTime, this is equivalent to
		// p.sent += partTxMS.
		p.sent = desiredRecvTime - latencyMS
		return true
	}
	if (minRecvTime-desiredRecvTime)/partTxMS > sendQueueCapacity {
		return false // Queue is "full".
	}
	event.time = minRecvTime // Adjust recvTime.
	p.node.sendEvent(event)
	p.sent += partTxMS
	return true
}
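
// For intuition on the queueing model above: with latencyMS=500 and
// partTxMS=3, an idle peer delivers at event-time, while a backlogged peer
// delivers at p.sent+503 ms. A send is rejected once the backlog exceeds
// sendQueueCapacity packets, i.e. once p.sent runs more than
// 3200*3 = 9600 ms ahead of the event's nominal send time.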

// Returns true if the sendQueue is not "full".
func (p *Peer) canSendData(now uint16) bool {
	return (p.sent - now) < sendQueueCapacity
}

// Since EventDataResponse events are much smaller, we don't consider the
// transmit time, and assume that the sendQueue is always free.
func (p *Peer) sendEventDataResponse(event EventDataResponse) {
	p.node.sendEvent(event)
}

// Does the peer's .wanted (as received by an EventDataResponse event) contain part?
func (p *Peer) wants(part uint16) bool {
	return p.wanted[part/8]&(1<<(part%8)) > 0
}

func (p *Peer) setWants(part uint16, want bool) {
	if want {
		p.wanted[part/8] |= (1 << (part % 8))
	} else {
		p.wanted[part/8] &= ^(1 << (part % 8))
	}
}

func (p *Peer) setGiven(part uint16) {
	p.given[part/8] |= (1 << (part % 8))
}
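
// Bitarray layout used by wants/setWants/setGiven: part p lives in byte p/8
// at bit p%8. For example, part 10 maps to wanted[1] & 0x04 (byte 1, bit 2).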

// Reset state in preparation for new "round"
func (p *Peer) reset() {
	for i := 0; i < numNodes8; i++ {
		p.given[i] = byte(0x00)
	}
	p.sent = 0
}

//-----------------------------------------------------------------------------

type Node struct {
	index      int
	peers      []*Peer
	parts      []byte  // Bitarray of received parts.
	partsCount []uint8 // Count of how many times parts were received.
	events     *Heap
}

// Reset state in preparation for new "round"
func (n *Node) reset() {
	for i := 0; i < numNodes8; i++ {
		n.parts[i] = byte(0x00)
	}
	for i := 0; i < numNodes; i++ {
		n.partsCount[i] = uint8(0)
	}
	n.events = NewHeap()
	for _, peer := range n.peers {
		peer.reset()
	}
}

// Returns the fraction of all parts this node has received at least once.
func (n *Node) fill() float64 {
	gotten := 0
	for _, count := range n.partsCount {
		if count > 0 {
			gotten += 1
		}
	}
	return float64(gotten) / float64(numNodes)
}

func (n *Node) sendEvent(event Event) {
	n.events.Push(event, event.RecvTime())
}

func (n *Node) recvEvent() Event {
	return n.events.Pop().(Event)
}

func (n *Node) receive(part uint16) uint8 {
	/*
		defer func() {
			e := recover()
			if e != nil {
				fmt.Println(part, len(n.parts), len(n.partsCount), part/8)
				panic(e)
			}
		}()
	*/
	n.parts[part/8] |= (1 << (part % 8))
	n.partsCount[part] += 1
	return n.partsCount[part]
}
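
// receive's return value is the part's "rank": 1 means this node got the
// part for the first time, higher values mean duplicates. Peers reporting a
// rank above maxAllowableRank are considered to be sending waste.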

// Returns false if the nodes are the same node, are already connected,
// or if the remote node has too many connections.
func (n *Node) canConnectTo(node *Node) bool {
	if n == node {
		return false // No self-connections.
	}
	if len(node.peers) >= maxNumPeers {
		return false
	}
	for _, peer := range n.peers {
		if peer.node == node {
			return false
		}
	}
	return true
}

func (n *Node) isFull() bool {
	for _, count := range n.partsCount {
		if count == 0 {
			return false
		}
	}
	return true
}

func (n *Node) String() string {
	return fmt.Sprintf("{N:%d}", n.index)
}

//-----------------------------------------------------------------------------

type Event interface {
	RecvTime() uint16
}

type EventData struct {
	time uint16 // Time of receipt.
	src  uint8  // Src node's peer index on destination node.
	part uint16
}

func (e EventData) RecvTime() uint16 {
	return e.time
}

func (e EventData) String() string {
	return fmt.Sprintf("[%d:%d:%d]", e.time, e.src, e.part)
}

type EventDataResponse struct {
	time uint16 // Time of receipt.
	src  uint8  // Src node's peer index on destination node.
	part uint16 // In response to given part.
	rank uint8  // If this is 1, node was first to give peer part.
}

func (e EventDataResponse) RecvTime() uint16 {
	return e.time
}

func (e EventDataResponse) String() string {
	return fmt.Sprintf("[%d:%d:%d:%d]", e.time, e.src, e.part, e.rank)
}
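
// Protocol flow, as implemented in main below: a node that receives an
// EventData replies with an EventDataResponse carrying the part's receive
// rank; the original sender then updates its .wanted bitarray for that peer,
// so low-rank (useful) senders keep sending and high-rank (wasteful) ones stop.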

//-----------------------------------------------------------------------------

func createNetwork() []*Node {
	nodes := make([]*Node, numNodes)
	for i := 0; i < numNodes; i++ {
		n := &Node{
			index:      i,
			peers:      []*Peer{},
			parts:      make([]byte, numNodes8),
			partsCount: make([]uint8, numNodes),
			events:     NewHeap(),
		}
		nodes[i] = n
	}
	for i := 0; i < numNodes; i++ {
		n := nodes[i]
		for j := 0; j < minNumPeers; j++ {
			if len(n.peers) > j {
				// Already set, continue.
				continue
			}
			pidx := rand.Intn(numNodes)
			for !n.canConnectTo(nodes[pidx]) {
				pidx = rand.Intn(numNodes)
			}
			// Connect to nodes[pidx], wiring both directions so that
			// n.peers[j].node.peers[remote_j].node == n.
			remote := nodes[pidx]
			remote_j := len(remote.peers)
			n.peers = append(n.peers, newPeer(remote, uint8(remote_j)))
			remote.peers = append(remote.peers, newPeer(n, uint8(j)))
		}
	}
	return nodes
}
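
// With the checks in canConnectTo, every node should end up with between
// minNumPeers and maxNumPeers connections: each node tops itself up to
// minNumPeers on its own turn, and nodes that already have maxNumPeers peers
// reject further incoming connections.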

func countFull(nodes []*Node) (fullCount int) {
	for _, node := range nodes {
		if node.isFull() {
			fullCount += 1
		}
	}
	return fullCount
}

type runStat struct {
	time uint16  // time for all events to propagate
	fill float64 // avg % of pieces gotten
	succ float64 // % of times the sendQueue was not full
	dups float64 // % of times that a received data was duplicate
}

func (s runStat) String() string {
	return fmt.Sprintf("{t:%v/fi:%.5f/su:%.5f/du:%.5f}", s.time, s.fill, s.succ, s.dups)
}

func main() {
	// Global vars
	nodes := createNetwork()
	runStats := []runStat{}

	// Keep iterating and improving .wanted
	for {
		timeMS := uint16(0)

		// Reset all node state.
		for _, node := range nodes {
			node.reset()
		}

		// Each node sends a part to its peers.
		for i, node := range nodes {
			// TODO: make it staggered.
			timeMS := uint16(0) // Scoped: shadows the outer timeMS.
			for _, peer := range node.peers {
				recvTime := timeMS + latencyMS + partTxMS
				event := EventData{
					time: recvTime,
					src:  peer.remote,
					part: uint16(i),
				}
				peer.sendEventData(event)
				//timeMS += partTxMS
			}
		}
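
		// At this point every node i has seeded the network with its own
		// part (part index == node index), so a round ends when all
		// numNodes parts have propagated to every node (or events dry up).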

		numEventsZero := 0  // Times no events have occurred.
		numSendSuccess := 0 // Times data send was successful.
		numSendFailure := 0 // Times data send failed due to the queue being full.
		numReceives := 0    // Number of data items received.
		numDups := 0        // Number of data items that were duplicates.

		// Run simulation
		for {
			// Let's run the simulation for each node until endTimeMS.
			// We use latencyMS/2 since causality has at least this much lag.
			endTimeMS := timeMS + latencyMS/2

			// Print out the network for debugging
			/*
				fmt.Printf("simulating until %v\n", endTimeMS)
				if true {
					for i := 0; i < 40; i++ {
						node := nodes[i]
						fmt.Printf("[%v] parts: %X\n", node.index, node.parts)
					}
				}
			*/

			numEvents := 0
			for _, node := range nodes {
				// Iterate over this node's events until event.time >= endTimeMS.
				for {
					_event, ok := node.events.Peek().(Event)
					if !ok || _event.RecvTime() >= endTimeMS {
						break
					}
					node.events.Pop()
					switch event := _event.(type) {
					case EventData:
						numEvents++
						// Process this event.
						rank := node.receive(event.part)
						// Send the rank back to the peer.
						// NOTE: in reality, maybe this doesn't always happen.
						srcPeer := node.peers[event.src]
						srcPeer.setGiven(event.part) // HACK
						srcPeer.sendEventDataResponse(EventDataResponse{
							time: event.time + latencyMS, // TODO: responseTxMS?
							src:  srcPeer.remote,
							part: event.part,
							rank: rank,
						})
						//logWrite(fmt.Sprintf("[%v] t:%v s:%v -> n:%v p:%v r:%v\n", len(runStats), event.time, srcPeer.node.index, node.index, event.part, rank))
						if rank > 1 {
							// Already has this part; ignore this event.
							numReceives++
							numDups++
							continue
						}
						numReceives++
						// Let's iterate over peers & see which want this piece.
						// We don't need to check peer.given because duplicate parts are ignored.
						for _, peer := range node.peers {
							if peer.wants(event.part) {
								//fmt.Print("w")
								sent := peer.sendEventData(EventData{
									time: event.time + latencyMS + partTxMS,
									src:  peer.remote,
									part: event.part,
								})
								if sent {
									//logWrite(fmt.Sprintf("[%v] t:%v S:%v n:%v -> p:%v %v WS\n", len(runStats), event.time, srcPeer.node.index, node.index, peer.node.index, event.part))
									peer.setGiven(event.part)
									numSendSuccess++
								} else {
									//logWrite(fmt.Sprintf("[%v] t:%v S:%v n:%v -> p:%v %v WF\n", len(runStats), event.time, srcPeer.node.index, node.index, peer.node.index, event.part))
									numSendFailure++
								}
							} else {
								//fmt.Print("!")
								// Peer doesn't want it, but sporadically we'll try sending it anyway.
								/*
									if rand.Float32() < tryUnsolicited {
										sent := peer.sendEventData(EventData{
											time: event.time + latencyMS + partTxMS,
											src:  peer.remote,
											part: event.part,
										})
										if sent {
											//logWrite(fmt.Sprintf("[%v] t:%v S:%v n:%v -> p:%v %v TS\n", len(runStats), event.time, srcPeer.node.index, node.index, peer.node.index, event.part))
											peer.setGiven(event.part)
											// numSendSuccess++
										} else {
											//logWrite(fmt.Sprintf("[%v] t:%v S:%v n:%v -> p:%v %v TF\n", len(runStats), event.time, srcPeer.node.index, node.index, peer.node.index, event.part))
											// numSendFailure++
										}
									}
								*/
							}
						}
					case EventDataResponse:
						peer := node.peers[event.src]
						// Adjust peer.wanted accordingly.
						if event.rank <= maxAllowableRank {
							peer.setWants(event.part, true)
						} else {
							peer.setWants(event.part, false)
						}
					}
				}
			}

			if numEvents == 0 {
				numEventsZero++
			} else {
				numEventsZero = 0
			}

			// If the network is full or numEventsZero > 3, quit.
			if countFull(nodes) == numNodes || numEventsZero > 3 {
				fmt.Printf("Done! took %v ms. Past: %v\n", timeMS, runStats)
				fillSum := 0.0
				for _, node := range nodes {
					fillSum += node.fill()
				}
				runStats = append(runStats, runStat{
					time: timeMS,
					fill: fillSum / float64(numNodes),
					succ: float64(numSendSuccess) / float64(numSendSuccess+numSendFailure),
					dups: float64(numDups) / float64(numReceives),
				})
				for i := 0; i < 20; i++ {
					node := nodes[i]
					fmt.Printf("[%v] parts: %X (%f)\n", node.index, node.parts[:80], node.fill())
				}
				for i := 20; i < 2000; i += 200 {
					node := nodes[i]
					fmt.Printf("[%v] parts: %X (%f)\n", node.index, node.parts[:80], node.fill())
				}
				break
			} else {
				fmt.Printf("simulated %v ms. numEvents: %v Past: %v\n", timeMS, numEvents, runStats)
				for i := 0; i < 2; i++ {
					peer := nodes[0].peers[i]
					fmt.Printf("[0].[%v] wanted: %X\n", i, peer.wanted[:80])
					fmt.Printf("[0].[%v] given:  %X\n", i, peer.given[:80])
				}
				for i := 0; i < 5; i++ {
					node := nodes[i]
					fmt.Printf("[%v] parts: %X (%f)\n", node.index, node.parts[:80], node.fill())
				}
			}

			// Let's increment timeMS now.
			timeMS += latencyMS / 2
		} // end simulation
	} // forever loop
}
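
// Each pass of the forever loop above is one gossip round: the ranks
// reported during the previous round have adjusted every peer's .wanted
// bitarray, so later rounds should waste fewer duplicate sends; runStats
// records the trend across rounds.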

// ----------------------------------------------------------------------------

type Heap struct {
	pq priorityQueue
}

func NewHeap() *Heap {
	return &Heap{pq: make([]*pqItem, 0)}
}

func (h *Heap) Len() int {
	return len(h.pq)
}

func (h *Heap) Peek() interface{} {
	if len(h.pq) == 0 {
		return nil
	}
	return h.pq[0].value
}

func (h *Heap) Push(value interface{}, priority uint16) {
	heap.Push(&h.pq, &pqItem{value: value, priority: priority})
}

func (h *Heap) Pop() interface{} {
	if len(h.pq) == 0 {
		return nil
	}
	item := heap.Pop(&h.pq).(*pqItem)
	return item.value
}

/*
func main() {
	h := NewHeap()
	h.Push("msg1", 1)
	h.Push("msg3", 3)
	h.Push("msg2", 2)
	fmt.Println(h.Pop()) // msg1
	fmt.Println(h.Pop()) // msg2
	fmt.Println(h.Pop()) // msg3
}
*/

///////////////////////
// From: http://golang.org/pkg/container/heap/#example__priorityQueue

type pqItem struct {
	value    interface{}
	priority uint16
	index    int
}

type priorityQueue []*pqItem

func (pq priorityQueue) Len() int { return len(pq) }

func (pq priorityQueue) Less(i, j int) bool {
	return pq[i].priority < pq[j].priority
}

func (pq priorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

func (pq *priorityQueue) Push(x interface{}) {
	n := len(*pq)
	item := x.(*pqItem)
	item.index = n
	*pq = append(*pq, item)
}

func (pq *priorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	item.index = -1 // for safety
	*pq = old[0 : n-1]
	return item
}

func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority uint16) {
	heap.Remove(pq, item.index)
	item.value = value
	item.priority = priority
	heap.Push(pq, item)
}