chunker_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto/sha3"
)

/*
Tests TreeChunker by splitting and joining a random byte slice
*/

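// test is the subset of testing.TB used by chunkerTester, so the same
// helpers can be driven by both *testing.T and *testing.B.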
type test interface {
	Fatalf(string, ...interface{})
	Logf(string, ...interface{})
}

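// chunkerTester drives Split/Append/Join round trips. It caches generated
// inputs by size and keeps the produced chunks in memory, acting as a mock
// chunk store for Join.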
type chunkerTester struct {
	inputs map[uint64][]byte
	chunks map[string]*Chunk
	t      test
}

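// Split runs the chunker's Split on data and collects the emitted chunks into
// self.chunks until quitC is closed. If expectedError is nil, any Split error
// is wrapped and returned to the caller.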
func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key, err error) {
	// reset
	self.chunks = make(map[string]*Chunk)

	if self.inputs == nil {
		self.inputs = make(map[uint64][]byte)
	}

	quitC := make(chan bool)
	timeout := time.After(600 * time.Second)
	if chunkC != nil {
		go func() error {
			for {
				select {
				case <-timeout:
					return errors.New("Split timeout error")
				case <-quitC:
					return nil
				case chunk := <-chunkC:
					// self.chunks = append(self.chunks, chunk)
					self.chunks[chunk.Key.String()] = chunk
					if chunk.wg != nil {
						chunk.wg.Done()
					}
				}
			}
		}()
	}
	key, err = chunker.Split(data, size, chunkC, swg, nil)
	if err != nil && expectedError == nil {
		err = fmt.Errorf("Split error: %v", err)
	}
	if chunkC != nil {
		if swg != nil {
			swg.Wait()
		}
		close(quitC)
	}
	return key, err
}

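// Append runs the chunker's Append on data starting from rootKey. Chunks not
// yet seen are stored in self.chunks; chunks that are already known get their
// data served back, mimicking retrieval from a store.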
func (self *chunkerTester) Append(chunker Splitter, rootKey Key, data io.Reader, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key, err error) {
	quitC := make(chan bool)
	timeout := time.After(60 * time.Second)
	if chunkC != nil {
		go func() error {
			for {
				select {
				case <-timeout:
					return errors.New("Append timeout error")
				case <-quitC:
					return nil
				case chunk := <-chunkC:
					if chunk != nil {
						stored, success := self.chunks[chunk.Key.String()]
						if !success {
							// Requesting data
							self.chunks[chunk.Key.String()] = chunk
							if chunk.wg != nil {
								chunk.wg.Done()
							}
						} else {
							// getting data
							chunk.SData = stored.SData
							chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
							close(chunk.C)
						}
					}
				}
			}
		}()
	}
	key, err = chunker.Append(rootKey, data, chunkC, swg, nil)
	if err != nil && expectedError == nil {
		err = fmt.Errorf("Append error: %v", err)
	}
	if chunkC != nil {
		if swg != nil {
			swg.Wait()
		}
		close(quitC)
	}
	return key, err
}

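// Join starts the chunker's Join on key and serves chunk requests from
// self.chunks in a background goroutine, mocking a chunk store retrieval.
// quitC is closed once the caller closes chunkC.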
func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader {
	// reset but not the chunks
	reader := chunker.Join(key, chunkC)

	timeout := time.After(600 * time.Second)
	i := 0
	go func() error {
		for {
			select {
			case <-timeout:
				return errors.New("Join timeout error")
			case chunk, ok := <-chunkC:
				if !ok {
					close(quitC)
					return nil
				}
				// this just mocks the behaviour of a chunk store retrieval
				stored, success := self.chunks[chunk.Key.String()]
				if !success {
					return errors.New("Not found")
				}
				chunk.SData = stored.SData
				chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
				close(chunk.C)
				i++
			}
		}
	}()
	return reader
}

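// testRandomBrokenData splits n random bytes through a reader that fails
// halfway and checks that Split surfaces the expected "Broken reader" error.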
func testRandomBrokenData(splitter Splitter, n int, tester *chunkerTester) {
	data := io.LimitReader(rand.Reader, int64(n))
	brokendata := brokenLimitReader(data, n, n/2)

	buf := make([]byte, n)
	_, err := brokendata.Read(buf)
	if err == nil || err.Error() != "Broken reader" {
		tester.t.Fatalf("Broken reader is not broken, hence broken. Returns: %v", err)
	}

	data = io.LimitReader(rand.Reader, int64(n))
	brokendata = brokenLimitReader(data, n, n/2)

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	expectedError := fmt.Errorf("Broken reader")
	key, err := tester.Split(splitter, brokendata, int64(n), chunkC, swg, expectedError)
	if err == nil || err.Error() != expectedError.Error() {
		tester.t.Fatalf("Not receiving the correct error! Expected %v, received %v", expectedError, err)
	}
	tester.t.Logf(" Key = %v\n", key)
}

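// testRandomData splits n bytes of (cached) random data with the given
// splitter, joins them back with a TreeChunker, and verifies the output
// matches the input. The root key is returned so callers can compare keys
// across chunker implementations.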
func testRandomData(splitter Splitter, n int, tester *chunkerTester) Key {
	if tester.inputs == nil {
		tester.inputs = make(map[uint64][]byte)
	}
	input, found := tester.inputs[uint64(n)]
	var data io.Reader
	if !found {
		data, input = testDataReaderAndSlice(n)
		tester.inputs[uint64(n)] = input
	} else {
		data = io.LimitReader(bytes.NewReader(input), int64(n))
	}

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	key, err := tester.Split(splitter, data, int64(n), chunkC, swg, nil)
	if err != nil {
		tester.t.Fatalf(err.Error())
	}
	tester.t.Logf(" Key = %v\n", key)

	chunkC = make(chan *Chunk, 1000)
	quitC := make(chan bool)

	chunker := NewTreeChunker(NewChunkerParams())
	reader := tester.Join(chunker, key, 0, chunkC, quitC)
	output := make([]byte, n)
	r, err := reader.Read(output)
	if r != n || err != io.EOF {
		tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
	}
	if input != nil {
		if !bytes.Equal(output, input) {
			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output)
		}
	}
	close(chunkC)
	<-quitC

	return key
}

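// testRandomDataAppend splits n bytes, appends m more bytes to the resulting
// root key, then joins the new key and checks that the output equals the
// concatenation of the two inputs.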
func testRandomDataAppend(splitter Splitter, n, m int, tester *chunkerTester) {
	if tester.inputs == nil {
		tester.inputs = make(map[uint64][]byte)
	}
	input, found := tester.inputs[uint64(n)]
	var data io.Reader
	if !found {
		data, input = testDataReaderAndSlice(n)
		tester.inputs[uint64(n)] = input
	} else {
		data = io.LimitReader(bytes.NewReader(input), int64(n))
	}

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	key, err := tester.Split(splitter, data, int64(n), chunkC, swg, nil)
	if err != nil {
		tester.t.Fatalf(err.Error())
	}
	tester.t.Logf(" Key = %v\n", key)
	// create an append data stream
	appendInput, found := tester.inputs[uint64(m)]
	var appendData io.Reader
	if !found {
		appendData, appendInput = testDataReaderAndSlice(m)
		tester.inputs[uint64(m)] = appendInput
	} else {
		appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m))
	}

	chunkC = make(chan *Chunk, 1000)
	swg = &sync.WaitGroup{}

	newKey, err := tester.Append(splitter, key, appendData, chunkC, swg, nil)
	if err != nil {
		tester.t.Fatalf(err.Error())
	}
	tester.t.Logf(" NewKey = %v\n", newKey)

	chunkC = make(chan *Chunk, 1000)
	quitC := make(chan bool)

	chunker := NewTreeChunker(NewChunkerParams())
	reader := tester.Join(chunker, newKey, 0, chunkC, quitC)
	newOutput := make([]byte, n+m)
	r, err := reader.Read(newOutput)
	if r != (n + m) {
		tester.t.Fatalf("read error read: %v n = %v err = %v\n", r, n, err)
	}

	newInput := append(input, appendInput...)
	if !bytes.Equal(newOutput, newInput) {
		tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", newInput, newOutput)
	}

	close(chunkC)
}

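// TestSha3ForCorrectness checks that the hasher returned by MakeHashFunc for
// SHA3Hash, fed the 8-byte length prefix via ResetWithLength, produces the
// same digest as a raw Keccak256 over the prefixed input.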
func TestSha3ForCorrectness(t *testing.T) {
	tester := &chunkerTester{t: t}

	size := 4096
	input := make([]byte, size+8)
	binary.LittleEndian.PutUint64(input[:8], uint64(size))

	io.LimitReader(bytes.NewReader(input[8:]), int64(size))

	rawSha3 := sha3.NewKeccak256()
	rawSha3.Reset()
	rawSha3.Write(input)
	rawSha3Output := rawSha3.Sum(nil)

	sha3FromMakeFunc := MakeHashFunc(SHA3Hash)()
	sha3FromMakeFunc.ResetWithLength(input[:8])
	sha3FromMakeFunc.Write(input[8:])
	sha3FromMakeFuncOutput := sha3FromMakeFunc.Sum(nil)

	if len(rawSha3Output) != len(sha3FromMakeFuncOutput) {
		tester.t.Fatalf("Original SHA3 and abstracted SHA3 have different lengths %v:%v\n", len(rawSha3Output), len(sha3FromMakeFuncOutput))
	}

	if !bytes.Equal(rawSha3Output, sha3FromMakeFuncOutput) {
		tester.t.Fatalf("Original SHA3 and abstracted SHA3 mismatch %v:%v\n", rawSha3Output, sha3FromMakeFuncOutput)
	}
}

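// TestDataAppend exercises the PyramidChunker append path for a range of
// initial and appended sizes around the chunk size boundaries.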
func TestDataAppend(t *testing.T) {
	sizes := []int{1, 1, 1, 4095, 4096, 4097, 1, 1, 1, 123456, 2345678, 2345678}
	appendSizes := []int{4095, 4096, 4097, 1, 1, 1, 8191, 8192, 8193, 9000, 3000, 5000}

	tester := &chunkerTester{t: t}
	chunker := NewPyramidChunker(NewChunkerParams())
	for i, s := range sizes {
		testRandomDataAppend(chunker, s, appendSizes[i], tester)
	}
}

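// TestRandomData verifies split/join round trips for a range of sizes and
// checks that TreeChunker and PyramidChunker produce identical root keys,
// first with the default (SHA3) hash and then with BMT.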
func TestRandomData(t *testing.T) {
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678}
	tester := &chunkerTester{t: t}

	chunker := NewTreeChunker(NewChunkerParams())
	pyramid := NewPyramidChunker(NewChunkerParams())
	for _, s := range sizes {
		treeChunkerKey := testRandomData(chunker, s, tester)
		pyramidChunkerKey := testRandomData(pyramid, s, tester)
		if treeChunkerKey.String() != pyramidChunkerKey.String() {
			tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
		}
	}

	cp := NewChunkerParams()
	cp.Hash = BMTHash
	chunker = NewTreeChunker(cp)
	pyramid = NewPyramidChunker(cp)
	for _, s := range sizes {
		treeChunkerKey := testRandomData(chunker, s, tester)
		pyramidChunkerKey := testRandomData(pyramid, s, tester)
		if treeChunkerKey.String() != pyramidChunkerKey.String() {
			tester.t.Fatalf("tree chunker BMT and pyramid chunker BMT key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerKey.String(), pyramidChunkerKey.String())
		}
	}
}

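// XTestRandomBrokenData is disabled (the X prefix keeps `go test` from
// picking it up); rename it to TestRandomBrokenData to run it.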
func XTestRandomBrokenData(t *testing.T) {
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678}
	tester := &chunkerTester{t: t}
	chunker := NewTreeChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomBrokenData(chunker, s, tester)
	}
}

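// benchReadAll drains the joined content in 1000-byte reads across its full
// size, so the benchmarks below cover the whole retrieval path.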
func benchReadAll(reader LazySectionReader) {
	size, _ := reader.Size(nil)
	output := make([]byte, 1000)
	for pos := int64(0); pos < size; pos += 1000 {
		reader.ReadAt(output, pos)
	}
}

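// benchmarkJoin measures a full split-then-join round trip over n bytes of
// random data using the TreeChunker.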
func benchmarkJoin(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)

		chunkC := make(chan *Chunk, 1000)
		swg := &sync.WaitGroup{}

		key, err := tester.Split(chunker, data, int64(n), chunkC, swg, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
		chunkC = make(chan *Chunk, 1000)
		quitC := make(chan bool)
		reader := tester.Join(chunker, key, i, chunkC, quitC)
		benchReadAll(reader)
		close(chunkC)
		<-quitC
	}
}

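// The benchmarkSplit* helpers time Split alone (nil chunk channel and wait
// group) for the tree and pyramid chunkers with the SHA3 and BMT hashes.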
func benchmarkSplitTreeSHA3(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		_, err := tester.Split(chunker, data, int64(n), nil, nil, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitTreeBMT(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		cp := NewChunkerParams()
		cp.Hash = BMTHash
		chunker := NewTreeChunker(cp)
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		_, err := tester.Split(chunker, data, int64(n), nil, nil, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		splitter := NewPyramidChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		_, err := tester.Split(splitter, data, int64(n), nil, nil, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
	}
}

func benchmarkSplitPyramidBMT(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		cp := NewChunkerParams()
		cp.Hash = BMTHash
		splitter := NewPyramidChunker(cp)
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		_, err := tester.Split(splitter, data, int64(n), nil, nil, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
	}
}

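// benchmarkAppendPyramid times splitting n bytes and then appending m more
// bytes with the PyramidChunker.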
func benchmarkAppendPyramid(n, m int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewPyramidChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		data1 := testDataReader(m)

		chunkC := make(chan *Chunk, 1000)
		swg := &sync.WaitGroup{}

		key, err := tester.Split(chunker, data, int64(n), chunkC, swg, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}

		chunkC = make(chan *Chunk, 1000)
		swg = &sync.WaitGroup{}

		_, err = tester.Append(chunker, key, data1, chunkC, swg, nil)
		if err != nil {
			tester.t.Fatalf(err.Error())
		}
		close(chunkC)
	}
}

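// The numeric suffix encodes the input size: _k means 10^k bytes and _kh
// means 5*10^k bytes (e.g. _2 = 100 B, _2h = 500 B, _8 = 100 MB).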
func BenchmarkJoin_2(t *testing.B) { benchmarkJoin(100, t) }
func BenchmarkJoin_3(t *testing.B) { benchmarkJoin(1000, t) }
func BenchmarkJoin_4(t *testing.B) { benchmarkJoin(10000, t) }
func BenchmarkJoin_5(t *testing.B) { benchmarkJoin(100000, t) }
func BenchmarkJoin_6(t *testing.B) { benchmarkJoin(1000000, t) }
func BenchmarkJoin_7(t *testing.B) { benchmarkJoin(10000000, t) }
func BenchmarkJoin_8(t *testing.B) { benchmarkJoin(100000000, t) }

func BenchmarkSplitTreeSHA3_2(t *testing.B)  { benchmarkSplitTreeSHA3(100, t) }
func BenchmarkSplitTreeSHA3_2h(t *testing.B) { benchmarkSplitTreeSHA3(500, t) }
func BenchmarkSplitTreeSHA3_3(t *testing.B)  { benchmarkSplitTreeSHA3(1000, t) }
func BenchmarkSplitTreeSHA3_3h(t *testing.B) { benchmarkSplitTreeSHA3(5000, t) }
func BenchmarkSplitTreeSHA3_4(t *testing.B)  { benchmarkSplitTreeSHA3(10000, t) }
func BenchmarkSplitTreeSHA3_4h(t *testing.B) { benchmarkSplitTreeSHA3(50000, t) }
func BenchmarkSplitTreeSHA3_5(t *testing.B)  { benchmarkSplitTreeSHA3(100000, t) }
func BenchmarkSplitTreeSHA3_6(t *testing.B)  { benchmarkSplitTreeSHA3(1000000, t) }
func BenchmarkSplitTreeSHA3_7(t *testing.B)  { benchmarkSplitTreeSHA3(10000000, t) }
func BenchmarkSplitTreeSHA3_8(t *testing.B)  { benchmarkSplitTreeSHA3(100000000, t) }

func BenchmarkSplitTreeBMT_2(t *testing.B)  { benchmarkSplitTreeBMT(100, t) }
func BenchmarkSplitTreeBMT_2h(t *testing.B) { benchmarkSplitTreeBMT(500, t) }
func BenchmarkSplitTreeBMT_3(t *testing.B)  { benchmarkSplitTreeBMT(1000, t) }
func BenchmarkSplitTreeBMT_3h(t *testing.B) { benchmarkSplitTreeBMT(5000, t) }
func BenchmarkSplitTreeBMT_4(t *testing.B)  { benchmarkSplitTreeBMT(10000, t) }
func BenchmarkSplitTreeBMT_4h(t *testing.B) { benchmarkSplitTreeBMT(50000, t) }
func BenchmarkSplitTreeBMT_5(t *testing.B)  { benchmarkSplitTreeBMT(100000, t) }
func BenchmarkSplitTreeBMT_6(t *testing.B)  { benchmarkSplitTreeBMT(1000000, t) }
func BenchmarkSplitTreeBMT_7(t *testing.B)  { benchmarkSplitTreeBMT(10000000, t) }
func BenchmarkSplitTreeBMT_8(t *testing.B)  { benchmarkSplitTreeBMT(100000000, t) }

func BenchmarkSplitPyramidSHA3_2(t *testing.B)  { benchmarkSplitPyramidSHA3(100, t) }
func BenchmarkSplitPyramidSHA3_2h(t *testing.B) { benchmarkSplitPyramidSHA3(500, t) }
func BenchmarkSplitPyramidSHA3_3(t *testing.B)  { benchmarkSplitPyramidSHA3(1000, t) }
func BenchmarkSplitPyramidSHA3_3h(t *testing.B) { benchmarkSplitPyramidSHA3(5000, t) }
func BenchmarkSplitPyramidSHA3_4(t *testing.B)  { benchmarkSplitPyramidSHA3(10000, t) }
func BenchmarkSplitPyramidSHA3_4h(t *testing.B) { benchmarkSplitPyramidSHA3(50000, t) }
func BenchmarkSplitPyramidSHA3_5(t *testing.B)  { benchmarkSplitPyramidSHA3(100000, t) }
func BenchmarkSplitPyramidSHA3_6(t *testing.B)  { benchmarkSplitPyramidSHA3(1000000, t) }
func BenchmarkSplitPyramidSHA3_7(t *testing.B)  { benchmarkSplitPyramidSHA3(10000000, t) }
func BenchmarkSplitPyramidSHA3_8(t *testing.B)  { benchmarkSplitPyramidSHA3(100000000, t) }

func BenchmarkSplitPyramidBMT_2(t *testing.B)  { benchmarkSplitPyramidBMT(100, t) }
func BenchmarkSplitPyramidBMT_2h(t *testing.B) { benchmarkSplitPyramidBMT(500, t) }
func BenchmarkSplitPyramidBMT_3(t *testing.B)  { benchmarkSplitPyramidBMT(1000, t) }
func BenchmarkSplitPyramidBMT_3h(t *testing.B) { benchmarkSplitPyramidBMT(5000, t) }
func BenchmarkSplitPyramidBMT_4(t *testing.B)  { benchmarkSplitPyramidBMT(10000, t) }
func BenchmarkSplitPyramidBMT_4h(t *testing.B) { benchmarkSplitPyramidBMT(50000, t) }
func BenchmarkSplitPyramidBMT_5(t *testing.B)  { benchmarkSplitPyramidBMT(100000, t) }
func BenchmarkSplitPyramidBMT_6(t *testing.B)  { benchmarkSplitPyramidBMT(1000000, t) }
func BenchmarkSplitPyramidBMT_7(t *testing.B)  { benchmarkSplitPyramidBMT(10000000, t) }
func BenchmarkSplitPyramidBMT_8(t *testing.B)  { benchmarkSplitPyramidBMT(100000000, t) }

func BenchmarkAppendPyramid_2(t *testing.B)  { benchmarkAppendPyramid(100, 1000, t) }
func BenchmarkAppendPyramid_2h(t *testing.B) { benchmarkAppendPyramid(500, 1000, t) }
func BenchmarkAppendPyramid_3(t *testing.B)  { benchmarkAppendPyramid(1000, 1000, t) }
func BenchmarkAppendPyramid_4(t *testing.B)  { benchmarkAppendPyramid(10000, 1000, t) }
func BenchmarkAppendPyramid_4h(t *testing.B) { benchmarkAppendPyramid(50000, 1000, t) }
func BenchmarkAppendPyramid_5(t *testing.B) { benchmarkAppendPyramid(100000, 1000, t) }
func BenchmarkAppendPyramid_6(t *testing.B) { benchmarkAppendPyramid(1000000, 1000, t) }
func BenchmarkAppendPyramid_7(t *testing.B) { benchmarkAppendPyramid(10000000, 1000, t) }
func BenchmarkAppendPyramid_8(t *testing.B) { benchmarkAppendPyramid(100000000, 1000, t) }

// go test -timeout 20m -cpu 4 -bench=./swarm/storage -run no
// If you don't add the timeout argument above, the benchmark will time out and dump.
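// A single benchmark family can also be selected with a -bench regexp on the
// function name, e.g.:
//   go test -run no -bench 'BenchmarkSplitTree' -timeout 20m ./swarm/storage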