// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
    "errors"
    "fmt"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "gopkg.in/karalabe/cookiejar.v2/collections/prque"
)

var (
    blockCacheItems      = 8192             // Maximum number of blocks to cache before throttling the download
    blockCacheMemory     = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching
    blockCacheSizeWeight = 0.1              // Multiplier to approximate the average block size based on past ones
)

var (
    errNoFetchesPending = errors.New("no fetches pending")
    errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
    Peer    *peerConnection // Peer to which the request was sent
    From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
    Headers []*types.Header // [eth/62] Requested headers, sorted by request order
    Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
    Pending int         // Number of data fetches still pending
    Hash    common.Hash // Hash of the header to prevent recalculating

    Header       *types.Header
    Uncles       []*types.Header
    Transactions types.Transactions
    Receipts     types.Receipts
}

// queue represents hashes that either need fetching or are being fetched
type queue struct {
    mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

    // Headers are "special", they download in batches, supported by a skeleton chain
    headerHead      common.Hash                    // [eth/62] Hash of the last queued header to verify order
    headerTaskPool  map[uint64]*types.Header       // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers
    headerTaskQueue *prque.Prque                   // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for
    headerPeerMiss  map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable
    headerPendPool  map[string]*fetchRequest       // [eth/62] Currently pending header retrieval operations
    headerResults   []*types.Header                // [eth/62] Result cache accumulating the completed headers
    headerProced    int                            // [eth/62] Number of headers already processed from the results
    headerOffset    uint64                         // [eth/62] Number of the first header in the result cache
    headerContCh    chan bool                      // [eth/62] Channel to notify when header download finishes

    // All data retrievals below are based on an already assembled header chain
    blockTaskPool  map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers
    blockTaskQueue *prque.Prque                  // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for
    blockPendPool  map[string]*fetchRequest      // [eth/62] Currently pending block (body) retrieval operations
    blockDonePool  map[common.Hash]struct{}      // [eth/62] Set of the completed block (body) fetches

    receiptTaskPool  map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers
    receiptTaskQueue *prque.Prque                  // [eth/63] Priority queue of the headers to fetch the receipts for
    receiptPendPool  map[string]*fetchRequest      // [eth/63] Currently pending receipt retrieval operations
    receiptDonePool  map[common.Hash]struct{}      // [eth/63] Set of the completed receipt fetches

    resultCache  []*fetchResult     // Downloaded but not yet delivered fetch results
    resultOffset uint64             // Offset of the first cached fetch result in the block chain
    resultSize   common.StorageSize // Approximate size of a block (exponential moving average)

    lock   *sync.Mutex
    active *sync.Cond
    closed bool
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
    lock := new(sync.Mutex)
    return &queue{
        headerPendPool:   make(map[string]*fetchRequest),
        headerContCh:     make(chan bool),
        blockTaskPool:    make(map[common.Hash]*types.Header),
        blockTaskQueue:   prque.New(),
        blockPendPool:    make(map[string]*fetchRequest),
        blockDonePool:    make(map[common.Hash]struct{}),
        receiptTaskPool:  make(map[common.Hash]*types.Header),
        receiptTaskQueue: prque.New(),
        receiptPendPool:  make(map[string]*fetchRequest),
        receiptDonePool:  make(map[common.Hash]struct{}),
        resultCache:      make([]*fetchResult, blockCacheItems),
        active:           sync.NewCond(lock),
        lock:             lock,
    }
}

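// Note that the header skeleton structures (headerTaskPool, headerTaskQueue,
// headerPeerMiss and headerResults) are deliberately not allocated here; they
// are created lazily by ScheduleSkeleton at the start of each skeleton fill.
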
// Reset clears out the queue contents.
func (q *queue) Reset() {
    q.lock.Lock()
    defer q.lock.Unlock()

    q.closed = false
    q.mode = FullSync

    q.headerHead = common.Hash{}
    q.headerPendPool = make(map[string]*fetchRequest)

    q.blockTaskPool = make(map[common.Hash]*types.Header)
    q.blockTaskQueue.Reset()
    q.blockPendPool = make(map[string]*fetchRequest)
    q.blockDonePool = make(map[common.Hash]struct{})

    q.receiptTaskPool = make(map[common.Hash]*types.Header)
    q.receiptTaskQueue.Reset()
    q.receiptPendPool = make(map[string]*fetchRequest)
    q.receiptDonePool = make(map[common.Hash]struct{})

    q.resultCache = make([]*fetchResult, blockCacheItems)
    q.resultOffset = 0
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
    q.lock.Lock()
    q.closed = true
    q.lock.Unlock()
    q.active.Broadcast()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return len(q.receiptPendPool) > 0
}

// Idle returns whether the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
    pending := len(q.blockPendPool) + len(q.receiptPendPool)
    cached := len(q.blockDonePool) + len(q.receiptDonePool)

    return (queued + pending + cached) == 0
}

// ShouldThrottleBlocks checks if the download should be throttled (active block (body)
// fetches exceed block cache).
func (q *queue) ShouldThrottleBlocks() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.resultSlots(q.blockPendPool, q.blockDonePool) <= 0
}

// ShouldThrottleReceipts checks if the download should be throttled (active receipt
// fetches exceed block cache).
func (q *queue) ShouldThrottleReceipts() bool {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.resultSlots(q.receiptPendPool, q.receiptDonePool) <= 0
}

// resultSlots calculates the number of result slots available for requests
// while adhering to both the item limit and the memory limit of the results
// cache.
func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
    // Calculate the maximum length capped by the memory limit
    limit := len(q.resultCache)
    if common.StorageSize(len(q.resultCache))*q.resultSize > common.StorageSize(blockCacheMemory) {
        limit = int((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
    }
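    // As an illustrative example (the 32KiB average is hypothetical, not a
    // constant from this file): with the default 64MiB blockCacheMemory and a
    // moving-average resultSize of 32KiB, the memory cap works out to
    // 64*1024*1024 / (32*1024) = 2048 slots, well below the 8192-item
    // blockCacheItems ceiling, so memory becomes the effective limiter.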
    // Calculate the number of slots already finished
    finished := 0
    for _, result := range q.resultCache[:limit] {
        if result == nil {
            break
        }
        if _, ok := donePool[result.Hash]; ok {
            finished++
        }
    }
    // Calculate the number of slots currently downloading
    pending := 0
    for _, request := range pendPool {
        for _, header := range request.Headers {
            if header.Number.Uint64() < q.resultOffset+uint64(limit) {
                pending++
            }
        }
    }
    // Return the free slots to distribute
    return limit - finished - pending
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
    if q.headerResults != nil {
        panic("skeleton assembly already in progress")
    }
    // Schedule all the header retrieval tasks for the skeleton assembly
    q.headerTaskPool = make(map[uint64]*types.Header)
    q.headerTaskQueue = prque.New()
    q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
    q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
    q.headerProced = 0
    q.headerOffset = from
    q.headerContCh = make(chan bool, 1)
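    // Each skeleton header terminates a MaxHeaderFetch-sized batch, so batch i
    // is keyed by the number of its first header, from+i*MaxHeaderFetch. Pushing
    // with -index as the priority makes the max-priority prque pop the lowest
    // (i.e. earliest) batches first.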
    for i, header := range skeleton {
        index := from + uint64(i*MaxHeaderFetch)

        q.headerTaskPool[index] = header
        q.headerTaskQueue.Push(index, -float32(index))
    }
}

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
    q.lock.Lock()
    defer q.lock.Unlock()

    headers, proced := q.headerResults, q.headerProced
    q.headerResults, q.headerProced = nil, 0

    return headers, proced
}

// Schedule adds a set of headers to the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Insert all the headers prioritised by the contained block number
    inserts := make([]*types.Header, 0, len(headers))
    for _, header := range headers {
        // Make sure chain order is honoured and preserved throughout
        hash := header.Hash()
        if header.Number == nil || header.Number.Uint64() != from {
            log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
            break
        }
        if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
            log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
            break
        }
        // Make sure no duplicate requests are executed
        if _, ok := q.blockTaskPool[hash]; ok {
            log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
            continue
        }
        if _, ok := q.receiptTaskPool[hash]; ok {
            log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
            continue
        }
        // Queue the header for content retrieval
        q.blockTaskPool[hash] = header
        q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))

        if q.mode == FastSync {
            q.receiptTaskPool[hash] = header
            q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
        }
        inserts = append(inserts, header)
        q.headerHead = hash
        from++
    }
    return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
func (q *queue) Results(block bool) []*fetchResult {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Count the number of items available for processing
    nproc := q.countProcessableItems()
    for nproc == 0 && !q.closed {
        if !block {
            return nil
        }
        q.active.Wait()
        nproc = q.countProcessableItems()
    }
    // Since we have a batch limit, don't pull more into "dangling" memory
    if nproc > maxResultsProcess {
        nproc = maxResultsProcess
    }
    results := make([]*fetchResult, nproc)
    copy(results, q.resultCache[:nproc])
    if len(results) > 0 {
        // Mark results as done before dropping them from the cache.
        for _, result := range results {
            hash := result.Header.Hash()
            delete(q.blockDonePool, hash)
            delete(q.receiptDonePool, hash)
        }
        // Delete the results from the cache and clear the tail.
        copy(q.resultCache, q.resultCache[nproc:])
        for i := len(q.resultCache) - nproc; i < len(q.resultCache); i++ {
            q.resultCache[i] = nil
        }
        // Advance the expected block number of the first cache entry.
        q.resultOffset += uint64(nproc)
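        // The sizes below feed an exponential moving average:
        // resultSize' = w*size + (1-w)*resultSize with w = blockCacheSizeWeight
        // (0.1), so the estimate tracks recent blocks without letting a single
        // outlier block skew the throttling maths in resultSlots.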
        // Recalculate the result item weights to prevent memory exhaustion
        for _, result := range results {
            size := result.Header.Size()
            for _, uncle := range result.Uncles {
                size += uncle.Size()
            }
            for _, receipt := range result.Receipts {
                size += receipt.Size()
            }
            for _, tx := range result.Transactions {
                size += tx.Size()
            }
            q.resultSize = common.StorageSize(blockCacheSizeWeight)*size + (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
        }
    }
    return results
}

// countProcessableItems counts the processable items.
func (q *queue) countProcessableItems() int {
    for i, result := range q.resultCache {
        if result == nil || result.Pending > 0 {
            return i
        }
    }
    return len(q.resultCache)
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Short circuit if the peer's already downloading something (sanity check to
    // not corrupt state)
    if _, ok := q.headerPendPool[p.id]; ok {
        return nil
    }
    // Retrieve a batch of hashes, skipping previously failed ones
    send, skip := uint64(0), []uint64{}
    for send == 0 && !q.headerTaskQueue.Empty() {
        from, _ := q.headerTaskQueue.Pop()
        if q.headerPeerMiss[p.id] != nil {
            if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
                skip = append(skip, from.(uint64))
                continue
            }
        }
        send = from.(uint64)
    }
    // Merge all the skipped batches back
    for _, from := range skip {
        q.headerTaskQueue.Push(from, -float32(from))
    }
    // Assemble and return the block download request
    if send == 0 {
        return nil
    }
    request := &fetchRequest{
        Peer: p,
        From: send,
        Time: time.Now(),
    }
    q.headerPendPool[p.id] = request
    return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Besides the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
    isNoop := func(header *types.Header) bool {
        return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
    }
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, isNoop)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Besides the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
    isNoop := func(header *types.Header) bool {
        return header.ReceiptHash == types.EmptyRootHash
    }
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, isNoop)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
    pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
    // Short circuit if the pool has been depleted, or if the peer's already
    // downloading something (sanity check not to corrupt state)
    if taskQueue.Empty() {
        return nil, false, nil
    }
    if _, ok := pendPool[p.id]; ok {
        return nil, false, nil
    }
    // Calculate an upper limit on the items we might fetch (i.e. throttling)
    space := q.resultSlots(pendPool, donePool)

    // Retrieve a batch of tasks, skipping previously failed ones
    send := make([]*types.Header, 0, count)
    skip := make([]*types.Header, 0)

    progress := false
    for proc := 0; proc < space && len(send) < count && !taskQueue.Empty(); proc++ {
        header := taskQueue.PopItem().(*types.Header)
        hash := header.Hash()

        // If we're the first to request this task, initialise the result container
        index := int(header.Number.Int64() - int64(q.resultOffset))
        if index >= len(q.resultCache) || index < 0 {
            common.Report("index allocation went beyond available resultCache space")
            return nil, false, errInvalidChain
        }
        if q.resultCache[index] == nil {
            components := 1
            if q.mode == FastSync {
                components = 2
            }
            q.resultCache[index] = &fetchResult{
                Pending: components,
                Hash:    hash,
                Header:  header,
            }
        }
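        // A noop task needs no network fetch: its result is complete the moment
        // it is allocated above. It still occupies a result-cache slot, so the
        // available space shrinks by one, while proc is rolled back so the noop
        // does not count against this peer's batch quota.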
        // If this fetch task is a noop, skip this fetch operation
        if isNoop(header) {
            donePool[hash] = struct{}{}
            delete(taskPool, hash)

            space, proc = space-1, proc-1
            q.resultCache[index].Pending--
            progress = true
            continue
        }
        // Otherwise unless the peer is known not to have the data, add to the retrieve list
        if p.Lacks(hash) {
            skip = append(skip, header)
        } else {
            send = append(send, header)
        }
    }
    // Merge all the skipped headers back
    for _, header := range skip {
        taskQueue.Push(header, -float32(header.Number.Uint64()))
    }
    if progress {
        // Wake Results, resultCache was modified
        q.active.Signal()
    }
    // Assemble and return the block download request
    if len(send) == 0 {
        return nil, progress, nil
    }
    request := &fetchRequest{
        Peer:    p,
        Headers: send,
        Time:    time.Now(),
    }
    pendPool[p.id] = request

    return request, progress, nil
}

// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
func (q *queue) CancelHeaders(request *fetchRequest) {
    q.cancel(request, q.headerTaskQueue, q.headerPendPool)
}

// CancelBodies aborts a body fetch request, returning all pending headers to the
// task queue.
func (q *queue) CancelBodies(request *fetchRequest) {
    q.cancel(request, q.blockTaskQueue, q.blockPendPool)
}

// CancelReceipts aborts a receipt fetch request, returning all pending headers to
// the task queue.
func (q *queue) CancelReceipts(request *fetchRequest) {
    q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
}

// cancel aborts a fetch request, returning all pending hashes to the task queue.
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
    q.lock.Lock()
    defer q.lock.Unlock()

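    // A request is either a skeleton fill (From > 0) or a body/receipt fetch
    // (Headers set); both shapes are handed back to their respective task queue.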
    if request.From > 0 {
        taskQueue.Push(request.From, -float32(request.From))
    }
    for _, header := range request.Headers {
        taskQueue.Push(header, -float32(header.Number.Uint64()))
    }
    delete(pendPool, request.Peer.id)
}

// Revoke cancels all pending requests belonging to a given peer. This method is
// meant to be called during a peer drop to quickly reassign owned data fetches
// to remaining nodes.
func (q *queue) Revoke(peerId string) {
    q.lock.Lock()
    defer q.lock.Unlock()

    if request, ok := q.blockPendPool[peerId]; ok {
        for _, header := range request.Headers {
            q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
        }
        delete(q.blockPendPool, peerId)
    }
    if request, ok := q.receiptPendPool[peerId]; ok {
        for _, header := range request.Headers {
            q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
        }
        delete(q.receiptPendPool, peerId)
    }
}

// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
// canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
}

// ExpireBodies checks for in flight block body requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
}

// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
// allowance, canceling them and returning the responsible peers for penalisation.
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
    q.lock.Lock()
    defer q.lock.Unlock()

    return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
}

// expire is the generic check that moves expired tasks from a pending pool back
// into a task pool, returning all entities caught with expired tasks.
//
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
    // Iterate over the expired requests and return each to the queue
    expiries := make(map[string]int)
    for id, request := range pendPool {
        if time.Since(request.Time) > timeout {
            // Update the metrics with the timeout
            timeoutMeter.Mark(1)

            // Return any non satisfied requests to the pool
            if request.From > 0 {
                taskQueue.Push(request.From, -float32(request.From))
            }
            for _, header := range request.Headers {
                taskQueue.Push(header, -float32(header.Number.Uint64()))
            }
            // Add the peer to the expiry report along with the number of failed requests
            expiries[id] = len(request.Headers)
        }
    }
    // Remove the expired requests from the pending pool
    for id := range expiries {
        delete(pendPool, id)
    }
    return expiries
}

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Short circuit if the data was never requested
    request := q.headerPendPool[id]
    if request == nil {
        return 0, errNoFetchesPending
    }
    headerReqTimer.UpdateSince(request.Time)
    delete(q.headerPendPool, id)

    // Ensure headers can be mapped onto the skeleton chain
    target := q.headerTaskPool[request.From].Hash()
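    // The task pool stores the skeleton header that terminates this batch, so a
    // valid fill must contain exactly MaxHeaderFetch headers, start at
    // request.From and end on the skeleton hash checked below.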
    accepted := len(headers) == MaxHeaderFetch
    if accepted {
        if headers[0].Number.Uint64() != request.From {
            log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
            accepted = false
        } else if headers[len(headers)-1].Hash() != target {
            log.Trace("Last header broke skeleton structure", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
            accepted = false
        }
    }
    if accepted {
        for i, header := range headers[1:] {
            hash := header.Hash()
            if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
                log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want)
                accepted = false
                break
            }
            if headers[i].Hash() != header.ParentHash {
                log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash)
                accepted = false
                break
            }
        }
    }
    // If the batch of headers wasn't accepted, mark as unavailable
    if !accepted {
        log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From)

        miss := q.headerPeerMiss[id]
        if miss == nil {
            q.headerPeerMiss[id] = make(map[uint64]struct{})
            miss = q.headerPeerMiss[id]
        }
        miss[request.From] = struct{}{}

        q.headerTaskQueue.Push(request.From, -float32(request.From))
        return 0, errors.New("delivery not accepted")
    }
    // Clean up a successful fetch and try to deliver any sub-results
    copy(q.headerResults[request.From-q.headerOffset:], headers)
    delete(q.headerTaskPool, request.From)

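    // Accepted fills are always full MaxHeaderFetch-sized chunks, so scanning
    // forward in the same stride is safe: a non-nil chunk head implies the whole
    // chunk has already been copied in by some delivery.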
    ready := 0
    for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
        ready += MaxHeaderFetch
    }
    if ready > 0 {
        // Headers are ready for delivery, gather them and push forward (non blocking)
        process := make([]*types.Header, ready)
        copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

        select {
        case headerProcCh <- process:
            log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number)
            q.headerProced += len(process)
        default:
        }
    }
    // Check for termination and return
    if len(q.headerTaskPool) == 0 {
        q.headerContCh <- false
    }
    return len(headers), nil
}

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

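    // Body validity is proven against the header alone: the delivered transaction
    // list must hash (via DeriveSha) to header.TxHash and the uncle list to
    // header.UncleHash, so no additional trust in the peer is required.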
    reconstruct := func(header *types.Header, index int, result *fetchResult) error {
        if types.DeriveSha(types.Transactions(txLists[index])) != header.TxHash || types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
            return errInvalidBody
        }
        result.Transactions = txLists[index]
        result.Uncles = uncleLists[index]
        return nil
    }
    return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, q.blockDonePool, bodyReqTimer, len(txLists), reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

    reconstruct := func(header *types.Header, index int, result *fetchResult) error {
        if types.DeriveSha(types.Receipts(receiptList[index])) != header.ReceiptHash {
            return errInvalidReceipt
        }
        result.Receipts = receiptList[index]
        return nil
    }
    return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, q.receiptDonePool, receiptReqTimer, len(receiptList), reconstruct)
}

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
    pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, reqTimer metrics.Timer,
    results int, reconstruct func(header *types.Header, index int, result *fetchResult) error) (int, error) {
    // Short circuit if the data was never requested
    request := pendPool[id]
    if request == nil {
        return 0, errNoFetchesPending
    }
    reqTimer.UpdateSince(request.Time)
    delete(pendPool, id)

    // If no data items were retrieved, mark them as unavailable for the origin peer
    if results == 0 {
        for _, header := range request.Headers {
            request.Peer.MarkLacking(header.Hash())
        }
    }
    // Assemble each of the results with their headers and retrieved data parts
    var (
        accepted int
        failure  error
        useful   bool
    )
    for i, header := range request.Headers {
        // Short circuit assembly if no more fetch results are found
        if i >= results {
            break
        }
        // Reconstruct the next result if contents match up
        index := int(header.Number.Int64() - int64(q.resultOffset))
        if index >= len(q.resultCache) || index < 0 || q.resultCache[index] == nil {
            failure = errInvalidChain
            break
        }
        if err := reconstruct(header, i, q.resultCache[index]); err != nil {
            failure = err
            break
        }
        hash := header.Hash()

        donePool[hash] = struct{}{}
        q.resultCache[index].Pending--
        useful = true
        accepted++

        // Clean up a successful fetch
        request.Headers[i] = nil
        delete(taskPool, hash)
    }
    // Return all failed or missing fetches to the queue
    for _, header := range request.Headers {
        if header != nil {
            taskQueue.Push(header, -float32(header.Number.Uint64()))
        }
    }
    // Wake up Results
    if accepted > 0 {
        q.active.Signal()
    }
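    // Three-way verdict: a nil error or errInvalidChain is returned verbatim so
    // the caller can penalise the offending chain itself, a partially useful
    // delivery is wrapped as a partial failure, and anything else is stale.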
    // If none of the data was good, it's a stale delivery
    switch {
    case failure == nil || failure == errInvalidChain:
        return accepted, failure
    case useful:
        return accepted, fmt.Errorf("partial failure: %v", failure)
    default:
        return accepted, errStaleDelivery
    }
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
    q.lock.Lock()
    defer q.lock.Unlock()

    // Prepare the queue for sync results
    if q.resultOffset < offset {
        q.resultOffset = offset
    }
    q.mode = mode
}