Analysis of Ethereum Block Design (Part 2)
By Al1ex · Blockchain Security · 2024-01-23 09:11

Preface

This article continues "Analysis of Ethereum Block Design (Part 1)" and supplements its analysis of Ethereum's block design; the material was too long to fit into a single post.

Chain Construction

During Ethereum's startup, NewBlockChain is called to create the blockchain. The call path is as follows:

geth -> makeFullNode -> RegisterEthService -> eth.New -> core.NewBlockChain

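For orientation, here is a hedged, minimal sketch of that wiring: RegisterEthService essentially hands the node stack and an eth config to eth.New. The real geth command performs considerably more setup; this compresses it to the two calls that matter here, using 1.10.x package paths:

package main

import (
    "github.com/ethereum/go-ethereum/eth"
    "github.com/ethereum/go-ethereum/eth/ethconfig"
    "github.com/ethereum/go-ethereum/node"
)

func main() {
    // An empty DataDir gives an ephemeral node stack with in-memory databases.
    stack, err := node.New(&node.Config{})
    if err != nil {
        panic(err)
    }
    defer stack.Close()

    // eth.New assembles the whole backend: chain DB, genesis, blockchain,
    // transaction pool, gas price oracle, handler, miner and APIs.
    backend, err := eth.New(stack, &ethconfig.Defaults)
    if err != nil {
        panic(err)
    }
    _ = backend
}
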
When New creates an Ethereum instance object, it calls SetupGenesisBlockWithOverride to load the genesis block and obtain the basic chain configuration, ReadDatabaseVersion to read the database version, NewBlockChain to construct the Ethereum blockchain, NewTxPool to create a transaction pool, NewOracle to set up the gas price oracle, and so on:

// filedir:go-ethereum-1.10.2\eth\backend.go    L98
// New creates a new Ethereum object (including the
// initialisation of the common Ethereum object)
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
    // Ensure configuration values are compatible and sane
    if config.SyncMode == downloader.LightSync {
        return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
    }
    if !config.SyncMode.IsValid() {
        return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
    }
    if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 {
        log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
        config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
    }
    if config.NoPruning && config.TrieDirtyCache > 0 {
        if config.SnapshotCache > 0 {
            config.TrieCleanCache += config.TrieDirtyCache * 3 / 5
            config.SnapshotCache += config.TrieDirtyCache * 2 / 5
        } else {
            config.TrieCleanCache += config.TrieDirtyCache
        }
        config.TrieDirtyCache = 0
    }
    log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)

    // Transfer mining-related config to the ethash config.
    ethashConfig := config.Ethash
    ethashConfig.NotifyFull = config.Miner.NotifyFull

    // Assemble the Ethereum object
    chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false)
    if err != nil {
        return nil, err
    }
    chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin)
    if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
        return nil, genesisErr
    }
    log.Info("Initialised chain configuration", "config", chainConfig)

    if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
        log.Error("Failed to recover state", "error", err)
    }
    eth := &Ethereum{
        config:            config,
        chainDb:           chainDb,
        eventMux:          stack.EventMux(),
        accountManager:    stack.AccountManager(),
        engine:            ethconfig.CreateConsensusEngine(stack, chainConfig, &ethashConfig, config.Miner.Notify, config.Miner.Noverify, chainDb),
        closeBloomHandler: make(chan struct{}),
        networkID:         config.NetworkId,
        gasPrice:          config.Miner.GasPrice,
        etherbase:         config.Miner.Etherbase,
        bloomRequests:     make(chan chan *bloombits.Retrieval),
        bloomIndexer:      core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),
        p2pServer:         stack.Server(),
    }

    bcVersion := rawdb.ReadDatabaseVersion(chainDb)
    var dbVer = "<nil>"
    if bcVersion != nil {
        dbVer = fmt.Sprintf("%d", *bcVersion)
    }
    log.Info("Initialising Ethereum protocol", "network", config.NetworkId, "dbversion", dbVer)

    if !config.SkipBcVersionCheck {
        if bcVersion != nil && *bcVersion > core.BlockChainVersion {
            return nil, fmt.Errorf("database version is v%d, Geth %s only supports v%d", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)
        } else if bcVersion == nil || *bcVersion < core.BlockChainVersion {
            log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
            rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
        }
    }
    var (
        vmConfig = vm.Config{
            EnablePreimageRecording: config.EnablePreimageRecording,
            EWASMInterpreter:        config.EWASMInterpreter,
            EVMInterpreter:          config.EVMInterpreter,
        }
        cacheConfig = &core.CacheConfig{
            TrieCleanLimit:      config.TrieCleanCache,
            TrieCleanJournal:    stack.ResolvePath(config.TrieCleanCacheJournal),
            TrieCleanRejournal:  config.TrieCleanCacheRejournal,
            TrieCleanNoPrefetch: config.NoPrefetch,
            TrieDirtyLimit:      config.TrieDirtyCache,
            TrieDirtyDisabled:   config.NoPruning,
            TrieTimeLimit:       config.TrieTimeout,
            SnapshotLimit:       config.SnapshotCache,
            Preimages:           config.Preimages,
        }
    )
    eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
    if err != nil {
        return nil, err
    }
    // Rewind the chain in case of an incompatible config upgrade.
    if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
        log.Warn("Rewinding chain to upgrade configuration", "err", compat)
        eth.blockchain.SetHead(compat.RewindTo)
        rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
    }
    eth.bloomIndexer.Start(eth.blockchain)

    if config.TxPool.Journal != "" {
        config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
    }
    eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain)

    // Permit the downloader to use the trie cache allowance during fast sync
    cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit
    checkpoint := config.Checkpoint
    if checkpoint == nil {
        checkpoint = params.TrustedCheckpoints[genesisHash]
    }
    if eth.handler, err = newHandler(&handlerConfig{
        Database:   chainDb,
        Chain:      eth.blockchain,
        TxPool:     eth.txPool,
        Network:    config.NetworkId,
        Sync:       config.SyncMode,
        BloomCache: uint64(cacheLimit),
        EventMux:   eth.eventMux,
        Checkpoint: checkpoint,
        Whitelist:  config.Whitelist,
    }); err != nil {
        return nil, err
    }
    eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
    eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))

    eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
    if eth.APIBackend.allowUnprotectedTxs {
        log.Info("Unprotected transactions allowed")
    }
    gpoParams := config.GPO
    if gpoParams.Default == nil {
        gpoParams.Default = config.Miner.GasPrice
    }
    eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)

    eth.ethDialCandidates, err = setupDiscovery(eth.config.EthDiscoveryURLs)
    if err != nil {
        return nil, err
    }
    eth.snapDialCandidates, err = setupDiscovery(eth.config.SnapDiscoveryURLs)
    if err != nil {
        return nil, err
    }
    // Start the RPC service
    eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId)

    // Register the backend on the node
    stack.RegisterAPIs(eth.APIs())
    stack.RegisterProtocols(eth.Protocols())
    stack.RegisterLifecycle(eth)
    // Check for unclean shutdown
    if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {
        log.Error("Could not update unclean-shutdown-marker list", "error", err)
    } else {
        if discards > 0 {
            log.Warn("Old unclean shutdowns found", "count", discards)
        }
        for _, tstamp := range uncleanShutdowns {
            t := time.Unix(int64(tstamp), 0)
            log.Warn("Unclean shutdown detected", "booted", t,
                "age", common.PrettyAge(t))
        }
    }
    return eth, nil
}

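The database version check near the top of New relies on two small rawdb helpers; a minimal round-trip over an in-memory database (the version value below is an arbitrary demo value):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()

    // A fresh database has no version entry: ReadDatabaseVersion returns nil,
    // which New logs as "<nil>" and then upgrades to core.BlockChainVersion.
    if v := rawdb.ReadDatabaseVersion(db); v == nil {
        fmt.Println("no version recorded yet")
    }

    rawdb.WriteDatabaseVersion(db, 8) // demo value; geth writes core.BlockChainVersion
    fmt.Println("stored version:", *rawdb.ReadDatabaseVersion(db)) // 8
}
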
The NewBlockChain function returns a fully initialised blockchain using the information available in the database. It mainly does the following:

  • Creates the various LRU (least recently used) caches; a short usage sketch follows this list
  • Initialises triegc (a priority queue of block numbers used for trie garbage collection), stateCache, NewBlockValidator(), newStatePrefetcher() and NewStateProcessor()
  • NewHeaderChain() initialises the header chain
  • bc.genesisBlock = bc.GetBlockByNumber(0) fetches the genesis block
  • bc.loadLastState() loads the latest chain state
  • Checks whether the local chain contains a bad block; if so, calls bc.SetHead to rewind to the header before the hard fork
  • go bc.update() periodically processes future blocks
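
The LRU caches come from the github.com/hashicorp/golang-lru package that geth itself imports. Below is a minimal, standalone sketch of how such a cache behaves; the capacity and keys are arbitrary demo values, not geth's:

package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    cache, _ := lru.New(2) // capacity 2: the least recently used entry is evicted
    cache.Add("block-1", "body-1")
    cache.Add("block-2", "body-2")
    cache.Add("block-3", "body-3") // evicts "block-1"

    if _, ok := cache.Get("block-1"); !ok {
        fmt.Println("block-1 was evicted")
    }
    if v, ok := cache.Get("block-3"); ok {
        fmt.Println("block-3 =>", v)
    }
}

The full implementation of NewBlockChain is as follows:
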
// filedir:go-ethereum-1.10.2\core\blockchain.go L214
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
    if cacheConfig == nil {
        cacheConfig = defaultCacheConfig
    }
    bodyCache, _ := lru.New(bodyCacheLimit)
    bodyRLPCache, _ := lru.New(bodyCacheLimit)
    receiptsCache, _ := lru.New(receiptsCacheLimit)
    blockCache, _ := lru.New(blockCacheLimit)
    txLookupCache, _ := lru.New(txLookupCacheLimit)
    futureBlocks, _ := lru.New(maxFutureBlocks)

    bc := &BlockChain{
        chainConfig: chainConfig,
        cacheConfig: cacheConfig,
        db:          db,
        triegc:      prque.New(nil),
        stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
            Cache:     cacheConfig.TrieCleanLimit,
            Journal:   cacheConfig.TrieCleanJournal,
            Preimages: cacheConfig.Preimages,
        }),
        quit:           make(chan struct{}),
        shouldPreserve: shouldPreserve,
        bodyCache:      bodyCache,
        bodyRLPCache:   bodyRLPCache,
        receiptsCache:  receiptsCache,
        blockCache:     blockCache,
        txLookupCache:  txLookupCache,
        futureBlocks:   futureBlocks,
        engine:         engine,
        vmConfig:       vmConfig,
    }
    bc.validator = NewBlockValidator(chainConfig, bc, engine)
    bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
    bc.processor = NewStateProcessor(chainConfig, bc, engine)

    var err error
    bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
    if err != nil {
        return nil, err
    }
    bc.genesisBlock = bc.GetBlockByNumber(0)  
    if bc.genesisBlock == nil {
        return nil, ErrNoGenesis
    }

    var nilBlock *types.Block
    bc.currentBlock.Store(nilBlock)
    bc.currentFastBlock.Store(nilBlock)

    // Initialize the chain with ancient data if it isn't empty.
    var txIndexBlock uint64

    if bc.empty() {
        rawdb.InitDatabaseFromFreezer(bc.db)
        // If ancient database is not empty, reconstruct all missing
        // indices in the background.
        frozen, _ := bc.db.Ancients()
        if frozen > 0 {
            txIndexBlock = frozen
        }
    }
    if err := bc.loadLastState(); err != nil {      // load the latest chain state
        return nil, err
    }
    // Make sure the state associated with the block is available
    head := bc.CurrentBlock()
    if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
        // Head state is missing, before the state recovery, find out the
        // disk layer point of snapshot(if it's enabled). Make sure the
        // rewound point is lower than disk layer.
        var diskRoot common.Hash
        if bc.cacheConfig.SnapshotLimit > 0 {
            diskRoot = rawdb.ReadSnapshotRoot(bc.db)
        }
        if diskRoot != (common.Hash{}) {
            log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

            snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
            if err != nil {
                return nil, err
            }
            // Chain rewound, persist old snapshot number to indicate recovery procedure
            if snapDisk != 0 {
                rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
            }
        } else {
            log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
            if err := bc.SetHead(head.NumberU64()); err != nil {
                return nil, err
            }
        }
    }
    // Ensure that a previous crash in SetHead doesn't leave extra ancients
    if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
        var (
            needRewind bool
            low        uint64
        )
        // The head full block may be rolled back to a very low height due to
        // blockchain repair. If the head full block is even lower than the ancient
        // chain, truncate the ancient store.
        fullBlock := bc.CurrentBlock()
        if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
            needRewind = true
            low = fullBlock.NumberU64()
        }
        // In fast sync, it may happen that ancient data has been written to the
        // ancient store, but the LastFastBlock has not been updated, truncate the
        // extra data here.
        fastBlock := bc.CurrentFastBlock()
        if fastBlock != nil && fastBlock.NumberU64() < frozen-1 {
            needRewind = true
            if fastBlock.NumberU64() < low || low == 0 {
                low = fastBlock.NumberU64()
            }
        }
        if needRewind {
            log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
            if err := bc.SetHead(low); err != nil {
                return nil, err
            }
        }
    }
    // The first thing the node will do is reconstruct the verification data for
    // the head block (ethash cache or clique voting snapshot). Might as well do
    // it in advance.
    bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)

    // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
    for hash := range BadHashes {
        if header := bc.GetHeaderByHash(hash); header != nil {
            // get the canonical block corresponding to the offending header's number
            headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
            // make sure the headerByNumber (if present) is in our current canonical chain
            if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
                log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
                if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
                    return nil, err
                }
                log.Error("Chain rewind was successful, resuming normal operation")
            }
        }
    }
    // Load any existing snapshot, regenerating it if loading failed
    if bc.cacheConfig.SnapshotLimit > 0 {
        // If the chain was rewound past the snapshot persistent layer (causing
        // a recovery block number to be persisted to disk), check if we're still
        // in recovery mode and in that case, don't invalidate the snapshot on a
        // head mismatch.
        var recover bool

        head := bc.CurrentBlock()
        if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
            log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
            recover = true
        }
        bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
    }
    // Take ownership of this particular state
    go bc.update()
    if txLookupLimit != nil {
        bc.txLookupLimit = *txLookupLimit

        bc.wg.Add(1)
        go bc.maintainTxIndex(txIndexBlock)
    }
    // If periodic cache journal is required, spin it up.
    if bc.cacheConfig.TrieCleanRejournal > 0 {
        if bc.cacheConfig.TrieCleanRejournal < time.Minute {
            log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
            bc.cacheConfig.TrieCleanRejournal = time.Minute
        }
        triedb := bc.stateCache.TrieDB()
        bc.wg.Add(1)
        go func() {
            defer bc.wg.Done()
            triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
        }()
    }
    return bc, nil
}

这里的"bc.genesisBlock = bc.GetBlockByNumber(0)"用于检索创世区块是否存在,如果不存在则直接return:

bc.genesisBlock = bc.GetBlockByNumber(0)
    if bc.genesisBlock == nil {
        return nil, ErrNoGenesis
    }

Next, it checks whether the block data is empty; if so, it calls InitDatabaseFromFreezer to initialise the database once from the previously frozen block data, and then resets txIndexBlock:

if bc.empty() {
        rawdb.InitDatabaseFromFreezer(bc.db)
        // If ancient database is not empty, reconstruct all missing
        // indices in the background.
        frozen, _ := bc.db.Ancients()
        if frozen > 0 {
            txIndexBlock = frozen
        }
    }

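For reference, here is bc.empty(), paraphrased from the same file (core/blockchain.go): the chain counts as empty only when the head header, head block and head fast block markers all still point at the genesis hash:

// Paraphrased sketch of bc.empty() (core/blockchain.go, 1.10.x layout).
func (bc *BlockChain) empty() bool {
    genesis := bc.genesisBlock.Hash()
    for _, hash := range []common.Hash{
        rawdb.ReadHeadBlockHash(bc.db),
        rawdb.ReadHeadHeaderHash(bc.db),
        rawdb.ReadHeadFastBlockHash(bc.db),
    } {
        if hash != genesis {
            return false // some head marker has moved past genesis: not empty
        }
    }
    return true
}
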
loadLastState loads the latest chain state: it first reads the latest head block hash, then checks whether the database is empty or corrupt, in which case it calls bc.Reset to reset the chain; it then restores the head header and head fast block and records the results in the logs:

// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (bc *BlockChain) loadLastState() error {
    // Restore the last known head block
    head := rawdb.ReadHeadBlockHash(bc.db)
    if head == (common.Hash{}) {
        // Corrupt or empty database, init from scratch
        log.Warn("Empty database, resetting chain")
        return bc.Reset()
    }
    // Make sure the entire head block is available
    currentBlock := bc.GetBlockByHash(head)
    if currentBlock == nil {
        // Corrupt or empty database, init from scratch
        log.Warn("Head block missing, resetting chain", "hash", head)
        return bc.Reset()
    }
    // Everything seems to be fine, set as the head block
    bc.currentBlock.Store(currentBlock)
    headBlockGauge.Update(int64(currentBlock.NumberU64()))

    // Restore the last known head header
    currentHeader := currentBlock.Header()
    if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
        if header := bc.GetHeaderByHash(head); header != nil {
            currentHeader = header
        }
    }
    bc.hc.SetCurrentHeader(currentHeader)

    // Restore the last known head fast block
    bc.currentFastBlock.Store(currentBlock)
    headFastBlockGauge.Update(int64(currentBlock.NumberU64()))

    if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
        if block := bc.GetBlockByHash(head); block != nil {
            bc.currentFastBlock.Store(block)
            headFastBlockGauge.Update(int64(block.NumberU64()))
        }
    }
    // Issue a status log for the user
    currentFastBlock := bc.CurrentFastBlock()

    headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
    blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
    fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())

    log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
    log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
    log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
    if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
        log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
    }
    return nil
}

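The head pointers that loadLastState consumes are plain key/value markers maintained through the rawdb accessors; a minimal round-trip over an in-memory database (the hash below is an arbitrary demo value):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()

    // An unset head marker reads back as the zero hash; loadLastState treats
    // that as an empty/corrupt database and falls back to bc.Reset().
    fmt.Println(rawdb.ReadHeadBlockHash(db) == (common.Hash{})) // true

    head := common.HexToHash("0xdeadbeef") // arbitrary demo value
    rawdb.WriteHeadBlockHash(db, head)
    fmt.Println(rawdb.ReadHeadBlockHash(db) == head) // true
}
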
bc.SetHead() delegates the actual work to bc.SetHeadBeyondRoot. Within SetHeadBeyondRoot, updateFn performs the head updates while delFn wipes the data and caches belonging to the headers being rolled back; at the end, the function purges the in-memory caches and calls bc.loadLastState() to reload the latest local state:

// filedir:go-ethereum-1.10.2\core\blockchain.go    L476
// SetHead rewinds the local chain to a new head. Depending on whether the node
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
    _, err := bc.SetHeadBeyondRoot(head, common.Hash{})
    return err
}

// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewiding with snapshots enabled to ensure that we go back further than
// persistent disk layer. Depending on whether the node was fast synced or full, and
// in which state, the method will try to delete minimal data from disk whilst
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
    bc.chainmu.Lock()
    defer bc.chainmu.Unlock()

    // Track the block number of the requested root hash
    var rootNumber uint64 // (no root == always 0)

    // Retrieve the last pivot block to short circuit rollbacks beyond it and the
    // current freezer limit to start nuking id underflown
    pivot := rawdb.ReadLastPivotNumber(bc.db)
    frozen, _ := bc.db.Ancients()

    updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
        // Rewind the block chain, ensuring we don't end up with a stateless head
        // block. Note, depth equality is permitted to allow using SetHead as a
        // chain reparation mechanism without deleting any data!
        if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
            newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
            if newHeadBlock == nil {
                log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
                newHeadBlock = bc.genesisBlock
            } else {
                // Block exists, keep rewinding until we find one with state,
                // keeping rewinding until we exceed the optional threshold
                // root hash
                beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true)

                for {
                    // If a root threshold was requested but not yet crossed, check
                    if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
                        beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
                    }
                    if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
                        log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
                        if pivot == nil || newHeadBlock.NumberU64() > *pivot {
                            parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
                            if parent != nil {
                                newHeadBlock = parent
                                continue
                            }
                            log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash())
                            newHeadBlock = bc.genesisBlock
                        } else {
                            log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
                            newHeadBlock = bc.genesisBlock
                        }
                    }
                    if beyondRoot || newHeadBlock.NumberU64() == 0 {
                        log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
                        break
                    }
                    log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
                    newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) // Keep rewinding
                }
            }
            rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())

            // Degrade the chain markers if they are explicitly reverted.
            // In theory we should update all in-memory markers in the
            // last step, however the direction of SetHead is from high
            // to low, so it's safe the update in-memory markers directly.
            bc.currentBlock.Store(newHeadBlock)
            headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
        }
        // Rewind the fast block in a simpleton way to the target head
        if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
            newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
            // If either blocks reached nil, reset to the genesis state
            if newHeadFastBlock == nil {
                newHeadFastBlock = bc.genesisBlock
            }
            rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())

            // Degrade the chain markers if they are explicitly reverted.
            // In theory we should update all in-memory markers in the
            // last step, however the direction of SetHead is from high
            // to low, so it's safe the update in-memory markers directly.
            bc.currentFastBlock.Store(newHeadFastBlock)
            headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
        }
        head := bc.CurrentBlock().NumberU64()

        // If setHead underflown the freezer threshold and the block processing
        // intent afterwards is full block importing, delete the chain segment
        // between the stateful-block and the sethead target.
        var wipe bool
        if head+1 < frozen {
            wipe = pivot == nil || head >= *pivot
        }
        return head, wipe // Only force wipe if full synced
    }
    // Rewind the header chain, deleting all block bodies until then
    delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
        // Ignore the error here since light client won't hit this path
        frozen, _ := bc.db.Ancients()
        if num+1 <= frozen {
            // Truncate all relative data(header, total difficulty, body, receipt
            // and canonical hash) from ancient store.
            if err := bc.db.TruncateAncients(num); err != nil {
                log.Crit("Failed to truncate ancient data", "number", num, "err", err)
            }
            // Remove the hash <-> number mapping from the active store.
            rawdb.DeleteHeaderNumber(db, hash)
        } else {
            // Remove relative body and receipts from the active store.
            // The header, total difficulty and canonical hash will be
            // removed in the hc.SetHead function.
            rawdb.DeleteBody(db, hash, num)
            rawdb.DeleteReceipts(db, hash, num)
        }
        // Todo(rjl493456442) txlookup, bloombits, etc
    }
    // If SetHead was only called as a chain reparation method, try to skip
    // touching the header chain altogether, unless the freezer is broken
    if block := bc.CurrentBlock(); block.NumberU64() == head {
        if target, force := updateFn(bc.db, block.Header()); force {
            bc.hc.SetHead(target, updateFn, delFn)
        }
    } else {
        // Rewind the chain to the requested head and keep going backwards until a
        // block with a state is found or fast sync pivot is passed
        log.Warn("Rewinding blockchain", "target", head)
        bc.hc.SetHead(head, updateFn, delFn)
    }
    // Clear out any stale content from the caches
    bc.bodyCache.Purge()
    bc.bodyRLPCache.Purge()
    bc.receiptsCache.Purge()
    bc.blockCache.Purge()
    bc.txLookupCache.Purge()
    bc.futureBlocks.Purge()

    return rootNumber, bc.loadLastState()
}

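To make the rewind concrete, here is a hedged, self-contained sketch built on go-ethereum 1.10.x test helpers (ethash.NewFaker and core.GenerateChain; the 10-block chain and the rewind target are arbitrary choices for illustration). It grows an in-memory chain and then rewinds it with SetHead:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    gspec := &core.Genesis{Config: params.TestChainConfig}
    genesis := gspec.MustCommit(db)

    engine := ethash.NewFaker() // fake PoW engine: accepts headers without mining
    blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, 10, nil)

    chain, err := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
    if err != nil {
        panic(err)
    }
    defer chain.Stop()

    if _, err := chain.InsertChain(blocks); err != nil {
        panic(err)
    }
    fmt.Println("head before rewind:", chain.CurrentBlock().NumberU64()) // 10

    if err := chain.SetHead(5); err != nil { // head markers move back, stale data is wiped
        panic(err)
    }
    fmt.Println("head after rewind:", chain.CurrentBlock().NumberU64()) // 5
}
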
SetHeadBeyondRoot therefore ends by re-running loadLastState, whose listing appeared above: it reads the head block hash, restores currentBlock, currentHeader and currentFastBlock, and logs the recovered heads.

After that, the node rebuilds the verification data for the head block (the ethash cache or the clique voting snapshot), and "go bc.update()" is spun up to process future blocks on a timer:

func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
    if cacheConfig == nil {
     ......
    // Load any existing snapshot, regenerating it if loading failed
    if bc.cacheConfig.SnapshotLimit > 0 {
        // If the chain was rewound past the snapshot persistent layer (causing
        // a recovery block number to be persisted to disk), check if we're still
        // in recovery mode and in that case, don't invalidate the snapshot on a
        // head mismatch.
        var recover bool

        head := bc.CurrentBlock()
        if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
            log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
            recover = true
        }
        bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
    }
    // Take ownership of this particular state
    go bc.update()
    if txLookupLimit != nil {
        bc.txLookupLimit = *txLookupLimit

        bc.wg.Add(1)
        go bc.maintainTxIndex(txIndexBlock)
    }
    // If periodic cache journal is required, spin it up.
    if bc.cacheConfig.TrieCleanRejournal > 0 {
        if bc.cacheConfig.TrieCleanRejournal < time.Minute {
            log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
            bc.cacheConfig.TrieCleanRejournal = time.Minute
        }
        triedb := bc.stateCache.TrieDB()
        bc.wg.Add(1)
        go func() {
            defer bc.wg.Done()
            triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
        }()
    }
    return bc, nil
}

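bc.update itself is small; paraphrased from core/blockchain.go, it is a ticker loop that retries the queued future blocks every five seconds until the chain shuts down:

// Paraphrased sketch of bc.update() (core/blockchain.go).
func (bc *BlockChain) update() {
    futureTimer := time.NewTicker(5 * time.Second)
    defer futureTimer.Stop()
    for {
        select {
        case <-futureTimer.C:
            bc.procFutureBlocks() // retry inserting blocks that arrived "from the future"
        case <-bc.quit:
            return // chain is shutting down
        }
    }
}
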
Fork Handling

As a supplement, let's look at the reorg() function, which handles forks: its job is to promote what was previously a side chain to be the canonical chain:

// filedir:go-ethereum-1.10.2\core\blockchain.go L2124
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
    var (
        newChain    types.Blocks
        oldChain    types.Blocks
        commonBlock *types.Block

        deletedTxs types.Transactions
        addedTxs   types.Transactions

        deletedLogs [][]*types.Log
        rebirthLogs [][]*types.Log

        // collectLogs collects the logs that were generated or removed during
        // the processing of the block that corresponds with the given hash.
        // These logs are later announced as deleted or reborn
        collectLogs = func(hash common.Hash, removed bool) {
            number := bc.hc.GetBlockNumber(hash)
            if number == nil {
                return
            }
            receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)

            var logs []*types.Log
            for _, receipt := range receipts {
                for _, log := range receipt.Logs {
                    l := *log
                    if removed {
                        l.Removed = true
                    } else {
                    }
                    logs = append(logs, &l)
                }
            }
            if len(logs) > 0 {
                if removed {
                    deletedLogs = append(deletedLogs, logs)
                } else {
                    rebirthLogs = append(rebirthLogs, logs)
                }
            }
        }
        // mergeLogs returns a merged log slice with specified sort order.
        mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
            var ret []*types.Log
            if reverse {
                for i := len(logs) - 1; i >= 0; i-- {
                    ret = append(ret, logs[i]...)
                }
            } else {
                for i := 0; i < len(logs); i++ {
                    ret = append(ret, logs[i]...)
                }
            }
            return ret
        }
    )
    // Reduce the longer chain to the same number as the shorter one
    if oldBlock.NumberU64() > newBlock.NumberU64() {
        // Old chain is longer, gather all transactions and logs as deleted ones
        for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
            oldChain = append(oldChain, oldBlock)
            deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
            collectLogs(oldBlock.Hash(), true)
        }
    } else {
        // New chain is longer, stash all blocks away for subsequent insertion
        for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
            newChain = append(newChain, newBlock)
        }
    }
    if oldBlock == nil {
        return fmt.Errorf("invalid old chain")
    }
    if newBlock == nil {
        return fmt.Errorf("invalid new chain")
    }
    // Both sides of the reorg are at the same number, reduce both until the common
    // ancestor is found
    for {
        // If the common ancestor was found, bail out
        if oldBlock.Hash() == newBlock.Hash() {
            commonBlock = oldBlock
            break
        }
        // Remove an old block as well as stash away a new block
        oldChain = append(oldChain, oldBlock)
        deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
        collectLogs(oldBlock.Hash(), true)

        newChain = append(newChain, newBlock)

        // Step back with both chains
        oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
        if oldBlock == nil {
            return fmt.Errorf("invalid old chain")
        }
        newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
        if newBlock == nil {
            return fmt.Errorf("invalid new chain")
        }
    }
    // Ensure the user sees large reorgs
    if len(oldChain) > 0 && len(newChain) > 0 {
        logFn := log.Info
        msg := "Chain reorg detected"
        if len(oldChain) > 63 {
            msg = "Large chain reorg detected"
            logFn = log.Warn
        }
        logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
            "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
        blockReorgAddMeter.Mark(int64(len(newChain)))
        blockReorgDropMeter.Mark(int64(len(oldChain)))
        blockReorgMeter.Mark(1)
    } else {
        log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
    }
    // Insert the new chain(except the head block(reverse order)),
    // taking care of the proper incremental order.
    for i := len(newChain) - 1; i >= 1; i-- {
        // Insert the block in the canonical way, re-writing history
        bc.writeHeadBlock(newChain[i])

        // Collect reborn logs due to chain reorg
        collectLogs(newChain[i].Hash(), false)

        // Collect the new added transactions.
        addedTxs = append(addedTxs, newChain[i].Transactions()...)
    }
    // Delete useless indexes right now which includes the non-canonical
    // transaction indexes, canonical chain indexes which above the head.
    indexesBatch := bc.db.NewBatch()
    for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
        rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
    }
    // Delete any canonical number assignments above the new head
    number := bc.CurrentBlock().NumberU64()
    for i := number + 1; ; i++ {
        hash := rawdb.ReadCanonicalHash(bc.db, i)
        if hash == (common.Hash{}) {
            break
        }
        rawdb.DeleteCanonicalHash(indexesBatch, i)
    }
    if err := indexesBatch.Write(); err != nil {
        log.Crit("Failed to delete useless indexes", "err", err)
    }
    // If any logs need to be fired, do it now. In theory we could avoid creating
    // this goroutine if there are no events to fire, but realistcally that only
    // ever happens if we're reorging empty blocks, which will only happen on idle
    // networks where performance is not an issue either way.
    if len(deletedLogs) > 0 {
        bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
    }
    if len(rebirthLogs) > 0 {
        bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
    }
    if len(oldChain) > 0 {
        for i := len(oldChain) - 1; i >= 0; i-- {
            bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
        }
    }
    return nil
}

The main flow is as follows:
Step 1: Find the common ancestor of the new and old chains. If the old branch is higher than the new branch, walk the old branch back until the heights match, collecting the transactions and logs of every dropped block along the way; if the new branch is higher, walk the new branch back instead. Once both sides are at the same height, step both chains back in lockstep until the common ancestor is found, still collecting transactions and logs.

// Reduce the longer chain to the same number as the shorter one
    if oldBlock.NumberU64() > newBlock.NumberU64() {
        // Old chain is longer, gather all transactions and logs as deleted ones
        for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
            oldChain = append(oldChain, oldBlock)
            deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
            collectLogs(oldBlock.Hash(), true)
        }
    } else {
        // New chain is longer, stash all blocks away for subsequent insertion
        for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
            newChain = append(newChain, newBlock)
        }
    }
    if oldBlock == nil {
        return fmt.Errorf("invalid old chain")
    }
    if newBlock == nil {
        return fmt.Errorf("invalid new chain")
    }
    // Both sides of the reorg are at the same number, reduce both until the common
    // ancestor is found
    for {
        // If the common ancestor was found, bail out
        if oldBlock.Hash() == newBlock.Hash() {
            commonBlock = oldBlock
            break
        }
        // Remove an old block as well as stash away a new block
        oldChain = append(oldChain, oldBlock)
        deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
        collectLogs(oldBlock.Hash(), true)

        newChain = append(newChain, newBlock)

        // Step back with both chains
        oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
        if oldBlock == nil {
            return fmt.Errorf("invalid old chain")
        }
        newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
        if newBlock == nil {
            return fmt.Errorf("invalid new chain")
        }
    }
    // Ensure the user sees large reorgs
    if len(oldChain) > 0 && len(newChain) > 0 {
        logFn := log.Info
        msg := "Chain reorg detected"
        if len(oldChain) > 63 {
            msg = "Large chain reorg detected"
            logFn = log.Warn
        }
        logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(),
            "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
        blockReorgAddMeter.Mark(int64(len(newChain)))
        blockReorgDropMeter.Mark(int64(len(oldChain)))
        blockReorgMeter.Mark(1)
    } else {
        log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
    }

Step 2: Insert the new chain into the canonical chain, block by block, collecting every transaction that enters the canonical chain along the way

// Insert the new chain(except the head block(reverse order)),
    // taking care of the proper incremental order.
    for i := len(newChain) - 1; i >= 1; i-- {
        // Insert the block in the canonical way, re-writing history
        bc.writeHeadBlock(newChain[i])

        // Collect reborn logs due to chain reorg
        collectLogs(newChain[i].Hash(), false)

        // Collect the new added transactions.
        addedTxs = append(addedTxs, newChain[i].Transactions()...)
    }

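Step 2 rests on writeHeadBlock, which actually rewires the canonical chain. Paraphrased from core/blockchain.go (helper names as in 1.10.x; treat the exact batch layout as approximate), it batches the canonical-hash mapping, the transaction lookup entries and the head markers, then updates the in-memory markers:

// Paraphrased sketch of writeHeadBlock (core/blockchain.go).
func (bc *BlockChain) writeHeadBlock(block *types.Block) {
    // If the block was on a side chain, the header/fast-block heads move too.
    updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()

    batch := bc.db.NewBatch()
    rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
    rawdb.WriteTxLookupEntriesByBlock(batch, block)
    rawdb.WriteHeadBlockHash(batch, block.Hash())
    if updateHeads {
        rawdb.WriteHeadHeaderHash(batch, block.Hash())
        rawdb.WriteHeadFastBlockHash(batch, block.Hash())
    }
    if err := batch.Write(); err != nil {
        log.Crit("Failed to update chain indexes and markers", "err", err)
    }
    // Update the in-memory markers last.
    if updateHeads {
        bc.hc.SetCurrentHeader(block.Header())
        bc.currentFastBlock.Store(block)
        headFastBlockGauge.Update(int64(block.NumberU64()))
    }
    bc.currentBlock.Store(block)
    headBlockGauge.Update(int64(block.NumberU64()))
}
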
Step 3: Compute the difference between the deleted-transaction list and the added-transaction list, and delete the database lookup entries of the transactions that are no longer on the new chain

// Delete useless indexes right now which includes the non-canonical
    // transaction indexes, canonical chain indexes which above the head.
    indexesBatch := bc.db.NewBatch()
    for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
        rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
    }
    // Delete any canonical number assignments above the new head
    number := bc.CurrentBlock().NumberU64()
    for i := number + 1; ; i++ {
        hash := rawdb.ReadCanonicalHash(bc.db, i)
        if hash == (common.Hash{}) {
            break
        }
        rawdb.DeleteCanonicalHash(indexesBatch, i)
    }
    if err := indexesBatch.Write(); err != nil {
        log.Crit("Failed to delete useless indexes", "err", err)
    }

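types.TxDifference computes that set subtraction by transaction hash; paraphrased from core/types, it returns the transactions present in a but absent from b:

// Paraphrased sketch of types.TxDifference (core/types): returns a \ b.
func TxDifference(a, b types.Transactions) types.Transactions {
    keep := make(types.Transactions, 0, len(a))

    remove := make(map[common.Hash]struct{})
    for _, tx := range b {
        remove[tx.Hash()] = struct{}{}
    }
    for _, tx := range a {
        if _, ok := remove[tx.Hash()]; !ok {
            keep = append(keep, tx)
        }
    }
    return keep
}
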
Step 4: Emit the chain-reorganisation events and the removed-logs events to subscribers

// If any logs need to be fired, do it now. In theory we could avoid creating
    // this goroutine if there are no events to fire, but realistcally that only
    // ever happens if we're reorging empty blocks, which will only happen on idle
    // networks where performance is not an issue either way.
    if len(deletedLogs) > 0 {
        bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
    }
    if len(rebirthLogs) > 0 {
        bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
    }
    if len(oldChain) > 0 {
        for i := len(oldChain) - 1; i >= 0; i-- {
            bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
        }
    }
    return nil

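Downstream components such as the transaction pool and the filter system pick the reorg up through these feeds. A minimal consumer sketch using BlockChain's public Subscribe* accessors; the chain argument is assumed to be a *core.BlockChain such as the one built in the SetHead sketch earlier:

package chainwatch

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core"
)

// watchReorgs blocks forever, printing the events that reorg fires at its end.
func watchReorgs(chain *core.BlockChain) {
    removedCh := make(chan core.RemovedLogsEvent, 16)
    sideCh := make(chan core.ChainSideEvent, 16)

    removedSub := chain.SubscribeRemovedLogsEvent(removedCh)
    sideSub := chain.SubscribeChainSideEvent(sideCh)
    defer removedSub.Unsubscribe()
    defer sideSub.Unsubscribe()

    for {
        select {
        case ev := <-removedCh:
            fmt.Println("logs removed by reorg:", len(ev.Logs))
        case ev := <-sideCh:
            fmt.Println("block moved off the canonical chain:", ev.Block.NumberU64())
        }
    }
}
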
Summary

This article has walked through the core data structures of blocks and the blockchain and analysed the basic operations on them, including genesis block creation, the flow for building new blocks, block validation, difficulty target calculation, chain construction, block insertion and fork handling. Block generation and construction deserve particular attention; the choice of cryptographic algorithms, the difficulty design and the fork-handling logic are also points that routinely need close scrutiny when auditing the security of a public chain.
