2 changes: 2 additions & 0 deletions packages/taiko-client/bindings/metadata/interface.go
@@ -4,6 +4,7 @@ import (
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"

pacayaBindings "github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/pacaya"
shastaBindings "github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/shasta"
@@ -57,4 +58,5 @@ type TaikoProposalMetaDataShasta interface {
GetBlobTimestamp(int) uint64
GetRawBlockHeight() *big.Int
GetRawBlockHash() common.Hash
GetLog() types.Log
Suggested change:
-  GetLog() types.Log
+  GetLog() *types.Log

Let's only return a reference here?

}
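If the suggestion is adopted, the implementation in metadata_shasta.go (the next file below) might look roughly like this sketch. It is only a sketch and not part of this PR; it assumes the Shasta metadata struct keeps the event log in a Log field of type types.Log, as the diff below implies.

// Sketch only: implementation if the interface method becomes GetLog() *types.Log.
// Returning a pointer avoids copying the log value on every call, at the cost of
// letting callers mutate the stored field.
func (m *TaikoProposalMetadataShasta) GetLog() *types.Log {
  return &m.Log
}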
4 changes: 4 additions & 0 deletions packages/taiko-client/bindings/metadata/metadata_shasta.go
@@ -81,6 +81,10 @@ func (m *TaikoProposalMetadataShasta) GetCoinbase() common.Address {
return common.Address{}
}

// GetLog returns the raw event log associated with this proposal.
func (m *TaikoProposalMetadataShasta) GetLog() types.Log {
  return m.Log
}

// GetBlobHashes returns blob hashes in this proposal.
func (m *TaikoProposalMetadataShasta) GetBlobHashes(idx int) []common.Hash {
var blobHashes []common.Hash
151 changes: 99 additions & 52 deletions packages/taiko-client/driver/preconf_blocks/server.go
@@ -32,6 +32,7 @@ import (

"github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/encoding"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/metadata"
shastaBindings "github.com/taikoxyz/taiko-mono/packages/taiko-client/bindings/shasta"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/internal/metrics"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/pkg/preconf"
"github.com/taikoxyz/taiko-mono/packages/taiko-client/pkg/rpc"
@@ -1053,77 +1054,123 @@ func (s *PreconfBlockAPIServer) monitorLatestProposalOnChain(ctx context.Context
}

  if proposal.IsPacaya() {
-    stateVars, err := s.rpc.GetProtocolStateVariablesPacaya(&bind.CallOpts{Context: ctx})
-    if err != nil {
-      log.Error("Failed to get states from Pacaya Inbox", "error", err)
-      return
-    }
-
-    numBatches := stateVars.Stats2.NumBatches
-    if numBatches == 0 {
-      return
-    }
-
-    latestSeenBatchID := proposal.Pacaya().GetBatchID()
-    latestOnChainBatchID := new(big.Int).SetUint64(numBatches - 1)
-    if latestSeenBatchID.Cmp(latestOnChainBatchID) <= 0 {
-      return
-    }
-
-    iterPacaya, err := s.rpc.PacayaClients.TaikoInbox.FilterBatchProposed(
-      &bind.FilterOpts{Start: stateVars.Stats2.LastProposedIn.Uint64(), Context: ctx},
-    )
-    if err != nil {
-      log.Error("Failed to filter batch proposed event", "err", err)
-      return
-    }
-    defer iterPacaya.Close()
-
-    for iterPacaya.Next() {
-      if new(big.Int).SetUint64(iterPacaya.Event.Meta.BatchId).Cmp(s.latestSeenProposal.Pacaya().GetBatchID()) < 0 {
-        s.recordLatestSeenProposal(&encoding.LastSeenProposal{
-          TaikoProposalMetaData: metadata.NewTaikoDataBlockMetadataPacaya(iterPacaya.Event),
-          PreconfChainReorged: true,
-        })
-      }
-    }
-
-    if err := iterPacaya.Error(); err != nil {
-      log.Error("Failed to iterate batch proposed events", "err", err)
-    }
-  } else {
-    latestSeenProposalID := proposal.Shasta().GetProposal().Id
-    shastaProposal := s.shastaIndexer.GetLastProposal()
-    if latestSeenProposalID.Cmp(shastaProposal.Proposal.Id) <= 0 {
-      return
-    }
-    iterShasta, err := s.rpc.ShastaClients.Inbox.FilterProposed(
-      &bind.FilterOpts{Start: shastaProposal.RawBlockHeight.Uint64(), Context: ctx},
-    )
-    if err != nil {
-      log.Error("Failed to filter proposed event", "err", err)
-      return
-    }
-    defer iterShasta.Close()
-
-    for iterShasta.Next() {
-      proposedEventPayload, err := s.rpc.DecodeProposedEventPayload(&bind.CallOpts{Context: ctx}, iterShasta.Event.Data)
-      if err != nil {
-        log.Error("Failed to decode proposed event data", "err", err)
-        return
-      }
-      if proposedEventPayload.Proposal.Id.Cmp(s.latestSeenProposal.Shasta().GetProposal().Id) < 0 {
-        s.recordLatestSeenProposal(&encoding.LastSeenProposal{
-          TaikoProposalMetaData: metadata.NewTaikoProposalMetadataShasta(proposedEventPayload, iterShasta.Event.Raw),
-          PreconfChainReorged: true,
-        })
-      }
-    }
-
-    if err := iterShasta.Error(); err != nil {
-      log.Error("Failed to iterate proposed events", "err", err)
-      return
-    }
-  }
-}
+    s.monitorPacayaProposalOnChain(ctx, proposal)
+  } else {
+    s.monitorShastaProposalOnChain(ctx, proposal)
+  }
+}
+
+// monitorPacayaProposalOnChain monitors Pacaya proposals for reorgs.
+func (s *PreconfBlockAPIServer) monitorPacayaProposalOnChain(ctx context.Context, proposal *encoding.LastSeenProposal) {
+  stateVars, err := s.rpc.GetProtocolStateVariablesPacaya(&bind.CallOpts{Context: ctx})
+  if err != nil {
+    log.Error("Failed to get states from Pacaya Inbox", "error", err)
+    return
+  }
+
+  numBatches := stateVars.Stats2.NumBatches
+  if numBatches == 0 {
+    return
+  }
+
+  latestSeenBatchID := proposal.Pacaya().GetBatchID()
+  latestOnChainBatchID := new(big.Int).SetUint64(numBatches - 1)
+  if latestSeenBatchID.Cmp(latestOnChainBatchID) <= 0 {
+    return
+  }
+
+  iterPacaya, err := s.rpc.PacayaClients.TaikoInbox.FilterBatchProposed(
+    &bind.FilterOpts{Start: stateVars.Stats2.LastProposedIn.Uint64(), Context: ctx},
+  )
+  if err != nil {
+    log.Error("Failed to filter batch proposed event", "err", err)
+    return
+  }
+  defer iterPacaya.Close()
+
+  for iterPacaya.Next() {
+    if new(big.Int).SetUint64(iterPacaya.Event.Meta.BatchId).Cmp(s.latestSeenProposal.Pacaya().GetBatchID()) < 0 {
+      s.recordLatestSeenProposal(&encoding.LastSeenProposal{
+        TaikoProposalMetaData: metadata.NewTaikoDataBlockMetadataPacaya(iterPacaya.Event),
+        PreconfChainReorged: true,
+      })
+    }
+  }
+
+  if err := iterPacaya.Error(); err != nil {
+    log.Error("Failed to iterate batch proposed events", "err", err)
+  }
+}
+
+// monitorShastaProposalOnChain monitors Shasta proposals for reorgs.
+func (s *PreconfBlockAPIServer) monitorShastaProposalOnChain(ctx context.Context, proposal *encoding.LastSeenProposal) {
+  shastaProposal := proposal.Shasta()
+  latestSeenProposalID := shastaProposal.GetProposal().Id
+  currentProposal := shastaProposal.GetProposal()
+
+  proposalHash, err := s.rpc.HashProposalShasta(&bind.CallOpts{Context: ctx}, &currentProposal)
+  if err != nil {
+    log.Error("Failed to hash shasta proposal", "err", err)
+    return
+  }
+
+  onChainProposalHash, err := s.rpc.GetShastaProposalHash(&bind.CallOpts{Context: ctx}, latestSeenProposalID)
+  if err != nil {
+    log.Error("Failed to get shasta proposal on chain", "err", err)
+    return
+  }
+
+  // Check for reorg and handle it
+  if onChainProposalHash != proposalHash {
+    s.handleShastaProposalReorg(ctx, latestSeenProposalID)
+  }
+}
+
+// handleShastaProposalReorg handles reorg detection for Shasta proposals.
+func (s *PreconfBlockAPIServer) handleShastaProposalReorg(ctx context.Context, latestSeenProposalID *big.Int) {
+  log.Warn("Shasta proposal reorg detected", "latestSeenProposalID", latestSeenProposalID)
+
+  // Find the last valid proposal by searching backwards
+  maxIterations := latestSeenProposalID.Int64()
+  for i := int64(1); i <= maxIterations; i++ {
+    currentProposalID := new(big.Int).Sub(latestSeenProposalID, big.NewInt(i))
+
+    onChainHash, err := s.rpc.GetShastaProposalHash(&bind.CallOpts{Context: ctx}, currentProposalID)
+    if err != nil {
+      log.Error("Failed to get shasta proposal on chain", "proposalId", currentProposalID, "err", err)
+      return
+    }
+
+    recordedProposal, err := s.shastaIndexer.GetProposalByID(currentProposalID.Uint64())
+    if err != nil {
+      log.Error("Proposal not found in cache", "proposalId", currentProposalID, "err", err)
+      return
Comment on lines +1133 to 1147

P1: Reorg handling bails out when the proposal cache misses

The new handleShastaProposalReorg walks backwards through proposal IDs using s.shastaIndexer.GetProposalByID, but it returns immediately when a cached proposal is not found (log.Error("Proposal not found in cache"…) followed by return). Because the indexer intentionally prunes old entries (cleanupLegacyProposals), the cache can legitimately miss IDs after a deep reorg or on startup. The previous implementation fetched the needed proposal data directly from the chain via FilterProposed, so it could recover regardless of what remained in the cache. With this change, any reorg deeper than the retained buffer leaves latestSeenProposal pointing at an invalid proposal, and the monitor never rewinds to a canonical state, repeatedly logging errors instead of recovering. Consider continuing the search or fetching the proposal from the RPC instead of aborting when the cache is empty; a sketch of such a fallback follows this file's diff.

+    }
+
+    recordedProposalHash, err := s.rpc.HashProposalShasta(&bind.CallOpts{Context: ctx}, recordedProposal.Proposal)
+    if err != nil {
+      log.Error("Failed to hash recorded proposal", "proposalId", currentProposalID, "err", err)
+      return
+    }
+
+    // Found a valid proposal that matches on-chain state
+    if onChainHash == recordedProposalHash {
+      if currentProposalID.Cmp(s.latestSeenProposal.Shasta().GetProposal().Id) < 0 {
+        log.Info("Found valid proposal after reorg", "proposalId", currentProposalID)
+        s.recordLatestSeenProposal(&encoding.LastSeenProposal{
+          TaikoProposalMetaData: metadata.NewTaikoProposalMetadataShasta(&shastaBindings.IInboxProposedEventPayload{
+            Proposal: *recordedProposal.Proposal,
+            Derivation: *recordedProposal.Derivation,
+            CoreState: *recordedProposal.CoreState,
+          }, recordedProposal.Log),
+          PreconfChainReorged: true,
+        })
+      }
+      return
+    }
+  }
+
+  log.Error("Could not find valid proposal after reorg", "searchedUpTo", latestSeenProposalID)
+}

// recordLatestSeenProposal records the latest seen proposal.
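One way to address the P1 comment above, sketched here rather than proposed as part of this PR: when the indexer cache has already pruned the proposal, fall back to re-reading the Proposed event from the chain instead of returning. The helper name getProposalFromChain and its startHeight parameter are hypothetical; the FilterProposed and DecodeProposedEventPayload calls are used exactly as in the code this PR removes, and the snippet assumes server.go imports fmt and the go-ethereum core/types package.

// getProposalFromChain is a hypothetical fallback that re-reads a Proposed event
// from the Shasta inbox when the indexer cache no longer holds the proposal,
// so handleShastaProposalReorg can keep searching instead of aborting.
func (s *PreconfBlockAPIServer) getProposalFromChain(
  ctx context.Context,
  proposalID *big.Int,
  startHeight uint64,
) (*shastaBindings.IInboxProposedEventPayload, *types.Log, error) {
  iter, err := s.rpc.ShastaClients.Inbox.FilterProposed(
    &bind.FilterOpts{Start: startHeight, Context: ctx},
  )
  if err != nil {
    return nil, nil, err
  }
  defer iter.Close()

  for iter.Next() {
    payload, err := s.rpc.DecodeProposedEventPayload(&bind.CallOpts{Context: ctx}, iter.Event.Data)
    if err != nil {
      return nil, nil, err
    }
    // Keep scanning until the event carrying the requested proposal ID is found.
    if payload.Proposal.Id.Cmp(proposalID) == 0 {
      raw := iter.Event.Raw
      return payload, &raw, nil
    }
  }
  if err := iter.Error(); err != nil {
    return nil, nil, err
  }

  return nil, nil, fmt.Errorf("proposal %s not found on chain", proposalID)
}

handleShastaProposalReorg could call a helper like this in its cache-miss branch (or simply continue with the next older ID), so that a reorg deeper than the indexer's retained buffer can still rewind to a canonical proposal.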
17 changes: 12 additions & 5 deletions packages/taiko-client/pkg/state_indexer/indexer.go
@@ -23,7 +23,10 @@ import (
var (
  // maxBlocksPerFilter defines the maximum number of blocks to filter in a single RPC query.
  maxBlocksPerFilter uint64 = 1000
-  reorgSafetyDepth = new(big.Int).SetUint64(64)
+  // reorgSafetyDepth defines how many blocks back to rewind when a reorg is detected.
+  reorgSafetyDepth = new(big.Int).SetUint64(64)
+  // bufferSizeMultiplier determines how many times the buffer size to keep for historical data.
+  bufferSizeMultiplier uint64 = 2
)

// ProposalPayload represents the payload in a Shasta Proposed event.
@@ -33,6 +36,7 @@ type ProposalPayload struct {
Derivation *shastaBindings.IInboxDerivation
RawBlockHash common.Hash
RawBlockHeight *big.Int
Log types.Log
ditto (same as the GetLog comment above: prefer storing a *types.Log reference); a sketch follows this struct.

}
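If the same pointer suggestion is applied to the cached payload, only the field type changes; a minimal sketch, not part of this PR:

type ProposalPayload struct {
  // ... fields shown in the diff above stay unchanged ...
  Log *types.Log // store a reference rather than a copy of the event log
}

Call sites that currently expect a types.Log value (for example the second argument to metadata.NewTaikoProposalMetadataShasta in server.go) would then need a dereference or a matching signature change.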

// TransitionPayload represents the payload in a Shasta Proved event.
@@ -362,6 +366,7 @@ func (s *Indexer) onProposedEvent(
Derivation: &derivation,
RawBlockHash: meta.GetRawBlockHash(),
RawBlockHeight: meta.GetRawBlockHeight(),
Log: meta.Shasta().GetLog(),
}

s.proposals.Set(proposal.Id.Uint64(), payload)
@@ -500,9 +505,10 @@ func (s *Indexer) TransitionRecords() cmap.ConcurrentMap[uint64, *TransitionPayl
// cleanupFinalizedTransitionRecords cleans up transition records that are older than the last finalized proposal ID
// minus the buffer size.
func (s *Indexer) cleanupFinalizedTransitionRecords(lastFinalizedProposalId uint64) {
-  // We keep two times the buffer size of transition records to avoid future reorg handling.
+  // We keep bufferSizeMultiplier times the buffer size of transition records to avoid future reorg handling.
+  threshold := s.bufferSize * bufferSizeMultiplier
  for _, key := range s.transitionRecords.Keys() {
-    if key+(s.bufferSize*2) < lastFinalizedProposalId {
+    if key+threshold < lastFinalizedProposalId {
      log.Trace("Cleaning up finalized Shasta transition record", "proposalId", key)
      s.transitionRecords.Remove(key)
    }
@@ -511,9 +517,10 @@ func (s *Indexer) cleanupLegacyProposals(lastProposalId uint

// cleanupLegacyProposals cleans up proposals that are older than the last proposal ID minus the buffer size.
func (s *Indexer) cleanupLegacyProposals(lastProposalId uint64) {
-  // We keep two times the buffer size of transition records to avoid future reorg handling.
+  // We keep bufferSizeMultiplier times the buffer size of proposals to avoid future reorg handling.
+  threshold := s.bufferSize * bufferSizeMultiplier
  for _, key := range s.proposals.Keys() {
-    if key+(s.bufferSize*2) < lastProposalId {
+    if key+threshold < lastProposalId {
      log.Trace("Cleaning up legacy Shasta proposal", "proposalId", key)
      s.proposals.Remove(key)
    }
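As a worked example of the retention window (the numbers are hypothetical, only the formula comes from the code above): with s.bufferSize = 96 and bufferSizeMultiplier = 2, the threshold is 192, so a proposal with ID key is pruned once key + 192 < lastProposalId. In other words, the indexer keeps roughly the 192 most recent proposals, which is exactly the buffer the P1 comment on server.go worries a deep reorg could exceed.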