Commit 83cef6e6f6 (parent 3751ec554d) by mr, 2026-03-09 14:57:41 +01:00
47 changed files with 2704 additions and 1034 deletions


@@ -35,8 +35,27 @@ const (
consensusQueryTimeout = 3 * time.Second
// consensusCollectTimeout is the total wait for all native responses.
consensusCollectTimeout = 4 * time.Second
// ProtocolIndexerConsensus is the Phase 2 liveness-voting protocol.
// Each stable indexer is asked which candidates it considers reachable.
ProtocolIndexerConsensus = "/opencloud/indexer/consensus/1.0"
// MinStableAge is the minimum time since native admission before an indexer
// may participate as a voter in Phase 2 liveness voting.
MinStableAge = 2 * time.Minute
)
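// Illustrative sketch (not part of this diff): IndexerRecord and its IsStableVoter
// method are referenced below but defined elsewhere in this commit. Based on the
// semantics documented above (native admission time, MinStableAge), a minimal version
// could look like this; the exact fields of the real type are an assumption.
type IndexerRecord struct {
	AdmittedAt time.Time // time of native admission; zero for fallback/seed entries
}

// IsStableVoter reports whether this indexer was admitted by a native and has been
// stable for at least MinStableAge, making it eligible as a Phase 2 voter.
func (r *IndexerRecord) IsStableVoter() bool {
	return !r.AdmittedAt.IsZero() && time.Since(r.AdmittedAt) >= MinStableAge
}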
// IndexerConsensusRequest is sent to stable indexers during Phase 2 liveness voting.
// Each voter replies with which candidates from the list it can currently reach.
type IndexerConsensusRequest struct {
Candidates []string `json:"candidates"`
}
// IndexerConsensusResponse is the reply from a Phase 2 voter.
type IndexerConsensusResponse struct {
Alive []string `json:"alive"`
}
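// Illustrative sketch (not part of this diff): a voter-side handler for
// ProtocolIndexerConsensus. The handler name and the reachability check (current
// libp2p connectedness) are assumptions; the wire format matches the request and
// response types above. Assumes network is github.com/libp2p/go-libp2p/core/network.
func registerConsensusVoter(h host.Host) {
	h.SetStreamHandler(ProtocolIndexerConsensus, func(s network.Stream) {
		defer s.Close()
		s.SetDeadline(time.Now().Add(consensusQueryTimeout))
		// Decode the candidate list sent by the admitting peer.
		var req IndexerConsensusRequest
		if err := json.NewDecoder(s).Decode(&req); err != nil {
			return
		}
		resp := IndexerConsensusResponse{}
		for _, addr := range req.Candidates {
			ad, err := pp.AddrInfoFromString(addr)
			if err != nil {
				continue
			}
			// Report a candidate as alive only if we currently hold a connection to it.
			if h.Network().Connectedness(ad.ID) == network.Connected {
				resp.Alive = append(resp.Alive, addr)
			}
		}
		_ = json.NewEncoder(s).Encode(resp)
	})
}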
// ConsensusRequest is sent by a node/indexer to a native to validate a candidate
// indexer list. The native replies with what it trusts and what it suggests instead.
type ConsensusRequest struct {
@@ -56,11 +75,12 @@ type ConsensusResponse struct {
// Timestamp + PubKey + Signature allow the native and DHT to verify that the
// registration was produced by the peer that owns the declared PeerID.
type IndexerRegistration struct {
PeerID string `json:"peer_id,omitempty"`
Addr string `json:"addr"`
Timestamp int64 `json:"ts,omitempty"` // Unix nanoseconds (anti-replay)
PubKey []byte `json:"pub_key,omitempty"` // marshaled libp2p public key
Signature []byte `json:"sig,omitempty"` // Sign(signaturePayload())
PeerID string `json:"peer_id,omitempty"`
Addr string `json:"addr"`
Timestamp int64 `json:"ts,omitempty"` // Unix nanoseconds (anti-replay)
PubKey []byte `json:"pub_key,omitempty"` // marshaled libp2p public key
Signature []byte `json:"sig,omitempty"` // Sign(signaturePayload())
FillRate float64 `json:"fill_rate,omitempty"` // connected_nodes / max_nodes (0=empty, 1=full)
}
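// Illustrative sketch (not part of this diff): how a receiving native might verify a
// registration using Timestamp, PubKey and Signature. The function name and the
// 2-minute replay window are assumptions; SignaturePayload is the method documented
// below and is assumed to return the canonical []byte. Assumes crypto is
// github.com/libp2p/go-libp2p/core/crypto.
func verifyRegistration(reg *IndexerRegistration) bool {
	pub, err := crypto.UnmarshalPublicKey(reg.PubKey)
	if err != nil {
		return false
	}
	// The declared PeerID must be derived from the public key that produced the signature.
	id, err := pp.IDFromPublicKey(pub)
	if err != nil || id.String() != reg.PeerID {
		return false
	}
	// Reject stale registrations (anti-replay).
	if time.Since(time.Unix(0, reg.Timestamp)) > 2*time.Minute {
		return false
	}
	ok, err := pub.Verify(reg.SignaturePayload(), reg.Signature)
	return err == nil && ok
}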
// SignaturePayload returns the canonical byte slice that is signed/verified.
@@ -106,9 +126,12 @@ type GetIndexersRequest struct {
}
// GetIndexersResponse is returned by the native with live indexer multiaddrs.
// FillRates maps each indexer address to its last reported fill rate (0=empty, 1=full).
// Nodes use fill rates to prefer indexers with available capacity.
type GetIndexersResponse struct {
Indexers []string `json:"indexers"`
IsSelfFallback bool `json:"is_self_fallback,omitempty"`
Indexers []string `json:"indexers"`
IsSelfFallback bool `json:"is_self_fallback,omitempty"`
FillRates map[string]float64 `json:"fill_rates,omitempty"`
}
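// Illustrative sketch (not part of this diff): how a node might order the returned
// indexers by reported fill rate before dialing, preferring the least-loaded ones.
// The function name is an assumption; addresses missing from FillRates sort first
// (zero value). Assumes the standard library sort package is imported.
func sortByFillRate(resp *GetIndexersResponse) []string {
	ordered := append([]string(nil), resp.Indexers...)
	sort.Slice(ordered, func(i, j int) bool {
		return resp.FillRates[ordered[i]] < resp.FillRates[ordered[j]]
	})
	return ordered
}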
var StaticNatives = map[string]*pp.AddrInfo{}
@@ -177,8 +200,8 @@ func ConnectToNatives(h host.Host, minIndexer int, maxIndexer int, myPID pp.ID)
logger.Info().Int("candidates", len(candidates)).Bool("fallback", isFallback).Msg("[native] step 1 — pool received")
// Step 2: populate StaticIndexers — consensus for real indexers, direct for fallback.
pool := resolvePool(h, candidates, isFallback, maxIndexer)
replaceStaticIndexers(pool)
pool, admittedAt := resolvePool(h, candidates, isFallback, maxIndexer)
replaceStaticIndexers(pool, admittedAt)
StreamMuIndexes.RLock()
indexerCount := len(StaticIndexers)
@@ -216,7 +239,7 @@ func replenishIndexersFromNative(h host.Host, need int) {
}
logger.Info().Int("candidates", len(candidates)).Bool("fallback", isFallback).Msg("[native] step 4 — candidates received")
pool := resolvePool(h, candidates, isFallback, need)
pool, admittedAt := resolvePool(h, candidates, isFallback, need)
if len(pool) == 0 {
logger.Warn().Msg("[native] step 4 — consensus yielded no confirmed indexers")
return
@@ -226,9 +249,11 @@ func replenishIndexersFromNative(h host.Host, need int) {
StreamMuIndexes.Lock()
for addr, ad := range pool {
StaticIndexers[addr] = ad
if StaticIndexerMeta[addr] == nil {
StaticIndexerMeta[addr] = &IndexerRecord{AdmittedAt: admittedAt}
}
}
total := len(StaticIndexers)
StreamMuIndexes.Unlock()
logger.Info().Int("added", len(pool)).Int("total", total).Msg("[native] step 4 — pool replenished")
@@ -335,9 +360,9 @@ collect:
}
// resolvePool converts a candidate list to a validated addr→AddrInfo map.
// When isFallback is true the native itself is the indexer — no consensus needed.
// When isFallback is false, consensus is run before accepting the candidates.
func resolvePool(h host.Host, candidates []string, isFallback bool, maxIndexer int) map[string]*pp.AddrInfo {
// When isFallback is true the native itself is the indexer — no Phase 1 consensus needed.
// Returns the pool and the admission timestamp (zero for fallback/seed entries).
func resolvePool(h host.Host, candidates []string, isFallback bool, maxIndexer int) (map[string]*pp.AddrInfo, time.Time) {
logger := oclib.GetLogger()
if isFallback {
logger.Info().Strs("addrs", candidates).Msg("[native] resolve — fallback mode, skipping consensus")
@@ -349,9 +374,10 @@ func resolvePool(h host.Host, candidates []string, isFallback bool, maxIndexer i
}
pool[addr] = ad
}
return pool
return pool, time.Time{}
}
// Phase 1 — native admission.
// Round 1.
logger.Info().Int("candidates", len(candidates)).Msg("[native] resolve — consensus round 1")
confirmed, suggestions := clientSideConsensus(h, candidates)
@@ -372,6 +398,7 @@ func resolvePool(h host.Host, candidates []string, isFallback bool, maxIndexer i
logger.Info().Int("confirmed", len(confirmed)).Msg("[native] resolve — consensus round 2 done")
}
admittedAt := time.Now().UTC()
pool := make(map[string]*pp.AddrInfo, len(confirmed))
for _, addr := range confirmed {
ad, err := pp.AddrInfoFromString(addr)
@@ -380,18 +407,130 @@ func resolvePool(h host.Host, candidates []string, isFallback bool, maxIndexer i
}
pool[addr] = ad
}
logger.Info().Int("pool_size", len(pool)).Msg("[native] resolve — pool ready")
return pool
// Phase 2 — indexer liveness vote.
logger.Info().Int("pool_size", len(pool)).Msg("[native] resolve — Phase 1 done, running Phase 2 liveness vote")
pool = indexerLivenessVote(h, pool)
logger.Info().Int("pool_size", len(pool)).Msg("[native] resolve — Phase 2 done, pool ready")
return pool, admittedAt
}
// indexerLivenessVote runs Phase 2 of the hybrid consensus: it queries every
// stable indexer in StaticIndexers (AdmittedAt non-zero, age >= MinStableAge)
// for their view of the candidate list and returns only the candidates confirmed
// by quorum. When no stable voter exists the full admitted set is returned
// unchanged — this is correct on first boot before any indexer is old enough.
func indexerLivenessVote(h host.Host, admitted map[string]*pp.AddrInfo) map[string]*pp.AddrInfo {
logger := oclib.GetLogger()
StreamMuIndexes.RLock()
voters := make([]*pp.AddrInfo, 0, len(StaticIndexers))
for addr, ad := range StaticIndexers {
if meta, ok := StaticIndexerMeta[addr]; ok && meta.IsStableVoter() {
voters = append(voters, ad)
}
}
StreamMuIndexes.RUnlock()
if len(voters) == 0 {
logger.Info().Msg("[phase2] no stable voters yet — trusting Phase 1 result")
return admitted
}
candidates := make([]string, 0, len(admitted))
for addr := range admitted {
candidates = append(candidates, addr)
}
type result struct {
alive map[string]struct{}
ok bool
}
ch := make(chan result, len(voters))
for _, voter := range voters {
go func(v *pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), consensusQueryTimeout)
defer cancel()
if err := h.Connect(ctx, *v); err != nil {
ch <- result{}
return
}
s, err := h.NewStream(ctx, v.ID, ProtocolIndexerConsensus)
if err != nil {
ch <- result{}
return
}
s.SetDeadline(time.Now().Add(consensusQueryTimeout))
defer s.Close()
if err := json.NewEncoder(s).Encode(IndexerConsensusRequest{Candidates: candidates}); err != nil {
ch <- result{}
return
}
var resp IndexerConsensusResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
ch <- result{}
return
}
alive := make(map[string]struct{}, len(resp.Alive))
for _, a := range resp.Alive {
alive[a] = struct{}{}
}
ch <- result{alive: alive, ok: true}
}(voter)
}
timer := time.NewTimer(consensusCollectTimeout)
defer timer.Stop()
aliveCounts := map[string]int{}
total, collected := 0, 0
collect:
for collected < len(voters) {
select {
case r := <-ch:
collected++
if !r.ok {
continue
}
total++
for addr := range r.alive {
aliveCounts[addr]++
}
case <-timer.C:
break collect
}
}
if total == 0 {
logger.Info().Msg("[phase2] no voter responded — trusting Phase 1 result")
return admitted
}
quorum := conf.GetConfig().ConsensusQuorum
if quorum <= 0 {
quorum = 0.5
}
confirmed := make(map[string]*pp.AddrInfo, len(admitted))
for addr, ad := range admitted {
if float64(aliveCounts[addr]) > float64(total)*quorum {
confirmed[addr] = ad
}
}
logger.Info().Int("admitted", len(admitted)).Int("confirmed", len(confirmed)).Int("voters", total).Msg("[phase2] liveness vote complete")
return confirmed
}
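// Worked example of the quorum test above (not part of this diff): with 5 responding
// voters and the default quorum of 0.5, a candidate needs strictly more than
// 5*0.5 = 2.5 alive votes, i.e. at least 3 voters must report it reachable. With 4
// voters the threshold is > 2.0, so 3 votes are again required; an exact tie never passes.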
// replaceStaticIndexers atomically replaces the active indexer pool.
// Peers no longer in next have their heartbeat streams closed so the SendHeartbeat
// goroutine stops sending to them on the next tick.
func replaceStaticIndexers(next map[string]*pp.AddrInfo) {
// admittedAt is the time of native admission (zero for fallback/seed entries).
func replaceStaticIndexers(next map[string]*pp.AddrInfo, admittedAt time.Time) {
StreamMuIndexes.Lock()
defer StreamMuIndexes.Unlock()
for addr, ad := range next {
StaticIndexers[addr] = ad
if StaticIndexerMeta[addr] == nil {
StaticIndexerMeta[addr] = &IndexerRecord{AdmittedAt: admittedAt}
}
}
}
@@ -508,8 +647,10 @@ collect:
}
// RegisterWithNative sends a one-shot registration to each configured native indexer.
// fillRateFn, when non-nil, is called to obtain the current fill rate (0=empty, 1=full)
// which the native uses to route new nodes toward less-loaded indexers.
// Should be called periodically every RecommendedHeartbeatInterval.
func RegisterWithNative(h host.Host, nativeAddressesStr string) {
func RegisterWithNative(h host.Host, nativeAddressesStr string, fillRateFn func() float64) {
logger := oclib.GetLogger()
myAddr := ""
if !strings.Contains(h.Addrs()[len(h.Addrs())-1].String(), "127.0.0.1") {
@@ -524,6 +665,9 @@ func RegisterWithNative(h host.Host, nativeAddressesStr string) {
Addr: myAddr,
Timestamp: time.Now().UnixNano(),
}
if fillRateFn != nil {
reg.FillRate = fillRateFn()
}
reg.Sign(h)
for _, addr := range strings.Split(nativeAddressesStr, ",") {
addr = strings.TrimSpace(addr)
@@ -619,7 +763,10 @@ func EnsureNativePeers(h host.Host) {
})
}
func StartNativeRegistration(h host.Host, nativeAddressesStr string) {
// StartNativeRegistration starts a goroutine that periodically registers this
// indexer with all configured native indexers (every RecommendedHeartbeatInterval).
// fillRateFn is called on each registration tick to report current capacity usage.
func StartNativeRegistration(h host.Host, nativeAddressesStr string, fillRateFn func() float64) {
go func() {
// Poll until a routable (non-loopback) address is available before the first
// registration attempt. libp2p may not have discovered external addresses yet
@@ -636,11 +783,11 @@ func StartNativeRegistration(h host.Host, nativeAddressesStr string) {
}
time.Sleep(5 * time.Second)
}
RegisterWithNative(h, nativeAddressesStr)
RegisterWithNative(h, nativeAddressesStr, fillRateFn)
t := time.NewTicker(RecommendedHeartbeatInterval)
defer t.Stop()
for range t.C {
RegisterWithNative(h, nativeAddressesStr)
RegisterWithNative(h, nativeAddressesStr, fillRateFn)
}
}()
}
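// Illustrative usage sketch (not part of this diff): wiring StartNativeRegistration
// with a fill-rate callback. maxNodes and connectedNodes are hypothetical; a real
// caller derives connected_nodes / max_nodes from its own node registry.
func startRegistrationExample(h host.Host, nativeAddrs string, maxNodes int, connectedNodes func() int) {
	StartNativeRegistration(h, nativeAddrs, func() float64 {
		if maxNodes <= 0 {
			return 0
		}
		return float64(connectedNodes()) / float64(maxNodes)
	})
}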
@@ -917,7 +1064,7 @@ func retryLostNative(ctx context.Context, h host.Host, addr string, nativeProto
NudgeNativeHeartbeat()
replenishIndexersIfNeeded(h)
if nativeProto == ProtocolNativeGetIndexers {
StartNativeRegistration(h, addr) // register back
StartNativeRegistration(h, addr, nil) // register back (fill rate unknown in this context)
}
return
}