demo test + Peer
@@ -5,8 +5,9 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"oc-discovery/conf"
 	"oc-discovery/daemons/node/common"
 	"strings"
 	"time"

 	oclib "cloud.o-forge.io/core/oc-lib"
@@ -18,17 +19,21 @@ import (
 	"github.com/libp2p/go-libp2p/core/peer"
 )

+type PeerRecordPayload struct {
+	Name       string    `json:"name"`
+	DID        string    `json:"did"`
+	PubKey     []byte    `json:"pub_key"`
+	ExpiryDate time.Time `json:"expiry_date"`
+}
+
 type PeerRecord struct {
-	Name          string    `json:"name"`
-	DID           string    `json:"did"` // real PEER ID
-	PeerID        string    `json:"peer_id"`
-	PubKey        []byte    `json:"pub_key"`
-	APIUrl        string    `json:"api_url"`
-	StreamAddress string    `json:"stream_address"`
-	NATSAddress   string    `json:"nats_address"`
-	WalletAddress string    `json:"wallet_address"`
-	Signature     []byte    `json:"signature"`
-	ExpiryDate    time.Time `json:"expiry_date"`
+	PeerRecordPayload
+	PeerID        string `json:"peer_id"`
+	APIUrl        string `json:"api_url"`
+	StreamAddress string `json:"stream_address"`
+	NATSAddress   string `json:"nats_address"`
+	WalletAddress string `json:"wallet_address"`
+	Signature     []byte `json:"signature"`
 }

 func (p *PeerRecord) Sign() error {
@@ -36,13 +41,7 @@ func (p *PeerRecord) Sign() error {
 	if err != nil {
 		return err
 	}
-	dht := PeerRecord{
-		Name:       p.Name,
-		DID:        p.DID,
-		PubKey:     p.PubKey,
-		ExpiryDate: p.ExpiryDate,
-	}
-	payload, _ := json.Marshal(dht)
+	payload, _ := json.Marshal(p.PeerRecordPayload)
 	b, err := common.Sign(priv, payload)
 	p.Signature = b
 	return err
@@ -51,19 +50,11 @@ func (p *PeerRecord) Sign() error {

 func (p *PeerRecord) Verify() (crypto.PubKey, error) {
 	pubKey, err := crypto.UnmarshalPublicKey(p.PubKey) // retrieve pub key in message
 	if err != nil {
-		fmt.Println("UnmarshalPublicKey")
 		return pubKey, err
 	}
-	dht := PeerRecord{
-		Name:       p.Name,
-		DID:        p.DID,
-		PubKey:     p.PubKey,
-		ExpiryDate: p.ExpiryDate,
-	}
-	payload, _ := json.Marshal(dht)
+	payload, _ := json.Marshal(p.PeerRecordPayload)

-	if ok, _ := common.Verify(pubKey, payload, p.Signature); !ok { // verify minimal message was sign per pubKey
-		fmt.Println("Verify")
+	if ok, _ := pubKey.Verify(payload, p.Signature); !ok { // verify the minimal payload was signed by this pubKey
 		return pubKey, errors.New("invalid signature")
 	}
 	return pubKey, nil
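For reference, signing and verifying now round-trips through the embedded `PeerRecordPayload` only, so the mutable address fields can change without invalidating the signature. A minimal, self-contained sketch of the same flow using the libp2p crypto primitives directly (the project's `common.Sign` helper is assumed to wrap `PrivKey.Sign`; names here are illustrative):

```go
package main

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
)

// payload mirrors PeerRecordPayload: only these four fields are covered by the signature.
type payload struct {
	Name       string    `json:"name"`
	DID        string    `json:"did"`
	PubKey     []byte    `json:"pub_key"`
	ExpiryDate time.Time `json:"expiry_date"`
}

func main() {
	priv, pub, _ := crypto.GenerateEd25519Key(rand.Reader)
	pubBytes, _ := crypto.MarshalPublicKey(pub)

	p := payload{Name: "node-a", DID: "did:example:a", PubKey: pubBytes, ExpiryDate: time.Now().Add(time.Hour)}
	msg, _ := json.Marshal(p)

	sig, _ := priv.Sign(msg) // what common.Sign is assumed to do under the hood

	// Receiver side: unmarshal the public key carried in the record, then verify.
	recovered, _ := crypto.UnmarshalPublicKey(p.PubKey)
	ok, _ := recovered.Verify(msg, sig)
	fmt.Println("signature valid:", ok) // true
}
```

Note that `Verify()` in the diff re-marshals `p.PeerRecordPayload` rather than reusing the signer's bytes; this relies on `encoding/json` producing identical output for identical struct values on both sides, which holds for a fixed struct type but would break if field order or tags ever diverged between peers.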
@@ -114,6 +105,8 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe

 type GetValue struct {
 	Key    string  `json:"key"`
 	PeerID peer.ID `json:"peer_id"`
+	Name   string  `json:"name,omitempty"`
+	Search bool    `json:"search,omitempty"`
 }

 type GetResponse struct {
@@ -125,122 +118,233 @@ func (ix *IndexerService) genKey(did string) string {
 	return "/node/" + did
 }

+func (ix *IndexerService) genNameKey(name string) string {
+	return "/name/" + name
+}
+
+func (ix *IndexerService) genPIDKey(peerID string) string {
+	return "/pid/" + peerID
+}

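The three key namespaces form a small resolution chain: `/name/<name>` and `/pid/<peerID>` each map to a DID, and `/node/<DID>` holds the signed record itself. A hedged sketch of resolving a record by human-readable name, assuming a ready `*dht.IpfsDHT` handle and the `PeerRecord` type above (the function name is hypothetical):

```go
import (
	"context"
	"encoding/json"

	dht "github.com/libp2p/go-libp2p-kad-dht"
)

// resolveByName is illustrative only; error handling trimmed to the essentials.
func resolveByName(ctx context.Context, d *dht.IpfsDHT, name string) (*PeerRecord, error) {
	// Step 1: /name/<name> → DID
	didBytes, err := d.GetValue(ctx, "/name/"+name)
	if err != nil {
		return nil, err
	}
	// Step 2: /node/<DID> → signed PeerRecord
	raw, err := d.GetValue(ctx, "/node/"+string(didBytes))
	if err != nil {
		return nil, err
	}
	var rec PeerRecord
	if err := json.Unmarshal(raw, &rec); err != nil {
		return nil, err
	}
	// Step 3: check the signature before trusting any address in the record.
	if _, err := rec.Verify(); err != nil {
		return nil, err
	}
	return &rec, nil
}
```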
 func (ix *IndexerService) initNodeHandler() {
-	ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleNodeHeartbeat)
+	logger := oclib.GetLogger()
+	logger.Info().Msg("Init Node Handler")
+	// Each heartbeat from a node carries a freshly signed PeerRecord.
+	// Republish it to the DHT so the record never expires as long as the node
+	// is alive — no separate publish stream needed from the node side.
+	ix.AfterHeartbeat = func(pid peer.ID) {
+		ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel1()
+		res, err := ix.DHT.GetValue(ctx1, ix.genPIDKey(pid.String()))
+		if err != nil {
+			logger.Warn().Err(err)
+			return
+		}
+		did := string(res)
+		ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel2()
+		res, err = ix.DHT.GetValue(ctx2, ix.genKey(did))
+		if err != nil {
+			logger.Warn().Err(err)
+			return
+		}
+		var rec PeerRecord
+		if err := json.Unmarshal(res, &rec); err != nil {
+			logger.Warn().Err(err).Str("peer", pid.String()).Msg("indexer: heartbeat record unmarshal failed")
+			return
+		}
+		if _, err := rec.Verify(); err != nil {
+			logger.Warn().Err(err).Str("peer", pid.String()).Msg("indexer: heartbeat record signature invalid")
+			return
+		}
+		data, err := json.Marshal(rec)
+		if err != nil {
+			return
+		}
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		logger.Info().Msg("REFRESH PutValue " + ix.genKey(rec.DID))
+		if err := ix.DHT.PutValue(ctx, ix.genKey(rec.DID), data); err != nil {
+			logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: DHT refresh failed")
+			return
+		}
+		if rec.Name != "" {
+			ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+			ix.DHT.PutValue(ctx2, ix.genNameKey(rec.Name), []byte(rec.DID))
+			cancel2()
+		}
+		if rec.PeerID != "" {
+			ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second)
+			ix.DHT.PutValue(ctx3, ix.genPIDKey(rec.PeerID), []byte(rec.DID))
+			cancel3()
+		}
+	}
+	ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleHeartbeat)
 	ix.Host.SetStreamHandler(common.ProtocolPublish, ix.handleNodePublish)
 	ix.Host.SetStreamHandler(common.ProtocolGet, ix.handleNodeGet)
+	ix.Host.SetStreamHandler(common.ProtocolIndexerGetNatives, ix.handleGetNatives)
 }

 func (ix *IndexerService) handleNodePublish(s network.Stream) {
 	defer s.Close()
 	logger := oclib.GetLogger()
-	for {
-		var rec PeerRecord
-		if err := json.NewDecoder(s).Decode(&rec); err != nil {
-			logger.Err(err)
-			continue
-		}
-		rec2 := PeerRecord{
-			Name:   rec.Name,
-			DID:    rec.DID, // REAL PEER ID
-			PubKey: rec.PubKey,
-			PeerID: rec.PeerID,
-		}
-		if _, err := rec2.Verify(); err != nil {
-			logger.Err(err)
-			continue
-		}
-		if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) { // already expired
-			logger.Err(errors.New(rec.PeerID + " is expired."))
-			continue
-		}
-		pid, err := peer.Decode(rec.PeerID)
-		if err != nil {
-			continue
-		}
-
-		ix.StreamMU.Lock()
-		if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
-			ix.StreamRecords[common.ProtocolHeartbeat] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
-		}
-		streams := ix.StreamRecords[common.ProtocolHeartbeat]
-		if srec, ok := streams[pid]; ok {
-			srec.DID = rec.DID
-			srec.Record = rec
-			srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
-		} else {
-			ix.StreamMU.Unlock()
-			logger.Err(errors.New("no heartbeat"))
-			continue
-		}
-		ix.StreamMU.Unlock()
-
-		key := ix.genKey(rec.DID)
-
-		data, err := json.Marshal(rec)
-		if err != nil {
-			logger.Err(err)
-			continue
-		}
-
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		if err := ix.DHT.PutValue(ctx, key, data); err != nil {
-			logger.Err(err)
-			cancel()
-			continue
-		}
-		break // response... so quit
-	}
+	var rec PeerRecord
+	if err := json.NewDecoder(s).Decode(&rec); err != nil {
+		logger.Err(err)
+		return
+	}
+	if _, err := rec.Verify(); err != nil {
+		logger.Err(err)
+		return
+	}
+	if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) {
+		logger.Err(errors.New(rec.PeerID + " is expired."))
+		return
+	}
+	pid, err := peer.Decode(rec.PeerID)
+	if err != nil {
+		return
+	}
+
+	ix.StreamMU.Lock()
+	defer ix.StreamMU.Unlock()
+	if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
+		ix.StreamRecords[common.ProtocolHeartbeat] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
+	}
+	streams := ix.StreamRecords[common.ProtocolHeartbeat]
+	if srec, ok := streams[pid]; ok {
+		srec.DID = rec.DID
+		srec.Record = rec
+		srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
+	}
+
+	key := ix.genKey(rec.DID)
+	data, err := json.Marshal(rec)
+	if err != nil {
+		logger.Err(err)
+		return
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	if err := ix.DHT.PutValue(ctx, key, data); err != nil {
+		logger.Err(err)
+		cancel()
+		return
+	}
+	cancel()
+
+	// Secondary index: /name/<name> → DID, so peers can resolve by human-readable name.
+	if rec.Name != "" {
+		ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
+		if err := ix.DHT.PutValue(ctx2, ix.genNameKey(rec.Name), []byte(rec.DID)); err != nil {
+			logger.Err(err).Str("name", rec.Name).Msg("indexer: failed to write name index")
+		}
+		cancel2()
+	}
+	// Secondary index: /pid/<peerID> → DID, so peers can resolve by libp2p PeerID.
+	if rec.PeerID != "" {
+		ctx3, cancel3 := context.WithTimeout(context.Background(), 10*time.Second)
+		if err := ix.DHT.PutValue(ctx3, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
+			logger.Err(err).Str("pid", rec.PeerID).Msg("indexer: failed to write pid index")
+		}
+		cancel3()
+	}
 }

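The rewritten handlers all follow a one-shot pattern: decode a single request, reply, and let `defer s.Close()` end the exchange, which is why every error path can now simply `return` where the old loop used `continue`. A generic sketch of that shape, not project code (request and response types are placeholders):

```go
import (
	"encoding/json"

	"github.com/libp2p/go-libp2p/core/network"
)

// oneShot is an illustrative libp2p stream-handler skeleton.
func oneShot[Req, Resp any](handle func(Req) Resp) func(network.Stream) {
	return func(s network.Stream) {
		defer s.Close() // single request/response exchange per stream
		var req Req
		if err := json.NewDecoder(s).Decode(&req); err != nil {
			return // malformed request: closing the stream is the error signal
		}
		_ = json.NewEncoder(s).Encode(handle(req))
	}
}
```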
 func (ix *IndexerService) handleNodeGet(s network.Stream) {
 	defer s.Close()
 	logger := oclib.GetLogger()
-	for {
-		var req GetValue
-		if err := json.NewDecoder(s).Decode(&req); err != nil {
-			logger.Err(err)
-		}
-		ix.StreamMU.Lock()
-		if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
-			ix.StreamRecords[common.ProtocolHeartbeat] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
-		}
-		resp := GetResponse{
-			Found:   false,
-			Records: map[string]PeerRecord{},
-		}
-		streams := ix.StreamRecords[common.ProtocolHeartbeat]
-
-		key := ix.genKey(req.Key)
-		// simple lookup by PeerID (or DID)
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		recBytes, err := ix.DHT.SearchValue(ctx, key)
-		if err != nil {
-			logger.Err(err).Msg("Failed to fetch PeerRecord from DHT")
-			cancel()
-		}
-		cancel()
-		for c := range recBytes {
-			var rec PeerRecord
-			if err := json.Unmarshal(c, &rec); err != nil || rec.PeerID != req.PeerID.String() {
-				continue
-			}
-			resp.Found = true
-			resp.Records[rec.PeerID] = rec
-			if srec, ok := streams[req.PeerID]; ok {
-				srec.DID = rec.DID
-				srec.Record = rec
-				srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
-			}
-		}
-		// Not found
-		_ = json.NewEncoder(s).Encode(resp)
-		ix.StreamMU.Unlock()
-		break // response... so quit
-	}
+	var req GetValue
+	if err := json.NewDecoder(s).Decode(&req); err != nil {
+		logger.Err(err)
+		return
+	}
+
+	resp := GetResponse{Found: false, Records: map[string]PeerRecord{}}
+
+	keys := []string{}
+	// Name substring search — scan in-memory connected nodes first, then DHT exact match.
+	if req.Name != "" {
+		if req.Search {
+			for _, did := range ix.LookupNameIndex(strings.ToLower(req.Name)) {
+				keys = append(keys, did)
+			}
+		} else {
+			// 2. DHT exact-name lookup: covers nodes that published but aren't currently connected.
+			nameCtx, nameCancel := context.WithTimeout(context.Background(), 5*time.Second)
+			if ch, err := ix.DHT.SearchValue(nameCtx, ix.genNameKey(req.Name)); err == nil {
+				for did := range ch {
+					keys = append(keys, string(did))
+					break
+				}
+			}
+			nameCancel()
+		}
+	} else if req.PeerID != "" {
+		pidCtx, pidCancel := context.WithTimeout(context.Background(), 5*time.Second)
+		if did, err := ix.DHT.GetValue(pidCtx, ix.genPIDKey(req.PeerID.String())); err == nil {
+			keys = append(keys, string(did))
+		}
+		pidCancel()
+	} else {
+		keys = append(keys, req.Key)
+	}
+
+	// DHT record fetch by DID key (covers exact-name and PeerID paths).
+	if len(keys) > 0 {
+		for _, k := range keys {
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			c, err := ix.DHT.GetValue(ctx, ix.genKey(k))
+			cancel()
+			if err == nil {
+				var rec PeerRecord
+				if json.Unmarshal(c, &rec) == nil {
+					// Filter by PeerID only when one was explicitly specified.
+					if req.PeerID == "" || rec.PeerID == req.PeerID.String() {
+						resp.Records[rec.PeerID] = rec
+					}
+				}
+			} else if req.Name == "" && req.PeerID == "" {
+				logger.Err(err).Msg("Failed to fetch PeerRecord from DHT " + req.Key)
+			}
+		}
+	}
+
+	resp.Found = len(resp.Records) > 0
+	_ = json.NewEncoder(s).Encode(resp)
 }

+// handleGetNatives returns this indexer's configured native addresses,
+// excluding any in the request's Exclude list.
+func (ix *IndexerService) handleGetNatives(s network.Stream) {
+	defer s.Close()
+	logger := oclib.GetLogger()
+
+	var req common.GetIndexerNativesRequest
+	if err := json.NewDecoder(s).Decode(&req); err != nil {
+		logger.Err(err).Msg("indexer get natives: decode")
+		return
+	}
+
+	excludeSet := make(map[string]struct{}, len(req.Exclude))
+	for _, e := range req.Exclude {
+		excludeSet[e] = struct{}{}
+	}
+
+	resp := common.GetIndexerNativesResponse{}
+	for _, addr := range strings.Split(conf.GetConfig().NativeIndexerAddresses, ",") {
+		addr = strings.TrimSpace(addr)
+		if addr == "" {
+			continue
+		}
+		if _, excluded := excludeSet[addr]; !excluded {
+			resp.Natives = append(resp.Natives, addr)
+		}
+	}
+
+	if err := json.NewEncoder(s).Encode(resp); err != nil {
+		logger.Err(err).Msg("indexer get natives: encode response")
+	}
+}

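To make the four lookup modes of `handleNodeGet` concrete, here is an illustrative helper showing the request shapes a client would send (the `somePeerID` parameter and the example DID are hypothetical):

```go
import "github.com/libp2p/go-libp2p/core/peer"

// exampleRequests enumerates the GetValue shapes handleNodeGet distinguishes.
func exampleRequests(somePeerID peer.ID) []GetValue {
	return []GetValue{
		{Name: "weather", Search: true}, // substring scan of the distributed name index
		{Name: "weather-station-1"},     // exact DHT lookup via /name/<name>
		{PeerID: somePeerID},            // resolve via /pid/<peerID>
		{Key: "did:example:abc"},        // direct fetch of /node/<DID>
	}
}
```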
daemons/node/indexer/nameindex.go (new file, 168 lines)
@@ -0,0 +1,168 @@
package indexer

import (
	"context"
	"encoding/json"
	"strings"
	"sync"
	"time"

	"oc-discovery/daemons/node/common"

	oclib "cloud.o-forge.io/core/oc-lib"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pp "github.com/libp2p/go-libp2p/core/peer"
)

// TopicNameIndex is the GossipSub topic shared by regular indexers to exchange
// add/delete events for the distributed name→peerID mapping.
const TopicNameIndex = "oc-name-index"

// nameIndexDedupWindow suppresses re-emission of the same (action, name, peerID)
// tuple within this window, reducing duplicate events when a node is registered
// with multiple indexers simultaneously.
const nameIndexDedupWindow = 30 * time.Second

// NameIndexAction indicates whether a name mapping is being added or removed.
type NameIndexAction string

const (
	NameIndexAdd    NameIndexAction = "add"
	NameIndexDelete NameIndexAction = "delete"
)

// NameIndexEvent is published on TopicNameIndex by each indexer when a node
// registers (add) or is evicted by the GC (delete).
type NameIndexEvent struct {
	Action NameIndexAction `json:"action"`
	Name   string          `json:"name"`
	PeerID string          `json:"peer_id"`
	DID    string          `json:"did"`
}

// nameIndexState holds the local in-memory name index and the sender-side
// deduplication tracker.
type nameIndexState struct {
	// index: name → peerID → DID, built from events received from all indexers.
	index   map[string]map[string]string
	indexMu sync.RWMutex

	// emitted tracks the last emission time for each (action, name, peerID) key
	// to suppress duplicates within nameIndexDedupWindow.
	emitted   map[string]time.Time
	emittedMu sync.Mutex
}

// shouldEmit returns true if the (action, name, peerID) tuple has not been
// emitted within nameIndexDedupWindow, updating the tracker if so.
func (s *nameIndexState) shouldEmit(action NameIndexAction, name, peerID string) bool {
	key := string(action) + ":" + name + ":" + peerID
	s.emittedMu.Lock()
	defer s.emittedMu.Unlock()
	if t, ok := s.emitted[key]; ok && time.Since(t) < nameIndexDedupWindow {
		return false
	}
	s.emitted[key] = time.Now()
	return true
}

// onEvent applies a received NameIndexEvent to the local index.
// "add" inserts/updates the mapping; "delete" removes it.
// Operations are idempotent — duplicate events from multiple indexers are harmless.
func (s *nameIndexState) onEvent(evt NameIndexEvent) {
	if evt.Name == "" || evt.PeerID == "" {
		return
	}
	s.indexMu.Lock()
	defer s.indexMu.Unlock()
	switch evt.Action {
	case NameIndexAdd:
		if s.index[evt.Name] == nil {
			s.index[evt.Name] = map[string]string{}
		}
		s.index[evt.Name][evt.PeerID] = evt.DID
	case NameIndexDelete:
		if s.index[evt.Name] != nil {
			delete(s.index[evt.Name], evt.PeerID)
			if len(s.index[evt.Name]) == 0 {
				delete(s.index, evt.Name)
			}
		}
	}
}

// initNameIndex joins TopicNameIndex and starts consuming events.
// Must be called after ix.PS is ready.
func (ix *IndexerService) initNameIndex(ps *pubsub.PubSub) {
	logger := oclib.GetLogger()
	ix.nameIndex = &nameIndexState{
		index:   map[string]map[string]string{},
		emitted: map[string]time.Time{},
	}

	ps.RegisterTopicValidator(TopicNameIndex, func(_ context.Context, _ pp.ID, _ *pubsub.Message) bool {
		return true
	})
	topic, err := ps.Join(TopicNameIndex)
	if err != nil {
		logger.Err(err).Msg("name index: failed to join topic")
		return
	}
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Lock()
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicNameIndex] = topic
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()

	common.SubscribeEvents(
		ix.LongLivedStreamRecordedService.LongLivedPubSubService,
		context.Background(),
		TopicNameIndex,
		-1,
		func(_ context.Context, evt NameIndexEvent, _ string) {
			ix.nameIndex.onEvent(evt)
		},
	)
}

// publishNameEvent emits a NameIndexEvent on TopicNameIndex, subject to the
// sender-side deduplication window.
func (ix *IndexerService) publishNameEvent(action NameIndexAction, name, peerID, did string) {
	if ix.nameIndex == nil || name == "" || peerID == "" {
		return
	}
	if !ix.nameIndex.shouldEmit(action, name, peerID) {
		return
	}
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RLock()
	topic := ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicNameIndex]
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RUnlock()
	if topic == nil {
		return
	}
	evt := NameIndexEvent{Action: action, Name: name, PeerID: peerID, DID: did}
	b, err := json.Marshal(evt)
	if err != nil {
		return
	}
	_ = topic.Publish(context.Background(), b)
}

// LookupNameIndex searches the distributed name index for peers whose name
// contains needle (case-insensitive). Returns peerID → DID for matched peers.
// Returns nil if the name index is not initialised (e.g. native indexers).
func (ix *IndexerService) LookupNameIndex(needle string) map[string]string {
	if ix.nameIndex == nil {
		return nil
	}
	result := map[string]string{}
	needleLow := strings.ToLower(needle)
	ix.nameIndex.indexMu.RLock()
	defer ix.nameIndex.indexMu.RUnlock()
	for name, peers := range ix.nameIndex.index {
		if strings.Contains(strings.ToLower(name), needleLow) {
			for peerID, did := range peers {
				result[peerID] = did
			}
		}
	}
	return result
}
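To make the event semantics concrete, here is a hedged walk-through of the state machine above: two indexers emit overlapping add events, the duplicate is absorbed idempotently, and a delete removes the mapping (the peer ID is a truncated placeholder):

```go
func exampleNameIndexFlow() {
	s := &nameIndexState{
		index:   map[string]map[string]string{},
		emitted: map[string]time.Time{},
	}

	evt := NameIndexEvent{Action: NameIndexAdd, Name: "weather-station-1", PeerID: "12D3KooW...", DID: "did:example:a"}
	s.onEvent(evt) // first add: mapping created
	s.onEvent(evt) // duplicate from a second indexer: idempotent, no change

	// Sender side: only the first emission within the 30s window goes out.
	fmt.Println(s.shouldEmit(NameIndexAdd, evt.Name, evt.PeerID)) // true
	fmt.Println(s.shouldEmit(NameIndexAdd, evt.Name, evt.PeerID)) // false (deduped)

	s.onEvent(NameIndexEvent{Action: NameIndexDelete, Name: evt.Name, PeerID: evt.PeerID})
	// The name bucket is now empty, so it was removed from the index entirely.
}
```

Note the receive side (`onEvent`) is deliberately not deduplicated: applying the same event twice is harmless, so only the sender pays the bookkeeping cost.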
@@ -4,7 +4,10 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"math/rand"
+	"slices"
+	"strings"
 	"sync"
 	"time"

@@ -12,19 +15,24 @@ import (

 	oclib "cloud.o-forge.io/core/oc-lib"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	pp "github.com/libp2p/go-libp2p/core/peer"
 )

 const (
-	// IndexerTTL is 10% above the recommended 60s heartbeat interval.
-	IndexerTTL = 66 * time.Second
+	// IndexerTTL is the lifetime of a live-indexer cache entry. Set to 50% above
+	// the recommended 60s heartbeat interval so a single delayed renewal does not
+	// evict a healthy indexer from the native's cache.
+	IndexerTTL = 90 * time.Second
 	// offloadInterval is how often the native checks if it can release responsible peers.
 	offloadInterval = 30 * time.Second
 	// dhtRefreshInterval is how often the background goroutine queries the DHT for
 	// known-but-expired indexer entries (written by neighbouring natives).
 	dhtRefreshInterval = 30 * time.Second
+	// maxFallbackPeers caps how many peers the native will accept in self-delegation
+	// mode. Beyond this limit the native refuses to act as a fallback indexer so it
+	// is not overwhelmed during prolonged indexer outages.
+	maxFallbackPeers = 50
 )

 // liveIndexerEntry tracks a registered indexer in the native's in-memory cache and DHT.
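The arithmetic behind the TTL change: with a 60s heartbeat, a 66s TTL left only 6s of slack, so one renewal arriving slightly late already evicted a healthy entry; 90s leaves half an interval of headroom. A tiny check, assuming the recommended 60s interval:

```go
// renewalSlack is the delay a renewal can tolerate before the entry expires.
func renewalSlack(ttl, heartbeat time.Duration) time.Duration {
	return ttl - heartbeat
}

// renewalSlack(66*time.Second, 60*time.Second) == 6s  (old: almost no tolerance)
// renewalSlack(90*time.Second, 60*time.Second) == 30s (new: half an interval of slack)
```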
@@ -43,7 +51,7 @@ type NativeState struct {
 	// knownPeerIDs accumulates all indexer PeerIDs ever seen (local stream or gossip).
 	// Used by refreshIndexersFromDHT to re-hydrate expired entries from the shared DHT,
 	// including entries written by other natives.
-	knownPeerIDs map[string]struct{}
+	knownPeerIDs map[string]string
 	knownMu      sync.RWMutex
 }

@@ -51,7 +59,7 @@ func newNativeState() *NativeState {
 	return &NativeState{
 		liveIndexers:     map[string]*liveIndexerEntry{},
 		responsiblePeers: map[pp.ID]struct{}{},
-		knownPeerIDs:     map[string]struct{}{},
+		knownPeerIDs:     map[string]string{},
 	}
 }

@@ -92,10 +100,12 @@ func (v IndexerRecordValidator) Select(_ string, values [][]byte) (int, error) {
 // Must be called after DHT is initialized.
 func (ix *IndexerService) InitNative() {
 	ix.Native = newNativeState()
-	ix.Host.SetStreamHandler(common.ProtocolIndexerHeartbeat, ix.HandleNodeHeartbeat) // specific heartbeat for Indexer.
+	ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleHeartbeat) // specific heartbeat for Indexer.
 	ix.Host.SetStreamHandler(common.ProtocolNativeSubscription, ix.handleNativeSubscription)
 	ix.Host.SetStreamHandler(common.ProtocolNativeGetIndexers, ix.handleNativeGetIndexers)
 	ix.Host.SetStreamHandler(common.ProtocolNativeConsensus, ix.handleNativeConsensus)
 	ix.Host.SetStreamHandler(common.ProtocolNativeGetPeers, ix.handleNativeGetPeers)
+	ix.Host.SetStreamHandler(common.ProtocolIndexerGetNatives, ix.handleGetNatives)
 	ix.subscribeIndexerRegistry()
 	// Ensure long connections to other configured natives (native-to-native mesh).
 	common.EnsureNativePeers(ix.Host)
@@ -107,8 +117,15 @@ func (ix *IndexerService) InitNative() {
 // registered indexer PeerIDs to one another, enabling cross-native DHT discovery.
 func (ix *IndexerService) subscribeIndexerRegistry() {
 	logger := oclib.GetLogger()
-	ix.PS.RegisterTopicValidator(common.TopicIndexerRegistry, func(_ context.Context, _ pp.ID, _ *pubsub.Message) bool {
-		return true
+	ix.PS.RegisterTopicValidator(common.TopicIndexerRegistry, func(_ context.Context, _ pp.ID, msg *pubsub.Message) bool {
+		// Reject empty or syntactically invalid multiaddrs before they reach the
+		// message loop. A compromised native could otherwise gossip arbitrary data.
+		addr := string(msg.Data)
+		if addr == "" {
+			return false
+		}
+		_, err := pp.AddrInfoFromString(addr)
+		return err == nil
 	})
 	topic, err := ix.PS.Join(common.TopicIndexerRegistry)
 	if err != nil {
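The validator leans entirely on `peer.AddrInfoFromString`, which only accepts a multiaddr carrying a `/p2p/<peer-id>` component; this is also why the gossip payload switched from bare PeerIDs to full addresses. A quick illustration of what now passes the gossip boundary (IDs truncated for readability):

```go
// validRegistryPayload mirrors the topic validator's acceptance rule.
func validRegistryPayload(data []byte) bool {
	addr := string(data)
	if addr == "" {
		return false
	}
	_, err := pp.AddrInfoFromString(addr)
	return err == nil
}

// validRegistryPayload([]byte("/ip4/10.0.0.5/tcp/4001/p2p/12D3KooW...")) → true
// validRegistryPayload([]byte("12D3KooW..."))                            → false (bare PeerID, old format)
// validRegistryPayload(nil)                                              → false
```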
@@ -130,29 +147,38 @@ func (ix *IndexerService) subscribeIndexerRegistry() {
 			if err != nil {
 				return
 			}
-			peerID := string(msg.Data)
-			if peerID == "" {
+			addr := string(msg.Data)
+			if addr == "" {
 				continue
 			}
-			// A neighbouring native registered this PeerID; add to known set for DHT refresh.
-			ix.Native.knownMu.Lock()
-			ix.Native.knownPeerIDs[peerID] = struct{}{}
-			ix.Native.knownMu.Unlock()
+			if peer, err := pp.AddrInfoFromString(addr); err == nil {
+				ix.Native.knownMu.Lock()
+				ix.Native.knownPeerIDs[peer.ID.String()] = addr
+				ix.Native.knownMu.Unlock()
+			}
 		}
 	}()
 }

-// handleNativeSubscription stores an indexer's alive registration in the DHT cache.
+// handleNativeSubscription stores an indexer's alive registration in the local cache
+// immediately, then persists it to the DHT asynchronously.
 // The stream is temporary: indexer sends one IndexerRegistration and closes.
 func (ix *IndexerService) handleNativeSubscription(s network.Stream) {
 	defer s.Close()
 	logger := oclib.GetLogger()

+	logger.Info().Msg("Subscription")
+
 	var reg common.IndexerRegistration
 	if err := json.NewDecoder(s).Decode(&reg); err != nil {
 		logger.Err(err).Msg("native subscription: decode")
 		return
 	}
+	logger.Info().Msg("Subscription " + reg.Addr)

 	if reg.Addr == "" {
 		logger.Error().Msg("native subscription: missing addr")
 		return
@@ -166,30 +192,23 @@ func (ix *IndexerService) handleNativeSubscription(s network.Stream) {
 		reg.PeerID = ad.ID.String()
 	}

-	expiry := time.Now().UTC().Add(IndexerTTL)
+	// Build entry with a fresh TTL — must happen before the cache write so the TTL
+	// window is not consumed by DHT retries.
 	entry := &liveIndexerEntry{
 		PeerID:    reg.PeerID,
 		Addr:      reg.Addr,
-		ExpiresAt: expiry,
+		ExpiresAt: time.Now().UTC().Add(IndexerTTL),
 	}

-	// Persist in DHT with 66s TTL.
-	key := ix.genIndexerKey(reg.PeerID)
-	if data, err := json.Marshal(entry); err == nil {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-		if err := ix.DHT.PutValue(ctx, key, data); err != nil {
-			logger.Err(err).Msg("native subscription: DHT put")
-		}
-		cancel()
-	}
-
-	// Update local cache and known set.
+	// Update local cache and known set immediately so concurrent GetIndexers calls
+	// can already see this indexer without waiting for the DHT write to complete.
 	ix.Native.liveIndexersMu.Lock()
+	_, isRenewal := ix.Native.liveIndexers[reg.PeerID]
 	ix.Native.liveIndexers[reg.PeerID] = entry
 	ix.Native.liveIndexersMu.Unlock()

 	ix.Native.knownMu.Lock()
-	ix.Native.knownPeerIDs[reg.PeerID] = struct{}{}
+	ix.Native.knownPeerIDs[reg.PeerID] = reg.Addr
 	ix.Native.knownMu.Unlock()

 	// Gossip PeerID to neighbouring natives so they discover it via DHT.
@@ -197,16 +216,46 @@ func (ix *IndexerService) handleNativeSubscription(s network.Stream) {
 	topic := ix.LongLivedPubSubs[common.TopicIndexerRegistry]
 	ix.PubsubMu.RUnlock()
 	if topic != nil {
-		if err := topic.Publish(context.Background(), []byte(reg.PeerID)); err != nil {
+		if err := topic.Publish(context.Background(), []byte(reg.Addr)); err != nil {
 			logger.Err(err).Msg("native subscription: registry gossip publish")
 		}
 	}

-	logger.Info().Str("peer", reg.PeerID).Msg("native: indexer registered")
+	if isRenewal {
+		logger.Debug().Str("peer", reg.PeerID).Msg("native: indexer TTL renewed : " + fmt.Sprintf("%v", len(ix.Native.liveIndexers)))
+	} else {
+		logger.Info().Str("peer", reg.PeerID).Msg("native: indexer registered : " + fmt.Sprintf("%v", len(ix.Native.liveIndexers)))
+	}
+
+	// Persist in DHT asynchronously — retries must not block the handler or consume
+	// the local cache TTL.
+	key := ix.genIndexerKey(reg.PeerID)
+	data, err := json.Marshal(entry)
+	if err != nil {
+		logger.Err(err).Msg("native subscription: marshal entry")
+		return
+	}
+	go func() {
+		for {
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			if err := ix.DHT.PutValue(ctx, key, data); err != nil {
+				cancel()
+				logger.Err(err).Msg("native subscription: DHT put " + key)
+				if strings.Contains(err.Error(), "failed to find any peer in table") {
+					time.Sleep(10 * time.Second)
+					continue
+				}
+				return
+			}
+			cancel()
+			return
+		}
+	}()
 }

 // handleNativeGetIndexers returns this native's own list of reachable indexers.
 // If none are available, it self-delegates (becomes the fallback indexer for the caller).
+// Self-delegation (native acting as temporary fallback indexer) is only permitted
+// for nodes — never for peers that are themselves registered indexers in knownPeerIDs.
 // The consensus across natives is the responsibility of the requesting node/indexer.
 func (ix *IndexerService) handleNativeGetIndexers(s network.Stream) {
 	defer s.Close()
@@ -220,14 +269,20 @@ func (ix *IndexerService) handleNativeGetIndexers(s network.Stream) {
 	if req.Count <= 0 {
 		req.Count = 3
 	}

-	reachable := ix.reachableLiveIndexers()
+	callerPeerID := s.Conn().RemotePeer().String()
+	reachable := ix.reachableLiveIndexers(req.Count, callerPeerID)
 	var resp common.GetIndexersResponse

 	if len(reachable) == 0 {
-		// No indexers known: become temporary fallback for this caller.
-		ix.selfDelegate(s.Conn().RemotePeer(), &resp)
-		logger.Info().Str("peer", s.Conn().RemotePeer().String()).Msg("native: no indexers, acting as fallback")
+		// No live indexers reachable — try to self-delegate.
+		if ix.selfDelegate(s.Conn().RemotePeer(), &resp) {
+			logger.Info().Str("peer", callerPeerID).Msg("native: no indexers, acting as fallback for node")
+		} else {
+			// Fallback pool saturated: return empty so the caller retries another
+			// native instead of piling more load onto this one.
+			logger.Warn().Str("peer", callerPeerID).Int("pool", maxFallbackPeers).Msg(
+				"native: fallback pool saturated, refusing self-delegation")
+		}
 	} else {
 		rand.Shuffle(len(reachable), func(i, j int) { reachable[i], reachable[j] = reachable[j], reachable[i] })
 		if req.Count > len(reachable) {
@@ -255,7 +310,7 @@ func (ix *IndexerService) handleNativeConsensus(s network.Stream) {
 		return
 	}

-	myList := ix.reachableLiveIndexers()
+	myList := ix.reachableLiveIndexers(-1, s.Conn().RemotePeer().String())
 	mySet := make(map[string]struct{}, len(myList))
 	for _, addr := range myList {
 		mySet[addr] = struct{}{}
@@ -285,31 +340,56 @@ func (ix *IndexerService) handleNativeConsensus(s network.Stream) {
 }

 // selfDelegate marks the caller as a responsible peer and exposes this native's own
-// address as its temporary indexer.
-func (ix *IndexerService) selfDelegate(remotePeer pp.ID, resp *common.GetIndexersResponse) {
+// address as its temporary indexer. Returns false when the fallback pool is saturated
+// (maxFallbackPeers reached) — the caller must return an empty response so the node
+// retries later instead of pinning indefinitely to an overloaded native.
+func (ix *IndexerService) selfDelegate(remotePeer pp.ID, resp *common.GetIndexersResponse) bool {
 	ix.Native.responsibleMu.Lock()
-	ix.Native.responsiblePeers[remotePeer] = struct{}{}
-	ix.Native.responsibleMu.Unlock()
-	resp.IsSelfFallback = true
-	for _, a := range ix.Host.Addrs() {
-		resp.Indexers = []string{a.String() + "/p2p/" + ix.Host.ID().String()}
-		break
-	}
+	defer ix.Native.responsibleMu.Unlock()
+	if len(ix.Native.responsiblePeers) >= maxFallbackPeers {
+		return false
+	}
+	ix.Native.responsiblePeers[remotePeer] = struct{}{}
+	resp.IsSelfFallback = true
+	resp.Indexers = []string{ix.Host.Addrs()[len(ix.Host.Addrs())-1].String() + "/p2p/" + ix.Host.ID().String()}
+	return true
 }

 // reachableLiveIndexers returns the multiaddrs of non-expired, pingable indexers
 // from the local cache (kept fresh by refreshIndexersFromDHT in background).
-func (ix *IndexerService) reachableLiveIndexers() []string {
+func (ix *IndexerService) reachableLiveIndexers(count int, from ...string) []string {
 	ix.Native.liveIndexersMu.RLock()
 	now := time.Now().UTC()
 	candidates := []*liveIndexerEntry{}
 	for _, e := range ix.Native.liveIndexers {
-		if e.ExpiresAt.After(now) {
+		fmt.Println("liveIndexers", slices.Contains(from, e.PeerID), from, e.PeerID)
+		if e.ExpiresAt.After(now) && !slices.Contains(from, e.PeerID) {
 			candidates = append(candidates, e)
 		}
 	}
 	ix.Native.liveIndexersMu.RUnlock()

+	fmt.Println("midway...", candidates, from, ix.Native.knownPeerIDs)
+
+	if (count > 0 && len(candidates) < count) || count < 0 {
+		ix.Native.knownMu.RLock()
+		for k, v := range ix.Native.knownPeerIDs {
+			// Include peers whose liveIndexers entry is absent OR expired.
+			// A non-nil but expired entry means the peer was once known but
+			// has since timed out — PeerIsAlive below will decide if it's back.
+			fmt.Println("knownPeerIDs", slices.Contains(from, k), from, k)
+			if !slices.Contains(from, k) {
+				candidates = append(candidates, &liveIndexerEntry{
+					PeerID: k,
+					Addr:   v,
+				})
+			}
+		}
+		ix.Native.knownMu.RUnlock()
+	}
+
+	fmt.Println("midway...1", candidates)
+
 	reachable := []string{}
 	for _, e := range candidates {
 		ad, err := pp.AddrInfoFromString(e.Addr)
@@ -371,6 +451,12 @@ func (ix *IndexerService) refreshIndexersFromDHT() {
 			ix.Native.liveIndexers[best.PeerID] = best
 			ix.Native.liveIndexersMu.Unlock()
 			logger.Info().Str("peer", best.PeerID).Msg("native: refreshed indexer from DHT")
+		} else {
+			// DHT has no fresh entry — peer is gone, prune from known set.
+			ix.Native.knownMu.Lock()
+			delete(ix.Native.knownPeerIDs, pid)
+			ix.Native.knownMu.Unlock()
+			logger.Info().Str("peer", pid).Msg("native: pruned stale peer from knownPeerIDs")
 		}
 	}
 }
@@ -387,30 +473,107 @@ func (ix *IndexerService) runOffloadLoop() {
 	defer t.Stop()
 	logger := oclib.GetLogger()
 	for range t.C {
+		fmt.Println("runOffloadLoop", ix.Native.responsiblePeers)
 		ix.Native.responsibleMu.RLock()
 		count := len(ix.Native.responsiblePeers)
 		ix.Native.responsibleMu.RUnlock()
 		if count == 0 {
 			continue
 		}
-		if len(ix.reachableLiveIndexers()) > 0 {
-			ix.Native.responsibleMu.Lock()
-			ix.Native.responsiblePeers = map[pp.ID]struct{}{}
-			ix.Native.responsibleMu.Unlock()
+		ix.Native.responsibleMu.RLock()
+		peerIDS := []string{}
+		for p := range ix.Native.responsiblePeers {
+			peerIDS = append(peerIDS, p.String())
+		}
+		fmt.Println("COUNT --> ", count, len(ix.reachableLiveIndexers(-1, peerIDS...)))
+		ix.Native.responsibleMu.RUnlock()
+		if len(ix.reachableLiveIndexers(-1, peerIDS...)) > 0 {
+			ix.Native.responsibleMu.RLock()
+			released := ix.Native.responsiblePeers
+			ix.Native.responsibleMu.RUnlock()
+
+			// Reset (not Close) heartbeat streams of released peers.
+			// Close() only half-closes the native's write direction — the peer's write
+			// direction stays open and sendHeartbeat never sees an error.
+			// Reset() abruptly terminates both directions, making the peer's next
+			// json.Encode return an error which triggers replenishIndexersFromNative.
+			ix.StreamMU.Lock()
+			if streams := ix.StreamRecords[common.ProtocolHeartbeat]; streams != nil {
+				for pid := range released {
+					if rec, ok := streams[pid]; ok {
+						if rec.HeartbeatStream != nil && rec.HeartbeatStream.Stream != nil {
+							rec.HeartbeatStream.Stream.Reset()
+						}
+						ix.Native.responsibleMu.Lock()
+						delete(ix.Native.responsiblePeers, pid)
+						ix.Native.responsibleMu.Unlock()
+
+						delete(streams, pid)
+						logger.Info().Str("peer", pid.String()).Str("proto", string(common.ProtocolHeartbeat)).Msg(
+							"native: offload — stream reset, peer will reconnect to real indexer")
+					} else {
+						// No recorded heartbeat stream for this peer: either it never
+						// passed the score check (new peer, uptime=0 → score<75) or the
+						// stream was GC'd. We cannot send a Reset signal, so close the
+						// whole connection instead — this makes the peer's sendHeartbeat
+						// return an error, which triggers replenishIndexersFromNative and
+						// migrates it to a real indexer.
+						ix.Native.responsibleMu.Lock()
+						delete(ix.Native.responsiblePeers, pid)
+						ix.Native.responsibleMu.Unlock()
+						go ix.Host.Network().ClosePeer(pid)
+						logger.Info().Str("peer", pid.String()).Msg(
+							"native: offload — no heartbeat stream, closing connection so peer re-requests real indexers")
+					}
+				}
+			}
+			ix.StreamMU.Unlock()

 			logger.Info().Int("released", count).Msg("native: offloaded responsible peers to real indexers")
 		}
 	}
 }

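The Reset-versus-Close distinction above is what makes the offload visible to the remote side at all, per the commit's own comments: a half-close leaves the peer's write direction open, while a reset surfaces an error on its very next send. A minimal sketch of the two behaviours on a libp2p `network.Stream` (hypothetical helpers, not project code):

```go
// signalError tears the stream down so the remote's next read/write fails fast:
// Reset aborts both directions, and the peer's pending json.Decode/Encode
// returns a stream-reset error it can react to.
func signalError(s network.Stream) {
	_ = s.Reset()
}

// finishExchange half-closes only our write side; per the comment in the diff,
// the remote can keep writing indefinitely and may never notice.
func finishExchange(s network.Stream) {
	_ = s.Close()
}
```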
 // handleNativeGetPeers returns a random selection of this native's known native
 // contacts, excluding any in the request's Exclude list.
 func (ix *IndexerService) handleNativeGetPeers(s network.Stream) {
 	defer s.Close()
 	logger := oclib.GetLogger()

 	var req common.GetNativePeersRequest
 	if err := json.NewDecoder(s).Decode(&req); err != nil {
 		logger.Err(err).Msg("native get peers: decode")
 		return
 	}
 	if req.Count <= 0 {
 		req.Count = 1
 	}

 	excludeSet := make(map[string]struct{}, len(req.Exclude))
 	for _, e := range req.Exclude {
 		excludeSet[e] = struct{}{}
 	}

 	common.StreamNativeMu.RLock()
 	candidates := make([]string, 0, len(common.StaticNatives))
 	for addr := range common.StaticNatives {
 		if _, excluded := excludeSet[addr]; !excluded {
 			candidates = append(candidates, addr)
 		}
 	}
 	common.StreamNativeMu.RUnlock()

 	rand.Shuffle(len(candidates), func(i, j int) { candidates[i], candidates[j] = candidates[j], candidates[i] })
 	if req.Count > len(candidates) {
 		req.Count = len(candidates)
 	}

 	resp := common.GetNativePeersResponse{Peers: candidates[:req.Count]}
 	if err := json.NewEncoder(s).Encode(resp); err != nil {
 		logger.Err(err).Msg("native get peers: encode response")
 	}
 }

 // StartNativeRegistration starts a goroutine that periodically registers this
 // indexer with all configured native indexers (every RecommendedHeartbeatInterval).
 func StartNativeRegistration(h host.Host, nativeAddressesStr string) {
 	go func() {
 		common.RegisterWithNative(h, nativeAddressesStr)
 		t := time.NewTicker(common.RecommendedHeartbeatInterval)
 		defer t.Stop()
 		for range t.C {
 			common.RegisterWithNative(h, nativeAddressesStr)
 		}
 	}()
 }

@@ -11,6 +11,7 @@ import (
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	record "github.com/libp2p/go-libp2p-record"
 	"github.com/libp2p/go-libp2p/core/host"
+	pp "github.com/libp2p/go-libp2p/core/peer"
 )

 // IndexerService manages the indexer node's state: stream records, DHT, pubsub.
@@ -22,6 +23,7 @@ type IndexerService struct {
 	mu        sync.RWMutex
 	IsNative  bool
 	Native    *NativeState // non-nil when IsNative == true
+	nameIndex *nameIndexState
 }

 // NewIndexerService creates an IndexerService.
@@ -43,22 +45,34 @@ func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int, isNative boo
 	}
 	ix.PS = ps

-	if ix.isStrictIndexer {
+	if ix.isStrictIndexer && !isNative {
 		logger.Info().Msg("connect to indexers as strict indexer...")
-		common.ConnectToIndexers(h, 0, 5, ix.Host.ID())
+		common.ConnectToIndexers(h, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, ix.Host.ID())
 		logger.Info().Msg("subscribe to decentralized search flow as strict indexer...")
-		ix.SubscribeToSearch(ix.PS, nil)
+		go ix.SubscribeToSearch(ix.PS, nil)
 	}

+	if !isNative {
+		logger.Info().Msg("init distributed name index...")
+		ix.initNameIndex(ps)
+		ix.LongLivedStreamRecordedService.AfterDelete = func(pid pp.ID, name, did string) {
+			ix.publishNameEvent(NameIndexDelete, name, pid.String(), did)
+		}
+	}
+
 	if ix.DHT, err = dht.New(
 		context.Background(),
 		ix.Host,
 		dht.Mode(dht.ModeServer),
 		dht.ProtocolPrefix("oc"), // 🔥 private network
 		dht.Validator(record.NamespacedValidator{
 			"node":    PeerRecordValidator{},
 			"indexer": IndexerRecordValidator{}, // for native indexer registry
+			"name":    DefaultValidator{},
+			"pid":     DefaultValidator{},
 		}),
 	); err != nil {
 		logger.Info().Msg(err.Error())
 		return nil
 	}

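`record.NamespacedValidator` dispatches on the first path segment of the key, so `/name/...` and `/pid/...` records are accepted by the permissive `DefaultValidator` (defined further down in this commit) while `/node/...` still goes through full decode and signature checking. A hedged illustration of the dispatch (the byte-slice parameter is a placeholder for a marshaled, signed record):

```go
func validateExamples(signedRecordBytes []byte) error {
	v := record.NamespacedValidator{
		"node": PeerRecordValidator{},
		"name": DefaultValidator{},
	}
	// "/name/..." → namespace "name" → DefaultValidator: accepted unconditionally.
	if err := v.Validate("/name/weather-station-1", []byte("did:example:a")); err != nil {
		return err
	}
	// "/node/..." → namespace "node" → PeerRecordValidator: signature verified.
	return v.Validate("/node/did:example:a", signedRecordBytes)
}
```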
@@ -67,11 +81,10 @@ func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int, isNative boo
 		ix.InitNative()
 	} else {
 		ix.initNodeHandler()
-	}
-
-	// Register with configured natives so this indexer appears in their cache
-	if nativeAddrs := conf.GetConfig().NativeIndexerAddresses; nativeAddrs != "" {
-		StartNativeRegistration(ix.Host, nativeAddrs)
+		// Register with configured natives so this indexer appears in their cache
+		if nativeAddrs := conf.GetConfig().NativeIndexerAddresses; nativeAddrs != "" {
+			common.StartNativeRegistration(ix.Host, nativeAddrs)
+		}
 	}
 	return ix
 }
@@ -79,6 +92,9 @@ func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int, isNative boo
 func (ix *IndexerService) Close() {
 	ix.DHT.Close()
 	ix.PS.UnregisterTopicValidator(common.TopicPubSubSearch)
+	if ix.nameIndex != nil {
+		ix.PS.UnregisterTopicValidator(TopicNameIndex)
+	}
 	for _, s := range ix.StreamRecords {
 		for _, ss := range s {
 			ss.HeartbeatStream.Stream.Close()
@@ -6,6 +6,16 @@ import (
 	"time"
 )

+type DefaultValidator struct{}
+
+func (v DefaultValidator) Validate(key string, value []byte) error {
+	return nil
+}
+
+func (v DefaultValidator) Select(key string, values [][]byte) (int, error) {
+	return 0, nil
+}
+
 type PeerRecordValidator struct{}

 func (v PeerRecordValidator) Validate(key string, value []byte) error {
@@ -26,14 +36,7 @@ func (v PeerRecordValidator) Validate(key string, value []byte) error {
 	}

 	// Signature verification
-	rec2 := PeerRecord{
-		Name:   rec.Name,
-		DID:    rec.DID,
-		PubKey: rec.PubKey,
-		PeerID: rec.PeerID,
-	}
-
-	if _, err := rec2.Verify(); err != nil {
+	if _, err := rec.Verify(); err != nil {
 		return errors.New("invalid signature")
 	}