62 Commits

Author  SHA1  Message  Date
mr  fa341494d9  change dev opps + ci  2026-04-10 15:26:49 +02:00
mr  29b26d366e  oc-discovery -> conf  2026-04-08 10:04:41 +02:00
mr  46dee0a6cb  To UTC  2026-03-19 08:26:46 +01:00
mr  2108df5283  NATS rationalized  2026-03-18 16:45:33 +01:00
mr  380de4c80b  rationalized NATS  2026-03-18 16:45:11 +01:00
mr  83285c2ab5  Debug  2026-03-17 11:57:22 +01:00
mr  edcfecd24b  search  2026-03-12 15:57:41 +01:00
mr  80117ee36f  add groups  2026-03-12 08:57:06 +01:00
mr  780a0c530d  Change  2026-03-11 19:29:39 +01:00
mr  d0af40f4c7  Simple Architecture  2026-03-11 16:28:15 +01:00
mr  83cef6e6f6  saved  2026-03-09 14:57:41 +01:00
mr  3751ec554d  Full Flow : Catalog + Peer  2026-03-05 15:22:02 +01:00
mr  ef3d998ead  demo test + Peer  2026-03-03 16:38:24 +01:00
mr  79aa3cc2b3  adjust  2026-02-26 09:14:34 +01:00
mr  779e36aaef  Pass + Doc  2026-02-24 14:31:37 +01:00
mr  572da29fd4  check up if peer is sourced.  2026-02-20 15:01:01 +01:00
mr  3eae5791a1  Native Indexer Mode  2026-02-20 12:42:18 +01:00
mr  88fd05066c  update lightest peer and nats behaviors  2026-02-18 14:32:44 +01:00
mr  0250c3b339  Peer Discovery -> DHT // no more pubsub state  2026-02-18 13:29:50 +01:00
mr  6a5ffb9a92  Indexer Quality Score TrustLess  2026-02-17 13:11:22 +01:00
mr  fa914958b6  Keep Peer Caching + Resource Verification.  2026-02-09 13:28:00 +01:00
mr  1c0b2b4312  better tagging  2026-02-09 09:45:41 +01:00
mr  631e2846fe  remove apk  2026-02-09 08:55:50 +01:00
mr  d985d8339a  Change of state Conn Management  2026-02-05 16:17:33 +01:00
mr  ea14ad3933  Closure On change of state  2026-02-05 16:17:14 +01:00
mr  2e31df89c2  oc-discovery + auto create peer  2026-02-05 15:47:29 +01:00
mr  425cbdfe7d  stream address  2026-02-05 15:36:22 +01:00
mr  8ee5b84e21  publish-registry  2026-02-05 12:14:02 +01:00
mr  552bb17e2b  Connectivity ok  2026-02-05 11:23:11 +01:00
mr  88e29073a2  dockerfile default  2026-02-05 09:31:51 +01:00
mr  b429ee9816  base demo files  2026-02-05 08:57:00 +01:00
mr  c716225283  base demo  2026-02-05 08:56:55 +01:00
mr  3bc01c3a04  Debug Spread Get Peer  2026-02-04 11:35:19 +01:00
mr  1ebbb54dd1  compact conf  2026-02-03 16:21:29 +01:00
mr  c958d106b7  compact conf  2026-02-03 16:21:22 +01:00
mr  5442d625c6  Bad Init  2026-02-03 15:47:28 +01:00
mr  60ed7048cd  Missing Config  2026-02-03 15:37:33 +01:00
mr  7b68a608dd  standardize Deployment  2026-02-03 15:31:06 +01:00
mr  1c2ea9ca96  Set up -> Stream is Working  2026-02-03 15:25:15 +01:00
mr  0ff21c0818  discovery  2026-02-03 08:47:22 +01:00
mr  6ca762abbf  Add logic service NATS Peer x Cache + Retrieve a lost peer partner.  2026-02-02 12:43:43 +01:00
mr  0ffe98045e  No Blacklisted + Hoping and diffused research  2026-02-02 12:14:01 +01:00
mr  c3352499fa  First Starting debug  2026-02-02 09:05:58 +01:00
mr  562d86125e  FULL OC-DISCOVERY LOGIC  2026-01-30 16:57:36 +01:00
mr  d50e5d56f7  Update NATS  2026-01-28 17:31:34 +01:00
mr  38cd862947  Daemon Search Then  2026-01-28 17:22:42 +01:00
mr  7fd258dc9d  Daemons Search  2026-01-28 17:22:29 +01:00
ycc  0ed2fc0f15  test  2025-11-12 10:55:21 +01:00  [CI: push build passing]
yc  ea5320d4be  pack without swagger, temporary  2025-11-12 10:36:26 +01:00  [CI: push build failing]
yc  25184deecb  fix sed  2025-11-12 10:16:36 +01:00  [CI: push build failing]
yc  27379cb392  Multiarch fix  2025-11-12 10:11:09 +01:00  [CI: push build failing]
yc  66d228f143  basic test update  2025-11-12 10:03:15 +01:00  [CI: push build failing]
yc  f1444c8046  Golang version removal  2025-11-12 09:57:24 +01:00  [CI: push build failing]
yc  0aef66207f  Multiarch fix  2025-11-12 09:55:04 +01:00  [CI: push build failing]
yc  0d9d7c9931  multiple arch CI build  2025-11-12 09:51:36 +01:00  [CI: push build errored]
ycc  90c24a9e05  Dockerfile upgrade for multi target  2025-11-12 09:14:13 +01:00  [CI: push build failing]
fdeb933e26  Update .drone.yml  2025-10-21 16:49:38 +02:00  [CI: push build killed, drone build passing]
5aae779e74  Update docker-compose.yml  2025-10-21 15:51:16 +02:00  [CI: push and drone builds failing]
3731d48f81  Update .drone.yml  2025-10-21 14:33:07 +02:00  [CI: push build errored, drone build failing]
pb  4915b3e4cf  Update go.mod and go.sum files  2025-08-12 16:22:56 +02:00  [CI: drone build errored]
plm  50758a7efe  k8s integration  2025-01-15 16:32:12 +01:00
ycc  2f2a3bf250  fix port, models.GetConfig assignation to be cheked  2024-10-16 08:42:51 +02:00
98 changed files with 11352 additions and 1078 deletions


@@ -3,18 +3,37 @@ kind: pipeline
name: unit
steps:
- name: build
image: golang
commands:
- go test
- go build
# -------------------- tests (host arch only) --------------------
- name: test
image: golang:alpine
pull: if-not-exists
commands:
- go test ./...
# -------------------- build + push multi-arch image --------------------
- name: publish
image: plugins/docker:latest
settings:
username:
from_secret: docker-user
password:
from_secret: docker-pw
#repo:
# from_secret: docker-repo
repo: opencloudregistry/oc-discovery
# build context & dockerfile
context: .
dockerfile: Dockerfile
# enable buildx / multi-arch
buildx: true
platforms:
- linux/amd64
- linux/arm64
- linux/arm/v7
# tags to push (all as a single multi-arch manifest)
tags:
- latest
- name: publish
image: plugins/docker
settings:
username:
from_secret: docker-user
password:
from_secret: docker-pw
repo:
from_secret: docker-repo


@@ -1,31 +1,62 @@
FROM golang:alpine as builder
# ========================
# Global build arguments
# ========================
ARG CONF_NUM
WORKDIR /app
COPY . .
RUN apk add git
RUN go get github.com/beego/bee/v2 && go install github.com/beego/bee/v2@master
RUN timeout 15 bee run -gendoc=true -downdoc=true -runmode=dev || :
RUN sed -i 's/http:\/\/127.0.0.1:8080\/swagger\/swagger.json/swagger.json/g' swagger/index.html
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" .
RUN ls /app
FROM scratch
# ========================
# Dependencies stage
# ========================
FROM golang:alpine AS deps
ARG CONF_NUM
WORKDIR /app
COPY --from=builder /app/oc-discovery /usr/bin/
COPY --from=builder /app/swagger /app/swagger
COPY peers.json /app/
COPY identity.json /app/
COPY docker_discovery.json /etc/oc/discovery.json
COPY go.mod go.sum ./
RUN sed -i '/replace/d' go.mod
RUN go mod download
EXPOSE 8080
# ========================
# Builder stage
# ========================
FROM golang:alpine AS builder
ARG CONF_NUM
ENTRYPOINT ["oc-discovery"]
WORKDIR /oc-discovery
# Reuse Go cache
COPY --from=deps /go/pkg /go/pkg
COPY --from=deps /app/go.mod /app/go.sum ./
# App sources
COPY . .
# Clean replace directives again (safety)
RUN sed -i '/replace/d' go.mod
# Build package
RUN go install github.com/beego/bee/v2@latest
RUN bee pack
# Extract bundle
RUN mkdir -p /app/extracted \
&& tar -zxvf oc-discovery.tar.gz -C /app/extracted
# ========================
# Runtime stage
# ========================
FROM golang:alpine
ARG CONF_NUM
WORKDIR /app
RUN mkdir ./pem
COPY --from=builder /app/extracted/pem/private${CONF_NUM:-1}.pem ./pem/private.pem
COPY --from=builder /app/extracted/psk ./psk
COPY --from=builder /app/extracted/pem/public${CONF_NUM:-1}.pem ./pem/public.pem
COPY --from=builder /app/extracted/oc-discovery /usr/bin/oc-discovery
COPY --from=builder /app/extracted/docker_discovery${CONF_NUM:-1}.json /etc/oc/discovery.json
EXPOSE 400${CONF_NUM:-1}
ENTRYPOINT ["oc-discovery"]

Makefile (new file, 35 lines)

@@ -0,0 +1,35 @@
.DEFAULT_GOAL := all
build: clean
bee pack
run:
./oc-discovery
clean:
rm -rf oc-discovery
docker:
DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=1 -t oc-discovery_1 -f Dockerfile .
docker tag oc-discovery_1 opencloudregistry/oc-discovery_1:latest
DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=2 -t oc-discovery_2 -f Dockerfile .
docker tag oc-discovery_2 opencloudregistry/oc-discovery_2:latest
DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=3 -t oc-discovery_3 -f Dockerfile .
docker tag oc-discovery_3 opencloudregistry/oc-discovery_3:latest
DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=4 -t oc-discovery_4 -f Dockerfile .
docker tag oc-discovery_4 opencloudregistry/oc-discovery_4:latest
publish-kind:
kind load docker-image opencloudregistry/oc-discovery:latest --name opencloud
publish-registry:
docker push opencloudregistry/oc-discovery_1:latest
docker push opencloudregistry/oc-discovery_2:latest
docker push opencloudregistry/oc-discovery_3:latest
docker push opencloudregistry/oc-discovery_4:latest
all: docker publish-kind
ci: docker publish-registry
.PHONY: build run clean docker publish-kind publish-registry


@@ -14,3 +14,34 @@ If default Swagger page is displayed instead of your api, change url in swagger
url: "swagger.json"
sequenceDiagram
autonumber
participant Dev as Developer / Owner
participant IPFS as IPFS network
participant CID as CID (file hash)
participant Argo as Argo orchestrator
participant CU as Compute Unit
participant MinIO as MinIO storage
%% 1. Add the file to IPFS
Dev->>IPFS: Encrypts and adds the file (algo/dataset)
IPFS-->>CID: Generates a unique CID (file hash)
Dev->>Dev: Stores the CID for future reference
%% 2. Orchestration by Argo
Argo->>CID: Requests the CID for a job
CID-->>Argo: Provides the file (verified via hash)
%% 3. Execution on the Compute Unit
Argo->>CU: Deploys the job with the retrieved file
CU->>CU: Verifies the hash (CID) for integrity
CU->>CU: Runs the algo on the dataset
%% 4. Result storage
CU->>MinIO: Stores output (results) or logs
CU->>IPFS: Optional: adds output to IPFS (new CID)
%% 5. Verification and traceability
Dev->>IPFS: Verifies the output CID if needed
CU->>Dev: Provides the result and hash log
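
As a rough illustration of step 3 above (the Compute Unit re-hashing the retrieved file before running the job), the following sketch is not part of this changeset: it uses a plain stdlib SHA-256 digest in place of a real IPFS CID/multihash, but the compare-before-execute logic is the same.

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// verifyPayload reports whether the fetched bytes hash to the expected digest.
// A real CID check would decode the multihash instead of using raw SHA-256.
func verifyPayload(fetched []byte, expected [32]byte) bool {
    sum := sha256.Sum256(fetched)
    return bytes.Equal(sum[:], expected[:])
}

func main() {
    original := []byte("encrypted algo/dataset bundle")
    cidLike := sha256.Sum256(original) // stands in for the CID stored by the Dev

    fmt.Println(verifyPayload(original, cidLike))           // true: input intact, run the job
    fmt.Println(verifyPayload([]byte("tampered"), cidLike)) // false: reject the job input
}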

conf/config.go (new file, 51 lines)

@@ -0,0 +1,51 @@
package conf
import "sync"
type Config struct {
Name string
Hostname string
PSKPath string
PublicKeyPath string
PrivateKeyPath string
NodeEndpointPort int64
IndexerAddresses string
PeerIDS string // TO REMOVE
NodeMode string
MinIndexer int
MaxIndexer int
// SearchTimeout is the max duration without a new result before the
// distributed peer search stream is closed. Default: 5s.
SearchTimeout int // seconds; 0 → use default (5)
// Indexer connection burst guard: max new connections accepted within the window.
// 0 → use defaults (20 new peers per 30s).
MaxConnPerWindow int // default 20
ConnWindowSecs int // default 30
// Per-node behavioral limits (sliding 60s window). 0 → use built-in defaults.
MaxHBPerMinute int // default 5
MaxPublishPerMinute int // default 10
MaxGetPerMinute int // default 50
// LocationGranularity controls how precisely this node discloses its position.
// 0 = opt-out (no location published)
// 1 = continent (±15°)
// 2 = country (±3°) — default
// 3 = region (±0.5°)
// 4 = city (±0.05°)
LocationGranularity int // default 2
}
var instance *Config
var once sync.Once
func GetConfig() *Config {
once.Do(func() {
instance = &Config{}
})
return instance
}
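
The zero values of the fields above all fall back to the defaults documented in the comments (5 s search timeout, 20 connections per 30 s window, location granularity 2, and so on). As a minimal sketch of how a caller might resolve two of them, the helpers below are illustrative only and are not part of this diff.

package conf

import "time"

// EffectiveSearchTimeout applies the documented default of 5 seconds when
// SearchTimeout is left at its zero value. Illustrative helper, not in the diff.
func (c *Config) EffectiveSearchTimeout() time.Duration {
    if c.SearchTimeout <= 0 {
        return 5 * time.Second
    }
    return time.Duration(c.SearchTimeout) * time.Second
}

// EffectiveConnWindow applies the documented burst-guard defaults:
// 20 new indexer connections per 30-second window.
func (c *Config) EffectiveConnWindow() (maxConn int, window time.Duration) {
    maxConn, window = 20, 30*time.Second
    if c.MaxConnPerWindow > 0 {
        maxConn = c.MaxConnPerWindow
    }
    if c.ConnWindowSecs > 0 {
        window = time.Duration(c.ConnWindowSecs) * time.Second
    }
    return maxConn, window
}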


@@ -1,41 +0,0 @@
package controllers
import (
"encoding/json"
"oc-discovery/models"
beego "github.com/beego/beego/v2/server/web"
)
// Operations about Identitys
type IdentityController struct {
beego.Controller
}
// @Title CreateIdentity
// @Description create identitys
// @Param body body models.Identity true "body for identity content"
// @Success 200 {result} "ok" or error
// @Failure 403 body is empty
// @router / [post]
func (u *IdentityController) Post() {
var identity models.Identity
json.Unmarshal(u.Ctx.Input.RequestBody, &identity)
err := models.UpdateIdentity(&identity)
if err != nil {
u.Data["json"] = err.Error()
} else {
u.Data["json"] = "ok"
}
u.ServeJSON()
}
// @Title Get
// @Description get Identity
// @Success 200 {object} models.Identity
// @router / [get]
func (u *IdentityController) GetAll() {
identity := models.GetIdentity()
u.Data["json"] = identity
u.ServeJSON()
}


@@ -1,80 +0,0 @@
package controllers
import (
"encoding/json"
"oc-discovery/models"
beego "github.com/beego/beego/v2/server/web"
)
// Operations about peer
type PeerController struct {
beego.Controller
}
// @Title Create
// @Description create peers
// @Param body body []models.Peer true "The peer content"
// @Success 200 {string} models.Peer.Id
// @Failure 403 body is empty
// @router / [post]
func (o *PeerController) Post() {
var ob []models.Peer
json.Unmarshal(o.Ctx.Input.RequestBody, &ob)
models.AddPeers(ob)
o.Data["json"] = map[string]string{"Added": "OK"}
o.ServeJSON()
}
// @Title Get
// @Description find peer by peerid
// @Param peerId path string true "the peerid you want to get"
// @Success 200 {peer} models.Peer
// @Failure 403 :peerId is empty
// @router /:peerId [get]
func (o *PeerController) Get() {
peerId := o.Ctx.Input.Param(":peerId")
peer, err := models.GetPeer(peerId)
if err != nil {
o.Data["json"] = err.Error()
} else {
o.Data["json"] = peer
}
o.ServeJSON()
}
// @Title Find
// @Description find peers with query
// @Param query path string true "the keywords you need"
// @Success 200 {peers} []models.Peer
// @Failure 403
// @router /find/:query [get]
func (o *PeerController) Find() {
query := o.Ctx.Input.Param(":query")
peers, err := models.FindPeers(query)
if err != nil {
o.Data["json"] = err.Error()
} else {
o.Data["json"] = peers
}
o.ServeJSON()
}
// @Title Delete
// @Description delete the peer
// @Param peerId path string true "The peerId you want to delete"
// @Success 200 {string} delete success!
// @Failure 403 peerId is empty
// @router /:peerId [delete]
func (o *PeerController) Delete() {
peerId := o.Ctx.Input.Param(":peerId")
err := models.Delete(peerId)
if err != nil {
o.Data["json"] = err.Error()
} else {
o.Data["json"] = "delete success!"
}
o.ServeJSON()
}


@@ -1,19 +0,0 @@
package controllers
import (
beego "github.com/beego/beego/v2/server/web"
)
// VersionController operations for Version
type VersionController struct {
beego.Controller
}
// @Title GetAll
// @Description get version
// @Success 200
// @router / [get]
func (c *VersionController) GetAll() {
c.Data["json"] = map[string]string{"version": "1"}
c.ServeJSON()
}


@@ -0,0 +1,294 @@
package common
import (
"errors"
"sync"
"time"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type Score struct {
FirstContacted time.Time
UptimeTracker *UptimeTracker
LastFillRate float64
Score float64
// IsSeed marks indexers that came from the IndexerAddresses static config.
// Seeds are sticky: they are never evicted by the score threshold alone.
// A seed is only removed when: (a) heartbeat fails, or (b) it sends
// SuggestMigrate and the node already has MinIndexer non-seed alternatives.
IsSeed bool
// challenge bookkeeping (2-3 peers per batch, raw data returned by indexer)
hbCount int // heartbeats sent since last challenge batch
nextChallenge int // send challenges when hbCount reaches this (rand 1-10)
challengeTotal int // number of own-PeerID challenges sent (ground truth)
challengeCorrect int // own PeerID found AND lastSeen within 2×interval
// fill rate consistency: cross-check reported fillRate vs peerCount/maxNodes
fillChecked int
fillConsistent int
// BornAt stability
LastBornAt time.Time
bornAtChanges int
// DHT challenge
dhtChecked int
dhtSuccess int
dhtBatchCounter int
// Peer witnesses
witnessChecked int
witnessConsistent int
}
// computeNodeSideScore computes the node's quality assessment of an indexer from raw metrics.
// All ratios are in [0,1]; result is in [0,100].
// - uptimeRatio : gap-aware fraction of lifetime the indexer was reachable
// - challengeAccuracy: own-PeerID challenges answered correctly (found + recent lastSeen)
// - latencyScore : 1 - RTT/maxRTT, clamped [0,1]
// - fillScore : 1 - fillRate — prefer less-loaded indexers
// - fillConsistency : fraction of ticks where peerCount/maxNodes ≈ fillRate (±10%)
func (s *Score) ComputeNodeSideScore(latencyScore float64) float64 {
uptime := s.UptimeTracker.UptimeRatio()
challengeAccuracy := 1.0
if s.challengeTotal > 0 {
challengeAccuracy = float64(s.challengeCorrect) / float64(s.challengeTotal)
}
fillScore := 1.0 - s.LastFillRate
fillConsistency := 1.0
if s.fillChecked > 0 {
fillConsistency = float64(s.fillConsistent) / float64(s.fillChecked)
}
witnessConsistency := 1.0
if s.witnessChecked > 0 {
witnessConsistency = float64(s.witnessConsistent) / float64(s.witnessChecked)
}
dhtSuccessRate := 1.0
if s.dhtChecked > 0 {
dhtSuccessRate = float64(s.dhtSuccess) / float64(s.dhtChecked)
}
base := ((0.20 * uptime) +
(0.20 * challengeAccuracy) +
(0.15 * latencyScore) +
(0.10 * fillScore) +
(0.10 * fillConsistency) +
(0.15 * witnessConsistency) +
(0.10 * dhtSuccessRate)) * 100
// BornAt stability: each unexpected BornAt change penalises by 30%.
bornAtPenalty := 1.0 - 0.30*float64(s.bornAtChanges)
if bornAtPenalty < 0 {
bornAtPenalty = 0
}
return base * bornAtPenalty
}
type Directory struct {
MuAddr sync.RWMutex
MuScore sync.RWMutex
MuStream sync.RWMutex
Addrs map[string]*pp.AddrInfo
Scores map[string]*Score
Nudge chan struct{}
Streams ProtocolStream
}
func (d *Directory) ExistsScore(a string) bool {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
for addr, ai := range d.Scores {
if ai != nil && (a == addr) {
return true
}
}
return false
}
func (d *Directory) GetScore(a string) *Score {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
for addr, s := range d.Scores {
if s != nil && (a == addr) {
sCopy := *s
return &sCopy
}
}
return nil
}
func (d *Directory) GetScores() map[string]*Score {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
score := map[string]*Score{}
for addr, s := range d.Scores {
score[addr] = s
}
return score
}
func (d *Directory) DeleteScore(a string) {
d.MuScore.Lock() // write lock: this method replaces the Scores map
defer d.MuScore.Unlock()
score := map[string]*Score{}
for addr, s := range d.Scores {
if a != addr {
score[addr] = s
}
}
d.Scores = score
}
func (d *Directory) SetScore(addr string, score *Score) *pp.AddrInfo {
d.MuScore.Lock()
defer d.MuScore.Unlock()
d.Scores[addr] = score
return nil
}
func (d *Directory) ExistsAddr(addrOrId string) bool {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
for addr, ai := range d.Addrs {
if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
return true
}
}
return false
}
func (d *Directory) GetAddr(addrOrId string) *pp.AddrInfo {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
for addr, ai := range d.Addrs {
if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
aiCopy := *ai
return &aiCopy
}
}
return nil
}
func (d *Directory) DeleteAddr(a string) {
d.MuAddr.Lock() // write lock: this method replaces the Addrs map
defer d.MuAddr.Unlock()
addrs := map[string]*pp.AddrInfo{}
for addr, s := range d.Addrs {
if a != addr {
addrs[addr] = s
}
}
d.Addrs = addrs
}
func (d *Directory) SetAddr(addr string, info *pp.AddrInfo) *pp.AddrInfo {
d.MuAddr.Lock()
defer d.MuAddr.Unlock()
d.Addrs[addr] = info
return nil
}
func (d *Directory) GetAddrIDs() []pp.ID {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]pp.ID, 0, len(d.Addrs))
for _, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, ai.ID)
}
}
return Shuffle(indexers)
}
func (d *Directory) GetAddrsStr() []string {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]string, 0, len(d.Addrs))
for s, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, s)
}
}
return Shuffle(indexers)
}
type Entry struct {
Addr string
Info *pp.AddrInfo
}
func (d *Directory) GetAddrs() []Entry {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]Entry, 0, len(d.Addrs))
for addr, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, Entry{
Addr: addr,
Info: ai,
})
}
}
return Shuffle(indexers)
}
// NudgeIndexerHeartbeat signals the indexer heartbeat goroutine to fire immediately.
func (d *Directory) NudgeIt() {
select {
case d.Nudge <- struct{}{}:
default: // nudge already pending, skip
}
}
type ProtocolStream map[protocol.ID]map[pp.ID]*Stream
func (ps ProtocolStream) Get(protocol protocol.ID) map[pp.ID]*Stream {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
return ps[protocol]
}
func (ps ProtocolStream) GetPerID(protocol protocol.ID, peerID pp.ID) *Stream {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
return ps[protocol][peerID]
}
func (ps ProtocolStream) Add(protocol protocol.ID, peerID *pp.ID, s *Stream) error {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
if peerID != nil {
if s != nil {
ps[protocol][*peerID] = s
} else {
return errors.New("unable to add stream : stream missing")
}
}
return nil
}
func (ps ProtocolStream) Delete(protocol protocol.ID, peerID *pp.ID) {
if streams, ok := ps[protocol]; ok {
if peerID != nil && streams[*peerID] != nil && streams[*peerID].Stream != nil {
streams[*peerID].Stream.Close()
delete(streams, *peerID)
} else {
for _, s := range ps {
for _, v := range s {
if v.Stream != nil {
v.Stream.Close()
}
}
}
delete(ps, protocol)
}
}
}
var Indexers = &Directory{
Addrs: map[string]*pp.AddrInfo{},
Scores: map[string]*Score{},
Nudge: make(chan struct{}, 1),
Streams: ProtocolStream{},
}
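
To make the ComputeNodeSideScore weighting concrete, here is a small worked example. It is illustrative only (it recomputes the same weighted sum with literal ratios rather than driving the unexported challenge counters), and the input values are invented.

package main

import "fmt"

func main() {
    // Example raw ratios, all in [0,1], matching the inputs described on ComputeNodeSideScore.
    uptime := 0.95            // reachable 95% of tracked lifetime
    challengeAccuracy := 1.0  // every own-PeerID challenge answered correctly
    latencyScore := 0.80      // RTT well under the max round trip
    fillScore := 0.60         // indexer is 40% full, so 1 - 0.40
    fillConsistency := 1.0    // reported fillRate always matched peerCount/maxNodes
    witnessConsistency := 0.9 // one witness report disagreed
    dhtSuccessRate := 1.0     // every DID challenge resolved via the DHT

    base := (0.20*uptime + 0.20*challengeAccuracy + 0.15*latencyScore +
        0.10*fillScore + 0.10*fillConsistency +
        0.15*witnessConsistency + 0.10*dhtSuccessRate) * 100 // roughly 90.5 here

    // Each unexpected BornAt change costs 30% of the final score.
    bornAtChanges := 1
    penalty := 1.0 - 0.30*float64(bornAtChanges)
    if penalty < 0 {
        penalty = 0
    }
    fmt.Printf("base=%.1f penalised=%.1f\n", base, base*penalty)
}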


@@ -0,0 +1,289 @@
package common
import (
"context"
"encoding/json"
"io"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
oclib "cloud.o-forge.io/core/oc-lib"
)
// MemberEventType is the SWIM membership event classification.
type MemberEventType string
const (
MemberAlive MemberEventType = "alive"
MemberSuspect MemberEventType = "suspect"
MemberDead MemberEventType = "dead"
)
// MemberEvent is a SWIM membership event piggybacked on heartbeats (infection-style).
// HopsLeft starts at InitialEventHops and is decremented on each retransmission.
// Receivers discard events whose HopsLeft reaches 0 instead of forwarding them further.
// Deduplication by (PeerID, Incarnation): higher incarnation or higher-priority type wins.
type MemberEvent struct {
Type MemberEventType `json:"type"`
PeerID string `json:"peer_id"`
Incarnation uint64 `json:"incarnation"`
HopsLeft int `json:"hops_left"`
}
type Heartbeat struct {
Name string `json:"name"`
Stream *Stream `json:"stream"`
DID string `json:"did"`
PeerID string `json:"peer_id"`
Timestamp int64 `json:"timestamp"`
IndexersBinded []string `json:"indexers_binded"`
Score float64
// Record carries a fresh signed PeerRecord (JSON) so the receiving indexer
// can republish it to the DHT without an extra round-trip.
// Only set by nodes (not indexers heartbeating other indexers).
Record json.RawMessage `json:"record,omitempty"`
// Need is how many more indexers this node wants (MaxIndexer - current pool size).
// The receiving indexer uses this to know how many suggestions to return.
// 0 means the pool is full — no suggestions needed unless SuggestMigrate.
Need int `json:"need,omitempty"`
// Challenges is a list of PeerIDs the node asks the indexer to spot-check.
// Always includes the node's own PeerID (ground truth) + up to 2 additional
// known peers. Nil means no challenge this tick.
Challenges []string `json:"challenges,omitempty"`
// ChallengeDID asks the indexer to retrieve this DID from the DHT (every 5th batch).
ChallengeDID string `json:"challenge_did,omitempty"`
// Referent marks this indexer as the node's designated search referent.
// Only one indexer per node receives Referent=true at a time (the best-scored one).
// The indexer stores the node in its referencedNodes for distributed search.
Referent bool `json:"referent,omitempty"`
// SuspectedIncarnation is set when this node currently suspects the target indexer.
// If the value matches the indexer's own incarnation, the indexer increments its
// incarnation and replies with the new value — this is the SWIM refutation signal.
SuspectedIncarnation *uint64 `json:"suspected_incarnation,omitempty"`
// MembershipEvents carries SWIM events piggybacked on this heartbeat.
// Events are forwarded infection-style until HopsLeft reaches 0.
MembershipEvents []MemberEvent `json:"membership_events,omitempty"`
}
// SearchPeerRequest is sent by a node to an indexer via ProtocolSearchPeer.
// The indexer broadcasts it on the GossipSub search mesh and streams results back.
type SearchPeerRequest struct {
QueryID string `json:"query_id"`
// At least one of PeerID, DID, Name must be set.
PeerID string `json:"peer_id,omitempty"`
DID string `json:"did,omitempty"`
Name string `json:"name,omitempty"`
}
// SearchQuery is broadcast on TopicSearchPeer by the receiving indexer.
// EmitterID is the indexer's own PeerID — responding indexers open a
// ProtocolSearchPeerResponse stream back to it.
type SearchQuery struct {
QueryID string `json:"query_id"`
PeerID string `json:"peer_id,omitempty"`
DID string `json:"did,omitempty"`
Name string `json:"name,omitempty"`
EmitterID string `json:"emitter_id"`
}
// SearchPeerResult is sent by a responding indexer to the emitting indexer
// via ProtocolSearchPeerResponse, and forwarded by the emitting indexer to
// the node on the open ProtocolSearchPeer stream.
type SearchPeerResult struct {
QueryID string `json:"query_id"`
Records []SearchHit `json:"records"`
}
// SearchHit is a single peer found during distributed search.
type SearchHit struct {
PeerID string `json:"peer_id"`
DID string `json:"did"`
Name string `json:"name"`
}
// ChallengeEntry is the indexer's raw answer for one challenged peer.
type ChallengeEntry struct {
PeerID string `json:"peer_id"`
Found bool `json:"found"`
LastSeen time.Time `json:"last_seen,omitempty"` // zero if not found
}
// HeartbeatResponse carries raw metrics only — no pre-cooked score.
type HeartbeatResponse struct {
FillRate float64 `json:"fill_rate"`
PeerCount int `json:"peer_count"`
MaxNodes int `json:"max_nodes"` // capacity — lets node cross-check fillRate
BornAt time.Time `json:"born_at"`
Challenges []ChallengeEntry `json:"challenges,omitempty"`
// DHTFound / DHTPayload: response to a ChallengeDID request.
DHTFound bool `json:"dht_found,omitempty"`
DHTPayload json.RawMessage `json:"dht_payload,omitempty"`
// Witnesses: random sample of connected nodes so the querying node can cross-check.
Witnesses []pp.AddrInfo `json:"witnesses,omitempty"`
// Suggestions: better indexers this indexer knows about via its DHT cache.
// The node should open heartbeat connections to these (they become StaticIndexers).
Suggestions []pp.AddrInfo `json:"suggestions,omitempty"`
// SuggestMigrate: set when this indexer is overloaded (fill rate > threshold)
// and is actively trying to hand the node off to the Suggestions list.
// Seeds: node de-stickies this indexer once it has MinIndexer non-seed alternatives.
// Non-seeds: node removes this indexer immediately if it has enough alternatives.
SuggestMigrate bool `json:"suggest_migrate,omitempty"`
// Incarnation is this indexer's current SWIM incarnation number.
// It is incremented whenever the indexer refutes a suspicion signal.
// The node tracks this to detect explicit refutations and to clear suspect state.
Incarnation uint64 `json:"incarnation,omitempty"`
// MembershipEvents carries SWIM events piggybacked on this response.
// The node should forward them to its other indexers (infection-style).
MembershipEvents []MemberEvent `json:"membership_events,omitempty"`
}
// ComputeIndexerScore computes a composite quality score [0, 100] for the connecting peer.
// - uptimeRatio: fraction of tracked lifetime online (gap-aware) — peer reliability
// - bpms: bandwidth normalized to MaxExpectedMbps — link capacity
// - diversity: indexer's own /24 subnet diversity — network topology quality
// - latencyScore: 1 - RTT/maxRoundTrip — link responsiveness
// - fillRate: fraction of indexer slots used (0=empty, 1=full) — collective trust signal:
// a fuller indexer has been chosen and retained by many peers, which is evidence of quality.
func (hb *Heartbeat) ComputeIndexerScore(uptimeRatio float64, bpms float64, diversity float64, latencyScore float64, fillRate float64) {
hb.Score = ((0.20 * uptimeRatio) +
(0.20 * bpms) +
(0.20 * diversity) +
(0.15 * latencyScore) +
(0.25 * fillRate)) * 100
}
type HeartbeatInfo []struct {
Info []byte `json:"info"`
}
// WitnessRequest is sent by a node to a peer to ask its view of a given indexer.
type WitnessRequest struct {
IndexerPeerID string `json:"indexer_peer_id"`
}
// WitnessReport is returned by a peer in response to a WitnessRequest.
type WitnessReport struct {
Seen bool `json:"seen"`
BornAt time.Time `json:"born_at,omitempty"`
FillRate float64 `json:"fill_rate,omitempty"`
Score float64 `json:"score,omitempty"`
}
// HandleBandwidthProbe echoes back everything written on the stream, then closes.
// It is registered by all participants so the measuring side (the heartbeat receiver)
// can open a dedicated probe stream and read the round-trip latency + throughput.
func HandleBandwidthProbe(s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(10 * time.Second))
io.Copy(s, s) // echo every byte back to the sender
}
// HandleWitnessQuery answers a witness query: the caller wants to know
// what this node thinks of a given indexer (identified by its PeerID).
func HandleWitnessQuery(h host.Host, s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
var req WitnessRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
return
}
report := WitnessReport{}
for _, ai := range Indexers.GetAddrs() {
if ai.Info == nil || ai.Info.ID.String() != req.IndexerPeerID {
continue
}
if score := Indexers.GetScore(addrKey(*ai.Info)); score != nil {
report.Seen = true
report.BornAt = score.LastBornAt
report.FillRate = score.LastFillRate
report.Score = score.Score
}
break
}
json.NewEncoder(s).Encode(report)
}
// SupportsHeartbeat probes pid with a short-lived stream to verify it has
// a ProtocolHeartbeat handler (i.e. it is an indexer, not a plain node).
// Only protocol negotiation is performed — no data is sent.
// Returns false on any error, including "protocol not supported".
func SupportsHeartbeat(h host.Host, pid pp.ID) bool {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
s, err := h.NewStream(ctx, pid, ProtocolHeartbeat)
if err != nil {
return false
}
s.Reset()
return true
}
// queryWitnesses contacts each witness in parallel, collects their view of the
// indexer, and updates score.witnessChecked / score.witnessConsistent.
// Called in a goroutine — must not hold any lock.
func queryWitnesses(h host.Host, indexerPeerID string, indexerBornAt time.Time, indexerFillRate float64, witnesses []pp.AddrInfo, score *Score) {
logger := oclib.GetLogger()
type result struct{ consistent bool }
results := make(chan result, len(witnesses))
for _, ai := range witnesses {
if ai.ID == h.ID() {
// Never query ourselves — skip and count as inconclusive.
results <- result{}
continue
}
go func(ai pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := h.NewStream(ctx, ai.ID, ProtocolWitnessQuery)
if err != nil {
results <- result{}
return
}
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
if err := json.NewEncoder(s).Encode(WitnessRequest{IndexerPeerID: indexerPeerID}); err != nil {
results <- result{}
return
}
var rep WitnessReport
if err := json.NewDecoder(s).Decode(&rep); err != nil || !rep.Seen {
results <- result{}
return
}
// BornAt must be identical (fixed timestamp).
bornAtOK := !rep.BornAt.IsZero() && rep.BornAt.Equal(indexerBornAt)
// FillRate coherent within ±25% (it fluctuates normally).
diff := rep.FillRate - indexerFillRate
if diff < 0 {
diff = -diff
}
fillOK := diff < 0.25
consistent := bornAtOK && fillOK
logger.Debug().
Str("witness", ai.ID.String()).
Bool("bornAt_ok", bornAtOK).
Bool("fill_ok", fillOK).
Msg("witness report")
results <- result{consistent: consistent}
}(ai)
}
checked, consistent := 0, 0
for range witnesses {
r := <-results
checked++
if r.consistent {
consistent++
}
}
if checked == 0 {
return
}
score.witnessChecked += checked
score.witnessConsistent += consistent
}
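
For the indexer-side weighting, ComputeIndexerScore is exported, so it can be exercised directly. The snippet below is illustrative only and is written as if it lived inside the common package (the exact import path is not shown in this diff); the input ratios are invented.

package common

import "fmt"

// exampleIndexerScore shows the indexer-side weighting on sample inputs.
// With these values the weighted sum is 0.65, so hb.Score comes out near 65.
func exampleIndexerScore() {
    hb := &Heartbeat{}
    // uptimeRatio, bpms, diversity, latencyScore, fillRate, all in [0,1].
    hb.ComputeIndexerScore(0.90, 0.50, 0.75, 0.80, 0.40)
    fmt.Printf("indexer-side score: %.1f\n", hb.Score)
}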


@@ -0,0 +1,807 @@
package common
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync/atomic"
"time"
"oc-discovery/conf"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
var TimeWatcher time.Time
// retryRunning guards against launching multiple retryUntilSeedResponds goroutines.
var retryRunning atomic.Bool
// suspectTimeout is the maximum time a peer can stay in suspect state before
// being declared dead and evicted. Aligned with 3 heartbeat intervals so the
// peer has at least 3 chances to respond or refute the suspicion signal.
const suspectTimeout = 3 * RecommendedHeartbeatInterval
func ConnectToIndexers(h host.Host, minIndexer int, maxIndexer int, recordFn ...func() json.RawMessage) error {
TimeWatcher = time.Now().UTC()
logger := oclib.GetLogger()
// Bootstrap from IndexerAddresses seed set.
addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
if len(addresses) > maxIndexer {
addresses = addresses[0:maxIndexer]
}
for _, indexerAddr := range addresses {
indexerAddr = strings.TrimSpace(indexerAddr)
if indexerAddr == "" {
continue
}
ad, err := pp.AddrInfoFromString(indexerAddr)
if err != nil {
logger.Err(err)
continue
}
key := ad.ID.String()
Indexers.SetAddr(key, ad)
// Pre-create score entry with IsSeed=true so the sticky flag is set before
// the first heartbeat tick (lazy creation in doTick would lose the flag).
if !Indexers.ExistsScore(key) {
Indexers.SetScore(key, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
IsSeed: true,
})
}
}
seeds := Indexers.GetAddrs()
indexerCount := len(seeds)
if indexerCount < minIndexer {
return fmt.Errorf("you are running a node without indexers... you are going to be isolated")
}
// Start long-lived heartbeat to seed indexers. The single goroutine follows
// all subsequent StaticIndexers changes.
SendHeartbeat(context.Background(), ProtocolHeartbeat, conf.GetConfig().Name,
h, Indexers, 20*time.Second, maxIndexer, recordFn...)
// Watch for inbound connections: if a peer connects to us and our pool has
// room, probe it first to confirm it supports ProtocolHeartbeat (i.e. it is
// an indexer). Plain nodes don't register the handler — the negotiation fails
// instantly so we never pollute the pool with non-indexer peers.
h.Network().Notify(&network.NotifyBundle{
ConnectedF: func(n network.Network, c network.Conn) {
if c.Stat().Direction != network.DirInbound {
return
}
if len(Indexers.GetAddrs()) >= maxIndexer {
return
}
peerID := c.RemotePeer()
if Indexers.ExistsAddr(peerID.String()) {
return
}
// Probe in a goroutine — ConnectedF must not block.
go func(pid pp.ID) {
if !SupportsHeartbeat(h, pid) {
return // plain node, skip
}
if len(Indexers.GetAddrs()) >= maxIndexer {
return
}
if Indexers.ExistsAddr(pid.String()) {
return
}
addrs := h.Peerstore().Addrs(pid)
if len(addrs) == 0 {
return
}
ai := FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) == 0 {
return
}
adCopy := ai
Indexers.SetAddr(pid.String(), &adCopy)
Indexers.NudgeIt()
log := oclib.GetLogger()
log.Info().Str("peer", pid.String()).
Msg("[pool] inbound indexer peer added as candidate")
}(peerID)
},
})
// Proactive DHT upgrade: once seeds are connected and the DHT routing table
// is warm, discover better indexers and add them to the pool alongside the seeds.
// Seeds stay as guaranteed anchors; scoring will demote poor performers over time.
go func(seeds []Entry) {
// Let seed connections establish and the DHT routing table warm up.
time.Sleep(5 * time.Second)
// For pure nodes (no IndexerService), spin up a lightweight DHT client.
if discoveryDHT == nil {
if len(seeds) == 0 {
return
}
initNodeDHT(h, seeds)
}
if discoveryDHT == nil {
return
}
current := len(Indexers.GetAddrs())
need := maxIndexer - current
if need <= 0 {
need = maxIndexer / 2 // diversify even when pool is already at capacity
}
logger.Info().Int("need", need).Msg("[dht] proactive indexer discovery from DHT")
replenishIndexersFromDHT(h, need)
}(seeds)
return nil
}
// reconnectToSeeds re-adds the configured seed indexers to StaticIndexers as
// sticky fallback entries. Called when the pool drops to zero so the node
// never becomes completely isolated.
func reconnectToSeeds() {
logger := oclib.GetLogger()
logger.Warn().Msg("[pool] all indexers lost, reconnecting to configured seeds")
addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
for _, addrStr := range addresses {
addrStr = strings.TrimSpace(addrStr)
if addrStr == "" {
continue
}
ad, err := pp.AddrInfoFromString(addrStr)
if err != nil {
continue
}
key := ad.ID.String()
Indexers.SetAddr(key, ad)
if score := Indexers.GetScore(key); score == nil {
Indexers.SetScore(key, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
IsSeed: true,
})
} else {
// Restore sticky flag so the seed is not immediately re-ejected.
score.IsSeed = true
}
}
}
// retryUntilSeedResponds loops with exponential backoff until at least one
// configured seed is reachable again. Once seeds are back in the pool it
// nudges the heartbeat loop and lets the normal DHT upgrade path take over.
// Should be called in a goroutine — it blocks until the situation resolves.
// Panics immediately if no seeds are configured: there is nothing to wait for.
func retryUntilSeedResponds() {
if !retryRunning.CompareAndSwap(false, true) {
return // another goroutine is already running the retry loop
}
defer retryRunning.Store(false)
logger := oclib.GetLogger()
rawAddresses := strings.TrimSpace(conf.GetConfig().IndexerAddresses)
if rawAddresses == "" {
// No seeds configured: rely on the inbound-connection notifee to fill
// the pool. Just wait patiently — the loop below will return as soon
// as any peer connects and NudgeIt() is called.
logger.Warn().Msg("[pool] pool empty and no seeds configured — waiting for inbound indexer")
}
backoff := 10 * time.Second
const maxBackoff = 5 * time.Minute
for {
time.Sleep(backoff)
if backoff < maxBackoff {
backoff *= 2
}
// Check whether someone else already refilled the pool.
if len(Indexers.GetAddrs()) > 0 {
logger.Info().Msg("[pool] pool refilled externally, stopping seed retry")
return
}
logger.Warn().Dur("backoff", backoff).Msg("[pool] still isolated, retrying seeds")
reconnectToSeeds()
if len(Indexers.GetAddrs()) > 0 {
Indexers.NudgeIt()
// Re-bootstrap DHT now that we have at least one connection candidate.
if discoveryDHT != nil {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
discoveryDHT.Bootstrap(ctx) //nolint:errcheck
cancel()
}
return
}
}
}
// ensureScore returns the Score for addr, creating it if absent.
func ensureScore(d *Directory, addr string) *Score {
if !d.ExistsScore(addr) {
d.SetScore(addr, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
})
}
return d.GetScore(addr)
}
// evictPeer removes addr from directory atomically and returns a snapshot of
// remaining AddrInfos (for consensus voter selection).
func evictPeer(d *Directory, addr string, id pp.ID, proto protocol.ID) []pp.AddrInfo {
d.Streams.Delete(proto, &id)
d.DeleteAddr(addr)
voters := make([]pp.AddrInfo, 0, len(d.Addrs))
for _, ai := range d.GetAddrs() {
if ai.Info != nil {
voters = append(voters, *ai.Info)
}
}
d.DeleteScore(addr)
return voters
}
// handleSuggestions adds unknown suggested indexers to the directory.
func handleSuggestions(d *Directory, from string, suggestions []pp.AddrInfo) {
added := 0
for _, sug := range suggestions {
key := addrKey(sug)
if !d.ExistsAddr(key) {
cpy := sug
d.SetAddr(key, &cpy)
added++
}
}
if added > 0 {
logger := oclib.GetLogger()
logger.Info().Int("added", added).Str("from", from).
Msg("added suggested indexers from heartbeat response")
d.NudgeIt()
}
}
// SendHeartbeat starts a goroutine that sends periodic heartbeats to peers.
// recordFn, when provided, is called on each tick and its output is embedded in
// the heartbeat as a fresh signed PeerRecord so the receiving indexer can
// republish it to the DHT without an extra round-trip.
// Pass no recordFn (or nil) for indexer→indexer / native heartbeats.
func SendHeartbeat(ctx context.Context, proto protocol.ID, name string, h host.Host, directory *Directory, interval time.Duration, maxPool int, recordFn ...func() json.RawMessage) {
logger := oclib.GetLogger()
isIndexerHB := directory == Indexers
var recFn func() json.RawMessage
if len(recordFn) > 0 {
recFn = recordFn[0]
}
go func() {
logger.Info().Str("proto", string(proto)).Int("peers", len(directory.Addrs)).Msg("heartbeat started")
t := time.NewTicker(interval)
defer t.Stop()
// peerEntry pairs addr key with AddrInfo so doTick can update score maps directly.
type peerEntry struct {
addr string
ai *pp.AddrInfo
}
doTick := func() {
addrs := directory.GetAddrsStr()
need := maxPool - len(addrs)
if need < 0 {
need = 0
}
baseHB := Heartbeat{
Name: name,
PeerID: h.ID().String(),
Timestamp: time.Now().UTC().Unix(),
IndexersBinded: addrs,
Need: need,
}
if recFn != nil {
baseHB.Record = recFn()
}
// Piggyback SWIM membership events on every outgoing heartbeat batch.
// All peers in the pool receive the same events this tick.
if isIndexerHB {
baseHB.MembershipEvents = NodeEventQueue.Drain(5)
}
// Determine the referent indexer: highest-scored one receives Referent=true
// so it stores us in its referencedNodes for distributed search.
var referentAddr string
if isIndexerHB {
var bestScore float64 = -1
for _, ai2 := range directory.GetAddrs() {
if s := directory.GetScore(ai2.Addr); s != nil && s.Score > bestScore {
bestScore = s.Score
referentAddr = ai2.Addr
}
}
}
for _, ai := range directory.GetAddrs() {
// Build per-peer heartbeat copy so challenge injection is peer-specific.
hb := baseHB
if isIndexerHB && referentAddr != "" && ai.Addr == referentAddr {
hb.Referent = true
}
// SWIM: signal suspicion so the peer can refute by incrementing incarnation.
if isIndexerHB {
if score := directory.GetScore(ai.Addr); score != nil && !score.UptimeTracker.SuspectedAt.IsZero() {
inc := score.UptimeTracker.LastKnownIncarnation
hb.SuspectedIncarnation = &inc
}
}
// Ensure an IndexerScore entry exists for this peer.
var score *Score
if isIndexerHB {
score = ensureScore(directory, ai.Addr)
// Inject challenge batch if due (random 1-10 HBs between batches).
score.hbCount++
if score.hbCount >= score.nextChallenge {
// Ground truth: node's own PeerID — indexer MUST have us.
challenges := []string{h.ID().String()}
// Add up to 2 more known peers (other indexers) for richer data.
// Use the already-snapshotted entries to avoid re-locking.
for _, ai2 := range directory.GetAddrs() {
if ai2.Addr != ai.Addr && ai2.Info != nil {
challenges = append(challenges, ai2.Info.ID.String())
if len(challenges) >= 3 {
break
}
}
}
hb.Challenges = challenges
score.hbCount = 0
score.nextChallenge = rand.Intn(10) + 1
score.challengeTotal++ // count own-PeerID challenge (ground truth)
score.dhtBatchCounter++
// DHT challenge every 5th batch: ask indexer to retrieve our own DID.
if score.dhtBatchCounter%5 == 0 {
var selfDID string
if len(baseHB.Record) > 0 {
var partial struct {
DID string `json:"did"`
}
if json.Unmarshal(baseHB.Record, &partial) == nil {
selfDID = partial.DID
}
}
if selfDID != "" {
hb.ChallengeDID = selfDID
}
}
}
}
resp, rtt, err := sendHeartbeat(ctx, h, proto, ai.Info, hb, directory.Streams, interval*time.Second)
if err != nil { // Heartbeat fails
HeartbeatFailure(h, proto, directory, ai.Addr, ai.Info, isIndexerHB, maxPool, err)
continue
}
// Update IndexerScore — uptime recorded on any successful send,
// even if the indexer does not support bidirectional heartbeat (Fix 1).
if isIndexerHB && score != nil {
score.UptimeTracker.RecordHeartbeat()
score.UptimeTracker.ConsecutiveFails = 0 // reset on success
// SWIM: clear suspect state on any successful direct heartbeat.
// The peer proved it is reachable; if it also incremented its incarnation
// that is an explicit refutation — log it distinctly.
if !score.UptimeTracker.SuspectedAt.IsZero() {
wasExplicitRefutation := resp != nil &&
resp.Incarnation > 0 &&
resp.Incarnation > score.UptimeTracker.LastKnownIncarnation
if wasExplicitRefutation {
logger.Info().Str("peer", ai.Info.ID.String()).
Uint64("old_incarnation", score.UptimeTracker.LastKnownIncarnation).
Uint64("new_incarnation", resp.Incarnation).
Msg("[swim] explicit refutation: incarnation incremented, suspicion cleared")
} else {
logger.Info().Str("peer", ai.Info.ID.String()).
Msg("[swim] suspect cleared — peer responded to direct probe")
}
score.UptimeTracker.SuspectedAt = time.Time{}
// Propagate alive event so other nodes can clear their own suspect state.
inc := score.UptimeTracker.LastKnownIncarnation
if resp != nil && resp.Incarnation > 0 {
inc = resp.Incarnation
}
NodeEventQueue.Add(MemberEvent{
Type: MemberAlive,
PeerID: ai.Info.ID.String(),
Incarnation: inc,
HopsLeft: InitialEventHops,
})
}
// Always update last known incarnation.
if resp != nil && resp.Incarnation > score.UptimeTracker.LastKnownIncarnation {
score.UptimeTracker.LastKnownIncarnation = resp.Incarnation
}
maxRTT := BaseRoundTrip * 10
latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
if latencyScore < 0 {
latencyScore = 0
}
if latencyScore > 1 {
latencyScore = 1
}
// Update fill / challenge fields only when the indexer responded.
if resp != nil {
// BornAt stability check.
if score.LastBornAt.IsZero() {
score.LastBornAt = resp.BornAt
} else if !resp.BornAt.IsZero() && !resp.BornAt.Equal(score.LastBornAt) {
score.bornAtChanges++
score.LastBornAt = resp.BornAt
logger.Warn().Str("peer", ai.Info.ID.String()).
Int("changes", score.bornAtChanges).
Msg("indexer BornAt changed — possible restart or impersonation")
}
score.LastFillRate = resp.FillRate
// Fill rate consistency: cross-check peerCount/maxNodes vs reported fillRate.
if resp.MaxNodes > 0 {
expected := float64(resp.PeerCount) / float64(resp.MaxNodes)
diff := expected - resp.FillRate
if diff < 0 {
diff = -diff
}
score.fillChecked++
if diff < 0.1 {
score.fillConsistent++
}
}
// Validate challenge responses. Only own-PeerID counts as ground truth.
if len(hb.Challenges) > 0 && len(resp.Challenges) > 0 {
ownID := h.ID().String()
for _, ce := range resp.Challenges {
if ce.PeerID != ownID {
continue // informational only
}
recentEnough := !ce.LastSeen.IsZero() &&
time.Since(ce.LastSeen) < 2*RecommendedHeartbeatInterval
if ce.Found && recentEnough {
score.challengeCorrect++
}
logger.Info().Str("peer", ai.Info.ID.String()).
Bool("found", ce.Found).
Bool("recent", recentEnough).
Msg("own-PeerID challenge result")
break
}
}
// DHT challenge result.
if hb.ChallengeDID != "" {
score.dhtChecked++
if resp.DHTFound {
score.dhtSuccess++
}
}
// Launch witness cross-check asynchronously (must not hold lock).
if len(resp.Witnesses) > 0 {
go queryWitnesses(h, ai.Info.ID.String(), resp.BornAt, resp.FillRate, resp.Witnesses, score)
} else if resp.MaxNodes > 0 {
// No witnesses offered. Valid if indexer only has us (PeerCount==1).
// Cross-check: FillRate should equal 1/MaxNodes within ±10%.
expected := 1.0 / float64(resp.MaxNodes)
diff := resp.FillRate - expected
if diff < 0 {
diff = -diff
}
score.witnessChecked++
if resp.PeerCount == 1 && diff < 0.1 {
score.witnessConsistent++
}
}
// SWIM infection: process membership events piggybacked on this response.
// Events with HopsLeft > 0 are re-queued for forwarding to other indexers.
for _, ev := range resp.MembershipEvents {
if ev.HopsLeft > 0 {
NodeEventQueue.Add(ev)
}
applyMemberEvent(ev, directory)
}
}
score.Score = score.ComputeNodeSideScore(latencyScore)
age := score.UptimeTracker.Uptime()
minScore := dynamicMinScore(age)
// Fix 4: grace period — at least 2 full heartbeat cycles before ejecting.
isSeed := score.IsSeed
// Seeds are sticky: never evicted by score alone (SuggestMigrate handles it).
// Never eject the last indexer by score alone — we would lose all connectivity.
belowThreshold := score.Score < minScore &&
score.UptimeTracker.TotalOnline >= 2*RecommendedHeartbeatInterval &&
!isSeed &&
len(directory.Addrs) > 1
if belowThreshold {
logger.Info().Str("peer", ai.Info.ID.String()).
Float64("score", score.Score).Float64("min", minScore).
Msg("indexer score below threshold, removing from pool")
voters := evictPeer(directory, ai.Addr, ai.Info.ID, proto)
need := max(maxPool-len(voters), 1)
if len(voters) > 0 {
go TriggerConsensus(h, voters, need)
} else {
go replenishIndexersFromDHT(h, need)
}
}
// Accept suggestions from this indexer — add unknown ones to the directory.
if resp != nil && len(resp.Suggestions) > 0 {
handleSuggestions(directory, ai.Info.ID.String(), resp.Suggestions)
}
// Handle SuggestMigrate: indexer is overloaded and wants us to move.
if resp != nil && resp.SuggestMigrate && isIndexerHB {
nonSeedCount := 0
for _, sc := range directory.GetScores() {
if !sc.IsSeed {
nonSeedCount++
}
}
if nonSeedCount >= conf.GetConfig().MinIndexer {
if isSeed {
// Seed has offloaded us: clear sticky flag, score eviction takes over.
score.IsSeed = false
logger.Info().Str("peer", ai.Info.ID.String()).
Msg("seed discharged via SuggestMigrate, de-stickied")
} else {
evictPeer(directory, ai.Addr, ai.Info.ID, proto)
logger.Info().Str("peer", ai.Info.ID.String()).Msg("accepted migration from overloaded indexer")
}
}
}
}
}
}
for {
select {
case <-t.C:
doTick()
case <-directory.Nudge:
if isIndexerHB {
logger.Info().Msg("nudge received, heartbeating new indexers immediately")
doTick()
}
case <-ctx.Done():
return
}
}
}()
}
// runIndirectProbe asks up to k live indexers (voters) to probe target via
// ProtocolBandwidthProbe and returns true if the majority report reachable.
// This is the SWIM explicit indirect ping — called only on heartbeat failure.
func runIndirectProbe(h host.Host, target pp.AddrInfo, voters []Entry, k int) bool {
if k > len(voters) {
k = len(voters)
}
if k == 0 {
return false
}
shuffled := make([]Entry, len(voters))
copy(shuffled, voters)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
shuffled = shuffled[:k]
type result struct{ reachable bool }
ch := make(chan result, k)
for _, voter := range shuffled {
if voter.Info == nil {
ch <- result{false}
continue
}
go func(v pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
s, err := h.NewStream(ctx, v.ID, ProtocolIndirectProbe)
if err != nil {
ch <- result{false}
return
}
s.SetDeadline(time.Now().Add(8 * time.Second))
defer s.Close()
if err := json.NewEncoder(s).Encode(IndirectProbeRequest{Target: target}); err != nil {
ch <- result{false}
return
}
var resp IndirectProbeResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
ch <- result{false}
return
}
ch <- result{resp.Reachable}
}(*voter.Info)
}
reachable := 0
for range k {
if (<-ch).reachable {
reachable++
}
}
return reachable > k/2
}
func HeartbeatFailure(h host.Host, proto protocol.ID, directory *Directory,
addr string, info *pp.AddrInfo, isIndexerHB bool, maxPool int, err error) {
logger := oclib.GetLogger()
logger.Err(err)
// Seeds are never evicted on heartbeat failure.
// Keeping them in the pool lets the regular 60-second ticker retry them
// at a natural cadence — no reconnect storm, no libp2p dial-backoff accumulation.
// A seed will self-heal once it comes back; DHT and inbound peers fill the gap.
if isIndexerHB {
if score := directory.GetScore(addr); score != nil {
if score.IsSeed {
logger.Warn().Str("peer", info.ID.String()).
Msg("[pool] seed heartbeat failed — keeping in pool, ticker will retry " + err.Error())
return
}
voters := directory.GetAddrs()
if len(voters) <= 1 {
// Last indexer: no peer available to proxy a probe.
// Enter suspect state on first failure; evict only after suspectTimeout.
if score.UptimeTracker.SuspectedAt.IsZero() {
score.UptimeTracker.SuspectedAt = time.Now().UTC()
score.UptimeTracker.ConsecutiveFails++
NodeEventQueue.Add(MemberEvent{
Type: MemberSuspect,
PeerID: info.ID.String(),
Incarnation: score.UptimeTracker.LastKnownIncarnation,
HopsLeft: InitialEventHops,
})
logger.Warn().Str("peer", info.ID.String()).
Msg("[swim] last indexer suspect — waiting for refutation or timeout")
return
}
if time.Since(score.UptimeTracker.SuspectedAt) < suspectTimeout {
logger.Warn().Str("peer", info.ID.String()).
Dur("suspected_for", time.Since(score.UptimeTracker.SuspectedAt)).
Msg("[swim] last indexer still failing, holding in suspect state")
return
}
// suspectTimeout exceeded with no refutation — declare dead.
logger.Warn().Str("peer", info.ID.String()).
Msg("[swim] last indexer suspect timeout exceeded, evicting")
NodeEventQueue.Add(MemberEvent{
Type: MemberDead,
PeerID: info.ID.String(),
Incarnation: score.UptimeTracker.LastKnownIncarnation,
HopsLeft: InitialEventHops,
})
} else if score.UptimeTracker.SuspectedAt.IsZero() {
// First miss with other live indexers available:
// enter suspect state and run an indirect probe asynchronously.
score.UptimeTracker.SuspectedAt = time.Now().UTC()
score.UptimeTracker.ConsecutiveFails++
NodeEventQueue.Add(MemberEvent{
Type: MemberSuspect,
PeerID: info.ID.String(),
Incarnation: score.UptimeTracker.LastKnownIncarnation,
HopsLeft: InitialEventHops,
})
probeTarget := *info
go func() {
alive := runIndirectProbe(h, probeTarget, voters, 2)
if alive {
// Other indexers confirm the target is reachable → our direct
// link may be temporarily broken. Keep suspected; the next
// heartbeat tick will retry the direct probe.
logger.Warn().Str("peer", probeTarget.ID.String()).
Msg("[swim] indirect probe: target reachable by peers, keeping (suspected)")
} else {
// Majority of probes also failed → the indexer is genuinely dead.
logger.Warn().Str("peer", probeTarget.ID.String()).
Msg("[swim] indirect probe: target unreachable, evicting")
NodeEventQueue.Add(MemberEvent{
Type: MemberDead,
PeerID: probeTarget.ID.String(),
Incarnation: score.UptimeTracker.LastKnownIncarnation,
HopsLeft: InitialEventHops,
})
consensusVoters := evictPeer(directory, addr, probeTarget.ID, proto)
need := max(maxPool-len(consensusVoters), 1)
if len(consensusVoters) > 0 {
TriggerConsensus(h, consensusVoters, need)
} else {
replenishIndexersFromDHT(h, need)
}
}
}()
return // decision deferred to probe goroutine
} else if time.Since(score.UptimeTracker.SuspectedAt) < suspectTimeout {
// Still within suspect window — the next tick's SuspectedIncarnation
// in the heartbeat may trigger a refutation. Keep retrying.
logger.Warn().Str("peer", info.ID.String()).
Dur("suspected_for", time.Since(score.UptimeTracker.SuspectedAt)).
Msg("[swim] suspected peer still failing, waiting for refutation or timeout")
return
} else {
// suspectTimeout exceeded — declare dead and fall through to eviction.
logger.Warn().Str("peer", info.ID.String()).
Msg("[swim] suspect timeout exceeded, evicting")
NodeEventQueue.Add(MemberEvent{
Type: MemberDead,
PeerID: info.ID.String(),
Incarnation: score.UptimeTracker.LastKnownIncarnation,
HopsLeft: InitialEventHops,
})
}
}
}
logger.Info().Str("peer", info.ID.String()).Str("proto", string(proto)).
Msg("heartbeat failed, removing peer from pool : " + err.Error())
consensusVoters := evictPeer(directory, addr, info.ID, proto)
if isIndexerHB {
need := maxPool - len(consensusVoters)
if need < 1 {
need = 1
}
logger.Info().Int("remaining", len(consensusVoters)).Int("need", need).Msg("pool state after removal")
poolSize := len(directory.GetAddrs())
if poolSize == 0 {
// Pool is truly empty (no seeds configured or no seeds in pool).
// Start the backoff retry loop — it will re-add seeds and nudge
// only once a seed actually responds.
go retryUntilSeedResponds()
} else if len(consensusVoters) > 0 {
go TriggerConsensus(h, consensusVoters, need)
} else {
go replenishIndexersFromDHT(h, need)
}
}
}
// applyMemberEvent applies an incoming SWIM membership event to the local directory.
// Only MemberAlive events with a higher incarnation can clear an existing suspect state;
// MemberSuspect / MemberDead from gossip are informational — we do not act on them
// unilaterally since the node has its own direct-probe evidence.
func applyMemberEvent(ev MemberEvent, directory *Directory) {
if ev.Type != MemberAlive {
return
}
logger := oclib.GetLogger()
for _, ai := range directory.GetAddrs() {
if ai.Info == nil || ai.Info.ID.String() != ev.PeerID {
continue
}
score := directory.GetScore(ai.Addr)
if score == nil || score.UptimeTracker == nil {
return
}
if ev.Incarnation > score.UptimeTracker.LastKnownIncarnation {
score.UptimeTracker.LastKnownIncarnation = ev.Incarnation
if !score.UptimeTracker.SuspectedAt.IsZero() {
score.UptimeTracker.SuspectedAt = time.Time{}
score.UptimeTracker.ConsecutiveFails = 0
logger.Info().Str("peer", ev.PeerID).
Uint64("incarnation", ev.Incarnation).
Msg("[swim] alive event via gossip cleared suspicion")
}
}
return
}
}

View File

@@ -0,0 +1,219 @@
package common
import (
"context"
"encoding/json"
"errors"
"fmt"
"sync"
"time"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
)
type Event struct {
Type string `json:"type"`
From string `json:"from"` // peerID
User string
Groups []string
DataType int64 `json:"datatype"`
Timestamp int64 `json:"ts"`
Payload []byte `json:"payload"`
Signature []byte `json:"sig"`
}
func NewEvent(name string, from string, dt *tools.DataType, user string, payload []byte) *Event {
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return nil
}
evt := &Event{
Type: name,
From: from,
User: user,
Timestamp: time.Now().UTC().Unix(),
Payload: payload,
}
if dt != nil {
evt.DataType = int64(dt.EnumIndex())
} else {
evt.DataType = -1
}
body, _ := json.Marshal(evt)
sig, _ := priv.Sign(body)
evt.Signature = sig
return evt
}
func (e *Event) RawEvent() *Event {
return &Event{
Type: e.Type,
From: e.From,
User: e.User,
DataType: e.DataType,
Timestamp: e.Timestamp,
Payload: e.Payload,
}
}
func (e *Event) toRawByte() ([]byte, error) {
return json.Marshal(e.RawEvent())
}
func (event *Event) Verify(p *peer.Peer) error {
if p == nil {
return errors.New("no peer found")
}
if p.Relation == peer.BLACKLIST { // if peer is blacklisted... quit...
return errors.New("peer is blacklisted")
}
return event.VerifySignature(p.PublicKey)
}
func (event *Event) VerifySignature(pk string) error {
pubKey, err := PubKeyFromString(pk) // extract pubkey from pubkey str
if err != nil {
return errors.New("pubkey is malformed")
}
data, err := event.toRawByte()
if err != nil {
return err
} // extract byte from raw event excluding signature.
if ok, _ := pubKey.Verify(data, event.Signature); !ok { // then verify that this pubkey signed the message
return errors.New("check signature failed")
}
return nil
}
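// exampleSignedEventRoundTrip is an illustrative sketch (not called anywhere in
// this package): the emitter builds and signs an Event with its node key via
// NewEvent, and the receiver checks blacklist status plus the signature via
// Verify. The topic name, user and payload below are placeholders; receiverSide
// is the *peer.Peer loaded from the receiver's local store.
func exampleSignedEventRoundTrip(from string, receiverSide *peer.Peer) error {
	evt := NewEvent("search", from, nil, "user-123", []byte(`{"q":"gpu"}`))
	if evt == nil {
		return errors.New("could not load the node private key")
	}
	return evt.Verify(receiverSide)
}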
type TopicNodeActivityPub struct {
NodeActivity int `json:"node_activity"`
Disposer string `json:"disposer_address"`
Name string `json:"name"`
DID string `json:"did"` // real PEER ID
PeerID string `json:"peer_id"`
}
type LongLivedPubSubService struct {
Host host.Host
LongLivedPubSubs map[string]*pubsub.Topic
PubsubMu sync.RWMutex
}
func NewLongLivedPubSubService(h host.Host) *LongLivedPubSubService {
return &LongLivedPubSubService{
Host: h,
LongLivedPubSubs: map[string]*pubsub.Topic{},
}
}
func (s *LongLivedPubSubService) GetPubSub(topicName string) *pubsub.Topic {
s.PubsubMu.Lock()
defer s.PubsubMu.Unlock()
return s.LongLivedPubSubs[topicName]
}
func (s *LongLivedPubSubService) processEvent(
ctx context.Context,
p *peer.Peer,
event *Event,
topicName string, handler func(context.Context, string, *Event) error) error {
if err := event.Verify(p); err != nil {
return err
}
return handler(ctx, topicName, event)
}
const TopicPubSubSearch = "oc-node-search"
func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(context.Context, Event, string)) error {
ps.RegisterTopicValidator(TopicPubSubSearch, func(ctx context.Context, p pp.ID, m *pubsub.Message) bool {
return true
})
if topic, err := ps.Join(TopicPubSubSearch); err != nil {
return err
} else {
s.PubsubMu.Lock()
s.LongLivedPubSubs[TopicPubSubSearch] = topic
s.PubsubMu.Unlock()
}
if f != nil {
return SubscribeEvents(s, context.Background(), TopicPubSubSearch, -1, *f)
}
// Even when no handler is needed (e.g. strict indexers), we must call
// topic.Subscribe() so that this peer sends a SUBSCRIBE control message
// to connected peers and joins the GossipSub mesh as a forwarder.
// Without this, messages cannot be relayed through indexers between nodes.
topic := s.LongLivedPubSubs[TopicPubSubSearch]
sub, err := topic.Subscribe()
if err != nil {
return err
}
go func() {
for {
if _, err := sub.Next(context.Background()); err != nil {
return
}
}
}()
return nil
}
func SubscribeEvents[T interface{}](s *LongLivedPubSubService,
ctx context.Context, proto string, timeout int, f func(context.Context, T, string),
) error {
if s.LongLivedPubSubs[proto] == nil {
return errors.New("no protocol subscribed in pubsub")
}
topic := s.LongLivedPubSubs[proto]
sub, err := topic.Subscribe() // then subscribe to it
if err != nil {
return err
}
// launch loop waiting for results.
go waitResults(topic, s, ctx, sub, proto, timeout, f)
return nil
}
func waitResults[T interface{}](topic *pubsub.Topic, s *LongLivedPubSubService, ctx context.Context, sub *pubsub.Subscription, proto string, timeout int, f func(context.Context, T, string)) {
for {
s.PubsubMu.Lock() // under the lock, make sure the cache still references this topic
if s.LongLivedPubSubs[proto] == nil { // re-register it if another goroutine dropped it
s.LongLivedPubSubs[proto] = topic
}
s.PubsubMu.Unlock()
// if still subscribed -> wait for new message
// Use a per-iteration timeout derived from the parent context so deadlines do
// not stack across iterations and cancel funcs are released every loop.
msgCtx := ctx
var cancel context.CancelFunc
if timeout != -1 {
msgCtx, cancel = context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
}
msg, err := sub.Next(msgCtx)
if cancel != nil {
cancel()
}
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
// timeout hit: no message arrived before the deadline, drop the subscription.
s.PubsubMu.Lock()
delete(s.LongLivedPubSubs, proto)
s.PubsubMu.Unlock()
return
}
continue
}
var evt T
if err := json.Unmarshal(msg.Data, &evt); err != nil { // map to event
continue
}
f(ctx, evt, fmt.Sprintf("%v", proto))
fmt.Println("DEADLOCK ?")
}
}

View File

@@ -0,0 +1,188 @@
package common
import (
"context"
cr "crypto/rand"
"io"
"net"
"slices"
"time"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
)
const MaxExpectedMbps = 100.0
const MinPayloadChallenge = 512
const MaxPayloadChallenge = 2048
const BaseRoundTrip = 400 * time.Millisecond
type UptimeTracker struct {
FirstSeen time.Time
LastSeen time.Time
TotalOnline time.Duration
ConsecutiveFails int // kept for compatibility / logging; primary eviction uses SuspectedAt
SuspectedAt time.Time // SWIM: non-zero when this peer is in suspect state
// LastKnownIncarnation is the last incarnation number received from this peer.
// When a peer sees itself suspected (SuspectedIncarnation in heartbeat) it
// increments its incarnation and the node clears the suspect state on receipt.
LastKnownIncarnation uint64
}
// RecordHeartbeat accumulates online time in a gap-aware way: the interval is only
// counted when the gap since the last heartbeat is within 2× the recommended interval (i.e. no
// extended outage). Call this each time a heartbeat is successfully processed.
func (u *UptimeTracker) RecordHeartbeat() {
now := time.Now().UTC()
if !u.LastSeen.IsZero() {
gap := now.Sub(u.LastSeen)
if gap <= 2*RecommendedHeartbeatInterval {
u.TotalOnline += gap
}
}
u.LastSeen = now
}
func (u *UptimeTracker) Uptime() time.Duration {
return time.Since(u.FirstSeen)
}
// UptimeRatio returns the fraction of tracked lifetime during which the peer was
// continuously online (gap ≤ 2×RecommendedHeartbeatInterval). Returns 0 before
// the first heartbeat interval has elapsed.
func (u *UptimeTracker) UptimeRatio() float64 {
total := time.Since(u.FirstSeen)
if total <= 0 {
return 0
}
ratio := float64(u.TotalOnline) / float64(total)
if ratio > 1 {
ratio = 1
}
return ratio
}
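// exampleUptimeAccounting is an illustrative sketch (not part of the scoring path)
// of the gap-aware accounting above: heartbeats arriving within
// 2×RecommendedHeartbeatInterval add to TotalOnline, so a peer that ticks
// regularly converges toward a ratio of 1.0. The sleep duration is a placeholder.
func exampleUptimeAccounting() float64 {
	u := &UptimeTracker{FirstSeen: time.Now().UTC()}
	u.RecordHeartbeat()               // first tick: sets LastSeen, nothing accumulated yet
	time.Sleep(50 * time.Millisecond) // well under 2×RecommendedHeartbeatInterval
	u.RecordHeartbeat()               // this gap is counted toward TotalOnline
	return u.UptimeRatio()            // close to 1.0 over this short window
}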
func (u *UptimeTracker) IsEligible(min time.Duration) bool {
return u.Uptime() >= min
}
// getBandwidthChallengeRate opens a dedicated ProtocolBandwidthProbe stream to
// remotePeer, sends a random payload, reads the echo, and computes throughput
// and a latency score. Returns (ok, bpms, latencyScore, error).
// latencyScore is 1.0 when RTT is very fast and 0.0 when at or beyond maxRoundTrip.
// Using a separate stream avoids mixing binary data on the JSON heartbeat stream
// and ensures the echo handler is actually running on the remote side.
func getBandwidthChallengeRate(h host.Host, remotePeer pp.ID, payloadSize int) (bool, float64, float64, error) {
payload := make([]byte, payloadSize)
if _, err := cr.Read(payload); err != nil {
return false, 0, 0, err
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
s, err := h.NewStream(ctx, remotePeer, ProtocolBandwidthProbe)
if err != nil {
return false, 0, 0, err
}
defer s.Reset()
s.SetDeadline(time.Now().Add(10 * time.Second))
start := time.Now()
if _, err = s.Write(payload); err != nil {
return false, 0, 0, err
}
s.CloseWrite()
// Half-close the write side so the handler's io.Copy sees EOF and stops.
// Read the echo.
response := make([]byte, payloadSize)
if _, err = io.ReadFull(s, response); err != nil {
return false, 0, 0, err
}
duration := time.Since(start)
maxRoundTrip := BaseRoundTrip + (time.Duration(payloadSize) * (100 * time.Millisecond))
mbps := float64(payloadSize*8) / duration.Seconds() / 1e6
// latencyScore: 1.0 = instant, 0.0 = at maxRoundTrip or beyond.
latencyScore := 1.0 - float64(duration)/float64(maxRoundTrip)
if latencyScore < 0 {
latencyScore = 0
}
if latencyScore > 1 {
latencyScore = 1
}
if duration > maxRoundTrip || mbps < 5.0 {
return false, float64(mbps / MaxExpectedMbps), latencyScore, nil
}
return true, float64(mbps / MaxExpectedMbps), latencyScore, nil
}
func getDiversityRate(h host.Host, peers []string) float64 {
peers, _ = checkPeers(h, peers)
diverse := []string{}
for _, p := range peers {
ip, err := ExtractIP(p)
if err != nil {
continue
}
div := ip.Mask(net.CIDRMask(24, 32)).String()
if !slices.Contains(diverse, div) {
diverse = append(diverse, div)
}
}
if len(diverse) == 0 || len(peers) == 0 {
return 1
}
return float64(len(diverse)) / float64(len(peers))
}
// getOwnDiversityRate measures subnet /24 diversity of the indexer's own connected peers.
// This evaluates the indexer's network position rather than the connecting node's topology.
func getOwnDiversityRate(h host.Host) float64 {
diverse := map[string]struct{}{}
total := 0
for _, pid := range h.Network().Peers() {
for _, maddr := range h.Peerstore().Addrs(pid) {
total++
ip, err := ExtractIP(maddr.String())
if err != nil {
continue
}
diverse[ip.Mask(net.CIDRMask(24, 32)).String()] = struct{}{}
}
}
if total == 0 {
return 1
}
return float64(len(diverse)) / float64(total)
}
func checkPeers(h host.Host, peers []string) ([]string, []string) {
concretePeer := []string{}
ips := []string{}
for _, p := range peers {
ad, err := pp.AddrInfoFromString(p)
if err != nil {
continue
}
if PeerIsAlive(h, *ad) {
concretePeer = append(concretePeer, p)
if ip, err := ExtractIP(p); err == nil {
ips = append(ips, ip.Mask(net.CIDRMask(24, 32)).String())
}
}
}
return concretePeer, ips
}
// dynamicMinScore returns the minimum acceptable score for a peer, starting
// permissive (20%) for brand-new peers and hardening linearly to 80% over 24h.
// This prevents ejecting newcomers in fresh networks while filtering parasites.
func dynamicMinScore(age time.Duration) float64 {
hours := age.Hours()
score := 20.0 + 60.0*(hours/24.0)
if score > 80.0 {
score = 80.0
}
return score
}
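// exampleDynamicMinScoreRamp is an illustrative sketch of the ramp above:
// a brand-new peer is only held to a score of 20, a 12h-old peer to 50,
// and anything older than 24h to the full 80 bar.
func exampleDynamicMinScoreRamp() (fresh, halfDay, mature float64) {
	fresh = dynamicMinScore(0)                // 20.0
	halfDay = dynamicMinScore(12 * time.Hour) // 20 + 60*0.5 = 50.0
	mature = dynamicMinScore(48 * time.Hour)  // capped at 80.0
	return
}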

View File

@@ -0,0 +1,354 @@
package common
import (
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"oc-discovery/conf"
"strings"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type LongLivedStreamRecordedService[T interface{}] struct {
*LongLivedPubSubService
StreamRecords map[protocol.ID]map[pp.ID]*StreamRecord[T]
StreamMU sync.RWMutex
maxNodesConn int
ConnGuard *ConnectionRateGuard
// AllowInbound, when set, is called once at stream open before any heartbeat
// is decoded. remotePeer is the connecting peer; isNew is true when no
// StreamRecord exists yet (first-ever connection). Return a non-nil error
// to immediately reset the stream and refuse the peer.
AllowInbound func(remotePeer pp.ID, isNew bool) error
// ValidateHeartbeat, when set, is called inside the heartbeat loop after
// each successful CheckHeartbeat decode. Return a non-nil error to reset
// the stream and terminate the session.
ValidateHeartbeat func(remotePeer pp.ID) error
// AfterHeartbeat is called after each successful heartbeat with the full
// decoded Heartbeat so the hook can use the fresh embedded PeerRecord.
AfterHeartbeat func(hb *Heartbeat)
// AfterDelete is called after gc() evicts an expired peer, outside the lock.
// name and did may be empty if the HeartbeatStream had no metadata.
AfterDelete func(pid pp.ID, name string, did string)
// BuildHeartbeatResponse, when set, is called after each successfully decoded
// heartbeat to build the response sent back to the node.
// remotePeer is the connecting peer. hb is the full decoded heartbeat, including
// SWIM fields (SuspectedIncarnation, MembershipEvents) and record/challenge data.
BuildHeartbeatResponse func(remotePeer pp.ID, hb *Heartbeat) *HeartbeatResponse
}
func (ix *LongLivedStreamRecordedService[T]) MaxNodesConn() int {
return ix.maxNodesConn
}
func NewStreamRecordedService[T interface{}](h host.Host, maxNodesConn int) *LongLivedStreamRecordedService[T] {
service := &LongLivedStreamRecordedService[T]{
LongLivedPubSubService: NewLongLivedPubSubService(h),
StreamRecords: map[protocol.ID]map[pp.ID]*StreamRecord[T]{},
maxNodesConn: maxNodesConn,
ConnGuard: newConnectionRateGuard(),
}
go service.StartGC(30 * time.Second)
// Garbage collection is needed on every map of long-lived streams; this may warrant a top-level redesign.
go service.Snapshot(1 * time.Hour)
return service
}
func (ix *LongLivedStreamRecordedService[T]) StartGC(interval time.Duration) {
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
fmt.Println("ACTUALLY RELATED INDEXERS", Indexers.Addrs, len(Indexers.Addrs))
ix.gc()
}
}()
}
func (ix *LongLivedStreamRecordedService[T]) gc() {
ix.StreamMU.Lock()
now := time.Now().UTC()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
ix.StreamMU.Unlock()
return
}
streams := ix.StreamRecords[ProtocolHeartbeat]
type gcEntry struct {
pid pp.ID
name string
did string
}
var evicted []gcEntry
for pid, rec := range streams {
// Check liveness nil-safely: the nil check must happen before any dereference.
hs := rec.HeartbeatStream
name, did := "", ""
expired := hs == nil
if hs != nil {
name, did = hs.Name, hs.DID
expired = now.After(hs.Expiry)
if !expired && hs.UptimeTracker != nil {
expired = now.Sub(hs.UptimeTracker.LastSeen) > 2*hs.Expiry.Sub(now)
}
}
if !expired {
continue
}
evicted = append(evicted, gcEntry{pid, name, did})
for _, sstreams := range ix.StreamRecords {
delete(sstreams, pid)
}
}
ix.StreamMU.Unlock()
if ix.AfterDelete != nil {
for _, e := range evicted {
ix.AfterDelete(e.pid, e.name, e.did)
}
}
}
func (ix *LongLivedStreamRecordedService[T]) Snapshot(interval time.Duration) {
go func() {
logger := oclib.GetLogger()
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
infos := ix.snapshot()
for _, inf := range infos {
logger.Info().Msg(" -> " + inf.DID)
}
}
}()
}
// -------- Snapshot / Query --------
func (ix *LongLivedStreamRecordedService[T]) snapshot() []*StreamRecord[T] {
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
out := make([]*StreamRecord[T], 0, len(ix.StreamRecords))
for _, streams := range ix.StreamRecords {
for _, stream := range streams {
out = append(out, stream)
}
}
return out
}
func (ix *LongLivedStreamRecordedService[T]) HandleHeartbeat(s network.Stream) {
logger := oclib.GetLogger()
defer s.Close()
// AllowInbound: burst guard + ban check before the first byte is read.
if ix.AllowInbound != nil {
remotePeer := s.Conn().RemotePeer()
ix.StreamMU.RLock()
_, exists := ix.StreamRecords[ProtocolHeartbeat][remotePeer]
ix.StreamMU.RUnlock()
if err := ix.AllowInbound(remotePeer, !exists); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("inbound connection refused")
s.Reset()
return
}
}
dec := json.NewDecoder(s)
for {
ix.StreamMU.Lock()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
}
streams := ix.StreamRecords[ProtocolHeartbeat]
streamsAnonym := map[pp.ID]HeartBeatStreamed{}
for k, v := range streams {
streamsAnonym[k] = v
}
ix.StreamMU.Unlock()
pid, hb, err := CheckHeartbeat(ix.Host, s, dec, streamsAnonym, &ix.StreamMU, ix.maxNodesConn)
if err != nil {
// Stream-level errors (EOF, reset, closed) mean the connection is gone
// — exit so the goroutine doesn't spin forever on a dead stream.
// "Too many connections" is likewise terminal for this session, while softer
// policy errors (e.g. score too low) keep the stream open for the next tick.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
logger.Info().Err(err).Msg("heartbeat stream terminated, closing handler")
return
}
logger.Warn().Err(err).Msg("heartbeat check failed, retrying on same stream")
continue
}
// ValidateHeartbeat: per-tick behavioral check (rate limiting, bans).
if ix.ValidateHeartbeat != nil {
if err := ix.ValidateHeartbeat(*pid); err != nil {
logger.Warn().Err(err).Str("peer", pid.String()).Msg("heartbeat rejected, closing stream")
s.Reset()
return
}
}
ix.StreamMU.Lock()
// if record already seen update last seen
if rec, ok := streams[*pid]; ok {
rec.DID = hb.DID
// Preserve the existing UptimeTracker so TotalOnline accumulates correctly.
// hb.Stream is a fresh Stream with no UptimeTracker; carry the old one over.
oldTracker := rec.GetUptimeTracker()
rec.HeartbeatStream = hb.Stream
if oldTracker != nil {
rec.HeartbeatStream.UptimeTracker = oldTracker
} else {
rec.HeartbeatStream.UptimeTracker = &UptimeTracker{FirstSeen: time.Now().UTC()}
}
rec.HeartbeatStream.UptimeTracker.RecordHeartbeat()
rec.LastScore = hb.Score
logger.Info().Msg("A new node is updated : " + pid.String())
} else {
tracker := &UptimeTracker{FirstSeen: time.Now().UTC()}
tracker.RecordHeartbeat()
hb.Stream.UptimeTracker = tracker
streams[*pid] = &StreamRecord[T]{
DID: hb.DID,
HeartbeatStream: hb.Stream,
LastScore: hb.Score,
}
logger.Info().Msg("A new node is subscribed : " + pid.String())
}
ix.StreamMU.Unlock()
// Enrich hb.DID before calling the hook: nodes never set hb.DID directly;
// extract it from the embedded signed PeerRecord if available, then fall
// back to the DID stored by handleNodePublish in the stream record.
if hb.DID == "" && len(hb.Record) > 0 {
var partial struct {
DID string `json:"did"`
}
if json.Unmarshal(hb.Record, &partial) == nil && partial.DID != "" {
hb.DID = partial.DID
}
}
if hb.DID == "" {
ix.StreamMU.RLock()
if rec, ok := streams[*pid]; ok {
hb.DID = rec.DID
}
ix.StreamMU.RUnlock()
}
if ix.AfterHeartbeat != nil && hb.DID != "" {
go ix.AfterHeartbeat(hb)
}
// Send response back to the node (bidirectional heartbeat).
if ix.BuildHeartbeatResponse != nil {
if resp := ix.BuildHeartbeatResponse(s.Conn().RemotePeer(), hb); resp != nil {
s.SetWriteDeadline(time.Now().Add(3 * time.Second))
json.NewEncoder(s).Encode(resp)
s.SetWriteDeadline(time.Time{})
}
}
}
}
func CheckHeartbeat(h host.Host, s network.Stream, dec *json.Decoder, streams map[pp.ID]HeartBeatStreamed, lock *sync.RWMutex, maxNodes int) (*pp.ID, *Heartbeat, error) {
if len(h.Network().Peers()) >= maxNodes {
return nil, nil, fmt.Errorf("too many connections, try another indexer")
}
var hb Heartbeat
if err := dec.Decode(&hb); err != nil {
return nil, nil, err
}
_, bpms, latencyScore, _ := getBandwidthChallengeRate(h, s.Conn().RemotePeer(), MinPayloadChallenge+int(rand.Float64()*(MaxPayloadChallenge-MinPayloadChallenge)))
{
pid, err := pp.Decode(hb.PeerID)
if err != nil {
return nil, nil, err
}
uptimeRatio := float64(0)
age := time.Duration(0)
lock.Lock()
if rec, ok := streams[pid]; ok && rec.GetUptimeTracker() != nil {
uptimeRatio = rec.GetUptimeTracker().UptimeRatio()
age = rec.GetUptimeTracker().Uptime()
}
lock.Unlock()
// E: measure the indexer's own subnet diversity, not the node's view.
diversity := getOwnDiversityRate(h)
// fillRate: fraction of indexer capacity used — higher = more peers trust this indexer.
fillRate := 0.0
if maxNodes > 0 {
fillRate = float64(len(h.Network().Peers())) / float64(maxNodes)
if fillRate > 1 {
fillRate = 1
}
}
hb.ComputeIndexerScore(uptimeRatio, bpms, diversity, latencyScore, fillRate)
// B: dynamic minScore — starts at 20% for brand-new peers, ramps to 80% at 24h.
minScore := dynamicMinScore(age)
if hb.Score < minScore {
return nil, nil, errors.New("not enough trusting value")
}
hb.Stream = &Stream{
Name: hb.Name,
DID: hb.DID,
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
} // here is the long-lived bidirectional heartbeat.
return &pid, &hb, err
}
}
// ── ConnectionRateGuard ───────────────────────────────────────────────────────
// ConnectionRateGuard limits the number of NEW incoming connections accepted
// within a sliding time window. It protects public indexers against coordinated
// registration floods (Sybil bursts).
const defaultMaxConnPerWindow = 20
const defaultConnWindowSecs = 30
type ConnectionRateGuard struct {
mu sync.Mutex
window []time.Time
maxInWindow int
windowDur time.Duration
}
func newConnectionRateGuard() *ConnectionRateGuard {
cfg := conf.GetConfig()
return &ConnectionRateGuard{
maxInWindow: CfgOr(cfg.MaxConnPerWindow, defaultMaxConnPerWindow),
windowDur: time.Duration(CfgOr(cfg.ConnWindowSecs, defaultConnWindowSecs)) * time.Second,
}
}
// Allow returns true if a new connection may be accepted.
// The internal window is pruned on each call so memory stays bounded.
func (g *ConnectionRateGuard) Allow() bool {
g.mu.Lock()
defer g.mu.Unlock()
now := time.Now()
cutoff := now.Add(-g.windowDur)
i := 0
for i < len(g.window) && g.window[i].Before(cutoff) {
i++
}
g.window = g.window[i:]
if len(g.window) >= g.maxInWindow {
return false
}
g.window = append(g.window, now)
return true
}
func CfgOr(v, def int) int {
if v > 0 {
return v
}
return def
}
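// exampleWireConnGuard is a hypothetical wiring sketch (the real wiring lives in
// the indexer package): the AllowInbound hook consults ConnGuard so that only
// genuinely new peers consume slots in the sliding window, while peers that
// already have a StreamRecord reconnect freely. The error text is illustrative.
func exampleWireConnGuard[T interface{}](svc *LongLivedStreamRecordedService[T]) {
	svc.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
		if isNew && !svc.ConnGuard.Allow() {
			return errors.New("registration burst: connection rate limit exceeded, retry later")
		}
		return nil
	}
}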

View File

@@ -0,0 +1,304 @@
package common
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
// InitialEventHops is the starting hop count for SWIM membership events.
// floor(log2(typical max-pool)) + 1 gives O(log n) propagation rounds.
const InitialEventHops = 4
const maxMemberEventQueue = 50
// MembershipEventQueue holds SWIM membership events to be piggybacked on
// outgoing heartbeats (infection-style dissemination). Bounded at
// maxMemberEventQueue entries; events are deduplicated by PeerID.
type MembershipEventQueue struct {
mu sync.Mutex
events []MemberEvent
}
// memberEventPriority maps event types to an integer so higher-severity
// events override lower-severity ones for the same PeerID.
func memberEventPriority(t MemberEventType) int {
switch t {
case MemberDead:
return 3
case MemberSuspect:
return 2
case MemberAlive:
return 1
}
return 0
}
// Add inserts or updates a membership event.
// An incoming event replaces the existing entry for the same PeerID when:
// - its Incarnation is higher, OR
// - the Incarnation is equal but the event type is higher-severity.
func (q *MembershipEventQueue) Add(e MemberEvent) {
q.mu.Lock()
defer q.mu.Unlock()
for i, ex := range q.events {
if ex.PeerID == e.PeerID {
if e.Incarnation > ex.Incarnation ||
(e.Incarnation == ex.Incarnation && memberEventPriority(e.Type) > memberEventPriority(ex.Type)) {
q.events[i] = e
}
return
}
}
if len(q.events) >= maxMemberEventQueue {
q.events = q.events[1:] // drop oldest
}
q.events = append(q.events, e)
}
// Drain returns up to max events ready for transmission.
// HopsLeft is decremented on each call; events that reach 0 are removed from
// the queue (they have already propagated enough rounds).
func (q *MembershipEventQueue) Drain(max int) []MemberEvent {
q.mu.Lock()
defer q.mu.Unlock()
if len(q.events) == 0 {
return nil
}
out := make([]MemberEvent, 0, max)
kept := q.events[:0]
for _, e := range q.events {
if len(out) < max {
e.HopsLeft--
out = append(out, e)
if e.HopsLeft > 0 {
kept = append(kept, e)
}
// HopsLeft reached 0: event has propagated enough, drop from queue.
} else {
kept = append(kept, e)
}
}
q.events = kept
return out
}
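// exampleEventPiggyback is an illustrative sketch of the queue contract: the
// failure-detection path enqueues events, a later higher-severity event for the
// same peer overrides the earlier one, and each outgoing heartbeat drains a small
// batch. The peer ID string and batch size are placeholders.
func exampleEventPiggyback(q *MembershipEventQueue) []MemberEvent {
	q.Add(MemberEvent{Type: MemberSuspect, PeerID: "12D3KooW-example", Incarnation: 1, HopsLeft: InitialEventHops})
	// Same incarnation but higher severity: replaces the suspect entry.
	q.Add(MemberEvent{Type: MemberDead, PeerID: "12D3KooW-example", Incarnation: 1, HopsLeft: InitialEventHops})
	return q.Drain(3) // attach up to 3 events to the next heartbeat
}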
// NodeEventQueue is the global SWIM event queue for the node side.
// Events are added on suspect/dead detection and drained into outgoing heartbeats.
var NodeEventQueue = &MembershipEventQueue{}
const (
ProtocolPublish = "/opencloud/record/publish/1.0"
ProtocolGet = "/opencloud/record/get/1.0"
ProtocolDelete = "/opencloud/record/delete/1.0"
// ProtocolIndirectProbe is opened by a node toward a live indexer to ask it
// to actively probe a suspected indexer on the node's behalf (SWIM indirect ping).
// It is the only inter-indexer protocol — indexers do not maintain persistent
// connections to each other; this stream is one-shot and short-lived.
ProtocolIndirectProbe = "/opencloud/indexer/probe/1.0"
)
// IndirectProbeRequest is sent by a node over ProtocolIndirectProbe.
// The receiving indexer must attempt to reach Target and report back.
type IndirectProbeRequest struct {
Target pp.AddrInfo `json:"target"`
}
// IndirectProbeResponse is the reply from the probing indexer.
type IndirectProbeResponse struct {
Reachable bool `json:"reachable"`
LatencyMs int64 `json:"latency_ms,omitempty"`
}
const ProtocolHeartbeat = "/opencloud/heartbeat/1.0"
// ProtocolWitnessQuery is opened by a node to ask a peer what it thinks of a given indexer.
const ProtocolWitnessQuery = "/opencloud/witness/1.0"
// ProtocolSearchPeer is opened by a node toward one of its indexers to start a
// distributed peer search. The stream stays open; the indexer writes
// SearchPeerResult JSON objects as results arrive from the GossipSub mesh.
const ProtocolSearchPeer = "/opencloud/search/peer/1.0"
// ProtocolSearchPeerResponse is opened by an indexer back toward the emitting
// indexer to deliver search results found in its referencedNodes.
const ProtocolSearchPeerResponse = "/opencloud/search/peer/response/1.0"
// ProtocolBandwidthProbe is a dedicated short-lived stream used exclusively
// for bandwidth/latency measurement. The handler echoes any bytes it receives.
// All nodes and indexers register this handler so peers can measure them.
const ProtocolBandwidthProbe = "/opencloud/probe/1.0"
type Stream struct {
Name string `json:"name"`
DID string `json:"did"`
Stream network.Stream
Expiry time.Time `json:"expiry"`
UptimeTracker *UptimeTracker
}
func (s *Stream) GetUptimeTracker() *UptimeTracker {
return s.UptimeTracker
}
func NewStream[T interface{}](s network.Stream, did string, record T) *Stream {
return &Stream{
DID: did,
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
}
type StreamRecord[T interface{}] struct {
DID string
HeartbeatStream *Stream
Record T
LastScore float64
}
func (s *StreamRecord[T]) GetUptimeTracker() *UptimeTracker {
if s.HeartbeatStream == nil {
return nil
}
return s.HeartbeatStream.UptimeTracker
}
type ProtocolInfo struct {
PersistantStream bool
WaitResponse bool
TTL time.Duration
}
func TempStream(h host.Host, ad pp.AddrInfo, proto protocol.ID, did string, streams ProtocolStream, pts map[protocol.ID]*ProtocolInfo, mu *sync.RWMutex) (ProtocolStream, error) {
expiry := 2 * time.Second
if pts[proto] != nil {
expiry = pts[proto].TTL
}
ctxTTL, cancelTTL := context.WithTimeout(context.Background(), expiry)
defer cancelTTL()
if h.Network().Connectedness(ad.ID) != network.Connected {
if err := h.Connect(ctxTTL, ad); err != nil {
return streams, err
}
}
if streams[proto] != nil && streams[proto][ad.ID] != nil {
return streams, nil
} else if s, err := h.NewStream(ctxTTL, ad.ID, proto); err == nil {
mu.Lock()
if streams[proto] == nil {
streams[proto] = map[pp.ID]*Stream{}
}
mu.Unlock()
time.AfterFunc(expiry, func() {
mu.Lock()
delete(streams[proto], ad.ID)
mu.Unlock()
})
mu.Lock()
streams[proto][ad.ID] = &Stream{
DID: did,
Stream: s,
Expiry: time.Now().UTC().Add(expiry),
}
mu.Unlock()
return streams, nil
} else {
return streams, err
}
}
func sendHeartbeat(ctx context.Context, h host.Host, proto protocol.ID, p *pp.AddrInfo,
hb Heartbeat, ps ProtocolStream, interval time.Duration) (*HeartbeatResponse, time.Duration, error) {
logger := oclib.GetLogger()
if ps[proto] == nil {
ps[proto] = map[pp.ID]*Stream{}
}
streams := ps[proto]
pss, exists := streams[p.ID]
ctxTTL, cancel := context.WithTimeout(ctx, 3*interval)
defer cancel()
if h.Network().Connectedness(p.ID) != network.Connected {
if err := h.Connect(ctxTTL, *p); err != nil {
logger.Err(err).Msg("failed to connect to peer for heartbeat")
return nil, 0, err
}
exists = false
}
if !exists || pss.Stream == nil {
logger.Info().Msg("New Stream engaged as Heartbeat " + fmt.Sprintf("%v", proto) + " " + p.ID.String())
s, err := h.NewStream(ctx, p.ID, proto)
if err != nil {
logger.Err(err).Msg(err.Error())
return nil, 0, err
}
pss = &Stream{
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
streams[p.ID] = pss
}
sentAt := time.Now()
if err := json.NewEncoder(pss.Stream).Encode(&hb); err != nil {
pss.Stream.Close()
pss.Stream = nil
return nil, 0, err
}
pss.Expiry = time.Now().UTC().Add(2 * time.Minute)
// Try to read a response (indexers that support bidirectional heartbeat respond).
pss.Stream.SetReadDeadline(time.Now().Add(5 * time.Second))
var resp HeartbeatResponse
rtt := time.Since(sentAt)
if err := json.NewDecoder(pss.Stream).Decode(&resp); err == nil {
rtt = time.Since(sentAt)
pss.Stream.SetReadDeadline(time.Time{})
return &resp, rtt, nil
}
pss.Stream.SetReadDeadline(time.Time{})
return nil, rtt, nil
}
func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host, proto protocol.ID, id pp.ID, mypid pp.ID, force bool, onStreamCreated *func(network.Stream)) ProtocolStream {
logger := oclib.GetLogger()
if onStreamCreated == nil {
f := func(s network.Stream) {
protoS[proto][id] = &Stream{
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
}
onStreamCreated = &f
}
f := *onStreamCreated
if mypid > id || force {
if ctx == nil {
c := context.Background()
ctx = &c
}
if protoS[proto] == nil {
protoS[proto] = map[pp.ID]*Stream{}
}
if protoS[proto][id] != nil {
protoS[proto][id].Expiry = time.Now().Add(2 * time.Minute)
} else {
logger.Info().Msg("NEW STREAM Generated" + fmt.Sprintf("%v", proto) + " " + id.String())
s, err := h.NewStream(*ctx, id, proto)
if err != nil {
panic(err.Error())
}
f(s)
}
}
return protoS
}

View File

@@ -0,0 +1,199 @@
package common
import (
"context"
"encoding/json"
"sort"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// ProtocolIndexerCandidates is opened by a node toward its remaining indexers
// to request candidate replacement indexers after an ejection event.
const ProtocolIndexerCandidates = "/opencloud/indexer/candidates/1.0"
// IndexerCandidatesRequest is sent by a node to one of its indexers.
// Count is how many candidates are needed.
type IndexerCandidatesRequest struct {
Count int `json:"count"`
}
// IndexerCandidatesResponse carries a random sample of known indexers from
// the responding indexer's DHT cache.
type IndexerCandidatesResponse struct {
Candidates []pp.AddrInfo `json:"candidates"`
}
// TriggerConsensus asks each remaining indexer for a random pool of candidates,
// scores them asynchronously via a one-shot probe heartbeat, and admits the
// best ones to StaticIndexers. Falls back to DHT replenishment for any gap.
//
// Must be called in a goroutine — it blocks until all probes have returned
// (or timed out), which can take up to ~10s.
func TriggerConsensus(h host.Host, remaining []pp.AddrInfo, need int) {
if need <= 0 || len(remaining) == 0 {
return
}
logger := oclib.GetLogger()
logger.Info().Int("voters", len(remaining)).Int("need", need).
Msg("[consensus] starting indexer candidate consensus")
// Phase 1 — collect candidates from all remaining indexers in parallel.
type collectResult struct{ candidates []pp.AddrInfo }
collectCh := make(chan collectResult, len(remaining))
for _, ai := range remaining {
go func(ai pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := h.NewStream(ctx, ai.ID, ProtocolIndexerCandidates)
if err != nil {
collectCh <- collectResult{}
return
}
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
if err := json.NewEncoder(s).Encode(IndexerCandidatesRequest{Count: need + 2}); err != nil {
collectCh <- collectResult{}
return
}
var resp IndexerCandidatesResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
collectCh <- collectResult{}
return
}
collectCh <- collectResult{candidates: resp.Candidates}
}(ai)
}
// Merge and deduplicate, excluding indexers already in the pool.
seen := map[pp.ID]struct{}{}
for _, ai := range Indexers.GetAddrIDs() {
seen[ai] = struct{}{}
}
var candidates []pp.AddrInfo
for range remaining {
r := <-collectCh
for _, ai := range r.candidates {
if _, dup := seen[ai.ID]; !dup {
seen[ai.ID] = struct{}{}
candidates = append(candidates, ai)
}
}
}
if len(candidates) == 0 {
logger.Info().Msg("[consensus] no candidates from voters, falling back to DHT")
replenishIndexersFromDHT(h, need)
return
}
logger.Info().Int("candidates", len(candidates)).Msg("[consensus] scoring candidates")
// Phase 2 — score all candidates in parallel via a one-shot probe heartbeat.
type scoreResult struct {
ai pp.AddrInfo
score float64
}
scoreCh := make(chan scoreResult, len(candidates))
for _, ai := range candidates {
go func(ai pp.AddrInfo) {
resp, rtt, err := probeIndexer(h, ai)
if err != nil {
scoreCh <- scoreResult{ai: ai, score: 0}
return
}
scoreCh <- scoreResult{ai: ai, score: quickScore(resp, rtt)}
}(ai)
}
results := make([]scoreResult, 0, len(candidates))
for range candidates {
results = append(results, <-scoreCh)
}
// Sort descending by quick score, admit top `need` above the minimum bar.
sort.Slice(results, func(i, j int) bool { return results[i].score > results[j].score })
minQ := dynamicMinScore(0) // fresh peer: threshold starts at 20
admitted := 0
for _, res := range results {
if admitted >= need {
break
}
if res.score < minQ {
break // sorted desc: everything after is worse
}
key := addrKey(res.ai)
if Indexers.ExistsAddr(key) {
continue // already in pool (race with heartbeat path)
}
cpy := res.ai
Indexers.SetAddr(key, &cpy)
admitted++
}
if admitted > 0 {
logger.Info().Int("admitted", admitted).Msg("[consensus] candidates admitted to pool")
Indexers.NudgeIt()
}
// Fill any remaining gap with DHT discovery.
if gap := need - admitted; gap > 0 {
logger.Info().Int("gap", gap).Msg("[consensus] gap after consensus, falling back to DHT")
replenishIndexersFromDHT(h, gap)
}
}
// probeIndexer dials the candidate, sends one lightweight heartbeat, and
// returns the HeartbeatResponse (nil if the indexer doesn't support it) and RTT.
func probeIndexer(h host.Host, ai pp.AddrInfo) (*HeartbeatResponse, time.Duration, error) {
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
if h.Network().Connectedness(ai.ID) != network.Connected {
if err := h.Connect(ctx, ai); err != nil {
return nil, 0, err
}
}
s, err := h.NewStream(ctx, ai.ID, ProtocolHeartbeat)
if err != nil {
return nil, 0, err
}
defer s.Close()
hb := Heartbeat{PeerID: h.ID().String(), Timestamp: time.Now().UTC().Unix()}
s.SetWriteDeadline(time.Now().Add(3 * time.Second))
if err := json.NewEncoder(s).Encode(hb); err != nil {
return nil, 0, err
}
s.SetWriteDeadline(time.Time{})
sentAt := time.Now()
s.SetReadDeadline(time.Now().Add(5 * time.Second))
var resp HeartbeatResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
// Indexer connected but no response: connection itself is the signal.
return nil, time.Since(sentAt), nil
}
return &resp, time.Since(sentAt), nil
}
// quickScore computes a lightweight score [0,100] from a probe result.
// Uses only fill rate (inverse) and latency — the two signals available
// without a full heartbeat history.
func quickScore(resp *HeartbeatResponse, rtt time.Duration) float64 {
maxRTT := BaseRoundTrip * 10
latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
if latencyScore < 0 {
latencyScore = 0
}
if resp == nil {
// Connection worked but no response (old indexer): moderate score.
return latencyScore * 50
}
fillScore := 1.0 - resp.FillRate // prefer less-loaded indexers
return (0.5*latencyScore + 0.5*fillScore) * 100
}
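// exampleQuickScore is an illustrative sketch of the two probe outcomes quickScore
// distinguishes: a responsive, half-full indexer versus one that only accepted the
// connection. The RTT and fill-rate values are made up for the example.
func exampleQuickScore() (withResponse, connectionOnly float64) {
	resp := &HeartbeatResponse{FillRate: 0.5}
	withResponse = quickScore(resp, 400*time.Millisecond)  // blends latency and fill rate
	connectionOnly = quickScore(nil, 400*time.Millisecond) // capped at latencyScore*50
	return
}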

View File

@@ -0,0 +1,66 @@
package common
import (
"bytes"
"encoding/base64"
"errors"
"oc-discovery/conf"
"oc-discovery/models"
"os"
"cloud.o-forge.io/core/oc-lib/models/peer"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/pnet"
)
func VerifyPeer(peers []*peer.Peer, event models.Event) error {
if len(peers) == 0 {
return errors.New("no peer found")
}
p := peers[0]
if p.Relation == peer.BLACKLIST { // if peer is blacklisted... quit...
return errors.New("peer is blacklisted")
}
pubKey, err := PubKeyFromString(p.PublicKey) // extract pubkey from pubkey str
if err != nil {
return errors.New("pubkey is malformed")
}
data, err := event.ToRawByte()
if err != nil {
return err
} // extract byte from raw event excluding signature.
if ok, _ := pubKey.Verify(data, event.Signature); !ok { // then verify that this pubkey signed the message
return errors.New("check signature failed")
}
return nil
}
func Sign(priv crypto.PrivKey, data []byte) ([]byte, error) {
return priv.Sign(data)
}
func Verify(pub crypto.PubKey, data, sig []byte) (bool, error) {
return pub.Verify(data, sig)
}
func LoadPSKFromFile() (pnet.PSK, error) {
path := conf.GetConfig().PSKPath
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
psk, err := pnet.DecodeV1PSK(bytes.NewReader(data))
if err != nil {
return nil, err
}
return psk, nil
}
func PubKeyFromString(s string) (crypto.PubKey, error) {
data, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return nil, err
}
return crypto.UnmarshalPublicKey(data)
}

View File

@@ -0,0 +1,219 @@
package common
import (
"context"
"math/rand"
"strings"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/ipfs/go-cid"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
)
// FilterLoopbackAddrs strips loopback (127.x, ::1) and unspecified addresses
// from an AddrInfo so we never hand peers an address they cannot dial externally.
func FilterLoopbackAddrs(ai pp.AddrInfo) pp.AddrInfo {
filtered := make([]ma.Multiaddr, 0, len(ai.Addrs))
for _, addr := range ai.Addrs {
ip, err := ExtractIP(addr.String())
if err != nil || ip.IsLoopback() || ip.IsUnspecified() {
continue
}
filtered = append(filtered, addr)
}
return pp.AddrInfo{ID: ai.ID, Addrs: filtered}
}
// RecommendedHeartbeatInterval is the target period between heartbeat ticks.
// Indexers use this as the DHT Provide refresh interval.
const RecommendedHeartbeatInterval = 60 * time.Second
// discoveryDHT is the DHT instance used for indexer discovery.
// Set by SetDiscoveryDHT once the indexer service initialises its DHT.
var discoveryDHT *dht.IpfsDHT
// SetDiscoveryDHT stores the DHT instance used by replenishIndexersFromDHT.
// Called by NewIndexerService once the DHT is ready.
func SetDiscoveryDHT(d *dht.IpfsDHT) {
discoveryDHT = d
}
// initNodeDHT creates a lightweight DHT client for pure nodes (no IndexerService).
// Uses the seed indexers as bootstrap peers. Called lazily by ConnectToIndexers
// when discoveryDHT is still nil after the initial warm-up delay.
func initNodeDHT(h host.Host, seeds []Entry) {
logger := oclib.GetLogger()
bootstrapPeers := []pp.AddrInfo{}
for _, s := range seeds {
bootstrapPeers = append(bootstrapPeers, *s.Info)
}
d, err := dht.New(context.Background(), h,
dht.Mode(dht.ModeClient),
dht.ProtocolPrefix("oc"),
dht.BootstrapPeers(bootstrapPeers...),
)
if err != nil {
logger.Warn().Err(err).Msg("[dht] node DHT client init failed")
return
}
SetDiscoveryDHT(d)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
if err := d.Bootstrap(ctx); err != nil {
logger.Warn().Err(err).Msg("[dht] node DHT client bootstrap failed")
}
logger.Info().Msg("[dht] node DHT client ready")
}
// IndexerCID returns the well-known CID under which all indexers advertise.
func IndexerCID() cid.Cid {
h, _ := mh.Sum([]byte("/opencloud/indexers"), mh.SHA2_256, -1)
return cid.NewCidV1(cid.Raw, h)
}
// DiscoverIndexersFromDHT uses the DHT to find up to count indexers advertising
// under the well-known key. Excludes self. Resolves addresses when the provider
// record carries none.
func DiscoverIndexersFromDHT(h host.Host, d *dht.IpfsDHT, count int) []pp.AddrInfo {
logger := oclib.GetLogger()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
c := IndexerCID()
ch := d.FindProvidersAsync(ctx, c, count*2)
seen := map[pp.ID]struct{}{}
var results []pp.AddrInfo
for ai := range ch {
if ai.ID == h.ID() {
continue
}
if _, dup := seen[ai.ID]; dup {
continue
}
seen[ai.ID] = struct{}{}
if len(ai.Addrs) == 0 {
resolved, err := d.FindPeer(ctx, ai.ID)
if err != nil {
logger.Warn().Str("peer", ai.ID.String()).Msg("[dht] no addrs and FindPeer failed, skipping")
continue
}
ai = resolved
}
ai = FilterLoopbackAddrs(ai)
if len(ai.Addrs) == 0 {
continue
}
results = append(results, ai)
if len(results) >= count {
break
}
}
logger.Info().Int("found", len(results)).Msg("[dht] indexer discovery complete")
return results
}
// SelectByFillRate picks up to want providers using fill-rate weighted random
// selection w(F) = F*(1-F) — peaks at F=0.5, prefers less-loaded indexers.
// Providers with unknown fill rate receive F=0.5 (neutral prior).
// Enforces subnet /24 diversity: at most one indexer per /24.
func SelectByFillRate(providers []pp.AddrInfo, fillRates map[pp.ID]float64, want int) []pp.AddrInfo {
if len(providers) == 0 || want <= 0 {
return nil
}
type weighted struct {
ai pp.AddrInfo
weight float64
}
ws := make([]weighted, 0, len(providers))
for _, ai := range providers {
f, ok := fillRates[ai.ID]
if !ok {
f = 0.5
}
ws = append(ws, weighted{ai: ai, weight: f * (1 - f)})
}
// Shuffle first for fairness among equal-weight peers.
rand.Shuffle(len(ws), func(i, j int) { ws[i], ws[j] = ws[j], ws[i] })
// Sort descending by weight (simple insertion sort — small N).
for i := 1; i < len(ws); i++ {
for j := i; j > 0 && ws[j].weight > ws[j-1].weight; j-- {
ws[j], ws[j-1] = ws[j-1], ws[j]
}
}
subnets := map[string]struct{}{}
var selected []pp.AddrInfo
for _, w := range ws {
if len(selected) >= want {
break
}
subnet := subnetOf(w.ai)
if subnet != "" {
if _, dup := subnets[subnet]; dup {
continue
}
subnets[subnet] = struct{}{}
}
selected = append(selected, w.ai)
}
return selected
}
// subnetOf returns the /24 subnet string for the first non-loopback address of ai.
func subnetOf(ai pp.AddrInfo) string {
for _, maddr := range ai.Addrs {
ip, err := ExtractIP(maddr.String())
if err != nil || ip.IsLoopback() {
continue
}
parts := strings.Split(ip.String(), ".")
if len(parts) >= 3 {
return parts[0] + "." + parts[1] + "." + parts[2]
}
}
return ""
}
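// exampleFillRateWeights is an illustrative sketch of the w(F) = F*(1-F) curve
// used by SelectByFillRate: a half-full indexer gets the highest weight, empty
// and saturated ones get none. The fill-rate samples are placeholders.
func exampleFillRateWeights() map[float64]float64 {
	weights := map[float64]float64{}
	for _, f := range []float64{0.0, 0.25, 0.5, 0.9, 1.0} {
		weights[f] = f * (1 - f) // 0, 0.1875, 0.25, 0.09, 0
	}
	return weights
}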
// replenishIndexersFromDHT is called when an indexer heartbeat fails and more
// indexers are needed. Queries the DHT and adds fresh entries to StaticIndexers.
func replenishIndexersFromDHT(h host.Host, need int) {
if need <= 0 || discoveryDHT == nil {
return
}
logger := oclib.GetLogger()
logger.Info().Int("need", need).Msg("[dht] replenishing indexer pool from DHT")
providers := DiscoverIndexersFromDHT(h, discoveryDHT, need*3)
selected := SelectByFillRate(providers, nil, need)
if len(selected) == 0 {
logger.Warn().Msg("[dht] no indexers found in DHT for replenishment")
return
}
added := 0
for _, ai := range selected {
addr := addrKey(ai)
if !Indexers.ExistsAddr(addr) {
adCopy := ai
Indexers.SetAddr(addr, &adCopy)
added++
}
}
if added > 0 {
logger.Info().Int("added", added).Msg("[dht] indexers added from DHT")
Indexers.NudgeIt()
}
}
// addrKey returns the canonical map key for an AddrInfo.
// The PeerID is used as key so the same peer is never stored twice regardless
// of which of its addresses was seen first.
func addrKey(ai pp.AddrInfo) string {
return ai.ID.String()
}

View File

@@ -0,0 +1,17 @@
package common
import (
"context"
"cloud.o-forge.io/core/oc-lib/models/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
type HeartBeatStreamed interface {
GetUptimeTracker() *UptimeTracker
}
type DiscoveryPeer interface {
GetPeerRecord(ctx context.Context, key string) ([]*peer.Peer, error)
GetPubSub(topicName string) *pubsub.Topic
}

View File

@@ -0,0 +1,94 @@
package common
import (
"context"
"oc-discovery/conf"
"strings"
"sync"
"time"
"github.com/google/uuid"
)
// SearchIdleTimeout returns the configured search idle timeout (default 5s).
func SearchIdleTimeout() time.Duration {
if t := conf.GetConfig().SearchTimeout; t > 0 {
return time.Duration(t) * time.Second
}
return 5 * time.Second
}
// searchEntry holds the lifecycle state for one active search.
type searchEntry struct {
cancel context.CancelFunc
timer *time.Timer
idleTimeout time.Duration
}
// SearchTracker tracks one active search per user (peer or resource).
// Each search is keyed by a composite "user:searchID" so that a replaced
// search's late-arriving results can be told apart from the current one.
//
// Typical usage:
//
// ctx, cancel := context.WithCancel(parent)
// key := tracker.Register(userKey, cancel, idleTimeout)
// defer tracker.Cancel(key)
// // ... on each result: tracker.ResetIdle(key) + tracker.IsActive(key)
type SearchTracker struct {
mu sync.Mutex
entries map[string]*searchEntry
}
func NewSearchTracker() *SearchTracker {
return &SearchTracker{entries: map[string]*searchEntry{}}
}
// Register starts a new search for baseUser, cancelling any previous one.
// Returns the composite key "baseUser:searchID" to be used as the search identifier.
func (t *SearchTracker) Register(baseUser string, cancel context.CancelFunc, idleTimeout time.Duration) string {
compositeKey := baseUser + ":" + uuid.New().String()
t.mu.Lock()
t.cancelByPrefix(baseUser)
e := &searchEntry{cancel: cancel, idleTimeout: idleTimeout}
e.timer = time.AfterFunc(idleTimeout, func() { t.Cancel(compositeKey) })
t.entries[compositeKey] = e
t.mu.Unlock()
return compositeKey
}
// Cancel cancels the search(es) matching user (bare user key or composite key).
func (t *SearchTracker) Cancel(user string) {
t.mu.Lock()
t.cancelByPrefix(user)
t.mu.Unlock()
}
// ResetIdle resets the idle timer for compositeKey after a response arrives.
func (t *SearchTracker) ResetIdle(compositeKey string) {
t.mu.Lock()
if e, ok := t.entries[compositeKey]; ok {
e.timer.Reset(e.idleTimeout)
}
t.mu.Unlock()
}
// IsActive returns true if compositeKey is still the current active search.
func (t *SearchTracker) IsActive(compositeKey string) bool {
t.mu.Lock()
_, ok := t.entries[compositeKey]
t.mu.Unlock()
return ok
}
// cancelByPrefix cancels all entries whose key equals user or starts with "user:".
// Must be called with t.mu held.
func (t *SearchTracker) cancelByPrefix(user string) {
for k, e := range t.entries {
if k == user || strings.HasPrefix(k, user+":") {
e.timer.Stop()
e.cancel()
delete(t.entries, k)
}
}
}

View File

@@ -0,0 +1,68 @@
package common
import (
"context"
"fmt"
"math/rand"
"net"
"time"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
)
func PeerIsAlive(h host.Host, ad pp.AddrInfo) bool {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
err := h.Connect(ctx, ad)
return err == nil
}
func ExtractIP(addr string) (net.IP, error) {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
ipStr, err := ma.ValueForProtocol(multiaddr.P_IP4)
if err != nil {
ipStr, err = ma.ValueForProtocol(multiaddr.P_IP6)
if err != nil {
return nil, err
}
}
ip := net.ParseIP(ipStr)
if ip == nil {
return nil, fmt.Errorf("invalid IP: %s", ipStr)
}
return ip, nil
}
func GetIndexer(addrOrId string) *pp.AddrInfo {
return Indexers.GetAddr(addrOrId)
}
func GetIndexersIDs() []pp.ID {
return Indexers.GetAddrIDs()
}
func GetIndexersStr() []string {
return Indexers.GetAddrsStr()
}
func GetIndexers() []*pp.AddrInfo {
entries := Indexers.GetAddrs()
result := make([]*pp.AddrInfo, 0, len(entries))
for _, e := range entries {
result = append(result, e.Info)
}
return result
}
func Shuffle[T any](slice []T) []T {
rand.Shuffle(len(slice), func(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
})
return slice
}

View File

@@ -0,0 +1,31 @@
package node
import (
"github.com/libp2p/go-libp2p/core/control"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
// OCConnectionGater allows all connections unconditionally.
// Peer validation (local DB + DHT by peer_id) is enforced at the stream level
// in each handler, so ProtocolHeartbeat and ProtocolPublish — through which a
// node first registers itself — are never blocked.
type OCConnectionGater struct {
host host.Host
}
func newOCConnectionGater(h host.Host) *OCConnectionGater {
return &OCConnectionGater{host: h}
}
func (g *OCConnectionGater) InterceptPeerDial(_ pp.ID) bool { return true }
func (g *OCConnectionGater) InterceptAddrDial(_ pp.ID, _ ma.Multiaddr) bool { return true }
func (g *OCConnectionGater) InterceptAccept(_ network.ConnMultiaddrs) bool { return true }
func (g *OCConnectionGater) InterceptSecured(_ network.Direction, _ pp.ID, _ network.ConnMultiaddrs) bool {
return true
}
func (g *OCConnectionGater) InterceptUpgraded(_ network.Conn) (bool, control.DisconnectReason) {
return true, 0
}
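// Hypothetical wiring sketch (the actual host construction lives elsewhere):
// the gater is handed to libp2p at host creation time.
//
//	gater := newOCConnectionGater(nil) // host reference can be attached after creation
//	h, err := libp2p.New(libp2p.ConnectionGater(gater))
//
// Because every Intercept* method returns true, the gater today is only a hook
// point for future connection-level policy.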

View File

@@ -0,0 +1,207 @@
package indexer
import (
"errors"
"sync"
"time"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// ── defaults ──────────────────────────────────────────────────────────────────
const (
defaultMaxHBPerMinute = 5
defaultMaxPublishPerMin = 10
defaultMaxGetPerMin = 50
strikeThreshold = 3
banDuration = 10 * time.Minute
behaviorWindowDur = 60 * time.Second
)
// ── per-node state ────────────────────────────────────────────────────────────
type nodeBehavior struct {
mu sync.Mutex
knownDID string
hbTimes []time.Time
pubTimes []time.Time
getTimes []time.Time
strikes int
bannedUntil time.Time
}
func (nb *nodeBehavior) isBanned() bool {
return time.Now().UTC().Before(nb.bannedUntil)
}
func (nb *nodeBehavior) strike(n int) {
nb.strikes += n
if nb.strikes >= strikeThreshold {
nb.bannedUntil = time.Now().Add(banDuration)
}
}
func pruneWindow(ts []time.Time, dur time.Duration) []time.Time {
cutoff := time.Now().Add(-dur)
i := 0
for i < len(ts) && ts[i].Before(cutoff) {
i++
}
return ts[i:]
}
// recordInWindow appends now to the window slice and returns false (+ adds a
// strike) when the count exceeds max.
func (nb *nodeBehavior) recordInWindow(ts *[]time.Time, max int) bool {
*ts = pruneWindow(*ts, behaviorWindowDur)
if len(*ts) >= max {
nb.strike(1)
return false
}
*ts = append(*ts, time.Now())
return true
}
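// exampleRecordInWindow is an illustrative sketch of the sliding window: with a
// limit of 3 per window, the fourth call inside behaviorWindowDur is rejected and
// costs the peer a strike. Callers in this file hold nb.mu; the sketch is single-goroutine.
func exampleRecordInWindow() bool {
	nb := &nodeBehavior{}
	for i := 0; i < 3; i++ {
		nb.recordInWindow(&nb.hbTimes, 3) // accepted
	}
	return nb.recordInWindow(&nb.hbTimes, 3) // false: window full, one strike added
}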
// ── NodeBehaviorTracker ───────────────────────────────────────────────────────
// NodeBehaviorTracker is the indexer-side per-node compliance monitor.
// It is entirely local: no state is shared with other indexers.
type NodeBehaviorTracker struct {
mu sync.RWMutex
nodes map[pp.ID]*nodeBehavior
maxHB int
maxPub int
maxGet int
}
func newNodeBehaviorTracker() *NodeBehaviorTracker {
cfg := conf.GetConfig()
return &NodeBehaviorTracker{
nodes: make(map[pp.ID]*nodeBehavior),
maxHB: common.CfgOr(cfg.MaxHBPerMinute, defaultMaxHBPerMinute),
maxPub: common.CfgOr(cfg.MaxPublishPerMinute, defaultMaxPublishPerMin),
maxGet: common.CfgOr(cfg.MaxGetPerMinute, defaultMaxGetPerMin),
}
}
func (t *NodeBehaviorTracker) get(pid pp.ID) *nodeBehavior {
t.mu.RLock()
nb := t.nodes[pid]
t.mu.RUnlock()
if nb != nil {
return nb
}
t.mu.Lock()
defer t.mu.Unlock()
if nb = t.nodes[pid]; nb == nil {
nb = &nodeBehavior{}
t.nodes[pid] = nb
}
return nb
}
// IsBanned returns true when the peer is in an active ban period.
func (t *NodeBehaviorTracker) IsBanned(pid pp.ID) bool {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
return nb.isBanned()
}
// RecordHeartbeat checks heartbeat cadence. Returns an error if the peer is
// flooding (too many heartbeats in the sliding window).
func (t *NodeBehaviorTracker) RecordHeartbeat(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.hbTimes, t.maxHB) {
return errors.New("heartbeat flood detected")
}
return nil
}
// CheckIdentity verifies that the DID associated with a PeerID never changes.
// A DID change is a strong signal of identity spoofing.
func (t *NodeBehaviorTracker) CheckIdentity(pid pp.ID, did string) error {
if did == "" {
return nil
}
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.knownDID == "" {
nb.knownDID = did
return nil
}
if nb.knownDID != did {
nb.strike(2) // identity change is severe
return errors.New("DID mismatch for peer " + pid.String())
}
return nil
}
// RecordBadSignature registers a cryptographic verification failure.
// A single bad signature is worth 2 strikes (near-immediate ban).
func (t *NodeBehaviorTracker) RecordBadSignature(pid pp.ID) {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
nb.strike(2)
}
// RecordPublish checks publish volume. Returns an error if the peer is
// sending too many publish requests.
func (t *NodeBehaviorTracker) RecordPublish(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.pubTimes, t.maxPub) {
return errors.New("publish volume exceeded")
}
return nil
}
// RecordGet checks get volume. Returns an error if the peer is enumerating
// the DHT at an abnormal rate.
func (t *NodeBehaviorTracker) RecordGet(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.getTimes, t.maxGet) {
return errors.New("get volume exceeded")
}
return nil
}
// Cleanup removes the behavior entry for a peer if it is not currently banned.
// Called when the peer is evicted from StreamRecords by the GC.
func (t *NodeBehaviorTracker) Cleanup(pid pp.ID) {
t.mu.RLock()
nb := t.nodes[pid]
t.mu.RUnlock()
if nb == nil {
return
}
nb.mu.Lock()
banned := nb.isBanned()
nb.mu.Unlock()
if !banned {
t.mu.Lock()
delete(t.nodes, pid)
t.mu.Unlock()
}
}
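// exampleWireBehaviorTracker is a hypothetical wiring sketch (the real wiring is
// done when the indexer service is built): the tracker backs the common package's
// per-tick ValidateHeartbeat hook, rejecting banned or flooding peers.
func exampleWireBehaviorTracker[T interface{}](svc *common.LongLivedStreamRecordedService[T], tracker *NodeBehaviorTracker) {
	svc.ValidateHeartbeat = func(remotePeer pp.ID) error {
		if tracker.IsBanned(remotePeer) {
			return errors.New("peer is banned")
		}
		return tracker.RecordHeartbeat(remotePeer)
	}
}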

View File

@@ -0,0 +1,586 @@
package indexer
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"io"
"math/rand"
"oc-discovery/daemons/node/common"
"strings"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
pp "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
lpp "github.com/libp2p/go-libp2p/core/peer"
)
// DefaultTTLSeconds is the default TTL for peer records when the publisher
// does not declare a custom TTL. Exported so the node package can reference it.
const DefaultTTLSeconds = 120
// maxTTLSeconds caps how far in the future a publisher can set their ExpiryDate.
const maxTTLSeconds = 86400 // 24h
// tombstoneTTL is how long a signed delete record stays alive in the DHT —
// long enough to propagate everywhere, short enough not to linger forever.
const tombstoneTTL = 10 * time.Minute
type PeerRecordPayload struct {
Name string `json:"name"`
DID string `json:"did"`
PubKey []byte `json:"pub_key"`
ExpiryDate time.Time `json:"expiry_date"`
// TTLSeconds is the publisher's declared lifetime for this record in seconds.
// 0 means "use the default (120 s)". Included in the signed payload so it
// cannot be altered by an intermediary.
TTLSeconds int `json:"ttl_seconds,omitempty"`
}
type PeerRecord struct {
PeerRecordPayload
PeerID string `json:"peer_id"`
APIUrl string `json:"api_url"`
StreamAddress string `json:"stream_address"`
NATSAddress string `json:"nats_address"`
WalletAddress string `json:"wallet_address"`
Location *pp.PeerLocation `json:"location,omitempty"`
Signature []byte `json:"signature"`
}
func (p *PeerRecord) Sign() error {
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return err
}
payload, _ := json.Marshal(p.PeerRecordPayload)
b, err := common.Sign(priv, payload)
p.Signature = b
return err
}
func (p *PeerRecord) Verify() (crypto.PubKey, error) {
pubKey, err := crypto.UnmarshalPublicKey(p.PubKey) // retrieve pub key in message
if err != nil {
return pubKey, err
}
payload, _ := json.Marshal(p.PeerRecordPayload)
if ok, _ := pubKey.Verify(payload, p.Signature); !ok { // check that the payload was signed by the embedded public key
return pubKey, errors.New("invalid signature")
}
return pubKey, nil
}
func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKey) (bool, *pp.Peer, error) {
pubBytes, err := crypto.MarshalPublicKey(pubKey)
if err != nil {
return false, nil, err
}
rel := pp.NONE
if ourkey == key { // the key matches our own: this record describes ourselves
rel = pp.SELF
}
p := &pp.Peer{
AbstractObject: utils.AbstractObject{
UUID: pr.DID,
Name: pr.Name,
},
Relation: rel, // NONE unless this is our own record (does not overwrite anything)
PeerID: pr.PeerID,
PublicKey: base64.StdEncoding.EncodeToString(pubBytes),
APIUrl: pr.APIUrl,
StreamAddress: pr.StreamAddress,
NATSAddress: pr.NATSAddress,
WalletAddress: pr.WalletAddress,
Location: pr.Location,
}
if time.Now().UTC().After(pr.ExpiryDate) {
return pp.SELF == p.Relation, nil, errors.New("peer " + key + " is offline")
}
return pp.SELF == p.Relation, p, nil
}
// TombstonePayload is the signed body of a delete request.
// Only the owner's private key can produce a valid signature over this payload.
type TombstonePayload struct {
DID string `json:"did"`
PeerID string `json:"peer_id"`
DeletedAt time.Time `json:"deleted_at"`
}
// TombstoneRecord is stored in the DHT at /node/{DID} to signal that a peer
// has voluntarily left the network. The Tombstone bool field acts as a
// discriminator so validators can distinguish it from a live PeerRecord.
type TombstoneRecord struct {
TombstonePayload
PubKey []byte `json:"pub_key"`
Tombstone bool `json:"tombstone"`
Signature []byte `json:"signature"`
}
func (ts *TombstoneRecord) Verify() (crypto.PubKey, error) {
pubKey, err := crypto.UnmarshalPublicKey(ts.PubKey)
if err != nil {
return nil, err
}
payload, _ := json.Marshal(ts.TombstonePayload)
if ok, _ := pubKey.Verify(payload, ts.Signature); !ok {
return nil, errors.New("invalid tombstone signature")
}
return pubKey, nil
}
// isTombstone returns true if data is a valid, well-formed TombstoneRecord.
func isTombstone(data []byte) bool {
var ts TombstoneRecord
return json.Unmarshal(data, &ts) == nil && ts.Tombstone
}
type GetValue struct {
Key string `json:"key"`
PeerID string `json:"peer_id,omitempty"`
}
type GetResponse struct {
Found bool `json:"found"`
Records map[string]PeerRecord `json:"records,omitempty"`
}
func (ix *IndexerService) genKey(did string) string {
return "/node/" + did
}
func (ix *IndexerService) genPIDKey(peerID string) string {
return "/pid/" + peerID
}
// isPeerKnown is the stream-level gate: returns true if pid is allowed.
// Check order (fast → slow):
// 1. In-memory stream records — currently heartbeating to this indexer.
// 2. Local DB by peer_id — known peer, blacklist enforced here.
// 3. DHT /pid/{peerID} → /node/{DID} — registered on any indexer.
//
// ProtocolHeartbeat and ProtocolPublish handlers do NOT call this — they are
// the streams through which a node first makes itself known.
func (ix *IndexerService) isPeerKnown(pid lpp.ID) bool {
// 1. Fast path: active heartbeat session.
ix.StreamMU.RLock()
_, active := ix.StreamRecords[common.ProtocolHeartbeat][pid]
ix.StreamMU.RUnlock()
if active {
return true
}
// 2. Local DB: known peer (handles blacklist).
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
results := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
},
}, pid.String(), false)
for _, item := range results.Data {
p, ok := item.(*pp.Peer)
if !ok || p.PeerID != pid.String() {
continue
}
return p.Relation != pp.BLACKLIST
}
// 3. DHT lookup by peer_id.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
did, err := ix.DHT.GetValue(ctx, ix.genPIDKey(pid.String()))
cancel()
if err != nil || len(did) == 0 {
return false
}
ctx2, cancel2 := context.WithTimeout(context.Background(), 3*time.Second)
val, err := ix.DHT.GetValue(ctx2, ix.genKey(string(did)))
cancel2()
return err == nil && !isTombstone(val)
}
func (ix *IndexerService) initNodeHandler() {
logger := oclib.GetLogger()
logger.Info().Msg("Init Node Handler")
// Each heartbeat from a node carries a freshly signed PeerRecord.
// Republish it to the DHT so the record never expires as long as the node
// is alive — no separate publish stream needed from the node side.
ix.AfterHeartbeat = func(hb *common.Heartbeat) {
// Priority 1: use the fresh signed PeerRecord embedded in the heartbeat.
// Each heartbeat tick, the node re-signs with ExpiryDate = now+2min, so
// this record is always fresh. Fetching from DHT would give a stale expiry.
var rec PeerRecord
if len(hb.Record) > 0 {
if err := json.Unmarshal(hb.Record, &rec); err != nil {
logger.Warn().Err(err).Msg("indexer: heartbeat embedded record unmarshal failed")
return
}
} else {
// Fallback: node didn't embed a record yet (first heartbeat before claimInfo).
// Fetch from DHT using the DID resolved by HandleHeartbeat.
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
res, err := ix.DHT.GetValue(ctx2, ix.genKey(hb.DID))
cancel2()
if err != nil {
logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: DHT fetch for refresh failed")
return
}
if err := json.Unmarshal(res, &rec); err != nil {
logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: heartbeat record unmarshal failed")
return
}
}
if _, err := rec.Verify(); err != nil {
logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: heartbeat record signature invalid")
return
}
// Don't republish if a tombstone was recently stored for this DID:
// the peer explicitly left and we must not re-animate their record.
ix.deletedDIDsMu.Lock()
if t, ok := ix.deletedDIDs[rec.DID]; ok {
if time.Since(t) < tombstoneTTL {
ix.deletedDIDsMu.Unlock()
return
}
// tombstoneTTL elapsed — peer is allowed to re-register.
delete(ix.deletedDIDs, rec.DID)
}
ix.deletedDIDsMu.Unlock()
// Keep StreamRecord.Record in sync so BuildHeartbeatResponse always
// sees a populated PeerRecord (Name, DID, etc.) regardless of whether
// handleNodePublish ran before or after the heartbeat stream was opened.
if pid, err := lpp.Decode(rec.PeerID); err == nil {
ix.StreamMU.Lock()
if srec, ok := ix.StreamRecords[common.ProtocolHeartbeat][pid]; ok {
srec.Record = rec
}
ix.StreamMU.Unlock()
}
data, err := json.Marshal(rec)
if err != nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx, ix.genKey(rec.DID), data); err != nil {
logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: DHT refresh /node/ failed")
}
cancel()
// /pid/ is written unconditionally — the gater queries by PeerID and this
// index must stay fresh regardless of whether the /node/ write succeeded.
if rec.PeerID != "" {
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
logger.Warn().Err(err).Str("pid", rec.PeerID).Msg("indexer: DHT refresh /pid/ failed")
}
cancel2()
}
}
ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleHeartbeat)
ix.Host.SetStreamHandler(common.ProtocolPublish, ix.handleNodePublish)
ix.Host.SetStreamHandler(common.ProtocolGet, ix.handleNodeGet)
ix.Host.SetStreamHandler(common.ProtocolDelete, ix.handleNodeDelete)
ix.Host.SetStreamHandler(common.ProtocolIndirectProbe, ix.handleIndirectProbe)
ix.Host.SetStreamHandler(common.ProtocolIndexerCandidates, ix.handleCandidateRequest)
ix.initSearchHandlers()
}
// handleCandidateRequest responds to a node's consensus candidate request.
// Returns a random sample of indexers from the local DHT cache.
func (ix *IndexerService) handleCandidateRequest(s network.Stream) {
defer s.Close()
if !ix.isPeerKnown(s.Conn().RemotePeer()) {
logger := oclib.GetLogger()
logger.Warn().Str("peer", s.Conn().RemotePeer().String()).Msg("[candidates] unknown peer, rejecting stream")
s.Reset()
return
}
s.SetDeadline(time.Now().Add(5 * time.Second))
var req common.IndexerCandidatesRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
return
}
if req.Count <= 0 || req.Count > 10 {
req.Count = 3
}
ix.dhtCacheMu.RLock()
cache := make([]dhtCacheEntry, len(ix.dhtCache))
copy(cache, ix.dhtCache)
ix.dhtCacheMu.RUnlock()
// Shuffle for randomness: each voter offers a different subset.
rand.Shuffle(len(cache), func(i, j int) { cache[i], cache[j] = cache[j], cache[i] })
candidates := make([]lpp.AddrInfo, 0, req.Count)
for _, e := range cache {
if len(candidates) >= req.Count {
break
}
candidates = append(candidates, e.AI)
}
json.NewEncoder(s).Encode(common.IndexerCandidatesResponse{Candidates: candidates})
}
func (ix *IndexerService) handleNodePublish(s network.Stream) {
defer s.Close()
logger := oclib.GetLogger()
remotePeer := s.Conn().RemotePeer()
if err := ix.behavior.RecordPublish(remotePeer); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("publish refused")
s.Reset()
return
}
for {
var rec PeerRecord
if err := json.NewDecoder(s).Decode(&rec); err != nil {
logger.Err(err).Msg("publish: decode failed")
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
continue
}
if _, err := rec.Verify(); err != nil {
ix.behavior.RecordBadSignature(remotePeer)
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("bad signature on publish")
return
}
if err := ix.behavior.CheckIdentity(remotePeer, rec.DID); err != nil {
logger.Warn().Err(err).Msg("identity mismatch on publish")
s.Reset()
return
}
if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) {
logger.Warn().Str("peer_id", rec.PeerID).Msg("publish: record missing peer_id or already expired")
return
}
pid, err := lpp.Decode(rec.PeerID)
if err != nil {
return
}
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
ix.StreamRecords[common.ProtocolHeartbeat] = map[lpp.ID]*common.StreamRecord[PeerRecord]{}
}
streams := ix.StreamRecords[common.ProtocolHeartbeat]
if srec, ok := streams[pid]; ok {
srec.DID = rec.DID
srec.Record = rec
srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
}
key := ix.genKey(rec.DID)
data, err := json.Marshal(rec)
if err != nil {
logger.Err(err).Msg("publish: marshal failed")
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx, key, data); err != nil {
logger.Err(err).Msg("publish: DHT write failed")
cancel()
return
}
cancel()
// Secondary index: /pid/<peerID> → DID, so peers can resolve by libp2p PeerID.
if rec.PeerID != "" {
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
logger.Err(err).Str("pid", rec.PeerID).Msg("indexer: failed to write pid index")
}
cancel2()
}
return
}
}
func (ix *IndexerService) handleNodeGet(s network.Stream) {
defer s.Close()
logger := oclib.GetLogger()
remotePeer := s.Conn().RemotePeer()
if !ix.isPeerKnown(remotePeer) {
logger.Warn().Str("peer", remotePeer.String()).Msg("[get] unknown peer, rejecting stream")
s.Reset()
return
}
if err := ix.behavior.RecordGet(remotePeer); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("get refused")
s.Reset()
return
}
for {
var req GetValue
if err := json.NewDecoder(s).Decode(&req); err != nil {
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
logger.Err(err).Msg("get: decode failed")
continue
}
resp := GetResponse{Found: false, Records: map[string]PeerRecord{}}
// Resolve DID key: by PeerID (secondary /pid/ index) or direct DID key.
var key string
if req.PeerID != "" {
pidCtx, pidCancel := context.WithTimeout(context.Background(), 5*time.Second)
did, err := ix.DHT.GetValue(pidCtx, ix.genPIDKey(req.PeerID))
pidCancel()
if err == nil {
key = string(did)
}
} else {
key = req.Key
}
if key != "" {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
c, err := ix.DHT.GetValue(ctx, ix.genKey(key))
cancel()
if err == nil && !isTombstone(c) {
var rec PeerRecord
if json.Unmarshal(c, &rec) == nil {
resp.Records[rec.PeerID] = rec
}
} else if err != nil {
logger.Err(err).Msg("Failed to fetch PeerRecord from DHT " + key)
}
}
resp.Found = len(resp.Records) > 0
_ = json.NewEncoder(s).Encode(resp)
break
}
}
// handleNodeDelete processes a signed delete (tombstone) request from a peer.
// It verifies that the request is:
// - marked as a tombstone
// - recent (within 5 minutes, preventing replay attacks)
// - sent by the actual peer whose record is being deleted (PeerID == remotePeer)
// - signed by the matching private key
//
// On success it stores the tombstone in the DHT, evicts the peer from the local
// stream records, and marks the DID in deletedDIDs so AfterHeartbeat cannot
// accidentally republish the record during the tombstoneTTL window.
func (ix *IndexerService) handleNodeDelete(s network.Stream) {
defer s.Close()
logger := oclib.GetLogger()
remotePeer := s.Conn().RemotePeer()
s.SetDeadline(time.Now().Add(10 * time.Second))
var ts TombstoneRecord
if err := json.NewDecoder(s).Decode(&ts); err != nil || !ts.Tombstone {
s.Reset()
return
}
if ts.PeerID == "" || ts.DID == "" {
s.Reset()
return
}
if time.Since(ts.DeletedAt) > 5*time.Minute {
logger.Warn().Str("peer", remotePeer.String()).Msg("[delete] stale tombstone rejected")
s.Reset()
return
}
if ts.PeerID != remotePeer.String() {
logger.Warn().Str("peer", remotePeer.String()).Msg("[delete] tombstone PeerID mismatch")
s.Reset()
return
}
if _, err := ts.Verify(); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("[delete] invalid tombstone signature")
s.Reset()
return
}
// Mark DID as deleted in-memory before writing to DHT so AfterHeartbeat
// cannot win a race and republish the live record on top of the tombstone.
ix.deletedDIDsMu.Lock()
ix.deletedDIDs[ts.DID] = ts.DeletedAt
ix.deletedDIDsMu.Unlock()
data, _ := json.Marshal(ts)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx, ix.genKey(ts.DID), data); err != nil {
logger.Warn().Err(err).Str("did", ts.DID).Msg("[delete] DHT write tombstone failed")
}
cancel()
// Invalidate the /pid/ secondary index so isPeerKnown returns false quickly.
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(ts.PeerID), []byte("")); err != nil {
logger.Warn().Err(err).Str("pid", ts.PeerID).Msg("[delete] DHT clear pid failed")
}
cancel2()
// Evict from active stream records.
if pid, err := lpp.Decode(ts.PeerID); err == nil {
ix.StreamMU.Lock()
delete(ix.StreamRecords[common.ProtocolHeartbeat], pid)
ix.StreamMU.Unlock()
}
logger.Info().Str("did", ts.DID).Str("peer", ts.PeerID).Msg("[delete] tombstone stored, peer evicted")
}
// handleIndirectProbe is the SWIM inter-indexer probe handler.
// A node opens this stream toward a live indexer to ask: "can you reach peer X?"
// The indexer attempts a ProtocolBandwidthProbe to X and reports back.
// This is the only protocol that indexers use to communicate with each other;
// no persistent inter-indexer connections are maintained.
func (ix *IndexerService) handleIndirectProbe(s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(10 * time.Second))
var req common.IndirectProbeRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
s.Reset()
return
}
respond := func(reachable bool, latencyMs int64) {
json.NewEncoder(s).Encode(common.IndirectProbeResponse{
Reachable: reachable,
LatencyMs: latencyMs,
})
}
// Connect to target if not already connected.
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
defer cancel()
if ix.Host.Network().Connectedness(req.Target.ID) != network.Connected {
if err := ix.Host.Connect(ctx, req.Target); err != nil {
respond(false, 0)
return
}
}
// Open a bandwidth probe stream — already registered on all nodes/indexers.
start := time.Now()
ps, err := ix.Host.NewStream(ctx, req.Target.ID, common.ProtocolBandwidthProbe)
if err != nil {
respond(false, 0)
return
}
defer ps.Reset()
ps.SetDeadline(time.Now().Add(3 * time.Second))
ps.Write([]byte("ping"))
buf := make([]byte, 4)
_, err = ps.Read(buf)
latency := time.Since(start).Milliseconds()
respond(err == nil, latency)
}
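For context, a minimal node-side sketch of the delete flow that handleNodeDelete above expects. It reuses TombstoneRecord, common.Sign, common.ProtocolDelete and tools.LoadKeyFromFilePrivate exactly as the surrounding code does; the function name and error handling are illustrative:

package indexer

import (
	"context"
	"encoding/json"
	"time"

	"oc-discovery/daemons/node/common"

	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/host"
	lpp "github.com/libp2p/go-libp2p/core/peer"
)

// sendTombstone signs a TombstonePayload with our own key and pushes it to one
// indexer over ProtocolDelete, which stores it in the DHT at /node/{DID}.
func sendTombstone(ctx context.Context, h host.Host, target lpp.ID, did string) error {
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return err
	}
	ts := TombstoneRecord{
		TombstonePayload: TombstonePayload{
			DID:       did,
			PeerID:    h.ID().String(), // must match the stream's remote peer on the indexer side
			DeletedAt: time.Now().UTC(),
		},
		Tombstone: true,
	}
	if ts.PubKey, err = crypto.MarshalPublicKey(priv.GetPublic()); err != nil {
		return err
	}
	payload, _ := json.Marshal(ts.TombstonePayload)
	if ts.Signature, err = common.Sign(priv, payload); err != nil {
		return err
	}
	s, err := h.NewStream(ctx, target, common.ProtocolDelete)
	if err != nil {
		return err
	}
	defer s.Close()
	return json.NewEncoder(s).Encode(ts)
}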


@@ -0,0 +1,233 @@
package indexer
import (
"context"
"encoding/json"
"strings"
"time"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
oclib "cloud.o-forge.io/core/oc-lib"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/network"
)
const TopicSearchPeer = "oc-search-peer"
// searchTimeout returns the configured search timeout, defaulting to 5s.
func searchTimeout() time.Duration {
if t := conf.GetConfig().SearchTimeout; t > 0 {
return time.Duration(t) * time.Second
}
return 5 * time.Second
}
// initSearchHandlers registers ProtocolSearchPeer and ProtocolSearchPeerResponse
// and subscribes to TopicSearchPeer on GossipSub.
func (ix *IndexerService) initSearchHandlers() {
ix.Host.SetStreamHandler(common.ProtocolSearchPeer, ix.handleSearchPeer)
ix.Host.SetStreamHandler(common.ProtocolSearchPeerResponse, ix.handleSearchPeerResponse)
ix.initSearchSubscription()
}
// updateReferent is called from HandleHeartbeat when Referent flag changes.
// If referent=true the node is added to referencedNodes; if false it is removed.
func (ix *IndexerService) updateReferent(pid pp.ID, rec PeerRecord, referent bool) {
ix.referencedNodesMu.Lock()
defer ix.referencedNodesMu.Unlock()
if referent {
ix.referencedNodes[pid] = rec
} else {
delete(ix.referencedNodes, pid)
}
}
// searchReferenced looks up nodes in referencedNodes matching the query.
// Matches on peerID (exact), DID (exact), or name (case-insensitive contains).
func (ix *IndexerService) searchReferenced(peerID, did, name string) []common.SearchHit {
ix.referencedNodesMu.RLock()
defer ix.referencedNodesMu.RUnlock()
nameLow := strings.ToLower(name)
var hits []common.SearchHit
for pid, rec := range ix.referencedNodes {
pidStr := pid.String()
matchPeerID := peerID != "" && pidStr == peerID
matchDID := did != "" && rec.DID == did
matchName := name != "" && strings.Contains(strings.ToLower(rec.Name), nameLow)
if matchPeerID || matchDID || matchName {
hits = append(hits, common.SearchHit{
PeerID: pidStr,
DID: rec.DID,
Name: rec.Name,
})
}
}
return hits
}
// handleSearchPeer is the ProtocolSearchPeer handler.
// The node opens this stream, sends a SearchPeerRequest, and reads results
// as they stream in. The stream stays open until timeout or node closes it.
func (ix *IndexerService) handleSearchPeer(s network.Stream) {
logger := oclib.GetLogger()
defer s.Reset()
if !ix.isPeerKnown(s.Conn().RemotePeer()) {
logger.Warn().Str("peer", s.Conn().RemotePeer().String()).Msg("[search] unknown peer, rejecting stream")
return
}
var req common.SearchPeerRequest
if err := json.NewDecoder(s).Decode(&req); err != nil || req.QueryID == "" {
return
}
// streamCtx is cancelled when the node closes its end of the stream.
streamCtx, streamCancel := context.WithCancel(context.Background())
go func() {
// Block until the stream is reset/closed, then cancel our context.
buf := make([]byte, 1)
s.Read(buf) //nolint:errcheck — we only care about EOF/reset
streamCancel()
}()
defer streamCancel()
resultCh := make(chan []common.SearchHit, 16)
ix.pendingSearchesMu.Lock()
ix.pendingSearches[req.QueryID] = resultCh
ix.pendingSearchesMu.Unlock()
defer func() {
ix.pendingSearchesMu.Lock()
delete(ix.pendingSearches, req.QueryID)
ix.pendingSearchesMu.Unlock()
}()
// Check own referencedNodes immediately.
if hits := ix.searchReferenced(req.PeerID, req.DID, req.Name); len(hits) > 0 {
resultCh <- hits
}
// Broadcast search on GossipSub so other indexers can respond.
ix.publishSearchQuery(req.QueryID, req.PeerID, req.DID, req.Name)
// Stream results back to node as they arrive; reset idle timer on each result.
enc := json.NewEncoder(s)
idleTimer := time.NewTimer(searchTimeout())
defer idleTimer.Stop()
for {
select {
case hits := <-resultCh:
if err := enc.Encode(common.SearchPeerResult{QueryID: req.QueryID, Records: hits}); err != nil {
logger.Debug().Err(err).Msg("[search] stream write failed")
return
}
// Reset idle timeout: keep alive as long as results trickle in.
if !idleTimer.Stop() {
select {
case <-idleTimer.C:
default:
}
}
idleTimer.Reset(searchTimeout())
case <-idleTimer.C:
// No new result within timeout — close gracefully.
return
case <-streamCtx.Done():
// Node closed the stream (new search superseded this one).
return
}
}
}
// handleSearchPeerResponse is the ProtocolSearchPeerResponse handler.
// Another indexer opens this stream to deliver hits for a pending queryID.
func (ix *IndexerService) handleSearchPeerResponse(s network.Stream) {
defer s.Reset()
var result common.SearchPeerResult
if err := json.NewDecoder(s).Decode(&result); err != nil || result.QueryID == "" {
return
}
ix.pendingSearchesMu.Lock()
ch := ix.pendingSearches[result.QueryID]
ix.pendingSearchesMu.Unlock()
if ch != nil {
select {
case ch <- result.Records:
default: // channel full, drop — node may be slow
}
}
}
// publishSearchQuery broadcasts a SearchQuery on TopicSearchPeer.
func (ix *IndexerService) publishSearchQuery(queryID, peerID, did, name string) {
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RLock()
topic := ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer]
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RUnlock()
if topic == nil {
return
}
q := common.SearchQuery{
QueryID: queryID,
PeerID: peerID,
DID: did,
Name: name,
EmitterID: ix.Host.ID().String(),
}
b, err := json.Marshal(q)
if err != nil {
return
}
_ = topic.Publish(context.Background(), b)
}
// initSearchSubscription joins TopicSearchPeer and dispatches incoming queries.
func (ix *IndexerService) initSearchSubscription() {
logger := oclib.GetLogger()
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Lock()
topic, err := ix.PS.Join(TopicSearchPeer)
if err != nil {
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()
logger.Err(err).Msg("[search] failed to join search topic")
return
}
ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer] = topic
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()
common.SubscribeEvents(
ix.LongLivedStreamRecordedService.LongLivedPubSubService,
context.Background(),
TopicSearchPeer,
-1,
func(_ context.Context, q common.SearchQuery, _ string) {
ix.onSearchQuery(q)
},
)
}
// onSearchQuery handles an incoming GossipSub search broadcast.
// If we have matching referencedNodes, we respond to the emitting indexer.
func (ix *IndexerService) onSearchQuery(q common.SearchQuery) {
// Don't respond to our own broadcasts.
if q.EmitterID == ix.Host.ID().String() {
return
}
hits := ix.searchReferenced(q.PeerID, q.DID, q.Name)
if len(hits) == 0 {
return
}
emitterID, err := pp.Decode(q.EmitterID)
if err != nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := ix.Host.NewStream(ctx, emitterID, common.ProtocolSearchPeerResponse)
if err != nil {
return
}
defer s.Reset()
s.SetDeadline(time.Now().Add(5 * time.Second))
json.NewEncoder(s).Encode(common.SearchPeerResult{QueryID: q.QueryID, Records: hits})
}
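And the matching client side, as a hedged sketch: the request and result field names (QueryID, Name, Records, and the SearchHit fields) are the ones used by the handlers above; everything else is illustrative:

package indexer

import (
	"context"
	"encoding/json"
	"fmt"

	"oc-discovery/daemons/node/common"

	"github.com/libp2p/go-libp2p/core/host"
	pp "github.com/libp2p/go-libp2p/core/peer"
)

// searchByName opens ProtocolSearchPeer toward the node's referent indexer,
// sends one request and prints result frames until the indexer closes the stream.
func searchByName(ctx context.Context, h host.Host, referent pp.ID, queryID, name string) error {
	s, err := h.NewStream(ctx, referent, common.ProtocolSearchPeer)
	if err != nil {
		return err
	}
	defer s.Close()
	if err := json.NewEncoder(s).Encode(common.SearchPeerRequest{QueryID: queryID, Name: name}); err != nil {
		return err
	}
	dec := json.NewDecoder(s)
	for {
		var res common.SearchPeerResult
		if err := dec.Decode(&res); err != nil {
			// The indexer resets the stream when its idle timeout fires, so a
			// decode error here simply means "no more results".
			return nil
		}
		for _, hit := range res.Records {
			fmt.Printf("hit: %s (%s / %s)\n", hit.Name, hit.DID, hit.PeerID)
		}
	}
}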


@@ -0,0 +1,572 @@
package indexer
import (
"context"
"encoding/json"
"errors"
"math/rand"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"strings"
"sync"
"sync/atomic"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
dht "github.com/libp2p/go-libp2p-kad-dht"
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// dhtCacheEntry holds one indexer discovered via DHT for use in suggestion responses.
type dhtCacheEntry struct {
AI pp.AddrInfo
LastSeen time.Time
}
// offloadState tracks which nodes we've already proposed migration to.
// When an indexer is overloaded (fill rate > offloadThreshold) it only sends
// SuggestMigrate to a small batch at a time; peers that don't migrate within
// offloadGracePeriod are moved to alreadyTried so a new batch can be picked.
type offloadState struct {
inBatch map[pp.ID]time.Time // peer → time added to current batch
alreadyTried map[pp.ID]struct{} // peers proposed to that didn't migrate
mu sync.Mutex
}
const (
offloadThreshold = 0.80 // fill rate above which to start offloading
offloadBatchSize = 5 // max concurrent "please migrate" proposals
offloadGracePeriod = 3 * common.RecommendedHeartbeatInterval
)
// IndexerService manages the indexer node's state: stream records, DHT, pubsub.
type IndexerService struct {
*common.LongLivedStreamRecordedService[PeerRecord]
PS *pubsub.PubSub
DHT *dht.IpfsDHT
isStrictIndexer bool
mu sync.RWMutex
dhtProvideCancel context.CancelFunc
bornAt time.Time
// Passive DHT cache: refreshed every 2 min in background, used for suggestions.
dhtCache []dhtCacheEntry
dhtCacheMu sync.RWMutex
// Offload state for overloaded-indexer migration proposals.
offload offloadState
// referencedNodes holds nodes that have designated this indexer as their
// search referent (Heartbeat.Referent=true). Used for distributed search.
referencedNodes map[pp.ID]PeerRecord
referencedNodesMu sync.RWMutex
// pendingSearches maps queryID → result channel for in-flight searches.
pendingSearches map[string]chan []common.SearchHit
pendingSearchesMu sync.Mutex
// behavior tracks per-node compliance (heartbeat rate, publish/get volume,
// identity consistency, signature failures).
behavior *NodeBehaviorTracker
// ConnGuard (on the embedded LongLivedStreamRecordedService) limits new-connection bursts to protect public indexers.
// deletedDIDs tracks recently tombstoned DIDs to prevent AfterHeartbeat
// from republishing records that were explicitly deleted by the peer.
// Entries are cleared automatically after tombstoneTTL.
deletedDIDs map[string]time.Time
deletedDIDsMu sync.RWMutex
// SWIM incarnation: incremented when a connecting node signals suspicion via
// SuspectedIncarnation. The new value is broadcast back so nodes can clear
// their suspect state (refutation mechanism).
incarnation atomic.Uint64
// eventQueue holds SWIM membership events to be piggybacked on responses
// (infection-style dissemination toward connected nodes).
eventQueue *common.MembershipEventQueue
}
// NewIndexerService creates an IndexerService.
// If ps is nil, this is a strict indexer (no pre-existing gossip sub from a node).
func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int) *IndexerService {
logger := oclib.GetLogger()
logger.Info().Msg("open indexer mode...")
var err error
ix := &IndexerService{
LongLivedStreamRecordedService: common.NewStreamRecordedService[PeerRecord](h, maxNode),
isStrictIndexer: ps == nil,
referencedNodes: map[pp.ID]PeerRecord{},
pendingSearches: map[string]chan []common.SearchHit{},
behavior: newNodeBehaviorTracker(),
deletedDIDs: make(map[string]time.Time),
eventQueue: &common.MembershipEventQueue{},
}
if ps == nil {
ps, err = pubsub.NewGossipSub(context.Background(), ix.Host)
if err != nil {
panic(err) // can't run your indexer without a propagation pubsub
}
}
ix.PS = ps
if ix.isStrictIndexer {
logger.Info().Msg("connect to indexers as strict indexer...")
common.ConnectToIndexers(h, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer*2)
logger.Info().Msg("subscribe to decentralized search flow as strict indexer...")
go ix.SubscribeToSearch(ix.PS, nil)
ix.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
/*if ix.behavior.IsBanned(remotePeer) {
return errors.New("peer is banned")
}*/
if isNew {
// DB blacklist check: blocks reconnection after EvictPeer + blacklist.
/*if !ix.isPeerKnown(remotePeer) {
return errors.New("peer is blacklisted or unknown")
}*/
if !ix.ConnGuard.Allow() {
return errors.New("connection rate limit exceeded, retry later")
}
}
return nil
}
}
ix.LongLivedStreamRecordedService.AfterDelete = func(pid pp.ID, name, did string) {
// Remove behavior state for peers that are no longer connected and
// have no active ban — keeps memory bounded to the live node set.
ix.behavior.Cleanup(pid)
}
// AllowInbound: fired once per stream open, before any heartbeat is decoded.
// 1. Reject peers that are currently banned (behavioral strikes).
// 2. For genuinely new connections, check the DB blacklist and apply the burst guard.
// ValidateHeartbeat: fired on every heartbeat tick for an established stream.
// Checks heartbeat cadence — rejects if the node is sending too fast.
ix.ValidateHeartbeat = func(remotePeer pp.ID) error {
return ix.behavior.RecordHeartbeat(remotePeer)
}
// Parse bootstrap peers from configured indexer addresses so the DHT can
// find its routing table entries even in a fresh deployment.
var bootstrapPeers []pp.AddrInfo
for _, addrStr := range strings.Split(conf.GetConfig().IndexerAddresses, ",") {
addrStr = strings.TrimSpace(addrStr)
if addrStr == "" {
continue
}
if ad, err := pp.AddrInfoFromString(addrStr); err == nil {
bootstrapPeers = append(bootstrapPeers, *ad)
}
}
dhtOpts := []dht.Option{
dht.Mode(dht.ModeServer),
dht.ProtocolPrefix("oc"),
dht.Validator(record.NamespacedValidator{
"node": PeerRecordValidator{},
"name": DefaultValidator{},
"pid": DefaultValidator{},
}),
}
if len(bootstrapPeers) > 0 {
dhtOpts = append(dhtOpts, dht.BootstrapPeers(bootstrapPeers...))
}
if ix.DHT, err = dht.New(context.Background(), ix.Host, dhtOpts...); err != nil {
logger.Info().Msg(err.Error())
return nil
}
// Make the DHT available for replenishment from other packages.
common.SetDiscoveryDHT(ix.DHT)
ix.bornAt = time.Now().UTC()
ix.offload.inBatch = make(map[pp.ID]time.Time)
ix.offload.alreadyTried = make(map[pp.ID]struct{})
ix.initNodeHandler()
// Build and send a HeartbeatResponse after each received node heartbeat.
// Raw metrics only — no pre-cooked score. Node computes the score itself.
ix.BuildHeartbeatResponse = func(remotePeer pp.ID, hb *common.Heartbeat) *common.HeartbeatResponse {
logger := oclib.GetLogger()
need, challenges, challengeDID, referent, rawRecord :=
hb.Need, hb.Challenges, hb.ChallengeDID, hb.Referent, hb.Record
ix.StreamMU.RLock()
peerCount := len(ix.StreamRecords[common.ProtocolHeartbeat])
// Collect lastSeen per active peer for challenge responses.
type peerMeta struct {
found bool
lastSeen time.Time
}
peerLookup := make(map[string]peerMeta, peerCount)
var remotePeerRecord PeerRecord
for pid, rec := range ix.StreamRecords[common.ProtocolHeartbeat] {
var ls time.Time
if rec.HeartbeatStream != nil && rec.HeartbeatStream.UptimeTracker != nil {
ls = rec.HeartbeatStream.UptimeTracker.LastSeen
}
peerLookup[pid.String()] = peerMeta{found: true, lastSeen: ls}
if pid == remotePeer {
remotePeerRecord = rec.Record
}
}
ix.StreamMU.RUnlock()
// AfterHeartbeat updates srec.Record asynchronously — it may not have run yet.
// Use rawRecord (the fresh signed PeerRecord embedded in the heartbeat) directly
// so referencedNodes always gets the current Name/DID regardless of timing.
if remotePeerRecord.Name == "" && len(rawRecord) > 0 {
var fresh PeerRecord
if json.Unmarshal(rawRecord, &fresh) == nil {
remotePeerRecord = fresh
}
}
// Update referent designation: node marks its best-scored indexer with Referent=true.
ix.updateReferent(remotePeer, remotePeerRecord, referent)
// SWIM refutation: if the node signals our current incarnation as suspected,
// increment it and broadcast an alive event so other nodes can clear suspicion.
inc := ix.incarnation.Load()
if hb.SuspectedIncarnation != nil && *hb.SuspectedIncarnation == inc {
inc = ix.incarnation.Add(1)
logger.Info().
Str("suspected_by", remotePeer.String()).
Uint64("new_incarnation", inc).
Msg("[swim] refuting suspicion — incarnation incremented")
ix.eventQueue.Add(common.MemberEvent{
Type: common.MemberAlive,
PeerID: ix.Host.ID().String(),
Incarnation: inc,
HopsLeft: common.InitialEventHops,
})
}
// Relay incoming SWIM events from the node into our event queue so they
// propagate to other connected nodes (infection-style forwarding).
for _, ev := range hb.MembershipEvents {
if ev.HopsLeft > 0 {
ix.eventQueue.Add(ev)
}
}
maxN := ix.MaxNodesConn()
fillRate := 0.0
if maxN > 0 {
fillRate = float64(peerCount) / float64(maxN)
if fillRate > 1 {
fillRate = 1
}
}
resp := &common.HeartbeatResponse{
FillRate: fillRate,
PeerCount: peerCount,
MaxNodes: maxN,
BornAt: ix.bornAt,
}
// Answer each challenged PeerID with raw found + lastSeen.
for _, pidStr := range challenges {
meta := peerLookup[pidStr] // zero value if not found
entry := common.ChallengeEntry{
PeerID: pidStr,
Found: meta.found,
LastSeen: meta.lastSeen,
}
resp.Challenges = append(resp.Challenges, entry)
}
// DHT challenge: retrieve the node's own DID to prove DHT is functional.
if challengeDID != "" {
ctx3, cancel3 := context.WithTimeout(context.Background(), 3*time.Second)
val, err := ix.DHT.GetValue(ctx3, "/node/"+challengeDID)
cancel3()
resp.DHTFound = err == nil
if err == nil {
resp.DHTPayload = json.RawMessage(val)
}
}
// Random sample of connected nodes as witnesses (up to 3).
// Never include the requesting peer itself — asking a node to witness
// itself is circular and meaningless.
ix.StreamMU.RLock()
for pidStr := range peerLookup {
if len(resp.Witnesses) >= 3 {
break
}
pid, err := pp.Decode(pidStr)
if err != nil || pid == remotePeer || pid == ix.Host.ID() {
continue
}
addrs := ix.Host.Peerstore().Addrs(pid)
ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) > 0 {
resp.Witnesses = append(resp.Witnesses, ai)
}
}
ix.StreamMU.RUnlock()
// Attach suggestions: exactly `need` entries from the DHT cache.
// If the indexer is overloaded (SuggestMigrate will be set below), always
// provide at least 1 suggestion even when need == 0, so the node has
// somewhere to go.
suggestionsNeeded := need
if fillRate > offloadThreshold && suggestionsNeeded < 1 {
suggestionsNeeded = 1
}
if suggestionsNeeded > 0 {
ix.dhtCacheMu.RLock()
// When offloading, pick from a random offset within the top N of the
// cache so concurrent migrations spread across multiple targets rather
// than all rushing to the same least-loaded indexer (thundering herd).
// For normal need-based suggestions the full sorted order is fine.
cache := ix.dhtCache
if fillRate > offloadThreshold && len(cache) > suggestionsNeeded {
const spreadWindow = 5 // sample from the top-5 least-loaded
window := spreadWindow
if window > len(cache) {
window = len(cache)
}
start := rand.Intn(window)
cache = cache[start:]
}
for _, e := range cache {
if len(resp.Suggestions) >= suggestionsNeeded {
break
}
// Never suggest the requesting peer itself or this indexer.
if e.AI.ID == remotePeer || e.AI.ID == h.ID() {
continue
}
resp.Suggestions = append(resp.Suggestions, e.AI)
}
ix.dhtCacheMu.RUnlock()
}
// Offload logic: when fill rate is too high, selectively ask nodes to migrate.
if fillRate > offloadThreshold && len(resp.Suggestions) > 0 {
now := time.Now()
ix.offload.mu.Lock()
// Expire stale batch entries -> move to alreadyTried.
for pid, addedAt := range ix.offload.inBatch {
if now.Sub(addedAt) > offloadGracePeriod {
ix.offload.alreadyTried[pid] = struct{}{}
delete(ix.offload.inBatch, pid)
}
}
// Reset alreadyTried if we've exhausted the whole pool.
if len(ix.offload.alreadyTried) >= peerCount {
ix.offload.alreadyTried = make(map[pp.ID]struct{})
}
_, tried := ix.offload.alreadyTried[remotePeer]
_, inBatch := ix.offload.inBatch[remotePeer]
if !tried {
if inBatch {
resp.SuggestMigrate = true
} else if len(ix.offload.inBatch) < offloadBatchSize {
ix.offload.inBatch[remotePeer] = now
resp.SuggestMigrate = true
}
}
ix.offload.mu.Unlock()
} else if fillRate <= offloadThreshold {
// Fill rate back to normal: reset offload state.
ix.offload.mu.Lock()
if len(ix.offload.inBatch) > 0 || len(ix.offload.alreadyTried) > 0 {
ix.offload.inBatch = make(map[pp.ID]time.Time)
ix.offload.alreadyTried = make(map[pp.ID]struct{})
}
ix.offload.mu.Unlock()
}
// Bootstrap: if this indexer has no indexers of its own, probe the
// connecting peer to check it supports ProtocolHeartbeat (i.e. it is
// itself an indexer). Plain nodes do not register the handler and the
// negotiation fails instantly — no wasted heartbeat cycle.
// Run in a goroutine: the probe is a short blocking stream open.
if len(common.Indexers.GetAddrs()) == 0 && remotePeer != h.ID() {
pid := remotePeer
go func() {
if !common.SupportsHeartbeat(h, pid) {
logger.Debug().Str("peer", pid.String()).
Msg("[bootstrap] inbound peer has no heartbeat handler — not an indexer, skipping")
return
}
addrs := h.Peerstore().Addrs(pid)
ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) == 0 {
return
}
key := pid.String()
if !common.Indexers.ExistsAddr(key) {
adCopy := ai
common.Indexers.SetAddr(key, &adCopy)
common.Indexers.NudgeIt()
logger.Info().Str("peer", key).Msg("[bootstrap] no indexers — added inbound indexer peer as candidate")
}
}()
}
// Attach SWIM incarnation and piggybacked membership events.
resp.Incarnation = ix.incarnation.Load()
resp.MembershipEvents = ix.eventQueue.Drain(5)
return resp
}
// Advertise this indexer in the DHT so nodes can discover it.
fillRateFn := func() float64 {
ix.StreamMU.RLock()
n := len(ix.StreamRecords[common.ProtocolHeartbeat])
ix.StreamMU.RUnlock()
maxN := ix.MaxNodesConn()
if maxN <= 0 {
return 0
}
rate := float64(n) / float64(maxN)
if rate > 1 {
rate = 1
}
return rate
}
ix.startDHTCacheRefresh()
ix.startDHTProvide(fillRateFn)
return ix
}
// startDHTCacheRefresh periodically queries the DHT for peer indexers and
// refreshes ix.dhtCache. This passive cache is used by BuildHeartbeatResponse
// to suggest better indexers to connected nodes without any per-request cost.
func (ix *IndexerService) startDHTCacheRefresh() {
ctx, cancel := context.WithCancel(context.Background())
// Store cancel alongside the provide cancel so Close() stops both.
prevCancel := ix.dhtProvideCancel
ix.dhtProvideCancel = func() {
if prevCancel != nil {
prevCancel()
}
cancel()
}
go func() {
logger := oclib.GetLogger()
refresh := func() {
if ix.DHT == nil {
return
}
// Fetch more than needed so SelectByFillRate can filter for diversity.
raw := common.DiscoverIndexersFromDHT(ix.Host, ix.DHT, 30)
if len(raw) == 0 {
return
}
// Remove self before selection.
filtered := raw[:0]
for _, ai := range raw {
if ai.ID != ix.Host.ID() {
filtered = append(filtered, ai)
}
}
// SelectByFillRate applies /24 subnet diversity and fill-rate weighting.
// Fill rates are unknown at this stage (nil map) so all peers get
// the neutral prior f=0.5 — diversity filtering still applies.
selected := common.SelectByFillRate(filtered, nil, 10)
now := time.Now()
ix.dhtCacheMu.Lock()
ix.dhtCache = ix.dhtCache[:0]
for _, ai := range selected {
ix.dhtCache = append(ix.dhtCache, dhtCacheEntry{AI: ai, LastSeen: now})
}
ix.dhtCacheMu.Unlock()
logger.Info().Int("cached", len(selected)).Msg("[dht] indexer suggestion cache refreshed")
}
// Initial delay: let the DHT routing table warm up first.
select {
case <-time.After(30 * time.Second):
case <-ctx.Done():
return
}
refresh()
t := time.NewTicker(2 * time.Minute)
defer t.Stop()
for {
select {
case <-t.C:
refresh()
case <-ctx.Done():
return
}
}
}()
}
// startDHTProvide bootstraps the DHT and starts a goroutine that periodically
// advertises this indexer under the well-known provider key.
func (ix *IndexerService) startDHTProvide(fillRateFn func() float64) {
ctx, cancel := context.WithCancel(context.Background())
// Chain with the cache-refresh cancel set in startDHTCacheRefresh so Close() stops both.
prevCancel := ix.dhtProvideCancel
ix.dhtProvideCancel = func() {
if prevCancel != nil {
prevCancel()
}
cancel()
}
go func() {
logger := oclib.GetLogger()
// Wait until a routable (non-loopback) address is available.
for i := 0; i < 12; i++ {
addrs := ix.Host.Addrs()
if len(addrs) > 0 && !strings.Contains(addrs[len(addrs)-1].String(), "127.0.0.1") {
break
}
select {
case <-ctx.Done():
return
case <-time.After(5 * time.Second):
}
}
if err := ix.DHT.Bootstrap(ctx); err != nil {
logger.Warn().Err(err).Msg("[dht] bootstrap failed")
}
provide := func() {
pCtx, pCancel := context.WithTimeout(ctx, 30*time.Second)
defer pCancel()
if err := ix.DHT.Provide(pCtx, common.IndexerCID(), true); err != nil {
logger.Warn().Err(err).Msg("[dht] Provide failed")
} else {
logger.Info().Float64("fill_rate", fillRateFn()).Msg("[dht] indexer advertised in DHT")
}
}
provide()
t := time.NewTicker(common.RecommendedHeartbeatInterval)
defer t.Stop()
for {
select {
case <-t.C:
provide()
case <-ctx.Done():
return
}
}
}()
}
// EvictPeer immediately closes the heartbeat stream of a peer and removes it
// from the active stream records. Used when a peer is auto-blacklisted.
func (ix *IndexerService) EvictPeer(peerID string) {
pid, err := pp.Decode(peerID)
if err != nil {
return
}
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
if rec, ok := ix.StreamRecords[common.ProtocolHeartbeat][pid]; ok {
if rec.HeartbeatStream != nil && rec.HeartbeatStream.Stream != nil {
rec.HeartbeatStream.Stream.Reset()
}
delete(ix.StreamRecords[common.ProtocolHeartbeat], pid)
}
}
func (ix *IndexerService) Close() {
if ix.dhtProvideCancel != nil {
ix.dhtProvideCancel()
}
ix.DHT.Close()
ix.PS.UnregisterTopicValidator(common.TopicPubSubSearch)
for _, s := range ix.StreamRecords {
for _, ss := range s {
if ss.HeartbeatStream != nil && ss.HeartbeatStream.Stream != nil {
ss.HeartbeatStream.Stream.Close()
}
}
}
}
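A minimal construction sketch (illustrative; a real deployment configures the host with the PSK, identity and connection gater, but the call shape of NewIndexerService is as above):

package indexer

import (
	"errors"

	"github.com/libp2p/go-libp2p"
)

func exampleStrictIndexer() (*IndexerService, error) {
	h, err := libp2p.New() // bare host, for illustration only
	if err != nil {
		return nil, err
	}
	ix := NewIndexerService(h, nil, 500) // nil pubsub -> strict indexer, up to 500 tracked nodes
	if ix == nil {
		return nil, errors.New("indexer init failed (DHT could not start)")
	}
	// Call ix.Close() on shutdown to stop the DHT provide and cache-refresh loops.
	return ix, nil
}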


@@ -0,0 +1,93 @@
package indexer
import (
"encoding/json"
"errors"
"time"
)
type DefaultValidator struct{}
func (v DefaultValidator) Validate(key string, value []byte) error {
return nil
}
func (v DefaultValidator) Select(key string, values [][]byte) (int, error) {
return 0, nil
}
type PeerRecordValidator struct{}
func (v PeerRecordValidator) Validate(key string, value []byte) error {
// Accept valid tombstones — deletion must be storable so it can propagate
// and win over stale live records on other DHT nodes via Select().
var ts TombstoneRecord
if err := json.Unmarshal(value, &ts); err == nil && ts.Tombstone {
if ts.PeerID == "" || ts.DID == "" {
return errors.New("tombstone: missing fields")
}
if time.Since(ts.DeletedAt) > tombstoneTTL {
return errors.New("tombstone: expired")
}
if _, err := ts.Verify(); err != nil {
return errors.New("tombstone: " + err.Error())
}
return nil
}
var rec PeerRecord
if err := json.Unmarshal(value, &rec); err != nil {
return errors.New("invalid json")
}
// PeerID must exist
if rec.PeerID == "" {
return errors.New("missing peerID")
}
// Expiry check
if rec.ExpiryDate.Before(time.Now().UTC()) {
return errors.New("record expired")
}
// TTL cap: publisher cannot set an expiry further than maxTTLSeconds in
// the future. Prevents abuse (e.g. records designed to linger for years).
if rec.ExpiryDate.After(time.Now().UTC().Add(maxTTLSeconds * time.Second)) {
return errors.New("TTL exceeds maximum allowed")
}
// Signature verification
if _, err := rec.Verify(); err != nil {
return errors.New("invalid signature")
}
return nil
}
func (v PeerRecordValidator) Select(key string, values [][]byte) (int, error) {
// Tombstone always wins: a signed delete supersedes any live record,
// even if the live record has a later ExpiryDate.
for i, val := range values {
var ts TombstoneRecord
if err := json.Unmarshal(val, &ts); err == nil && ts.Tombstone {
return i, nil
}
}
var newest time.Time
index := 0
for i, val := range values {
var rec PeerRecord
if err := json.Unmarshal(val, &rec); err != nil {
continue
}
if rec.ExpiryDate.After(newest) {
newest = rec.ExpiryDate
index = i
}
}
return index, nil
}
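A test-style sketch of the tombstone-wins rule (illustrative; Select does not verify signatures, so plain unsigned structs are enough to exercise the ordering):

package indexer

import (
	"encoding/json"
	"testing"
	"time"
)

func TestSelectPrefersTombstone(t *testing.T) {
	live, _ := json.Marshal(PeerRecord{
		PeerRecordPayload: PeerRecordPayload{
			DID:        "did:example:node",
			ExpiryDate: time.Now().UTC().Add(time.Hour), // still valid
		},
		PeerID: "12D3KooWexample",
	})
	dead, _ := json.Marshal(TombstoneRecord{
		TombstonePayload: TombstonePayload{
			DID:       "did:example:node",
			PeerID:    "12D3KooWexample",
			DeletedAt: time.Now().UTC(),
		},
		Tombstone: true,
	})
	idx, err := PeerRecordValidator{}.Select("/node/did:example:node", [][]byte{live, dead})
	if err != nil || idx != 1 {
		t.Fatalf("expected the tombstone at index 1 to win, got idx=%d err=%v", idx, err)
	}
}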


@@ -0,0 +1,99 @@
// Package location resolves the geographic position of this node via IP
// geolocation and applies a privacy-preserving random offset proportional
// to the chosen granularity level before publishing the result.
package location
import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"time"
peer "cloud.o-forge.io/core/oc-lib/models/peer"
)
// fuzzRadius returns the maximum random offset (in degrees) for each axis
// given a granularity level.
//
// 0 → no location
// 1 → continent ±15° lat / ±20° lng
// 2 → country ±3° lat / ±4° lng (default)
// 3 → region ±0.5° lat / ±0.7° lng
// 4 → city ±0.05° lat / ±0.07° lng
func fuzzRadius(granularity int) (latR, lngR float64) {
switch granularity {
case 1:
return 15.0, 20.0
case 2:
return 3.0, 4.0
case 3:
return 0.5, 0.7
case 4:
return 0.05, 0.07
default:
return 3.0, 4.0
}
}
// clamp keeps a value inside [min, max].
func clamp(v, min, max float64) float64 {
if v < min {
return min
}
if v > max {
return max
}
return v
}
// ipAPIResponse is the subset of fields returned by ip-api.com/json.
type ipAPIResponse struct {
Status string `json:"status"`
Lat float64 `json:"lat"`
Lon float64 `json:"lon"`
Country string `json:"country"`
Region string `json:"regionName"`
City string `json:"city"`
}
// Geolocate resolves the current public IP location via ip-api.com (free,
// no key required for non-commercial use), then fuzzes the result according
// to granularity.
//
// Returns nil if granularity == 0 (opt-out) or if the lookup fails.
func Geolocate(granularity int) *peer.PeerLocation {
if granularity == 0 {
return nil
}
client := &http.Client{Timeout: 5 * time.Second}
resp, err := client.Get("http://ip-api.com/json?fields=status,lat,lon,country,regionName,city")
if err != nil {
return nil
}
defer resp.Body.Close()
var result ipAPIResponse
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil || result.Status != "success" {
return nil
}
latR, lngR := fuzzRadius(granularity)
rng := rand.New(rand.NewSource(time.Now().UnixNano()))
fuzzedLat := result.Lat + (rng.Float64()*2-1)*latR
fuzzedLng := result.Lon + (rng.Float64()*2-1)*lngR
fuzzedLat = clamp(fuzzedLat, -85.0, 85.0)
fuzzedLng = clamp(fuzzedLng, -180.0, 180.0)
fmt.Printf("[location] granularity=%d raw=(%.4f,%.4f) fuzzed=(%.4f,%.4f)\n",
granularity, result.Lat, result.Lon, fuzzedLat, fuzzedLng)
return &peer.PeerLocation{
Latitude: fuzzedLat,
Longitude: fuzzedLng,
Granularity: granularity,
}
}
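A worked sketch of the fuzzing arithmetic (illustrative; it sits in the same package so it can call the unexported helpers above):

package location

import (
	"fmt"
	"math/rand"
)

// Granularity 2 ("country") offsets a raw fix of (43.60, 1.44) by up to
// ±3° latitude and ±4° longitude, so the published point lands somewhere in
// [40.60, 46.60] x [-2.56, 5.44].
func exampleFuzz() {
	latR, lngR := fuzzRadius(2) // 3.0, 4.0
	rawLat, rawLng := 43.60, 1.44
	lat := clamp(rawLat+(rand.Float64()*2-1)*latR, -85, 85)
	lng := clamp(rawLng+(rand.Float64()*2-1)*lngR, -180, 180)
	fmt.Printf("published location: (%.4f, %.4f)\n", lat, lng)
}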

daemons/node/nats.go Normal file

@@ -0,0 +1,242 @@
package node
import (
"context"
"encoding/json"
"fmt"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
"slices"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/config"
pp_model "cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type configPayload struct {
PeerID string `json:"source_peer_id"`
}
type executionConsidersPayload struct {
PeerIDs []string `json:"peer_ids"`
}
func ListenNATS(n *Node) {
tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
tools.PEER_BEHAVIOR_EVENT: func(resp tools.NATSResponse) { //nolint:typecheck
handlePeerBehaviorEvent(n, resp)
},
tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
if resp.FromApp == config.GetAppName() {
return
}
p, err := oclib.GetMySelf()
if err != nil || p == nil || p.PeerID != n.PeerID.String() {
return
}
var propalgation tools.PropalgationMessage
err = json.Unmarshal(resp.Payload, &propalgation)
var dt *tools.DataType
if propalgation.DataType > 0 {
dtt := tools.DataType(propalgation.DataType)
dt = &dtt
}
if err == nil {
switch propalgation.Action {
case tools.PB_ADMIRALTY_CONFIG, tools.PB_MINIO_CONFIG:
var m configPayload
var proto protocol.ID = stream.ProtocolAdmiraltyConfigResource
if propalgation.Action == tools.PB_MINIO_CONFIG {
proto = stream.ProtocolMinioConfigResource
}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
peers, _ := n.GetPeerRecord(context.Background(), m.PeerID)
for _, p := range peers {
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
p.PeerID, proto, propalgation.Payload)
}
}
case tools.PB_CREATE, tools.PB_UPDATE, tools.PB_DELETE:
if slices.Contains([]tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE}, resp.Datatype) {
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
if m["peer_id"] != nil {
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
fmt.Sprintf("%v", m["peer_id"]), stream.ProtocolCreateResource, propalgation.Payload)
}
}
} else {
fmt.Println(n.StreamService.ToPartnerPublishEvent(
context.Background(),
propalgation.Action,
dt, resp.User, resp.Groups,
propalgation.Payload,
))
}
case tools.PB_CONSIDERS:
switch resp.Datatype {
case tools.BOOKING, tools.PURCHASE_RESOURCE, tools.WORKFLOW_EXECUTION:
var m executionConsidersPayload
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
for _, p := range m.PeerIDs {
peers, _ := n.GetPeerRecord(context.Background(), p)
for _, pp := range peers {
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
pp.PeerID, stream.ProtocolConsidersResource, propalgation.Payload)
}
}
}
default:
// minio / admiralty config considers — route back to OriginID.
var m struct {
OriginID string `json:"origin_id"`
}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil && m.OriginID != "" {
peers, _ := n.GetPeerRecord(context.Background(), m.OriginID)
for _, p := range peers {
n.StreamService.PublishCommon(nil, resp.User, resp.Groups,
p.PeerID, stream.ProtocolConsidersResource, propalgation.Payload)
}
}
}
case tools.PB_PLANNER:
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
b := []byte{}
if len(m) > 1 {
b = propalgation.Payload
}
if m["peer_id"] == nil { // send to every active stream
n.StreamService.Mu.Lock()
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil {
for pid := range n.StreamService.Streams[stream.ProtocolSendPlanner] { // send Planner can be long lived - it's a conn
n.StreamService.PublishCommon(nil, resp.User, resp.Groups, pid.String(), stream.ProtocolSendPlanner, b)
}
}
n.StreamService.Mu.Unlock()
} else {
fmt.Println("REACH PLANNER")
n.StreamService.PublishCommon(nil, resp.User, resp.Groups, fmt.Sprintf("%v", m["peer_id"]), stream.ProtocolSendPlanner, b)
}
}
case tools.PB_CLOSE_PLANNER:
m := map[string]interface{}{}
if err := json.Unmarshal(resp.Payload, &m); err == nil {
n.StreamService.Mu.Lock()
if pid, err := pp.Decode(fmt.Sprintf("%v", m["peer_id"])); err == nil {
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil && n.StreamService.Streams[stream.ProtocolSendPlanner][pid] != nil {
n.StreamService.Streams[stream.ProtocolSendPlanner][pid].Stream.Close()
delete(n.StreamService.Streams[stream.ProtocolSendPlanner], pid)
}
}
n.StreamService.Mu.Unlock()
}
case tools.PB_CLOSE_SEARCH:
if propalgation.DataType == int(tools.PEER) {
n.peerSearches.Cancel(resp.User)
} else {
n.StreamService.ResourceSearches.Cancel(resp.User)
}
case tools.PB_SEARCH:
if propalgation.DataType == int(tools.PEER) {
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
needle := fmt.Sprintf("%v", m["search"])
userKey := resp.User
go n.SearchPeerRecord(userKey, needle, func(hit common.SearchHit) {
if b, err := json.Marshal(hit); err == nil {
tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(tools.PEER),
Method: int(tools.SEARCH_EVENT),
Payload: b,
})
}
})
}
} else {
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
fmt.Println("PB_SEARCH CATA", m)
n.PubSubService.SearchPublishEvent(
context.Background(),
dt,
fmt.Sprintf("%v", m["type"]),
resp.User, resp.Groups,
fmt.Sprintf("%v", m["search"]),
)
}
}
}
}
},
})
}
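// Illustrative sketch (not part of the original file): how a co-located service
// could ask oc-discovery to start a peer search by name over NATS. Only the
// identifiers read by ListenNATS above are assumed; the concrete values are made up.
//
//	search, _ := json.Marshal(map[string]interface{}{"search": "my-node-name"})
//	msg, _ := json.Marshal(tools.PropalgationMessage{
//		Action:   tools.PB_SEARCH,
//		DataType: int(tools.PEER),
//		Payload:  search,
//	})
//	tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
//		FromApp: "example-service", // must differ from oc-discovery's own app name
//		User:    "user-123",
//		Payload: msg,
//	})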
// handlePeerBehaviorEvent applies a PeerBehaviorReport received from a trusted
// service (oc-scheduler, oc-datacenter, …). It:
// 1. Loads the target peer from the local DB.
// 2. Deducts the trust penalty and appends a BehaviorWarning.
// 3. Auto-blacklists and evicts the peer stream when TrustScore ≤ threshold.
//
// oc-discovery does NOT re-emit a PROPALGATION_EVENT: propagation is strictly
// inbound (oc-catalog → oc-discovery). The blacklist takes effect locally at
// the next isPeerKnown() call, and immediately via EvictPeer().
func handlePeerBehaviorEvent(n *Node, resp tools.NATSResponse) {
var report tools.PeerBehaviorReport
if err := json.Unmarshal(resp.Payload, &report); err != nil {
fmt.Println("handlePeerBehaviorEvent: unmarshal error:", err)
return
}
if report.TargetPeerID == "" {
return
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
data := access.LoadOne(report.TargetPeerID)
if data.Data == nil {
fmt.Println("handlePeerBehaviorEvent: peer not found:", report.TargetPeerID)
return
}
p := data.ToPeer()
if p == nil {
return
}
// Self-protection: never penalise ourselves.
if self, err := oclib.GetMySelf(); err == nil && self != nil && self.GetID() == p.GetID() {
return
}
shouldBlacklist := p.ApplyBehaviorReport(report)
if shouldBlacklist && p.Relation != pp_model.BLACKLIST {
p.Relation = pp_model.BLACKLIST
fmt.Printf("handlePeerBehaviorEvent: auto-blacklisting peer %s — reason: %s\n",
p.PeerID, p.BlacklistReason)
// Immediately evict any active stream so the peer can no longer heartbeat.
if n.IndexerService != nil {
n.IndexerService.EvictPeer(p.PeerID)
}
}
// Persist updated trust score + relation locally.
if updated := access.UpdateOne(p.Serialize(p), p.GetID()); updated.Err != "" {
fmt.Println("handlePeerBehaviorEvent: could not update peer:", updated.Err)
return
}
// Notify oc-peer (and any other local NATS consumer) of the updated peer record
// via CREATE_RESOURCE so they can synchronise their own state.
if b, err := json.Marshal(p.Serialize(p)); err == nil {
tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.PEER,
Method: int(tools.CREATE_RESOURCE),
Payload: b,
})
}
}
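Conversely, a hedged sketch of the emitting side: what a trusted service would publish for handlePeerBehaviorEvent to pick up. Only TargetPeerID and the NATS identifiers are taken from this file; any penalty or reason fields depend on the actual tools.PeerBehaviorReport definition:

package node

import (
	"encoding/json"

	"cloud.o-forge.io/core/oc-lib/tools"
)

func exampleReportPeerBehavior(targetPeerID string) {
	report := tools.PeerBehaviorReport{
		TargetPeerID: targetPeerID,
		// penalty / reason fields omitted: they depend on the real struct definition
	}
	b, err := json.Marshal(report)
	if err != nil {
		return
	}
	tools.NewNATSCaller().SetNATSPub(tools.PEER_BEHAVIOR_EVENT, tools.NATSResponse{
		FromApp: "oc-scheduler", // one of the trusted emitters mentioned above
		Payload: b,
	})
}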

daemons/node/node.go Normal file

@@ -0,0 +1,547 @@
package node
import (
"context"
"encoding/json"
"errors"
"fmt"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/indexer"
"oc-discovery/daemons/node/location"
"oc-discovery/daemons/node/pubsub"
"oc-discovery/daemons/node/stream"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
"github.com/libp2p/go-libp2p"
pubsubs "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/security/noise"
)
type Node struct {
*common.LongLivedStreamRecordedService[interface{}] // change type of stream
PS *pubsubs.PubSub
IndexerService *indexer.IndexerService
PubSubService *pubsub.PubSubService
StreamService *stream.StreamService
PeerID pp.ID
isIndexer bool
peerRecord *indexer.PeerRecord
// peerSearches: one active peer search per user; new search cancels previous.
peerSearches *common.SearchTracker
Mu sync.RWMutex
}
func InitNode(isNode bool, isIndexer bool) (*Node, error) {
if !isNode && !isIndexer {
return nil, errors.New("wait... what ? your node need to at least something. Retry we can't be friend in that case")
}
logger := oclib.GetLogger()
logger.Info().Msg("retrieving private key...")
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return nil, err
}
logger.Info().Msg("retrieving psk file...")
psk, err := common.LoadPSKFromFile() // shared network PSK: the private-network key; the public OC PSK defines the public network
if err != nil {
return nil, err
}
logger.Info().Msg("open a host...")
gater := newOCConnectionGater(nil) // host set below after creation
h, err := libp2p.New(
libp2p.PrivateNetwork(psk),
libp2p.Identity(priv),
libp2p.Security(noise.ID, noise.New),
libp2p.ListenAddrStrings(
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", conf.GetConfig().NodeEndpointPort),
),
libp2p.ConnectionGater(gater),
)
if err != nil {
return nil, errors.New("no host no node")
}
gater.host = h // wire host back into gater now that it exists
logger.Info().Msg("Host open on " + h.ID().String())
node := &Node{
PeerID: h.ID(),
isIndexer: isIndexer,
LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000),
peerSearches: common.NewSearchTracker(),
}
// Register the bandwidth probe handler so any peer measuring this node's
// throughput can open a dedicated probe stream and read the echo.
h.SetStreamHandler(common.ProtocolBandwidthProbe, common.HandleBandwidthProbe)
// Register the witness query handler so peers can ask this node's view of indexers.
h.SetStreamHandler(common.ProtocolWitnessQuery, func(s network.Stream) {
common.HandleWitnessQuery(h, s)
})
var ps *pubsubs.PubSub
if isNode {
logger.Info().Msg("generate opencloud node...")
ps, err = pubsubs.NewGossipSub(context.Background(), node.Host)
if err != nil {
panic(err) // can't run the node without a pubsub to propagate node state
}
node.PS = ps
// buildRecord returns a fresh signed PeerRecord as JSON, embedded in each
// heartbeat so the receiving indexer can republish it to the DHT directly.
// peerRecord is nil until claimInfo runs, so the first ~20s heartbeats carry
// no record — that's fine, claimInfo publishes once synchronously at startup.
buildRecord := func() json.RawMessage {
if node.peerRecord == nil {
return nil
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return nil
}
fresh := *node.peerRecord
ttl := time.Duration(fresh.TTLSeconds) * time.Second
if ttl <= 0 {
ttl = indexer.DefaultTTLSeconds * time.Second
}
fresh.PeerRecordPayload.ExpiryDate = time.Now().UTC().Add(ttl)
payload, _ := json.Marshal(fresh.PeerRecordPayload)
fresh.Signature, err = priv.Sign(payload)
if err != nil {
return nil
}
b, _ := json.Marshal(fresh)
return json.RawMessage(b)
}
logger.Info().Msg("connect to indexers...")
common.ConnectToIndexers(node.Host, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, buildRecord)
logger.Info().Msg("claims my node...")
if _, err := node.claimInfo(conf.GetConfig().Name, conf.GetConfig().Hostname); err != nil {
panic(err)
}
logger.Info().Msg("run garbage collector...")
node.StartGC(30 * time.Second)
if node.StreamService, err = stream.InitStream(context.Background(), node.Host, node.PeerID, 1000, node); err != nil {
panic(err)
}
node.StreamService.IsPeerKnown = func(pid pp.ID) bool {
// 1. Local DB: known peer (handles blacklist).
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
results := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
},
}, pid.String(), false)
for _, item := range results.Data {
p, ok := item.(*peer.Peer)
if !ok || p.PeerID != pid.String() {
continue
}
return p.Relation != peer.BLACKLIST
}
// 2. Ask a connected indexer → DHT lookup by peer_id.
for _, addr := range common.Indexers.GetAddrs() {
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
s, err := h.NewStream(ctx, addr.Info.ID, common.ProtocolGet)
cancel()
if err != nil {
continue
}
json.NewEncoder(s).Encode(indexer.GetValue{PeerID: pid.String()})
var resp indexer.GetResponse
json.NewDecoder(s).Decode(&resp)
s.Reset()
return resp.Found
}
return false
}
if node.PubSubService, err = pubsub.InitPubSub(context.Background(), node.Host, node.PS, node, node.StreamService); err != nil {
panic(err)
}
f := func(ctx context.Context, evt common.Event, topic string) {
m := map[string]interface{}{}
err := json.Unmarshal(evt.Payload, &m)
if err != nil || evt.From == node.PeerID.String() {
return
}
fmt.Println("PUBSUB SendResponse bef peerrece")
if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 && m["search"] != nil {
fmt.Println("PUBSUB SendResponse af peerrece", m)
node.StreamService.SendResponse(p[0], &evt, fmt.Sprintf("%v", m["search"]))
}
}
node.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
if isNew {
// DB blacklist check: blocks reconnection after EvictPeer + blacklist.
if !node.isPeerKnown(remotePeer) {
return errors.New("peer is blacklisted or unknown")
}
if !node.ConnGuard.Allow() {
return errors.New("connection rate limit exceeded, retry later")
}
}
return nil
}
logger.Info().Msg("subscribe to decentralized search flow...")
go node.SubscribeToSearch(node.PS, &f)
logger.Info().Msg("connect to NATS")
go ListenNATS(node)
logger.Info().Msg("Node is actually running.")
}
if isIndexer {
logger.Info().Msg("generate opencloud indexer...")
node.IndexerService = indexer.NewIndexerService(node.Host, ps, 500)
}
return node, nil
}
// isPeerKnown is the stream-level gate: returns true if pid is allowed.
// Check order (fast → slow):
// 1. In-memory stream records — currently heartbeating to this indexer.
// 2. Local DB by peer_id — known peer, blacklist enforced here.
// 3. DHT /pid/{peerID} → /node/{DID} — registered on any indexer.
//
// ProtocolHeartbeat and ProtocolPublish handlers do NOT call this — they are
// the streams through which a node first makes itself known.
func (d *Node) isPeerKnown(pid pp.ID) bool {
// 1. Fast path: active heartbeat session.
d.StreamMU.RLock()
_, active := d.StreamRecords[common.ProtocolHeartbeat][pid]
d.StreamMU.RUnlock()
if active {
return true
}
// 2. Local DB: known peer (handles blacklist).
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
results := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
},
}, pid.String(), false)
for _, item := range results.Data {
p, ok := item.(*peer.Peer)
if !ok || p.PeerID != pid.String() {
continue
}
return p.Relation != peer.BLACKLIST
}
return true
}
func (d *Node) Close() {
if d.isIndexer && d.IndexerService != nil {
d.IndexerService.Close()
}
d.PubSubService.Close()
d.StreamService.Close()
d.Host.Close()
}
func (d *Node) publishPeerRecord(
rec *indexer.PeerRecord,
) error {
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return err
}
for _, ad := range common.Indexers.GetAddrs() {
var err error
if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolPublish, "", common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{},
&common.Indexers.MuStream); err != nil {
continue
}
stream := common.Indexers.Streams.GetPerID(common.ProtocolPublish, ad.Info.ID)
ttl := time.Duration(rec.TTLSeconds) * time.Second
if ttl <= 0 {
ttl = indexer.DefaultTTLSeconds * time.Second
}
base := indexer.PeerRecordPayload{
Name: rec.Name,
DID: rec.DID,
PubKey: rec.PubKey,
TTLSeconds: rec.TTLSeconds,
ExpiryDate: time.Now().UTC().Add(ttl),
}
payload, _ := json.Marshal(base)
rec.PeerRecordPayload = base
rec.Signature, err = priv.Sign(payload)
if err != nil {
return err
}
if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
return err
}
}
return nil
}
// SearchPeerRecord starts a distributed peer search via ProtocolSearchPeer.
// A new call for the same userKey cancels any previous search.
// Results are pushed to onResult as they arrive; the function returns when
// the stream closes (idle timeout, explicit cancel, or indexer unreachable).
func (d *Node) SearchPeerRecord(userKey, needle string, onResult func(common.SearchHit)) {
logger := oclib.GetLogger()
idleTimeout := common.SearchIdleTimeout()
ctx, cancel := context.WithCancel(context.Background())
// Register cancels any previous search for userKey and starts the idle timer.
// The composite key doubles as QueryID so the indexer echoes it back.
searchKey := d.peerSearches.Register(userKey, cancel, idleTimeout)
defer d.peerSearches.Cancel(searchKey)
req := common.SearchPeerRequest{QueryID: searchKey}
if pid, err := pp.Decode(needle); err == nil {
req.PeerID = pid.String()
} else if _, err := uuid.Parse(needle); err == nil {
req.DID = needle
} else {
req.Name = needle
}
for _, ad := range common.Indexers.GetAddrs() {
if ad.Info == nil {
continue
}
dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second)
s, err := d.Host.NewStream(dialCtx, ad.Info.ID, common.ProtocolSearchPeer)
dialCancel()
if err != nil {
continue
}
if err := json.NewEncoder(s).Encode(req); err != nil {
s.Reset()
continue
}
// Interrupt the blocking Decode as soon as the context is cancelled
// (idle timer, explicit PB_CLOSE_SEARCH, or replacement search).
go func() {
<-ctx.Done()
s.SetReadDeadline(time.Now())
}()
seen := map[string]struct{}{}
dec := json.NewDecoder(s)
for {
var result common.SearchPeerResult
if err := dec.Decode(&result); err != nil {
break
}
if result.QueryID != searchKey || !d.peerSearches.IsActive(searchKey) {
break
}
d.peerSearches.ResetIdle(searchKey)
for _, hit := range result.Records {
key := hit.PeerID
if key == "" {
key = hit.DID
}
if _, already := seen[key]; already {
continue
}
seen[key] = struct{}{}
onResult(hit)
}
}
s.Reset()
return
}
logger.Warn().Str("user", userKey).Msg("[search] no reachable indexer for peer search")
}
func (d *Node) GetPeerRecord(
ctx context.Context,
pidOrdid string,
) ([]*peer.Peer, error) {
var err error
var info map[string]indexer.PeerRecord
// Build the GetValue request: if pidOrdid is neither a UUID DID nor a libp2p
// PeerID, treat it as a human-readable name and let the indexer resolve it.
// GetPeerRecord resolves by PeerID or DID only.
// Name-based search goes through SearchPeerRecord (ProtocolSearchPeer).
getReq := indexer.GetValue{Key: pidOrdid}
if pidR, pidErr := pp.Decode(pidOrdid); pidErr == nil {
getReq.PeerID = pidR.String()
getReq.Key = ""
}
for _, ad := range common.Indexers.GetAddrs() {
if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolGet, "",
common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{}, &common.Indexers.MuStream); err != nil {
continue
}
stream := common.Indexers.Streams.GetPerID(common.ProtocolGet, ad.Info.ID)
if err := json.NewEncoder(stream.Stream).Encode(getReq); err != nil {
continue
}
var resp indexer.GetResponse
if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
continue
}
if resp.Found {
info = resp.Records
}
break
}
var ps []*peer.Peer
for _, pr := range info {
if pk, err := pr.Verify(); err != nil {
return nil, err
} else if _, p, err := pr.ExtractPeer(d.PeerID.String(), pr.PeerID, pk); err != nil {
return nil, err
} else {
ps = append(ps, p)
}
}
return ps, err
}
func (d *Node) claimInfo(
name string,
endPoint string, // TODO: the endpoint may not be necessary, StreamAddress could be enough
) (*peer.Peer, error) {
if endPoint == "" {
return nil, errors.New("no endpoint found for peer")
}
did := uuid.New().String()
peers := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // search by name if no filters are provided
"peer_id": {{Operator: dbs.EQUAL.String(), Value: d.Host.ID().String()}},
},
}, "", false)
if len(peers.Data) > 0 {
did = peers.Data[0].GetID() // if already existing set up did as made
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return nil, err
}
pub, err := tools.LoadKeyFromFilePublic()
if err != nil {
return nil, err
}
pubBytes, err := crypto.MarshalPublicKey(pub)
if err != nil {
return nil, err
}
now := time.Now().UTC()
pRec := indexer.PeerRecordPayload{
Name: name,
DID: did, // REAL PEER ID
PubKey: pubBytes,
TTLSeconds: indexer.DefaultTTLSeconds,
ExpiryDate: now.Add(indexer.DefaultTTLSeconds * time.Second),
}
d.PeerID = d.Host.ID()
payload, _ := json.Marshal(pRec)
rec := &indexer.PeerRecord{
PeerRecordPayload: pRec,
}
rec.Signature, err = priv.Sign(payload)
if err != nil {
return nil, err
}
rec.PeerID = d.Host.ID().String()
rec.APIUrl = endPoint
rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + rec.PeerID
rec.NATSAddress = oclib.GetConfig().NATSUrl
rec.WalletAddress = "my-wallet"
rec.Location = location.Geolocate(conf.GetConfig().LocationGranularity)
if err := d.publishPeerRecord(rec); err != nil {
return nil, err
}
d.peerRecord = rec
if _, err := rec.Verify(); err != nil {
return nil, err
} else {
_, p, err := rec.ExtractPeer(did, did, pub)
if err != nil {
return nil, err
}
b, err := json.Marshal(p)
if err != nil {
return p, err
}
go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.PEER,
Method: int(tools.CREATE_RESOURCE),
SearchAttr: "peer_id",
Payload: b,
})
return p, err
}
}
// DeleteRecord broadcasts a signed tombstone to all connected indexers, signalling
// that this node is voluntarily leaving the network.
// Each indexer verifies the signature, stores the tombstone in the DHT (replacing
// the live record), and evicts the peer from its active pool.
// After a successful call, d.peerRecord is set to nil.
func (d *Node) DeleteRecord() error {
if d.peerRecord == nil {
return errors.New("no peer record to delete")
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return err
}
pubBytes, err := crypto.MarshalPublicKey(priv.GetPublic())
if err != nil {
return err
}
tp := indexer.TombstonePayload{
DID: d.peerRecord.DID,
PeerID: d.PeerID.String(),
DeletedAt: time.Now().UTC(),
}
payloadBytes, _ := json.Marshal(tp)
sig, err := priv.Sign(payloadBytes)
if err != nil {
return err
}
ts := &indexer.TombstoneRecord{
TombstonePayload: tp,
PubKey: pubBytes,
Tombstone: true,
Signature: sig,
}
data, _ := json.Marshal(ts)
for _, ad := range common.Indexers.GetAddrs() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
s, err := d.Host.NewStream(ctx, ad.Info.ID, common.ProtocolDelete)
cancel()
if err != nil {
continue
}
s.SetDeadline(time.Now().Add(5 * time.Second))
s.Write(data)
s.Close()
}
d.peerRecord = nil
return nil
}
/*
TODO:
- Booking is a new decentralized flow:
we check, wait for a response, validate; it goes through discovery, and we relay it.
- The shared workspace is a decentralization matter:
we communicate the movements to the shared peers.
- A shared workspace replaces the notion of partnership at the partnership scale
-> when we share a workspace we become temporary partners,
whether we originally were or not.
-> we then have the same privileges.
- Admiralty orchestrations work the same way.
An event then triggers the creation of a service key.
We must be able to CRUD a DBObject with signature verification.
*/


@@ -0,0 +1,76 @@
package pubsub
import (
"context"
"encoding/json"
"errors"
"fmt"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
"oc-discovery/models"
"time"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
)
func (ps *PubSubService) SearchPublishEvent(
ctx context.Context, dt *tools.DataType, typ string, user string, groups []string, search string) error {
b, err := json.Marshal(map[string]string{"search": search})
if err != nil {
return err
}
switch typ {
case "known": // define Search Strategy
return ps.StreamService.PublishesCommon(dt, user, groups, nil, b, stream.ProtocolSearchResource) //if partners focus only them*/
case "partner": // define Search Strategy
return ps.StreamService.PublishesCommon(dt, user, groups, &dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
And: map[string][]dbs.Filter{
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.PARTNER}},
},
}, b, stream.ProtocolSearchResource)
case "all": // Gossip PubSub
b, err := json.Marshal(map[string]string{"search": search})
if err != nil {
return err
}
idleTimeout := func() time.Duration {
if t := conf.GetConfig().SearchTimeout; t > 0 {
return time.Duration(t) * time.Second
}
return 5 * time.Second
}()
searchCtx, cancel := context.WithCancel(ctx)
// Register cancels any previous search for this user and starts the idle timer.
// The returned composite key is used as User in the GossipSub event so that
// remote peers echo it back unchanged, allowing IsActive to validate results.
searchKey := ps.StreamService.ResourceSearches.Register(user, cancel, idleTimeout)
fmt.Println("PUBLISH ON PUBSUB", common.TopicPubSubSearch, searchKey)
return ps.publishEvent(searchCtx, dt, tools.PB_SEARCH, common.TopicPubSubSearch, searchKey, b)
default:
return errors.New("no type of research found")
}
}
func (ps *PubSubService) publishEvent(
ctx context.Context, dt *tools.DataType, action tools.PubSubAction, topicName string, user string, payload []byte,
) error {
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return err
}
msg, _ := json.Marshal(models.NewEvent(action.String(), ps.Host.ID().String(), dt, user, payload, priv))
topic := ps.Node.GetPubSub(topicName)
if topic == nil {
topic, err = ps.PS.Join(topicName)
if err != nil {
return err
}
}
return topic.Publish(ctx, msg)
}
// TODO REVIEW PUBLISHING + ADD SEARCH ON PUBLIC : YES
// TODO : Search should verify DataType


@@ -0,0 +1,32 @@
package pubsub
import (
"context"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
"sync"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
)
type PubSubService struct {
Node common.DiscoveryPeer
Host host.Host
PS *pubsub.PubSub
StreamService *stream.StreamService
Subscription []string
mutex sync.RWMutex
}
func InitPubSub(ctx context.Context, h host.Host, ps *pubsub.PubSub, node common.DiscoveryPeer, streamService *stream.StreamService) (*PubSubService, error) {
return &PubSubService{
Host: h,
Node: node,
StreamService: streamService,
PS: ps,
}, nil
}
func (ix *PubSubService) Close() {
}


@@ -0,0 +1,224 @@
package stream
import (
"context"
"crypto/subtle"
"encoding/json"
"errors"
"fmt"
"oc-discovery/daemons/node/common"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/booking/planner"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/tools"
)
type Verify struct {
IsVerified bool `json:"is_verified"`
}
func (ps *StreamService) handleEvent(protocol string, evt *common.Event) error {
fmt.Println("handleEvent")
ps.handleEventFromPartner(evt, protocol)
/*if protocol == ProtocolVerifyResource {
if evt.DataType == -1 {
tools.NewNATSCaller().SetNATSPub(tools.VERIFY_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Method: int(tools.VERIFY_RESOURCE),
Payload: evt.Payload,
})
} else if err := ps.verifyResponse(evt); err != nil {
return err
}
}*/
if protocol == ProtocolSendPlanner {
fmt.Println("sendPlanner", evt)
if err := ps.sendPlanner(evt); err != nil {
return err
}
}
if protocol == ProtocolSearchResource && evt.DataType > -1 {
if err := ps.retrieveResponse(evt); err != nil {
return err
}
}
if protocol == ProtocolConsidersResource {
if err := ps.pass(evt, tools.CONSIDERS_EVENT); err != nil {
return err
}
}
if protocol == ProtocolAdmiraltyConfigResource {
if err := ps.pass(evt, tools.ADMIRALTY_CONFIG_EVENT); err != nil {
return err
}
}
if protocol == ProtocolMinioConfigResource {
if err := ps.pass(evt, tools.MINIO_CONFIG_EVENT); err != nil {
return err
}
}
return errors.New("no action authorized available : " + protocol)
}
func (abs *StreamService) verifyResponse(event *common.Event) error { //
res, err := resources.ToResource(int(event.DataType), event.Payload)
if err != nil || res == nil {
return nil
}
verify := Verify{
IsVerified: false,
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(event.DataType), nil)
data := access.LoadOne(res.GetID())
if data.Err == "" && data.Data != nil {
if b, err := json.Marshal(data.Data); err == nil {
if res2, err := resources.ToResource(int(event.DataType), b); err == nil {
verify.IsVerified = subtle.ConstantTimeCompare(res.GetSignature(), res2.GetSignature()) == 1
}
}
}
if b, err := json.Marshal(verify); err == nil {
abs.PublishCommon(nil, event.User, event.Groups, event.From, ProtocolVerifyResource, b)
}
return nil
}
func (abs *StreamService) sendPlanner(event *common.Event) error { //
fmt.Println("sendPlanner", len(event.Payload))
if len(event.Payload) == 0 {
if plan, err := planner.GenerateShallow(&tools.APIRequest{Admin: true}); err == nil {
if b, err := json.Marshal(plan); err == nil {
abs.PublishCommon(nil, event.User, event.Groups, event.From, ProtocolSendPlanner, b)
} else {
return err
}
} else {
return err
}
} else { // payload is not empty: forward it as a planner execution over NATS
m := map[string]interface{}{}
if err := json.Unmarshal(event.Payload, &m); err == nil {
m["peer_id"] = event.From
if pl, err := json.Marshal(m); err == nil {
go tools.NewNATSCaller().SetNATSPub(tools.PLANNER_EXECUTION, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(oclib.BOOKING),
Method: int(tools.PLANNER_EXECUTION),
Payload: pl,
})
}
}
}
return nil
}
func (abs *StreamService) retrieveResponse(event *common.Event) error { //
if !abs.ResourceSearches.IsActive(event.User) {
return nil // search already closed or timed out
}
res, err := resources.ToResource(int(event.DataType), event.Payload)
if err != nil || res == nil {
return nil
}
// A response arrived — reset the idle timeout.
abs.ResourceSearches.ResetIdle(event.User)
b, err := json.Marshal(res.Serialize(res))
if err != nil {
return err
}
go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(event.DataType),
Method: int(tools.SEARCH_EVENT),
Payload: b,
})
return nil
}
func (abs *StreamService) pass(event *common.Event, method tools.NATSMethod) error { //
go tools.NewNATSCaller().SetNATSPub(method, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(event.DataType),
Method: int(method),
Payload: event.Payload,
})
return nil
}
func (ps *StreamService) handleEventFromPartner(evt *common.Event, protocol string) error {
switch protocol {
case ProtocolSearchResource:
m := map[string]interface{}{}
err := json.Unmarshal(evt.Payload, &m)
if err != nil {
return err
}
if search, ok := m["search"]; ok {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"peer_id": {{Operator: dbs.EQUAL.String(), Value: evt.From}},
},
}, evt.From, false)
if len(peers.Data) > 0 {
p := peers.Data[0].(*peer.Peer)
ps.SendResponse(p, evt, fmt.Sprintf("%v", search))
} else if p, err := ps.Node.GetPeerRecord(context.Background(), evt.From); err == nil && len(p) > 0 { // peer from is peerID
ps.SendResponse(p[0], evt, fmt.Sprintf("%v", search))
}
} else {
fmt.Println("SEND SEARCH_EVENT SetNATSPub", m)
go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.SEARCH_EVENT),
Payload: evt.Payload,
})
}
case ProtocolCreateResource, ProtocolUpdateResource:
fmt.Println("RECEIVED Protocol.Update", string(evt.Payload))
go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.CREATE_RESOURCE),
Payload: evt.Payload,
})
case ProtocolDeleteResource:
go tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.REMOVE_RESOURCE),
Payload: evt.Payload,
})
default:
return errors.New("no action authorized available : " + protocol)
}
return nil
}
func (abs *StreamService) SendResponse(p *peer.Peer, event *common.Event, search string) error {
dts := []tools.DataType{tools.DataType(event.DataType)}
if event.DataType == -1 { // expect all resources
dts = []tools.DataType{
tools.COMPUTE_RESOURCE,
tools.STORAGE_RESOURCE,
tools.PROCESSING_RESOURCE,
tools.DATA_RESOURCE,
tools.WORKFLOW_RESOURCE,
}
}
if self, err := oclib.GetMySelf(); err != nil {
return err
} else {
for _, dt := range dts {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil)
searched := access.Search(abs.FilterPeer(self.GetID(), event.Groups, search), "", false)
for _, ss := range searched.Data {
if j, err := json.Marshal(ss); err == nil {
abs.PublishCommon(&dt, event.User, event.Groups, p.PeerID, ProtocolSearchResource, j)
}
}
}
}
return nil
}


@@ -0,0 +1,147 @@
package stream
import (
"context"
"encoding/json"
"errors"
"fmt"
"oc-discovery/daemons/node/common"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
func (ps *StreamService) PublishesCommon(dt *tools.DataType, user string, groups []string, filter *dbs.Filters, resource []byte, protos ...protocol.ID) error {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
var p oclib.LibDataShallow
if filter == nil {
p = access.LoadAll(false)
} else {
p = access.Search(filter, "", false)
}
for _, pes := range p.Data {
for _, proto := range protos {
if _, err := ps.PublishCommon(dt, user, groups, pes.(*peer.Peer).PeerID, proto, resource); err != nil {
continue
}
}
}
return nil
}
func (ps *StreamService) PublishCommon(dt *tools.DataType, user string, groups []string, toPeerID string, proto protocol.ID, resource []byte) (*common.Stream, error) {
fmt.Println("PublishCommon", toPeerID)
if toPeerID == ps.Key.String() {
fmt.Println("Can't send to ourself !")
return nil, errors.New("Can't send to ourself !")
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
p := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // search by name if no filters are provided
"peer_id": {{Operator: dbs.EQUAL.String(), Value: toPeerID}},
},
}, toPeerID, false)
var pe *peer.Peer
if len(p.Data) > 0 && p.Data[0].(*peer.Peer).Relation != peer.BLACKLIST {
pe = p.Data[0].(*peer.Peer)
} else if pps, err := ps.Node.GetPeerRecord(context.Background(), toPeerID); err == nil && len(pps) > 0 {
pe = pps[0]
}
if pe != nil {
ad, err := pp.AddrInfoFromString(pe.StreamAddress)
if err != nil {
return nil, err
}
return ps.write(toPeerID, ad, dt, user, resource, proto)
}
return nil, errors.New("peer unvalid " + toPeerID)
}
func (ps *StreamService) ToPartnerPublishEvent(
ctx context.Context, action tools.PubSubAction, dt *tools.DataType, user string, groups []string, payload []byte) error {
if *dt == tools.PEER {
var p peer.Peer
if err := json.Unmarshal(payload, &p); err != nil {
return err
}
if _, err := pp.Decode(p.PeerID); err != nil {
return err
}
if pe, err := oclib.GetMySelf(); err != nil {
return err
} else if pe.GetID() == p.GetID() {
return fmt.Errorf("can't send to ourself")
} else {
pe.Relation = p.Relation
pe.Verify = false
if b2, err := json.Marshal(pe); err == nil {
if _, err := ps.PublishCommon(dt, user, groups, p.PeerID, ProtocolUpdateResource, b2); err != nil {
return err
}
}
}
return nil
}
// proto defaults to create and is overridden for update/delete below.
var proto protocol.ID = ProtocolCreateResource
switch action {
case tools.PB_DELETE:
proto = ProtocolDeleteResource
case tools.PB_UPDATE:
proto = ProtocolUpdateResource
}
ps.PublishesCommon(dt, user, groups, &dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
And: map[string][]dbs.Filter{
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.PARTNER}},
},
}, payload, proto)
return nil
}
func (s *StreamService) write(
did string,
peerID *pp.AddrInfo,
dt *tools.DataType,
user string,
payload []byte,
proto protocol.ID) (*common.Stream, error) {
logger := oclib.GetLogger()
var err error
pts := map[protocol.ID]*common.ProtocolInfo{}
for k, v := range protocols {
pts[k] = v
}
for k, v := range protocolsPartners {
pts[k] = v
}
// should create a very temp stream
if s.Streams, err = common.TempStream(s.Host, *peerID, proto, did, s.Streams, pts, &s.Mu); err != nil {
fmt.Println("TempStream", err)
return nil, errors.New("no stream available for protocol " + fmt.Sprintf("%v", proto) + " from PID " + peerID.ID.String())
}
stream := s.Streams[proto][peerID.ID]
evt := common.NewEvent(string(proto), s.Host.ID().String(), dt, user, payload)
fmt.Println("SEND EVENT ", peerID, proto, evt.From, evt.DataType, evt.Timestamp)
if err := json.NewEncoder(stream.Stream).Encode(evt); err != nil {
stream.Stream.Close()
logger.Err(err)
return nil, err
}
if protocolInfo, ok := protocols[proto]; ok && protocolInfo.WaitResponse {
go s.readLoop(stream, peerID.ID, proto, &common.ProtocolInfo{PersistantStream: true})
}
return stream, nil
}


@@ -0,0 +1,278 @@
package stream
import (
"context"
"encoding/json"
"errors"
"io"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"strings"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/utils"
"github.com/google/uuid"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
const ProtocolConsidersResource = "/opencloud/resource/considers/1.0"
const ProtocolMinioConfigResource = "/opencloud/minio/config/1.0"
const ProtocolAdmiraltyConfigResource = "/opencloud/admiralty/config/1.0"
const ProtocolSearchResource = "/opencloud/resource/search/1.0"
const ProtocolCreateResource = "/opencloud/resource/create/1.0"
const ProtocolUpdateResource = "/opencloud/resource/update/1.0"
const ProtocolDeleteResource = "/opencloud/resource/delete/1.0"
const ProtocolSendPlanner = "/opencloud/resource/planner/1.0"
const ProtocolVerifyResource = "/opencloud/resource/verify/1.0"
const ProtocolHeartbeatPartner = "/opencloud/resource/heartbeat/partner/1.0"
var protocols = map[protocol.ID]*common.ProtocolInfo{
ProtocolConsidersResource: {WaitResponse: false, TTL: 3 * time.Second},
ProtocolSendPlanner: {WaitResponse: true, TTL: 24 * time.Hour},
ProtocolSearchResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolVerifyResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolMinioConfigResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolAdmiraltyConfigResource: {WaitResponse: true, TTL: 1 * time.Minute},
}
var protocolsPartners = map[protocol.ID]*common.ProtocolInfo{
ProtocolCreateResource: {TTL: 3 * time.Second},
ProtocolUpdateResource: {TTL: 3 * time.Second},
ProtocolDeleteResource: {TTL: 3 * time.Second},
}
type StreamService struct {
Key pp.ID
Host host.Host
Node common.DiscoveryPeer
Streams common.ProtocolStream
maxNodesConn int
Mu sync.RWMutex
ResourceSearches *common.SearchTracker
// IsPeerKnown, when set, is called at stream open for every inbound protocol.
// Return false to reset the stream immediately. Left nil until wired by the node.
IsPeerKnown func(pid pp.ID) bool
}
func InitStream(ctx context.Context, h host.Host, key pp.ID, maxNode int, node common.DiscoveryPeer) (*StreamService, error) {
logger := oclib.GetLogger()
service := &StreamService{
Key: key,
Node: node,
Host: h,
Streams: common.ProtocolStream{},
maxNodesConn: maxNode,
ResourceSearches: common.NewSearchTracker(),
}
for proto := range protocols {
service.Host.SetStreamHandler(proto, service.gate(service.HandleResponse))
}
logger.Info().Msg("connect to partners...")
service.connectToPartners() // we set up a stream
go service.StartGC(8 * time.Second)
return service, nil
}
// gate wraps a stream handler with IsPeerKnown validation.
// If the peer is unknown the entire connection is closed and the handler is not called.
// IsPeerKnown is read at stream-open time so it works even when set after InitStream.
func (s *StreamService) gate(h func(network.Stream)) func(network.Stream) {
return func(stream network.Stream) {
if s.IsPeerKnown != nil && !s.IsPeerKnown(stream.Conn().RemotePeer()) {
logger := oclib.GetLogger()
logger.Warn().Str("peer", stream.Conn().RemotePeer().String()).Msg("[stream] unknown peer, closing connection")
stream.Conn().Close()
return
}
h(stream)
}
}
func (s *StreamService) HandleResponse(stream network.Stream) {
s.Mu.Lock()
defer s.Mu.Unlock()
stream.Protocol()
if s.Streams[stream.Protocol()] == nil {
s.Streams[stream.Protocol()] = map[pp.ID]*common.Stream{}
}
expiry := 1 * time.Minute
if protocols[stream.Protocol()] != nil {
expiry = protocols[stream.Protocol()].TTL
} else if protocolsPartners[stream.Protocol()] != nil {
expiry = protocolsPartners[stream.Protocol()].TTL
}
s.Streams[stream.Protocol()][stream.Conn().RemotePeer()] = &common.Stream{
Stream: stream,
Expiry: time.Now().UTC().Add(expiry + 1*time.Minute),
}
go s.readLoop(s.Streams[stream.Protocol()][stream.Conn().RemotePeer()],
stream.Conn().RemotePeer(),
stream.Protocol(), protocols[stream.Protocol()])
}
func (s *StreamService) connectToPartners() error {
logger := oclib.GetLogger()
// Register handlers for partner resource protocols (create/update/delete).
// Connections to partners happen on-demand via TempStream when needed.
for proto, info := range protocolsPartners {
f := func(ss network.Stream) {
if s.Streams[proto] == nil {
s.Streams[proto] = map[pp.ID]*common.Stream{}
}
s.Streams[proto][ss.Conn().RemotePeer()] = &common.Stream{
Stream: ss,
Expiry: time.Now().UTC().Add(10 * time.Second),
}
go s.readLoop(s.Streams[proto][ss.Conn().RemotePeer()], ss.Conn().RemotePeer(), proto, info)
}
logger.Info().Msg("SetStreamHandler " + string(proto))
s.Host.SetStreamHandler(proto, s.gate(f))
}
return nil
}
func (s *StreamService) searchPeer(search string) ([]*peer.Peer, error) {
ps := []*peer.Peer{}
if conf.GetConfig().PeerIDS != "" {
for _, peerID := range strings.Split(conf.GetConfig().PeerIDS, ",") {
ppID := strings.Split(peerID, "/")
ps = append(ps, &peer.Peer{
AbstractObject: utils.AbstractObject{
UUID: uuid.New().String(),
Name: ppID[1],
},
PeerID: ppID[len(ppID)-1],
StreamAddress: peerID,
Relation: peer.PARTNER,
})
}
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(nil, search, false)
for _, p := range peers.Data {
ps = append(ps, p.(*peer.Peer))
}
return ps, nil
}
func (ix *StreamService) Close() {
for _, s := range ix.Streams {
for _, ss := range s {
ss.Stream.Close()
}
}
}
func (s *StreamService) StartGC(interval time.Duration) {
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
s.gc()
}
}()
}
func (s *StreamService) gc() {
s.Mu.Lock()
defer s.Mu.Unlock()
now := time.Now().UTC()
for pid, rec := range s.Streams[ProtocolHeartbeatPartner] {
if now.After(rec.Expiry) {
for _, sstreams := range s.Streams {
if sstreams[pid] != nil {
sstreams[pid].Stream.Close()
delete(sstreams, pid)
}
}
}
}
}
func (ps *StreamService) readLoop(s *common.Stream, id pp.ID, proto protocol.ID, protocolInfo *common.ProtocolInfo) {
defer s.Stream.Close()
defer func() {
ps.Mu.Lock()
defer ps.Mu.Unlock()
delete(ps.Streams[proto], id)
}()
loop := true
if !protocolInfo.PersistantStream && !protocolInfo.WaitResponse { // two seconds is enough to wait for a response
time.AfterFunc(2*time.Second, func() {
loop = false
})
}
for {
if !loop {
break
}
var evt common.Event
if err := json.NewDecoder(s.Stream).Decode(&evt); err != nil {
// Any decode error (EOF, reset, malformed JSON) terminates the loop;
// continuing on a dead/closed stream creates an infinite spin.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
continue
}
ps.handleEvent(evt.Type, &evt)
if protocolInfo.WaitResponse && !protocolInfo.PersistantStream {
break
}
}
}
func (abs *StreamService) FilterPeer(peerID string, groups []string, search string) *dbs.Filters {
p, err := oclib.GetMySelf()
if err != nil {
return nil
}
groups = append(groups, "*")
filter := map[string][]dbs.Filter{
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: p.GetID()}}, // is my resource...
"": {{Operator: dbs.OR.String(), Value: &dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractinstanciatedresource.abstractresource.abstractobject.access_mode": {{Operator: dbs.EQUAL.String(), Value: 1}}, // if public
"abstractinstanciatedresource.instances": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{ // or got a partners instances
And: map[string][]dbs.Filter{
"resourceinstance.partnerships": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{
And: map[string][]dbs.Filter{
"resourcepartnership.peer_groups." + peerID: {{Operator: dbs.IN.String(), Value: groups}},
},
}}},
},
}}},
},
}}},
}
if search != "" {
filter[" "] = []dbs.Filter{{Operator: dbs.OR.String(), Value: &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractinstanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}}}
}
return &dbs.Filters{
And: filter,
}
}

demo-discovery.sh (new executable file, 33 lines)

@@ -0,0 +1,33 @@
#!/bin/bash
IMAGE_BASE_NAME="oc-discovery"
DOCKERFILE_PATH="."
docker network create \
--subnet=172.40.0.0/24 \
discovery
for i in $(seq ${1:-0} ${2:-3}); do
NUM=$((i + 1))
PORT=$((4000 + $NUM))
IMAGE_NAME="${IMAGE_BASE_NAME}:${NUM}"
echo "▶ Building image ${IMAGE_NAME} with CONF_NUM=${NUM}"
docker build \
--build-arg CONF_NUM=${NUM} \
-t "${IMAGE_BASE_NAME}_${NUM}" \
${DOCKERFILE_PATH}
docker kill "${IMAGE_BASE_NAME}_${NUM}" || true
docker rm "${IMAGE_BASE_NAME}_${NUM}" || true
echo "▶ Running container ${IMAGE_NAME} on port ${PORT}:${PORT}"
docker run -d \
--network="${3:-oc}" \
-p ${PORT}:${PORT} \
--name "${IMAGE_BASE_NAME}_${NUM}" \
"${IMAGE_BASE_NAME}_${NUM}"
docker network connect --ip "172.40.0.${NUM}" discovery "${IMAGE_BASE_NAME}_${NUM}"
done


@@ -1,10 +0,0 @@
{
"port": 8080,
"redisurl":"localhost:6379",
"redispassword":"",
"zincurl":"http://localhost:4080",
"zinclogin":"admin",
"zincpassword":"admin",
"identityfile":"/app/identity.json",
"defaultpeers":"/app/peers.json"
}


@@ -1,10 +1,14 @@
version: '3.4'
services:
ocdiscovery:
image: 'ocdiscovery:latest'
oc-discovery:
image: 'oc-discovery:latest'
ports:
- 8088:8080
container_name: ocdiscovery
- 9002:8080
container_name: oc-discovery
networks:
- oc
networks:
oc:
external: true


@@ -1,10 +0,0 @@
{
"port": 8080,
"redisurl":"localhost:6379",
"redispassword":"",
"zincurl":"http://localhost:4080",
"zinclogin":"admin",
"zincpassword":"admin",
"identityfile":"/app/identity.json",
"defaultpeers":"/app/peers.json"
}

docker_discovery1.json (new file, 6 lines)

@@ -0,0 +1,6 @@
{
"MONGO_URL":"mongodb://mongo:27017/",
"MONGO_DATABASE":"DC_myDC",
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "indexer"
}

docker_discovery2.json (new file, 8 lines)

@@ -0,0 +1,8 @@
{
"MONGO_URL":"mongodb://mongo:27017/",
"MONGO_DATABASE":"DC_myDC",
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "indexer",
"NODE_ENDPOINT_PORT": 4002,
"INDEXER_ADDRESSES": "/ip4/172.40.0.1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}

docker_discovery3.json (new file, 9 lines)

@@ -0,0 +1,9 @@
{
"MONGO_URL":"mongodb://mongo:27017/",
"MONGO_DATABASE":"DC_myDC",
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "node",
"NODE_ENDPOINT_PORT": 4003,
"NAME": "opencloud-demo-1",
"INDEXER_ADDRESSES": "/ip4/172.40.0.2/tcp/4002/p2p/12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"
}

docker_discovery4.json (new file, 9 lines)

@@ -0,0 +1,9 @@
{
"MONGO_URL":"mongodb://mongo:27017/",
"MONGO_DATABASE":"DC_myDC",
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "node",
"NODE_ENDPOINT_PORT": 4004,
"NAME": "opencloud-demo-2",
"INDEXER_ADDRESSES": "/ip4/172.40.0.1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}

File diff suppressed because it is too large.


@@ -0,0 +1,362 @@
================================================================================
OC-DISCOVERY : TARGET ARCHITECTURE - DHT NETWORK WITHOUT NATIVES
Long-term evolution vision, derived from a comparative analysis
================================================================================
Written from the analysis of the current architecture and the comparative
discussion with Tapestry, Kademlia, EigenTrust and distributed reputation
systems.
Reference: DECENTRALIZED_SYSTEMS_COMPARISON.txt §9
================================================================================
1. MOTIVATION
================================================================================
The current architecture (node → indexer → native indexer) is robust and well
suited to an early phase of the network. Its limits at scale are:
- Static pool of natives at startup → dependency on configuration
- Local cache of natives = single point of failure (loss = empty pool)
- Blocking inter-native consensus (~7s) triggered at every node bootstrap
- O(N indexers) state per native → grows linearly with the network
- Structurally privileged nodes → relative SPOFs
The target described here removes the notion of the native indexer as an
architectural tier. The network becomes flat: indexers and nodes are actors
of the same nature, differentiated only by their voluntary role.
================================================================================
2. FUNDAMENTAL PRINCIPLES
================================================================================
P1. No node is structurally privileged.
P2. Trust is a product of time and verification, not of an arbiter.
P3. An actor's claims are independently verifiable by any peer.
P4. Reputation emerges from collective behaviour, not from central reporting.
P5. The DHT is neutral infrastructure: it stores facts, not judgements.
P6. Static configuration no longer exists at runtime, only at bootstrap.
================================================================================
3. ROLES
================================================================================
3.1 Node
--------
Consumer of the network. Starts, selects a pool of indexers via the DHT,
heartbeats its indexers, accumulates scores locally. Publishes nothing in
routine operation. Participates in consensus challenges on demand.
3.2 Indexer
-----------
Voluntary actor. Registers in the DHT at birth, maintains its record, serves
node traffic (heartbeat, Publish, Get). Declares its metrics in every
heartbeat response. Maintains a score aggregated from its connected nodes.
Difference from the current design: the indexer no longer has any link to a
native. It is autonomous. Its existence in the network is proven by its DHT
record and by the nodes that contact it directly.
3.3 DHT infrastructure node (ex-native)
----------------------------------------
Any sufficiently stable node can maintain the DHT without being an indexer.
It is a configuration, not an architectural type: `dht_mode: server`.
These nodes maintain the Kademlia k-buckets and store the indexer records.
They do not see node↔indexer traffic and do not orchestrate it.
================================================================================
4. NODE BOOTSTRAP
================================================================================
4.1 Joining the network
-------------------------
The node starts with 1 to 3 addresses of known DHT nodes (bootstrap peers).
These are the only static pieces of information required. These peers have no
semantic role: they only serve to enter the DHT overlay.
4.2 Discovering the indexer pool
----------------------------------
Node → DHT.FindProviders(hash("/opencloud/indexers"))
→ receives a list of N candidates with their records
Selection of the initial pool:
1. Latency filter : ping < threshold → real network proximity
2. Fill-rate filter : prefer the less loaded indexers
3. Weighted draw : probability ∝ (1 - fill_rate), curve w(F) = F×(1-F) (sketched below)
indexer at 20% load → very likely
indexer at 80% load → unlikely
4. Diversity filter : a different /24 subnet for each pool entry
No consensus is needed at this step. The node starts with a low tolerance
(see §7): it accepts imperfect indexers and evaluates them over time.
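A minimal Go sketch of this selection (hypothetical package, types and function names, not code from this repository). It weights the draw by 1 - fill_rate, which matches the 20%/80% examples above; the w(F) = F×(1-F) curve given in step 3 would be an alternative weight that also down-ranks near-empty indexers.
package discovery
import (
	"math/rand"
	"net"
)
// IndexerCandidate is one record returned by the DHT lookup.
type IndexerCandidate struct {
	Multiaddr string
	FillRate  float64 // 0..1, self-declared
	LatencyMs float64 // measured locally by the node
	IP        net.IP
}
// selectPool applies the latency filter, a fill-rate-weighted draw and the /24
// diversity filter, and returns up to poolSize indexers.
func selectPool(candidates []IndexerCandidate, poolSize int, maxLatencyMs float64) []IndexerCandidate {
	eligible := []IndexerCandidate{}
	for _, c := range candidates {
		if c.LatencyMs < maxLatencyMs { // 1. latency filter
			eligible = append(eligible, c)
		}
	}
	seenSubnet := map[string]bool{}
	pool := []IndexerCandidate{}
	for len(pool) < poolSize && len(eligible) > 0 {
		// 2-3. weighted draw: weight = 1 - fill_rate (less loaded → more likely)
		total := 0.0
		for _, c := range eligible {
			total += 1 - c.FillRate
		}
		r, idx := rand.Float64()*total, 0
		for i, c := range eligible {
			r -= 1 - c.FillRate
			if r <= 0 {
				idx = i
				break
			}
		}
		chosen := eligible[idx]
		eligible = append(eligible[:idx], eligible[idx+1:]...)
		subnet := chosen.IP.String()
		if ip4 := chosen.IP.To4(); ip4 != nil { // 4. diversity filter on the /24 subnet
			subnet = ip4.Mask(net.CIDRMask(24, 32)).String()
		}
		if seenSubnet[subnet] {
			continue
		}
		seenSubnet[subnet] = true
		pool = append(pool, chosen)
	}
	return pool
}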
================================================================================
5. REGISTERING AN INDEXER IN THE DHT
================================================================================
At birth, the indexer publishes its DHT record:
key : hash("/opencloud/indexers") ← fixed key, known to all
value: {
multiaddr : <network address>,
region : <subnet /24>,
capacity : <maxNodesConn>,
fill_rate : <float 0-1>, ← self-declared, verifiable
peer_count : <int>, ← self-declared, verifiable
peers : [hash(nodeID1), ...], ← hashed list of connected nodes
born_at : <timestamp>,
sig : <indexer key signature>, ← non-forgeable (PSK context)
}
The record is refreshed roughly every 60s (before the TTL expires).
If the indexer goes down: the TTL expires → it automatically disappears from the DHT.
The peer list is hashed for confidentiality but remains verifiable:
a challenger can directly ask a node whether it is connected to this indexer.
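A minimal Go sketch of this record as a struct (hypothetical field names and JSON tags; the exact encoding and signature scheme are not specified here):
package discovery
import "time"
// IndexerDHTRecord is published under the fixed key hash("/opencloud/indexers")
// and refreshed roughly every 60s, before its TTL expires.
type IndexerDHTRecord struct {
	Multiaddr string    `json:"multiaddr"`  // network address
	Region    string    `json:"region"`     // /24 subnet
	Capacity  int       `json:"capacity"`   // maxNodesConn
	FillRate  float64   `json:"fill_rate"`  // 0..1, self-declared, verifiable
	PeerCount int       `json:"peer_count"` // self-declared, verifiable
	Peers     []string  `json:"peers"`      // hash(nodeID) of each connected node
	BornAt    time.Time `json:"born_at"`
	Sig       []byte    `json:"sig"`        // signature with the indexer key (PSK context)
}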
================================================================================
6. HEARTBEAT PROTOCOL: QUESTION AND ANSWER
================================================================================
The heartbeat becomes bidirectional: the node asks questions, the indexer
answers with its current declarations.
6.1 Structure
-------------
Node → Indexer :
{
ts : now,
challenge : <optional, see §8>
}
Indexer → Node :
{
ts : now,
fill_rate : 0.42,
peer_count : 87,
cached_score : 0.74, ← score aggregated from all its connected nodes
challenge_response : {...} ← if a challenge was present in the request
}
The normal heartbeat (without challenge) is nearly identical in weight to the current one.
The indexer's cached_score is updated progressively from the feedback it receives.
6.2 The indexer's cached_score
---------------------------------
The indexer aggregates the scores that its connected nodes communicate to it
(implicitly through the fact that they stay connected, or explicitly during
a consensus). This score gives it a view of its own network quality.
A node can compare its local score for the indexer with the declared cached_score.
A strong divergence is a warning signal.
Node local score : 0.40 ← this indexer is mediocre for me
Cached score : 0.91 ← it claims to be excellent globally
→ triggers a verification challenge
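A minimal Go sketch of the two payloads and of the local/declared score comparison (hypothetical types; the 0.3 divergence threshold is an illustrative assumption, the text above only says "a strong divergence"):
package discovery
import "time"
type HeartbeatRequest struct {
	TS        time.Time   `json:"ts"`
	Challenge interface{} `json:"challenge,omitempty"` // optional, see §8
}
type HeartbeatResponse struct {
	TS                time.Time   `json:"ts"`
	FillRate          float64     `json:"fill_rate"`
	PeerCount         int         `json:"peer_count"`
	CachedScore       float64     `json:"cached_score"` // aggregated from its connected nodes
	ChallengeResponse interface{} `json:"challenge_response,omitempty"`
}
// shouldChallenge flags a strong divergence between the node's local score for
// the indexer and the cached_score the indexer declares about itself.
func shouldChallenge(localScore, declaredScore float64) bool {
	const maxDivergence = 0.3 // assumption, tune per deployment
	return declaredScore-localScore > maxDivergence
}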
================================================================================
7. PROGRESSIVE TRUST MODEL
================================================================================
7.1 Life cycle of a node
---------------------------
Birth
→ low tolerance: accepts almost any indexer from the DHT
→ low switching cost: little accumulated context
→ minScore ≈ 20% (existing dynamicMinScore, kept)
After a few hours
→ uptime accumulates on each known indexer
→ scores stabilise
→ replacement threshold rises progressively
Long term (days)
→ stable pool, high confidence in the known indexers
→ switching is costly but triggered on a clear disappointment
→ minScore ≈ 80% (maturity)
7.2 Underlying model: implicit beta distribution
------------------------------------------------------
α = cumulative successes (heartbeats OK, probes OK, challenges passed)
β = cumulative failures (timeouts, failed probes, failed challenges)
confidence = α / (α + β)
New indexer : α=0, β=0 → neutral prior, low tolerance
After 10 days : high α → stable confidence, high switch threshold
Clear disappointment : β rises → confidence drops → switch triggered
7.3 What "disappointing" means
--------------------------------
Heartbeat rate → too many timeouts → declining reliability
Bandwidth probe → drops below the declared value → degradation or lie
Real fill rate → higher than declared → overloaded or dishonest indexer
Failed challenge → declared peer absent from the network → invalid claim
Latency → progressive drift → degraded network quality
Inflated cached_score → strong divergence from the local score → suspicion
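A minimal Go sketch of this implicit beta score (the 0.5 neutral prior when nothing has been observed yet is an assumption; the text only defines confidence = α / (α + β)):
package discovery
// BetaTrust accumulates successes (α) and failures (β) for one indexer.
type BetaTrust struct {
	Alpha float64 // successes: heartbeats OK, probes OK, challenges passed
	Beta  float64 // failures: timeouts, failed probes, failed challenges
}
func (t *BetaTrust) RecordSuccess() { t.Alpha++ }
func (t *BetaTrust) RecordFailure() { t.Beta++ }
// Confidence returns α / (α + β), with 0.5 as a neutral prior when empty.
func (t *BetaTrust) Confidence() float64 {
	if t.Alpha+t.Beta == 0 {
		return 0.5
	}
	return t.Alpha / (t.Alpha + t.Beta)
}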
================================================================================
8. CLAIM VERIFICATION: THREE LAYERS
================================================================================
8.1 Layer 1: passive (every heartbeat, 60s)
-----------------------------------------------
Automatic measurements, zero extra cost.
- Heartbeat RTT → direct latency
- declared fill_rate → tiny payload in the response
- declared peer_count → tiny payload
- indexer cached_score → compared to the local score
8.2 Layer 2: active sampling (1 heartbeat out of N)
--------------------------------------------------
Periodic, asynchronous, lightweight verifications (sketched below).
Every 5 HB (~5min) : spot-check 1 random peer (see §8.4)
Every 10 HB (~10min): subnet diversity check (light DHT lookups)
Every 15 HB (~15min): bandwidth probe (real transfer, dedicated protocol)
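A minimal sketch of this cadence driven by a per-indexer heartbeat counter (the three checks are passed in as hypothetical callbacks, only the modulo schedule matters here):
package discovery
// onHeartbeatTick is called once per heartbeat received from a given indexer.
func onHeartbeatTick(hbCount int, spotCheckRandomPeer, verifySubnetDiversity, runBandwidthProbe func()) {
	if hbCount%5 == 0 {
		go spotCheckRandomPeer() // §8.4: ask one declared peer directly
	}
	if hbCount%10 == 0 {
		go verifySubnetDiversity() // light DHT lookups
	}
	if hbCount%15 == 0 {
		go runBandwidthProbe() // real transfer on the dedicated probe protocol
	}
}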
8.3 Layer 3: consensus (event-driven)
-----------------------------------------
Triggered on: admission of a new indexer into the pool, or detected suspicion.
Node selects a verifiable claim of the target indexer X
Node verifies it itself
Node asks its trusted indexers: "verify this claim about X"
Each indexer verifies independently
Results converge → X is honest → admission
Results diverge → X is suspect → rejection or probation
The consensus is lightweight: a few out-of-band contacts, no blocking round.
It is not continuous, it is event-driven.
8.4 Out-of-band verification (no DHT writes by the nodes)
----------------------------------------------------------------
Nodes do NOT publish continuous contact records in the DHT.
This avoids N×M records to refresh (a high DHT cost at scale).
Instead, during a challenge (sketched below):
The challenger selects 2-3 peers from the peer list declared by X
→ contacts those peers directly: "are you connected to indexer X?"
→ direct answer (out-of-band, not via the DHT)
→ verification without any DHT write
The indexer cannot make peers that are not connected to it answer "yes".
The verification is non-falsifiable and has no DHT cost.
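A minimal Go sketch of this spot check: the challenger samples the peer list declared by X and asks those peers directly, with no DHT write (askPeerConnected stands for a hypothetical direct-stream query):
package discovery
import "math/rand"
// verifyPeerListClaim returns the fraction of sampled peers that confirm being
// connected to the indexer; a low ratio means the declared peer list is inflated.
func verifyPeerListClaim(declaredPeers []string, sample int, askPeerConnected func(peerHash string) bool) float64 {
	if len(declaredPeers) == 0 || sample <= 0 {
		return 0
	}
	if sample > len(declaredPeers) {
		sample = len(declaredPeers)
	}
	confirmed := 0
	for _, i := range rand.Perm(len(declaredPeers))[:sample] {
		if askPeerConnected(declaredPeers[i]) {
			confirmed++
		}
	}
	return float64(confirmed) / float64(sample)
}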
8.5 Why X cannot cheat
------------------------------------
X cannot coordinate different answers to simultaneous challengers.
Each challenger independently contacts the same peers.
If X lies about its peer list:
- Challenger A contacts peer P → "no, not connected to X"
- Challenger B contacts peer P → "no, not connected to X"
- Consensus: X is lying → its score drops at every challenger
- Network effect: progressively, X loses its connections
- Its DHT peer list empties → future claims even less credible
================================================================================
9. NETWORK EFFECT WITHOUT CENTRAL REPORTING
================================================================================
A node that penalises an indexer sends no "report" to anyone.
Its local actions produce the network effect by aggregation:
Node lowers X's score → X receives less traffic from that node
Node switches to Y → X loses a client
Node refuses X's challenges → X can no longer take part in consensus
If 200 nodes do the same:
X loses most of its connections
Its DHT peer list empties (directly contacted peers say "no")
Its cached_score collapses (few nodes remain)
New nodes that see X in the DHT get failed challenges
X is naturally excluded without any central decision
Conversely, an honest indexer sees its scores rise on all its connected
nodes, its peer list densify, and its challenges succeed systematically.
Its reputation is an observable and verifiable product.
================================================================================
10. ARCHITECTURE SUMMARY
================================================================================
DHT → neutral directory, source of truth for indexer records,
maintained by any stable node (dht_mode: server)
Indexer → voluntary actor, registers itself, maintains its claims,
serves traffic, accumulates its own aggregated score
Node → consumer, passive scoring + sampling + lightweight consensus,
progressive trust, adaptive switching
Heartbeat → 60s metronome + vector of lightweight declarations + optional challenge
Consensus → event-driven, independent multi-challengers,
out-of-band verification of DHT claims
Trust → implicit beta, progressive, switching cost grows with age
Reputation → emerges from collective behaviour, no central arbiter
Bootstrap → 1-3 known DHT peers → the only static configuration needed
================================================================================
11. MIGRATION PATH
================================================================================
Phase 1 (current)
Static natives, dynamic indexer pool, inter-native consensus
→ robust, suited to the early phase
Phase 2 (intermediate)
Dynamic pool of natives via DHT (bootstrap + gossip)
Same native protocol, only discovery becomes dynamic
→ removes the dependency on static native configuration
→ see DECENTRALIZED_SYSTEMS_COMPARISON.txt §9.2
Phase 3 (target)
Architecture described in this document
Natives disappear as an architectural tier
DHT = infrastructure, indexers = autonomous actors
Scoring and consensus entirely on the node side
→ no privileged node, O(log N) scalability
The Phase 2 → Phase 3 migration is a rework of the control plane.
The data plane (node↔indexer heartbeat, Publish, Get) is unchanged.
The libp2p primitives (Kademlia DHT, GossipSub) are already present.
================================================================================
12. PROPERTIES OF THE TARGET SYSTEM
================================================================================
Scalability O(log N), Kademlia DHT routing
Resilience No structural SPOF, TTL = the only source of truth
Trust Progressive, verifiable, emergent
Sybil resistance PSK: only nodes with the key can publish
Cold start Low initial tolerance, progressive ramp-up (existing)
Honesty Claims verifiable out-of-band, non-falsifiable
Decentralisation No node knows the complete global state
================================================================================


@@ -0,0 +1,64 @@
@startuml
title Node Initialization — Peer A (InitNode)
participant "main (Peer A)" as MainA
participant "Node A" as NodeA
participant "libp2p (Peer A)" as libp2pA
participant "ConnectionGater A" as GaterA
participant "DB Peer A (oc-lib)" as DBA
participant "NATS A" as NATSA
participant "Indexer (shared)" as IndexerA
participant "DHT A" as DHTA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA
MainA -> NodeA: InitNode(isNode=true, isIndexer=false)
NodeA -> NodeA: LoadKeyFromFilePrivate() → priv
NodeA -> NodeA: LoadPSKFromFile() → psk
NodeA -> GaterA: newOCConnectionGater(nil)
NodeA -> libp2pA: New(\n PrivateNetwork(psk),\n Identity(priv),\n ListenAddr: tcp/4001,\n ConnectionGater(gater)\n)
libp2pA --> NodeA: host A (PeerID_A)
NodeA -> GaterA: gater.host = host A
note over GaterA: InterceptSecured (inbound):\n1. DB lookup by peer_id\n → BLACKLIST : refuse\n → found : accept\n2. Not found → DHT sequential check\n (transport-error fallthrough only)
NodeA -> libp2pA: SetStreamHandler(/opencloud/probe/1.0, HandleBandwidthProbe)
NodeA -> libp2pA: SetStreamHandler(/opencloud/witness/1.0, HandleWitnessQuery)
NodeA -> libp2pA: NewGossipSub(ctx, host) → ps (GossipSub)
NodeA -> NodeA: buildRecord() closure\n→ signs fresh PeerRecord (expiry=now+2min)\n embedded in each heartbeat tick
NodeA -> IndexerA: ConnectToIndexers(host, minIndexer=1, maxIndexer=5, buildRecord)
note over IndexerA: Reads IndexerAddresses from config\nAdds seeds → Indexers Directory (IsSeed=true)\nLaunches SendHeartbeat goroutine (20s ticker)
IndexerA -> DHTA: proactive DHT discovery (after 5s warmup)\ninitNodeDHT(h, seeds)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory + NudgeIt()
NodeA -> NodeA: claimInfo(name, hostname)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)
IndexerA -> DHTA: PutValue("/node/"+DID_A, record)
NodeA -> NodeA: StartGC(30s)
NodeA -> StreamA: InitStream(ctx, host, PeerID_A, 1000, nodeA)
StreamA -> StreamA: SetStreamHandler(resource/search, create, update,\n delete, planner, verify, considers)
StreamA --> NodeA: StreamService A
NodeA -> PubSubA: InitPubSub(ctx, host, ps, nodeA, streamA)
PubSubA -> PubSubA: subscribeEvents(PB_SEARCH, timeout=-1)
PubSubA --> NodeA: PubSubService A
NodeA -> NodeA: SubscribeToSearch(ps, callback)
note over NodeA: callback: if evt.From != self\n → GetPeerRecord(evt.From)\n → StreamService.SendResponse
NodeA -> NATSA: ListenNATS(nodeA)
note over NATSA: Subscribes:\nCREATE_RESOURCE → partner on-demand\nPROPALGATION_EVENT → resource propagation
NodeA --> MainA: *Node A is ready
note over NodeA,IndexerA: SendHeartbeat goroutine (permanent, 20s ticker):\nNode → Indexer : Heartbeat{name, PeerID, indexersBinded, need, challenges?, record}\nIndexer → Node : HeartbeatResponse{fillRate, challenges, suggestions, witnesses, suggestMigrate}\nScore updated (7 dimensions), pool managed autonomously
@enduml
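For orientation, a minimal sketch of the host-construction step above, assuming go-libp2p's standard options (`PrivateNetwork`, `Identity`, `ListenAddrStrings`); the real `InitNode` additionally wires the connection gater, the stream handlers and GossipSub.

```go
package p2p

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/pnet"
)

// newPrivateHost builds the libp2p host as the init sequence describes it:
// private swarm (PSK), stable identity, TCP listener on 4001. The PSK must be
// 32 bytes; priv is the key loaded by LoadKeyFromFilePrivate in the diagram.
func newPrivateHost(priv crypto.PrivKey, psk pnet.PSK) (host.Host, error) {
	h, err := libp2p.New(
		libp2p.PrivateNetwork(psk),                        // only peers holding the PSK can join
		libp2p.Identity(priv),                             // PeerID derived from the node's key
		libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/4001"), // listen address from the diagram
	)
	if err != nil {
		return nil, fmt.Errorf("libp2p host: %w", err)
	}
	return h, nil
}
```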

View File

@@ -0,0 +1,38 @@
@startuml
title Node Claim — Peer A publishes its PeerRecord (claimInfo + publishPeerRecord)
participant "DB Peer A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "Indexer (shared)" as IndexerA
participant "DHT Kademlia" as DHT
participant "NATS A" as NATSA
NodeA -> DBA: DB(PEER).Search(SELF)
DBA --> NodeA: existing peer (DID_A) or new UUID
NodeA -> NodeA: LoadKeyFromFilePrivate() → priv A
NodeA -> NodeA: LoadKeyFromFilePublic() → pub A
NodeA -> NodeA: Build PeerRecord A {\n Name, DID, PubKey,\n PeerID: PeerID_A,\n APIUrl: hostname,\n StreamAddress: /ip4/.../tcp/4001/p2p/PeerID_A,\n NATSAddress, WalletAddress\n}
NodeA -> NodeA: priv.Sign(rec) → signature
NodeA -> NodeA: rec.ExpiryDate = now + 150s
loop For every indexer bound to Node A (Indexer A, B, ...)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)
IndexerA -> IndexerA: Verify signature
IndexerA -> IndexerA: Check PeerID_A heartbeat stream
IndexerA -> DHT: PutValue("/node/"+DID_A, PeerRecord A)
DHT --> IndexerA: ok
end
NodeA -> NodeA: rec.ExtractPeer(DID_A, DID_A, pub A)
NodeA -> NATSA: SetNATSPub(CREATE_RESOURCE, {PEER, Peer A JSON})
NATSA -> DBA: Upsert Peer A (SearchAttr: peer_id)
DBA --> NATSA: ok
NodeA --> NodeA: *peer.Peer A (SELF)
@enduml
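A compact illustration of the claim step above, assuming an Ed25519 key and plain JSON marshalling; the actual `PeerRecord` carries more fields (APIUrl, NATSAddress, WalletAddress) and is signed with the key loaded from file.

```go
package p2p

import (
	"crypto/ed25519"
	"encoding/json"
	"time"
)

// PeerRecord mirrors a subset of the fields shown in the claim diagram.
type PeerRecord struct {
	Name          string    `json:"name"`
	DID           string    `json:"did"`
	PeerID        string    `json:"peer_id"`
	StreamAddress string    `json:"stream_address"`
	ExpiryDate    time.Time `json:"expiry_date"`
	Signature     []byte    `json:"signature,omitempty"`
}

// signRecord sets the short TTL (150s in the diagram) and signs the record
// body with the Signature field left empty, so verifiers can rebuild it.
func signRecord(rec *PeerRecord, priv ed25519.PrivateKey) error {
	rec.ExpiryDate = time.Now().Add(150 * time.Second)
	rec.Signature = nil
	body, err := json.Marshal(rec)
	if err != nil {
		return err
	}
	rec.Signature = ed25519.Sign(priv, body)
	return nil
}
```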

View File

@@ -0,0 +1,59 @@
@startuml indexer_heartbeat
title Bidirectional node → indexer heartbeat (7-dimension scoring + challenges)
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService" as Indexer
note over NodeA,NodeB: SendHeartbeat goroutine — tick every 20s
== Tick Node A ==
NodeA -> Indexer: NewStream /opencloud/heartbeat/1.0\n(long-lived, reused on subsequent ticks)
NodeA -> Indexer: stream.Encode(Heartbeat{\n name, PeerID_A, timestamp,\n indexersBinded: [addr1, addr2],\n need: maxPool - len(pool),\n challenges: [PeerID_A, PeerID_B], ← batch (every 1-10 HBs)\n challengeDID: "uuid-did-A", ← DHT challenge (every 5 batches)\n record: SignedPeerRecord_A ← expiry=now+2min\n})
Indexer -> Indexer: CheckHeartbeat(stream, maxNodes)\n→ len(Peers()) >= maxNodes → reject
Indexer -> Indexer: HandleHeartbeat → UptimeTracker.RecordHeartbeat()\n→ gap ≤ 2×interval : TotalOnline += gap
Indexer -> Indexer: Republish PeerRecord A to DHT\nDHT.PutValue("/node/"+DID_A, record_A)
== Indexer response → Node A ==
Indexer -> Indexer: BuildHeartbeatResponse(remotePeer=A, need, challenges, challengeDID)\n\nfillRate = connected_nodes / MaxNodesConn()\npeerCount = connected_nodes\nmaxNodes = MaxNodesConn()\nbornAt = time of indexer startup\n\nChallenges: for each challenged PeerID\n found = PeerID in StreamRecords[ProtocolHeartbeat]?\n lastSeen = HeartbeatStream.UptimeTracker.LastSeen\n\nDHT challenge:\n DHT.GetValue("/node/"+challengeDID, timeout=3s)\n → dhtFound + dhtPayload\n\nWitnesses: up to 3 AddrInfos of connected nodes\n (addresses known in the Peerstore)\n\nSuggestions: up to `need` indexers from dhtCache\n (async refresh every 2min, SelectByFillRate)\n\nSuggestMigrate: fillRate > 80%\n AND node in offload.inBatch (batch ≤ 5, grace 3×HB)
Indexer --> NodeA: stream.Encode(HeartbeatResponse{\n fillRate, peerCount, maxNodes, bornAt,\n challenges, dhtFound, dhtPayload,\n witnesses, suggestions, suggestMigrate\n})
== Score processing on the Node A side ==
NodeA -> NodeA: score = ensureScore(Indexers, addr_indexer)\nscore.UptimeTracker.RecordHeartbeat()\n\nlatencyScore = max(0, 1 - RTT / (BaseRoundTrip × 10))\n\nBornAt stability:\n bornAt changed? → score.bornAtChanges++\n\nfillConsistency:\n expected = peerCount / maxNodes\n |expected - fillRate| < 10% → fillConsistent++\n\nChallenge PeerID (ground truth own PeerID):\n found=true AND lastSeen < 2×interval → challengeCorrect++\n\nDHT challenge:\n dhtFound=true → dhtSuccess++\n\nWitness query (async):\n go queryWitnesses(h, indexerID, bornAt, fillRate, witnesses, score)
NodeA -> NodeA: score.Score = ComputeNodeSideScore(latencyScore)\n\nScore = (\n 0.20 × uptimeRatio\n+ 0.20 × challengeAccuracy\n+ 0.15 × latencyScore\n+ 0.10 × fillScore ← 1 - fillRate\n+ 0.10 × fillConsistency\n+ 0.15 × witnessConsistency\n+ 0.10 × dhtSuccessRate\n) × 100 × bornAtPenalty\n\nbornAtPenalty = max(0, 1 - 0.30 × bornAtChanges)\nminScore = clamp(20 + 60 × (age.Hours/24), 20, 80)
alt score < minScore\n AND TotalOnline ≥ 2×interval\n AND !IsSeed\n AND len(pool) > 1
NodeA -> NodeA: evictPeer(dir, addr, id, proto)\n→ delete Addr + Score + Stream\ngo TriggerConsensus(h, voters, need)\n or replenishIndexersFromDHT(h, need)
end
alt resp.SuggestMigrate == true AND nonSeedCount >= MinIndexer
alt IsSeed
NodeA -> NodeA: score.IsSeed = false\n(de-stickied — score-based eviction now possible)
else !IsSeed
NodeA -> NodeA: evictPeer → migration accepted
end
end
alt len(resp.Suggestions) > 0
NodeA -> NodeA: handleSuggestions(dir, indexerID, suggestions)\n→ unknown indexers added to the Indexers Directory\n→ NudgeIt() if anything was actually added
end
== Tick Node B (concurrent) ==
NodeB -> Indexer: stream.Encode(Heartbeat{PeerID_B, ...})
Indexer -> Indexer: CheckHeartbeat → UptimeTracker → BuildHeartbeatResponse
Indexer --> NodeB: HeartbeatResponse{...}
== GC on the indexer side ==
note over Indexer: GC ticker 30s — gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ AfterDelete(pid, name, did) outside the lock\n→ publishNameEvent(NameIndexDelete, ...)\nFillRate recalculated automatically
@enduml
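The node-side score computed in the response-processing step can be written straight from the weights in the diagram. A sketch follows; the struct shape and helper names are illustrative, only the weights, the bornAt penalty and the minScore ramp come from the diagram.

```go
package p2p

import "time"

// ScoreInputs carries the seven dimensions listed in the heartbeat diagram,
// each already normalised to [0,1].
type ScoreInputs struct {
	UptimeRatio        float64
	ChallengeAccuracy  float64
	LatencyScore       float64 // max(0, 1 - RTT/(BaseRoundTrip*10))
	FillScore          float64 // 1 - fillRate
	FillConsistency    float64
	WitnessConsistency float64
	DHTSuccessRate     float64
	BornAtChanges      int
}

// computeNodeSideScore applies the weights from the diagram and the bornAt
// penalty: each bornAt change removes 30% of the score, floored at zero.
func computeNodeSideScore(in ScoreInputs) float64 {
	s := 0.20*in.UptimeRatio +
		0.20*in.ChallengeAccuracy +
		0.15*in.LatencyScore +
		0.10*in.FillScore +
		0.10*in.FillConsistency +
		0.15*in.WitnessConsistency +
		0.10*in.DHTSuccessRate
	penalty := 1 - 0.30*float64(in.BornAtChanges)
	if penalty < 0 {
		penalty = 0
	}
	return s * 100 * penalty
}

// dynamicMinScore grows from 20 to 80 over the first 24h of the relationship,
// matching minScore = clamp(20 + 60*(age.Hours/24), 20, 80).
func dynamicMinScore(age time.Duration) float64 {
	m := 20 + 60*(age.Hours()/24)
	if m < 20 {
		return 20
	}
	if m > 80 {
		return 80
	}
	return m
}
```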

View File

@@ -0,0 +1,47 @@
@startuml
title Indexer — Peer A publishing, Peer B publishing (handleNodePublish → DHT)
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService (shared)" as Indexer
participant "DHT Kademlia" as DHT
note over NodeA: Start after claimInfo or on TTL refresh
par Peer A publishes its PeerRecord
NodeA -> Indexer: TempStream /opencloud/record/publish/1.0
NodeA -> Indexer: stream.Encode(PeerRecord A {DID_A, PeerID_A, PubKey_A, Expiry, Sig_A})
Indexer -> Indexer: Verify sig_A (rebuilds minimal rec, pubKey_A.Verify)
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_A] exists
alt A has an active Heartbeat
Indexer -> Indexer: StreamRecord A → DID_A, Record=PeerRecord A, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_A, PeerRecord A JSON)
Indexer -> DHT: PutValue("/name/"+name_A, DID_A)
Indexer -> DHT: PutValue("/peer/"+peer_id_A, DID_A)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeA: (error "no heartbeat", stream closed)
end
else Peer B publishes its PeerRecord
NodeB -> Indexer: TempStream /opencloud/record/publish/1.0
NodeB -> Indexer: stream.Encode(PeerRecord B {DID_B, PeerID_B, PubKey_B, Expiry, Sig_B})
Indexer -> Indexer: Verify sig_B
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_B] exists
alt B has an active Heartbeat
Indexer -> Indexer: StreamRecord B → DID_B, Record=PeerRecord B, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_B, PeerRecord B JSON)
Indexer -> DHT: PutValue("/name/"+name_B, DID_B)
Indexer -> DHT: PutValue("/peer/"+peer_id_B, DID_B)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeB: (error "no heartbeat", stream close)
end
end par
note over DHT: DHT now holds\n"/node/DID_A" and "/node/DID_B"
@enduml
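The publish path always writes the same three DHT namespaces. A small sketch of that step, with a hypothetical `ValueStore` interface standing in for the Kademlia DHT handle:

```go
package p2p

import "context"

// ValueStore is the minimal DHT surface used here (PutValue-style API).
type ValueStore interface {
	PutValue(ctx context.Context, key string, value []byte) error
}

// publishRecordToDHT writes the record under the three namespaces shown in
// the diagram: /node/<DID> → full record, /name/<name> → DID, /peer/<PeerID> → DID.
func publishRecordToDHT(ctx context.Context, dht ValueStore, did, name, peerID string, record []byte) error {
	if err := dht.PutValue(ctx, "/node/"+did, record); err != nil {
		return err
	}
	if err := dht.PutValue(ctx, "/name/"+name, []byte(did)); err != nil {
		return err
	}
	return dht.PutValue(ctx, "/peer/"+peerID, []byte(did))
}
```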

View File

@@ -0,0 +1,51 @@
@startuml
title Indexer — Peer A discovers Peer B (GetPeerRecord + handleNodeGet)
participant "NATS A" as NATSA
participant "DB Peer A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "IndexerService (shared)" as Indexer
participant "DHT Kademlia" as DHT
participant "NATS A (return)" as NATSA2
note over NodeA: Trigger : NATS PB_SEARCH PEER\nor callback SubscribeToSearch
NodeA -> DBA: (PEER).Search(DID_B or PeerID_B)
DBA --> NodeA: Local Peer B (if known) → resolve DID_B + PeerID_B\nor use the search value
loop For every indexer bound to Peer A
NodeA -> Indexer: TempStream /opencloud/record/get/1.0 -> streamAI
NodeA -> Indexer: streamAI.Encode(GetValue{Key: DID_B, PeerID: PeerID_B})
Indexer -> Indexer: key = "/node/" + DID_B
Indexer -> DHT: SearchValue(ctx 10s, "/node/"+DID_B)
DHT --> Indexer: channel of bytes (PeerRecord B)
loop For every result from the DHT
Indexer -> Indexer: read → PeerRecord B
alt PeerRecord.PeerID == PeerID_B
Indexer -> Indexer: resp.Found=true, resp.Records[PeerID_B]=PeerRecord B
Indexer -> Indexer: StreamRecord B.LastSeen = now (if active heartbeat)
end
end
Indexer -> NodeA: streamAI.Encode(GetResponse{Found:true, Records:{PeerID_B: PeerRecord B}})
end
loop For every PeerRecord found
NodeA -> NodeA: rec.Verify() → valid B signature
NodeA -> NodeA: rec.ExtractPeer(ourDID_A, DID_B, pubKey_B)
alt ourDID_A == DID_B (it is our own entry)
note over NodeA: Republish to refresh TTL
NodeA -> Indexer: publishPeerRecord(rec) [refresh 2 min]
end
NodeA -> NATSA2: SetNATSPub(CREATE_RESOURCE, {PEER, Peer B JSON,\nSearchAttr:"peer_id"})
NATSA2 -> DBA: Upsert Peer B in DB A
DBA --> NATSA2: ok
end
NodeA --> NodeA: []*peer.Peer → [Peer B]
@enduml

View File

@@ -0,0 +1,49 @@
@startuml native_registration
title Native Indexer — Indexer Subscription (StartNativeRegistration)
participant "Indexer A" as IndexerA
participant "Indexer B" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "GossipSub (oc-indexer-registry)" as PubSub
note over IndexerA,IndexerB: At start + every 60s (RecommendedHeartbeatInterval)\\nStartNativeRegistration → RegisterWithNative
par Indexer A subscribe
IndexerA -> IndexerA: fillRateFn()\\n= len(StreamRecords[HB]) / maxNodes
IndexerA -> IndexerA: Build IndexerRegistration{\\n PeerID_A, Addr_A,\\n Timestamp=now.UnixNano(),\\n FillRate=fillRateFn(),\\n PubKey, Signature\\n}\\nreg.Sign(h)
IndexerA -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerA -> Native: stream.Encode(IndexerRegistration A)
Native -> Native: reg.Verify() — verify signature
Native -> Native: liveIndexerEntry{\\n PeerID_A, Addr_A,\\n ExpiresAt = now + IndexerTTL (90s),\\n FillRate = reg.FillRate,\\n PubKey, Signature\\n}
Native -> Native: liveIndexers[PeerID_A] = entry A
Native -> Native: knownPeerIDs[PeerID_A] = Addr_A
Native -> DHT: PutValue("/indexer/"+PeerID_A, entry A)
DHT --> Native: ok
Native -> PubSub: topic.Publish([]byte(PeerID_A))
note over PubSub: Gossip to other Natives\\n→ it adds PeerID_A to knownPeerIDs\\n→ refresh DHT next tick (30s)
IndexerA -> Native: stream.Close()
else Indexer B subscribe
IndexerB -> IndexerB: fillRateFn() + reg.Sign(h)
IndexerB -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerB -> Native: stream.Encode(IndexerRegistration B)
Native -> Native: reg.Verify() + liveIndexerEntry{FillRate=reg.FillRate, ExpiresAt=now+90s}
Native -> Native: liveIndexers[PeerID_B] = entry B
Native -> DHT: PutValue("/indexer/"+PeerID_B, entry B)
Native -> PubSub: topic.Publish([]byte(PeerID_B))
IndexerB -> Native: stream.Close()
end par
note over Native: liveIndexers = {PeerID_A: {FillRate:0.3}, PeerID_B: {FillRate:0.6}}\\nTTL 90s — IndexerTTL
note over Native: Explicit unsubscribe on stop :\\nUnregisterFromNative → /opencloud/native/unsubscribe/1.0\\nNative closes everything immediately.
@enduml
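The native's `liveIndexers` map is essentially a TTL registry refreshed by each subscription. A sketch of that behaviour, with the names (`liveRegistry`, `Register`, `Alive`) chosen for illustration:

```go
package p2p

import (
	"sync"
	"time"
)

// liveIndexerEntry mirrors what the native keeps per registration: address,
// fill rate and an absolute expiry (IndexerTTL = 90s in the diagram).
type liveIndexerEntry struct {
	Addr      string
	FillRate  float64
	ExpiresAt time.Time
}

// liveRegistry is a minimal TTL map: Register on every subscribe, filter on
// read, so entries vanish naturally if an indexer stops re-registering.
type liveRegistry struct {
	mu      sync.Mutex
	entries map[string]liveIndexerEntry // keyed by PeerID
}

func (r *liveRegistry) Register(peerID, addr string, fillRate float64, ttl time.Duration) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.entries == nil {
		r.entries = make(map[string]liveIndexerEntry)
	}
	r.entries[peerID] = liveIndexerEntry{Addr: addr, FillRate: fillRate, ExpiresAt: time.Now().Add(ttl)}
}

// Alive returns only the entries whose TTL has not elapsed.
func (r *liveRegistry) Alive() map[string]liveIndexerEntry {
	r.mu.Lock()
	defer r.mu.Unlock()
	out := make(map[string]liveIndexerEntry)
	for id, e := range r.entries {
		if time.Now().Before(e.ExpiresAt) {
			out[id] = e
		}
	}
	return out
}
```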

View File

@@ -0,0 +1,70 @@
@startuml native_get_consensus
title Native — ConnectToNatives : fetch pool + Phase 1 + Phase 2
participant "Node / Indexer\\n(appelant)" as Caller
participant "Native A" as NA
participant "Native B" as NB
participant "Indexer A\\n(stable voter)" as IA
note over Caller: NativeIndexerAddresses configured\\nConnectToNatives() called from ConnectToIndexers
== Step 1 : heartbeat to the native mesh (nativeHeartbeatOnce) ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0
== Step 2 : parallel fetch pool (timeout 6s) ==
par fetchIndexersFromNative — parallel
Caller -> NA: NewStream /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: maxIndexer, From: PeerID}
NA -> NA: reachableLiveIndexers()\\nsorted by w(F) = fillRate×(1−fillRate) desc
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
else
Caller -> NB: NewStream /opencloud/native/indexers/1.0
NB -> NB: reachableLiveIndexers()
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
end par
note over Caller: Merge → candidates=[IA,IB]\\nisFallback=false
alt isFallback=true (natives offered themselves as fallback indexer)
note over Caller: resolvePool : consensus skipped\\nadmittedAt = zero\\nStaticIndexers = {native_addr}
else isFallback=false → Phase 1 + Phase 2
== Phase 1 — clientSideConsensus (timeout 3s per native, 4s total) ==
par Parallel consensus
Caller -> NA: NewStream /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: compare with clean liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
else
Caller -> NB: NewStream /opencloud/native/consensus/1.0
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
end par
note over Caller: IA → 2/2 votes → confirmed ✓\\nIB → 1/2 votes → rejected ✗\\nIC → suggestion → round 2 if confirmed < maxIndexer
alt confirmed < maxIndexer && available suggestions
note over Caller: Round 2 — rechallenge with confirmed + sample(suggestions)\\nclientSideConsensus([IA, IC])
end
note over Caller: admittedAt = time.Now()
== Phase 2 — indexerLivenessVote (timeout 3s per voter, 4s total) ==
note over Caller: Search for stable voters among subscribed indexers\\nAdmittedAt != zero && age >= MinStableAge (2min)
alt Stable voters are available
par Phase 2 parallel
Caller -> IA: NewStream /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IA]}
IA -> IA: StreamRecords[ProtocolHB][candidate]\\ntime.Since(LastSeen) <= 120s && LastScore >= 30.0
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
end par
note over Caller: IA confirmed alive by quorum > 0.5\\npool = {IA}
else No stable voters (startup)
note over Caller: Phase 1 result kept directly\\n(no indexer has reached MinStableAge)
end
== Pool replacement ==
Caller -> Caller: replaceStaticIndexers(pool, admittedAt)\\nStaticIndexerMeta[IA].AdmittedAt = admittedAt
end
== Step 3 : heartbeat to the indexer pool (ConnectToIndexers) ==
Caller -> Caller: SendHeartbeat /opencloud/heartbeat/1.0\\nto StaticIndexers
@enduml
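Two small rules drive the pool selection above: the fill-rate weight w(F) = F×(1−F) used to rank candidates, and the strict-majority quorum of Phase 1. A sketch of both, under assumed function names:

```go
package p2p

import "sort"

// selectByFillRate orders candidates by w(F) = F*(1-F), which favours
// indexers that are neither empty nor nearly full; unknown fill rates
// default to the 0.5 prior mentioned in the diagrams.
func selectByFillRate(fillRates map[string]float64, candidates []string, max int) []string {
	w := func(id string) float64 {
		f, ok := fillRates[id]
		if !ok {
			f = 0.5
		}
		return f * (1 - f)
	}
	sorted := append([]string(nil), candidates...)
	sort.Slice(sorted, func(i, j int) bool { return w(sorted[i]) > w(sorted[j]) })
	if len(sorted) > max {
		sorted = sorted[:max]
	}
	return sorted
}

// confirmedByQuorum keeps a candidate only if strictly more than half of the
// natives that answered voted it trusted (Phase 1 rule in the diagram).
func confirmedByQuorum(votes map[string]int, voters int) []string {
	var confirmed []string
	for id, n := range votes {
		if voters > 0 && float64(n)/float64(voters) > 0.5 {
			confirmed = append(confirmed, id)
		}
	}
	return confirmed
}
```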

View File

@@ -0,0 +1,38 @@
@startuml
title NATS — CREATE_RESOURCE : Peer A creates/updates Peer B (on-demand connection)
participant "App Peer A (oc-api)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "DB Peer A (oc-lib)" as DBA
note over AppA: Peer B has been discovered\n(via an indexer or manually)
AppA -> NATSA: Publish(CREATE_RESOURCE, {\n FromApp:"oc-api",\n Datatype:PEER,\n Payload: Peer B {StreamAddress_B, Relation:PARTNER}\n})
NATSA -> NodeA: ListenNATS callback → CREATE_RESOURCE
NodeA -> NodeA: json.Unmarshal(payload) → peer.Peer B
NodeA -> NodeA: if peer == self ? → skip
alt peer B.Relation == PARTNER
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_CREATE, PEER, payload)
note over StreamA: No permanent heartbeat.\nOn-demand connection: open a stream,\nsend the event, close it or let it expire.
StreamA -> StreamA: PublishCommon(PEER, user, B.PeerID,\n ProtocolUpdateResource, selfPeerJSON)
StreamA -> NodeB: TempStream /opencloud/resource/update/1.0\n(short TTL, closed after sending)
StreamA -> NodeB: stream.Encode(Event{from, datatype, payload})
NodeB --> StreamA: (application-side processing)
else peer B.Relation != PARTNER (revocation / blacklist)
note over NodeA: Close all existing streams to Peer B
loop For every active stream to PeerID_B
NodeA -> StreamA: streams[proto][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(streams[proto], PeerID_B)
end
end
NodeA -> DBA: (no direct write — only the source app manages the DB)
@enduml

View File

@@ -0,0 +1,73 @@
@startuml
title NATS — PROPALGATION_EVENT : Peer A propagates to partner Peer B (lookup & CRUD)
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node Partner B" as PeerB
participant "Node C" as PeerC
participant "NATS B" as NATSB
participant "DB Pair B (oc-lib)" as DBB
note over App: only our proper resource (db data) can be propalgate : creator_id==self
AppA -> NATSA: Publish(PROPALGATION_EVENT, {Action, DataType, Payload})
NATSA -> NodeA: ListenNATS callback → PROPALGATION_EVENT
NodeA -> NodeA: propagated from itself ? → no, continue
NodeA -> NodeA: json.Unmarshal → PropalgationMessage{Action, DataType, Payload}
alt Action == PB_DELETE
NodeA -> StreamA: ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0
NodeB -> NodeB: handleEventFromPartner(evt, ProtocolDeleteResource)
NodeB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete resource in DB B
else Action == PB_UPDATE (via ProtocolUpdateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_UPDATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/update/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Upsert resource in DB B
else Action == PB_CREATE (via ProtocolCreateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_UPDATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/create/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Create resource in DB B
else Action == PB_CONSIDERS (a "considers" acknowledging a previous action, such as planning or creating a resource)
NodeA -> NodeA: Unmarshal → executionConsidersPayload{PeerIDs:[PeerID_B, ...]}
loop For every peer_id targeted
NodeA -> StreamA: PublishCommon(dt, user, PeerID_B, ProtocolConsidersResource, payload)
StreamA -> NodeB: write → /opencloud/resource/considers/1.0
NodeB -> NodeB: passConsidering(evt)
NodeB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_CONSIDERS, dt, payload})
NATSB -> DBB: (handled by the app that emitted the previous action, on NATS B)
end
else Action == PB_CLOSE_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)
else Action == PB_SEARCH + DataType == PEER
NodeA -> NodeA: read → {search: "..."}
NodeA -> NodeA: GetPeerRecord(ctx, search)
note over NodeA: Resolved via DB A or Indexer + DHT
NodeA -> NATSA: SetNATSPub(SEARCH_EVENT, {PEER, PeerRecord JSON})
NATSA -> NATSA: (AppA retrieves the results)
else Action == PB_SEARCH + other DataType
NodeA -> NodeA: read → {type:"all"|"known"|"partner", search:"..."}
NodeA -> NodeA: PubSubService.SearchPublishEvent(ctx, dt, type, user, search)
note over NodeA: See the pubsub_search & stream_search diagrams
end
@enduml

View File

@@ -0,0 +1,58 @@
@startuml
title PubSub — Gossip Global search (type "all") : Peer A searching, Peer B answering
participant "App UI A" as UIA
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA
participant "GossipSub libp2p (mesh)" as GossipSub
participant "Node B" as NodeB
participant "PubSubService B" as PubSubB
participant "DB Peer B (oc-lib)" as DBB
participant "StreamService B" as StreamB
UIA -> AppA: websocket subscription, sending {type:"all", search:"search"} in query
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"all", search:"search"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "all")
NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "all", user, "search")
PubSubA -> PubSubA: publishEvent(PB_SEARCH, user, {search:"search"})
PubSubA -> PubSubA: priv_A.Sign(event body) → sig
PubSubA -> PubSubA: Build Event{Type:"search", From:DID_A, Payload:{search:"search"}, Sig}
PubSubA -> GossipSub: topic.Join("search")
PubSubA -> GossipSub: topic.Publish(ctx, json(Event))
GossipSub --> NodeB: Propagate message (gossip mesh)
NodeB -> PubSubB: subscribeEvents listens on topic "search#"
PubSubB -> PubSubB: read → Event{From: DID_A}
PubSubB -> NodeB: GetPeerRecord(ctx, DID_A)
note over NodeB: Resolve Peer A per DB B or ask to Indexer
NodeB --> PubSubB: Peer A {PublicKey_A, Relation, ...}
PubSubB -> PubSubB: event.Verify(Peer A) → valid sig_A
PubSubB -> PubSubB: handleEventSearch(ctx, evt, PB_SEARCH)
PubSubB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(COMPUTE + STORAGE + ..., filters{creator=self, access=PUBLIC OR partnerships[PeerID_A]}, search="search")
DBB --> StreamB: [Resource1, Resource2, ...]
loop For every matching resource (only our own resources, creator_id=self_did)
StreamB -> StreamB: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamB -> StreamA: NewStream /opencloud/resource/search/1.0
StreamB -> StreamA: stream.Encode(Event{Type:search, From:DID_B, DataType, Payload:resource})
end
StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Search results from Peer B
AppA -> UIA: emit on websocket
@enduml
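Peer B only answers once the gossip event's signature checks out against the PeerRecord it resolved for the emitter. A minimal sketch of that verification, assuming Ed25519 signatures over the JSON body with the signature field stripped:

```go
package p2p

import (
	"crypto/ed25519"
	"encoding/json"
)

// Event mirrors the gossip search event in the diagram: the body is signed
// by the emitter, and receivers verify it against the resolved PeerRecord
// of evt.From before answering.
type Event struct {
	Type    string          `json:"type"`
	From    string          `json:"from"` // DID of the emitter
	Payload json.RawMessage `json:"payload"`
	Sig     []byte          `json:"sig,omitempty"`
}

// Verify recomputes the signed body (event with Sig stripped) and checks it
// against the emitter's public key, as Peer B does before SendResponse.
func (e Event) Verify(pub ed25519.PublicKey) bool {
	sig := e.Sig
	e.Sig = nil
	body, err := json.Marshal(e)
	if err != nil {
		return false
	}
	return ed25519.Verify(pub, body, sig)
}
```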

View File

@@ -0,0 +1,54 @@
@startuml
title Stream — Direct search (type "known"/"partner") : Peer A → Peer B
participant "App UI A" as UIA
participant "App Pair A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "PubSubService A" as PubSubA
participant "StreamService A" as StreamA
participant "DB Pair A (oc-lib)" as DBA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Pair B (oc-lib)" as DBB
UIA -> AppA: websocket subscription, sending {type:"all", search:"search"} in query
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"partner", search:"gpu"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "partner")
NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "partner", user, "gpu")
PubSubA -> StreamA: SearchPartnersPublishEvent(dt, user, "gpu")
StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]
loop For every partner peer (Peer B)
StreamA -> StreamA: write(PeerID_B, addr_B, dt, user, payload, ProtocolSearchResource)
StreamA -> NodeB: TempStream /opencloud/resource/search/1.0
StreamA -> NodeB: stream.Encode(Event{Type:search, From:DID_A, DataType, Payload:{search:"gpu"}})
NodeB -> StreamB: HandleResponse(stream) → readLoop
StreamB -> StreamB: handleEvent(ProtocolSearchResource, evt)
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolSearchResource)
alt evt.DataType == -1 (all resources)
StreamB -> DBB: Search(PEER, evt.From=DID_A)
note over StreamB: Local resolution (DB) or GetPeerRecord (indexer path)
StreamB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(ALL_RESOURCES, filter{creator=B + public OR partner A + search:"gpu"})
DBB --> StreamB: [Resource1, Resource2, ...]
else evt.DataType specified
StreamB -> DBB: Search(DataType, filter{creator=B + access + search:"gpu"})
DBB --> StreamB: [Resource1, ...]
end
loop For every resource
StreamB -> StreamA: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Peer B results
AppA -> UIA: emit on websocket
end
end
@enduml

View File

@@ -0,0 +1,60 @@
@startuml
title Stream — Partner heartbeat and CRUD propagation Peer A ↔ Peer B
participant "DB Peer A (oc-lib)" as DBA
participant "StreamService A" as StreamA
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "NATS B" as NATSB
participant "DB Pair B (oc-lib)" as DBB
participant "NATS A" as NATSA
note over StreamA: Démarrage → connectToPartners()
StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]
StreamA -> NodeB: Connect (libp2p)
StreamA -> NodeB: NewStream /opencloud/resource/heartbeat/partner/1.0
StreamA -> NodeB: json.Encode(Heartbeat{Name_A, DID_A, PeerID_A, IndexersBinded_A})
NodeB -> StreamB: HandlePartnerHeartbeat(stream)
StreamB -> StreamB: CheckHeartbeat → bandwidth challenge
StreamB -> StreamA: Echo(payload)
StreamB -> StreamB: streams[ProtocolHeartbeatPartner][PeerID_A] = {DID_A, Expiry=now+10s}
StreamA -> StreamA: streams[ProtocolHeartbeatPartner][PeerID_B] = {DID_B, Expiry=now+10s}
note over StreamA,StreamB: Long-lived partner stream established\nGC every 8s (StreamService A)\nGC every 30s (StreamService B)
note over NATSA: Peer A receives PROPALGATION_EVENT{PB_DELETE, dt:"storage", payload:res}
NATSA -> NodeA: ListenNATS → ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_DELETE, dt_storage, user, payload)
alt dt == PEER (partner relation update)
StreamA -> StreamA: json.Unmarshal → peer.Peer B updated
alt B.Relation == PARTNER
StreamA -> NodeB: ConnectToPartner(B.StreamAddress)
note over StreamA,NodeB: Heartbeat reconnection if the relation is upgraded
else B.Relation != PARTNER
loop All protocols
StreamA -> StreamA: delete(streams[proto][PeerID_B])
StreamA -> NodeB: (streams closed)
end
end
else dt != PEER (ordinary resource)
StreamA -> DBA: Search(PEER, PARTNER) → [Peer B, ...]
loop For each partner protocol (Create/Update/Delete)
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0
NodeB -> StreamB: HandleResponse → readLoop
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolDeleteResource)
StreamB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete resource in DB B
end
end
@enduml

View File

@@ -0,0 +1,51 @@
@startuml
title Stream — Planner session : Peer A requests Peer B's plan
participant "App Peer A (oc-booking)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Pair B (oc-lib)" as DBB
participant "NATS B" as NATSB
' Planner session opening
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_PLANNER, peer_id:PeerID_B, payload:{}})
NATSA -> NodeA: ListenNATS → PB_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B, payload: {}}
NodeA -> StreamA: PublishCommon(nil, user, PeerID_B, ProtocolSendPlanner, {})
note over StreamA: WaitResponse=true, TTL=24h\nLong-lived stream to Peer B
StreamA -> NodeB: TempStream /opencloud/resource/planner/1.0
StreamA -> NodeB: json.Encode(Event{Type:planner, From:DID_A, Payload:{}})
NodeB -> StreamB: HandleResponse → readLoop(ProtocolSendPlanner)
StreamB -> StreamB: handleEvent(ProtocolSendPlanner, evt)
StreamB -> StreamB: sendPlanner(evt)
alt evt.Payload empty (initial request)
StreamB -> DBB: planner.GenerateShallow(AdminRequest)
DBB --> StreamB: plan (Peer B's shallow booking plan)
StreamB -> StreamA: PublishCommon(nil, user, DID_A, ProtocolSendPlanner, planJSON)
StreamA -> NodeA: json.Encode(Event{B's plan})
NodeA -> NATSA: (forwarded to AppA via SEARCH_EVENT or PLANNER event)
NATSA -> AppA: Peer B's plan
else evt.Payload not empty (planner update)
StreamB -> StreamB: m["peer_id"] = evt.From (DID_A)
StreamB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_PLANNER, peer_id:DID_A, payload:plan})
NATSB -> DBB: (oc-booking processes the plan on NATS B)
end
' Planner session closing
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_CLOSE_PLANNER, peer_id:PeerID_B})
NATSA -> NodeA: ListenNATS → PB_CLOSE_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Mu.Lock()
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)
NodeA -> StreamA: Mu.Unlock()
note over StreamA,NodeB: Planner stream closed — session ended
@enduml
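The close sequence boils down to deleting one entry from a mutex-guarded protocol → peer stream map. A sketch, with the table shape assumed from the diagram:

```go
package p2p

import (
	"io"
	"sync"
)

// streamTable is the shape suggested by the close sequence above:
// protocol → peer → open stream, guarded by a single mutex.
type streamTable struct {
	mu      sync.Mutex
	streams map[string]map[string]io.Closer // proto → peerID → stream
}

// closePlanner closes and forgets the long-lived planner stream to one peer,
// which is what the PB_CLOSE_PLANNER handler does in the diagram.
func (t *streamTable) closePlanner(proto, peerID string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if peers, ok := t.streams[proto]; ok {
		if s, ok := peers[peerID]; ok {
			_ = s.Close()
			delete(peers, peerID)
		}
	}
}
```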

View File

@@ -0,0 +1,61 @@
@startuml
title Native Indexer — Background loops (offload, DHT refresh, stream GC)
participant "Indexer A (registered)" as IndexerA
participant "Indexer B (registered)" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "Node A (responsible peer)" as NodeA
note over Native: runOffloadLoop — every 30s
loop Every 30s
Native -> Native: len(responsiblePeers) > 0 ?
note over Native: responsiblePeers = peers for which\nthe native self-delegated (no indexer available)
alt Some responsible peers exist (e.g. Node A)
Native -> Native: reachableLiveIndexers()
note over Native: Filters liveIndexers by TTL\npings PeerIsAlive for each candidate
alt Indexers A and B are now reachable
Native -> Native: responsiblePeers = {} (releases Node A and the others)
note over Native: Node A will reconnect\non its next ConnectToNatives
else Still no indexer
note over Native: Node A stays under the native's responsibility
end
end
end
note over Native: refreshIndexersFromDHT — every 30s
loop Every 30s
Native -> Native: Collect all knownPeerIDs\n= {PeerID_A, PeerID_B, ...}
loop For every known PeerID
Native -> Native: liveIndexers[PeerID] still fresh ?
alt Entry missing or expired
Native -> DHT: SearchValue(ctx 5s, "/indexer/"+PeerID)
DHT --> Native: channel of bytes
loop For every DHT result
Native -> Native: Unmarshal → liveIndexerEntry
Native -> Native: Keep the best one (most recent valid ExpiresAt)
end
Native -> Native: liveIndexers[PeerID] = best entry
note over Native: "native: refreshed indexer from DHT"
end
end
end
note over Native: LongLivedStreamRecordedService GC — every 30s
loop Every 30s
Native -> Native: gc() — lock StreamRecords[Heartbeat]
loop For every StreamRecord (Indexer A, B, ...)
Native -> Native: now > rec.Expiry ?\nOR timeSince(LastSeen) > 2×remaining TTL ?
alt Peer expired (e.g. Indexer B gone)
Native -> Native: Remove Indexer B from ALL protocol maps
note over Native: Heartbeat stream closed\nliveIndexers[PeerID_B] will expire naturally
end
end
end
note over IndexerA: Indexer A keeps heartbeating normally\nand stays in StreamRecords + liveIndexers
@enduml

View File

@@ -0,0 +1,49 @@
@startuml 15_archi_config_nominale
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center
title C1 — Nominal topology\n2 natives · 2 indexers · 2 nodes
package "Layer 1 — Native mesh" #E8F4FD {
component "Native A\n(authoritative hub)" as NA #AED6F1
component "Native B\n(authoritative hub)" as NB #AED6F1
NA <--> NB : heartbeat /opencloud/heartbeat/1.0 (20s)\n+ gossip PubSub oc-indexer-registry
}
package "Couche 2 — Indexeurs" #E9F7EF {
component "Indexer A\n(DHT server)" as IA #A9DFBF
component "Indexer B\n(DHT server)" as IB #A9DFBF
}
package "Couche 3 — Nœuds" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}
' Registrations (one-shot, 60s)
IA -[#117A65]--> NA : signed subscribe (60s)\n/opencloud/native/subscribe/1.0
IA -[#117A65]--> NB : signed subscribe (60s)
IB -[#117A65]--> NA : signed subscribe (60s)
IB -[#117A65]--> NB : signed subscribe (60s)
' Indexer → native heartbeats (long-lived, 20s)
IA -[#27AE60]..> NA : heartbeat (20s)
IA -[#27AE60]..> NB : heartbeat (20s)
IB -[#27AE60]..> NA : heartbeat (20s)
IB -[#27AE60]..> NB : heartbeat (20s)
' Node → indexer heartbeats (long-lived, 20s)
N1 -[#E67E22]--> IA : heartbeat long-lived (20s)\n/opencloud/heartbeat/1.0
N1 -[#E67E22]--> IB : heartbeat long-lived (20s)
N2 -[#E67E22]--> IA : heartbeat long-lived (20s)
N2 -[#E67E22]--> IB : heartbeat long-lived (20s)
note as Legend
Legend :
──► one-shot registration (signed)
···► long-lived heartbeat (20s)
──► node → indexer heartbeat (20s)
end note
@enduml

View File

@@ -0,0 +1,38 @@
@startuml 16_archi_config_seed
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center
title C2 — Seed mode (no native)\nIndexerAddresses only · AdmittedAt = zero
package "Layer 2 — Seed indexers" #E9F7EF {
component "Indexer A\n(seed, AdmittedAt=0)" as IA #A9DFBF
component "Indexer B\n(seed, AdmittedAt=0)" as IB #A9DFBF
}
package "Couche 3 — Nœuds" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}
note as NNative #FFDDDD
No native configured.
AdmittedAt = zero → IsStableVoter() = false
Phase 2 without voters : Phase 1 kept directly.
Risk D20 : trust circularity (seeds validate each other).
end note
' Node → seed indexer heartbeats
N1 -[#E67E22]--> IA : heartbeat long-lived (20s)
N1 -[#E67E22]--> IB : heartbeat long-lived (20s)
N2 -[#E67E22]--> IA : heartbeat long-lived (20s)
N2 -[#E67E22]--> IB : heartbeat long-lived (20s)
note bottom of IA
After 2s : async goroutine
fetchNativeFromIndexers → ?
If a native is found → ConnectToNatives (upgrade to C1)
If not → pure indexer mode (D20 active)
end note
@enduml

View File

@@ -0,0 +1,63 @@
@startuml 17_startup_consensus_phase1_phase2
title Startup with natives — Phase 1 (admission) + Phase 2 (liveness)
participant "Node / Indexer\n(caller)" as Caller
participant "Native A" as NA
participant "Native B" as NB
participant "Indexer A" as IA
participant "Indexer B" as IB
note over Caller: ConnectToNatives()\nNativeIndexerAddresses configured
== Step 0 : heartbeat to the native mesh ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
== Step 1 : parallel fetch pool ==
par Parallel fetch (timeout 6s)
Caller -> NA: GET /opencloud/native/indexers/1.0\nGetIndexersRequest{Count: max, FillRates requested}
NA -> NA: reachableLiveIndexers()\nsorted by w(F) = fillRate×(1−fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
else
Caller -> NB: GET /opencloud/native/indexers/1.0
NB -> NB: reachableLiveIndexers()
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
end par
note over Caller: Merge + dedup → candidates = [IA, IB]\nisFallback = false
== Step 2a : Phase 1 — Native admission (clientSideConsensus) ==
par Parallel consensus (timeout 3s per native, 4s total)
Caller -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check against liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
else
Caller -> NB: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NB -> NB: cross-check against liveIndexers
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
end par
note over Caller: IA → 2/2 votes → confirmed ✓\nIB → 1/2 votes → rejected ✗\nadmittedAt = time.Now()
== Step 2b : Phase 2 — Liveness vote (indexerLivenessVote) ==
note over Caller: Looks for stable voters in StaticIndexerMeta\n(AdmittedAt != zero, age >= MinStableAge=2min)
alt Stable voters available
par Phase 2 parallel (timeout 3s)
Caller -> IA: /opencloud/indexer/consensus/1.0\nIndexerConsensusRequest{Candidates:[IA]}
IA -> IA: check StreamRecords[ProtocolHB][candidate]\nLastSeen ≤ 2×60s && LastScore ≥ 30
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
end par
note over Caller: IA confirmed alive by quorum > 0.5
else No stable voter (first startup)
note over Caller: Phase 1 result kept directly\n(no voter has reached MinStableAge)
end
== Step 3 : StaticIndexers replacement ==
Caller -> Caller: replaceStaticIndexers(pool={IA}, admittedAt)\nStaticIndexerMeta[IA].AdmittedAt = time.Now()
== Step 4 : long-lived heartbeat to the pool ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over Caller: Pool active. NudgeIndexerHeartbeat()
@enduml

View File

@@ -0,0 +1,51 @@
@startuml 18_startup_seed_discovers_native
title C2 → C1 — A seed discovers a native (async upgrade)
participant "Node / Indexer\\n(seed mode)" as Caller
participant "Indexer A\\n(seed)" as IA
participant "Indexer B\\n(seed)" as IB
participant "Native A\\n(discovered)" as NA
note over Caller: Startup without NativeIndexerAddresses\\nStaticIndexers = [IA, IB] (AdmittedAt=0)
== Initial seed phase ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over Caller: Pool active in seed mode.\\nIsStableVoter() = false (AdmittedAt=0)\\nPhase 2 has no voters → Phase 1 kept.
== Async goroutine after 2s ==
note over Caller: time.Sleep(2s)\\nfetchNativeFromIndexers()
Caller -> IA: GET /opencloud/indexer/natives/1.0
IA --> Caller: GetNativesResponse{Natives:[NA]}
note over Caller: Native discovered : NA\\nCall ConnectToNatives([NA])
== Upgrade to nominal mode (ConnectToNatives) ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
par Fetch pool from the native (timeout 6s)
Caller -> NA: GET /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\\nsorted by w(F) = fillRate×(1−fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.4, IB:0.6}}
end par
note over Caller: candidates = [IA, IB], isFallback = false
par Phase 1 consensus (timeout 3s)
Caller -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check against liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
end par
note over Caller: IA ✓ IB ✓ (1/1 vote)\\nadmittedAt = time.Now()
note over Caller: No stable voter (AdmittedAt was just set)\\nPhase 2 skipped → Phase 1 kept directly
== Pool replacement ==
Caller -> Caller: replaceStaticIndexers(pool={IA,IB}, admittedAt)\\nStaticIndexerMeta[IA].AdmittedAt = time.Now()\\nStaticIndexerMeta[IB].AdmittedAt = time.Now()
note over Caller: Pool upgraded in the shared StaticIndexers map.\\nThe existing heartbeat goroutine (started in seed mode)\\npicks up the new members on the next tick (20s).\\nNo new goroutine created.\\nIsStableVoter() becomes true after MinStableAge (2min).\\nD20 (seed circularity) eliminated.
@enduml

View File

@@ -0,0 +1,55 @@
@startuml failure_indexer_crash
title Indexer Failure → replenish from a Native
participant "Node" as N
participant "Indexer A (alive)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB
note over N: Active Pool : Indexers = [IA, IB]\\nActive Heartbeat long-lived from IA & IB
== IB Failure ==
IB ->x N: heartbeat fails (sendHeartbeat err)
note over N: doTick() in SendHeartbeat detects the failure\\n→ delete(Indexers[IB])\\n→ delete(IndexerMeta[IB])\\nThe single heartbeat goroutine keeps running
N -> N: go replenishIndexersFromNative(need=1)
note over N: Pool reduced to 1 indexer.\\nReplenish triggered in a goroutine.
== Replenish from natives ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\\n(IB absent because of an expired heartbeat)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.4,IC:0.2}}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[IA,IC]}
end par
note over N: Merge + dedup → candidates = [IA, IC]\\n(IA already in pool → IC is the new candidate)
par Consensus Phase 1 (timeout 4s)
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IC]}
end par
note over N: IC → 2/2 votes → admitted\\nadmittedAt = time.Now()
par Phase 2 — liveness vote (if stable voters are available)
N -> IA: /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par
note over N: IC confirmed alive → add to pool
N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over N: Pool restored to 2 indexers.
@enduml

View File

@@ -0,0 +1,51 @@
@startuml failure_indexers_native_falback
title Indexer failures → native IsSelfFallback
participant "Node" as N
participant "Indexer A (crashed)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB
note over N: Active Pool : Indexers = [IA, IB]
== Successive Failures on IA & IB ==
IA ->x N: heartbeat failure (sendHeartbeat err)
IB ->x N: heartbeat failure (sendHeartbeat err)
note over N: doTick() in SendHeartbeat detects the failures\\n→ delete(StaticIndexers[IA]), delete(StaticIndexers[IB])\\n→ delete(StaticIndexerMeta[IA/IB])\\nThe single heartbeat goroutine keeps running.
N -> N: go replenishIndexersFromNative(need=2)
== Replenish attempt — natives switch to self-fallback mode ==
par Fetch from natives (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers() → 0 alive indexers\\nFallback : includes itself (IsSelfFallback=true)
NA --> N: GetIndexersResponse{Indexers:[NA_addr], IsSelfFallback:true}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[NB_addr], IsSelfFallback:true}
end par
note over N: isFallback=true → resolvePool avoids consensus\\nadmittedAt = time.Time{} (zero)\\nStaticIndexers = {NA_addr} (native as fallback)
N -> NA: SendHeartbeat /opencloud/heartbeat/1.0\\n(native as temporary fallback indexer)
note over NA: responsiblePeers[N] registered.\\nrunOffloadLoop keeps looking for real indexers.
== IA comes back → runOffloadLoop on the native side ==
IA -> NA: /opencloud/native/subscribe/1.0\\nIndexerRegistration{FillRate: 0}
note over NA: liveIndexers[IA] updated.\\nrunOffloadLoop sees a real indexer available\\n→ N is released and will migrate to IA.
== Replenish on next heartbeat tick ==
N -> NA: GET /opencloud/native/indexers/1.0
NA --> N: GetIndexersResponse{Indexers:[IA], IsSelfFallback:false}
note over N: isFallback=false → Classic Phase 1 + Phase 2
N -> N: replaceStaticIndexers(pool={IA}, admittedAt)
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0
note over N: Pool restored. The native withdraws as fallback indexer.
@enduml

View File

@@ -0,0 +1,46 @@
@startuml failure_native_one_down
title Native failure, with one still alive
participant "Indexer A" as IA
participant "Indexer B" as IB
participant "Native A (crashed)" as NA
participant "Native B (alive)" as NB
participant "Node" as N
note over IA, NB: Initial state : IA and IB heartbeat to NA & NB
== Native A Failure ==
NA ->x IA: stream reset
NA ->x IB: stream reset
NA ->x N: stream reset (heartbeat Node → NA)
== Indexers side : replenishNativesFromPeers ==
note over IA: SendHeartbeat(NA) detects the reset\\nAfterDelete(NA)\\nStaticNatives = [NB] (still 1)
IA -> IA: replenishNativesFromPeers()\\nphase 1 : fetchNativeFromNatives
IA -> NB: GET /opencloud/native/peers/1.0
NB --> IA: GetPeersResponse{Peers:[NC]} /' new native if one known '/
alt NC available
IA -> NC: SendHeartbeat /opencloud/heartbeat/1.0\\nSubscribe /opencloud/native/subscribe/1.0
note over IA: StaticNatives = [NB, NC]\\nNative Pool restored.
else No native peer available
IA -> IA: fetchNativeFromIndexers()\\nAsk the indexers for their natives
IB --> IA: GetNativesResponse{Natives:[]} /' IB also only knows NB '/
note over IA: Impossible to find a 2nd native.\\nStaticNatives = [NB] (degraded but alive).
end
== Node side : indexer pool still alive ==
note over N: Node heartbeats to IA & IB.\\nThe NA failure does not affect the indexer pool.\\nFuture consensus only uses NB (1/1 vote = quorum OK).
N -> NB: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
note over N: Consensus 1/1 alive native → admitted.\\nConsensus floor automatically downgrades (majority of alive natives).
== NB side : heartbeat to NA fails ==
note over NB: EnsureNativePeers / SendHeartbeat to NA\\nfails (sendHeartbeat err)\\n→ delete(StaticNatives[NA])\\nreplenishNativesFromPeers(NA) triggered
note over NB: Native mesh downgraded to NB alone.\\nDegraded but functional.
@enduml

View File

@@ -0,0 +1,60 @@
@startuml 22_failure_both_natives
title F4 — Both natives down → pre-validated fallback pool
participant "Node" as N
participant "Indexer A\\n(alive)" as IA
participant "Indexer B\\n(alive)" as IB
participant "Native A\\n(crashed)" as NA
participant "Native B\\n(crashed)" as NB
note over N: Active pool : StaticIndexers = [IA, IB]\\nStaticNatives = [NA, NB]\\nAdmittedAt[IA] and AdmittedAt[IB] set (stable)
== Simultaneous failure of NA and NB ==
NA ->x N: stream reset
NB ->x N: stream reset
N -> N: AfterDelete(NA) + AfterDelete(NB)\\nStaticNatives = {} (empty)
== replenishNativesFromPeers (no result) ==
N -> N: fetchNativeFromNatives() → no live native
N -> IA: GET /opencloud/indexer/natives/1.0
IA --> N: GetNativesResponse{Natives:[NA,NB]}
note over N: NA and NB known but unreachable.\\nNo new native found.
== Fallback : indexer pool kept ==
note over N: isFallback = true\\nStaticIndexers kept as is [IA, IB]\\n(last validated pool with AdmittedAt != zero)\\nRisk D19 mitigated : native quorum = 0 → fallback accepted
note over N: IA and IB heartbeats continue normally.\\nIndexer pool operational without natives.
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (continues)
N -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (continues)
== retryLostNative (30s ticker) ==
loop every 30s
N -> N: retryLostNative()\\ntries to reconnect NA and NB
N -> NA: dial (failure)
N -> NB: dial (failure)
note over N: Retry without result.\\nIndexer pool kept in fallback mode.
end
== Natives come back ==
NA -> NA: restart
NB -> NB: restart
N -> NA: dial (success)
N -> NA: SendHeartbeat /opencloud/heartbeat/1.0
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA, NB] restored\\nisFallback = false
== Indexer pool re-consensus (optional) ==
par Phase 1 consensus
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA --> N: ConsensusResponse{Trusted:[IA,IB]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
end par
note over N: Pool [IA,IB] reconfirmed.\\nisFallback = false. AdmittedAt[IA,IB] refreshed.
@enduml

View File

@@ -0,0 +1,63 @@
@startuml 23_failure_native_plus_indexer
title F5 — Combined failure : 1 native + 1 indexer
participant "Node" as N
participant "Indexer A\\n(alive)" as IA
participant "Indexer B\\n(crashed)" as IB
participant "Native A\\n(alive)" as NA
participant "Native B\\n(crashed)" as NB
note over N: Nominal pool : StaticIndexers=[IA,IB], StaticNatives=[NA,NB]
== Simultaneous failures of NB + IB ==
NB ->x N: stream reset
IB ->x N: stream reset
N -> N: AfterDelete(NB) — StaticNatives = [NA]
N -> N: AfterDelete(IB) — StaticIndexers = [IA]
== Native replenish (1 alive) ==
N -> N: replenishNativesFromPeers()
N -> NA: GET /opencloud/native/peers/1.0
NA --> N: GetPeersResponse{Peers:[]} /' NB was the only peer, now gone '/
note over N: No alternative native.\\nStaticNatives = [NA] — degraded.
== Indexer replenish from NA ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers()\\n(IB absent — heartbeat expired)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.5,IC:0.3}}
end par
note over N: candidates = [IA, IC]
par Phase 1 consensus — only 1 live native (timeout 3s)
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
end par
note over N: IC → 1/1 vote → admitted (quorum over live natives)\\nadmittedAt = time.Now()
par Phase 2 liveness vote
N -> IA: /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par
N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0
note over N: Pool restored to [IA,IC].\\nDegraded mode : only 1 native.\\nretryLostNative(NB) active (30s ticker).
== retryLostNative for NB ==
loop every 30s
N -> NB: dial (failure)
end
NB -> NB: restart
NB -> NA: heartbeat (native mesh rebuilt)
N -> NB: dial (success)
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA,NB] restored.\\nNominal mode recovered.
@enduml

View File

@@ -0,0 +1,45 @@
@startuml 24_failure_retry_lost_native
title F6 — retryLostNative : native reconnection after a network outage
participant "Node / Indexer" as Caller
participant "Native A\\n(alive)" as NA
participant "Native B\\n(unstable network)" as NB
note over Caller: StaticNatives = [NA, NB]\\nActive heartbeats to NA and NB
== Transient network outage towards NB ==
NB ->x Caller: stream reset (network timeout)
Caller -> Caller: AfterDelete(NB)\\nStaticNatives = [NA]\\nlostNatives.Store(NB.addr)
== replenishNativesFromPeers — phase 1 ==
Caller -> NA: GET /opencloud/native/peers/1.0
NA --> Caller: GetPeersResponse{Peers:[NB]}
note over Caller: NB known by NA, direct reconnection attempt
Caller -> NB: dial (failure — network still down)
note over Caller: Connection impossible.\\nSwitch to retryLostNative()
== retryLostNative : 30s ticker ==
loop every 30s while NB is missing
Caller -> Caller: retryLostNative()\\niterates over lostNatives
Caller -> NB: StartNativeRegistration (dial + heartbeat + subscribe)
NB --> Caller: dial fails
note over Caller: Retry logged. Next attempt in 30s.
end
== Network restored ==
note over NB: Network restored\\nNB reachable again
Caller -> NB: StartNativeRegistration\\ndial (success)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: /opencloud/native/subscribe/1.0\\nIndexerRegistration{FillRate: fillRateFn()}
NB --> Caller: subscribe ack
Caller -> Caller: lostNatives.Delete(NB.addr)\\nStaticNatives = [NA, NB] restored
note over Caller: Nominal mode recovered.\\nnativeHeartbeatOnce not used (goroutine already active for NA).\\nA new SendHeartbeat goroutine is started for NB only.
@enduml

View File

@@ -0,0 +1,35 @@
@startuml 25_failure_node_gc
title F7 — Node crash → indexer GC + AfterDelete
participant "Node\n(crashed)" as N
participant "Indexer A" as IA
participant "Indexer B" as IB
note over N, IB: Nominal state : N was heartbeating to IA and IB
== Node crash ==
N ->x IA: stream reset (heartbeat cut)
N ->x IB: stream reset (heartbeat cut)
== GC on the Indexer A side ==
note over IA: HandleHeartbeat : stream reset detected\nStreamRecords[ProtocolHeartbeat][N].Expiry frozen
loop GC ticker (30s) — StartGC(30*time.Second)
IA -> IA: gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ if 2min without heartbeat → eviction
IA -> IA: delete(StreamRecords[ProtocolHeartbeat][N])\nAfterDelete(N, name, did) called outside the lock
note over IA: N removed from the live registry.\nFillRate recalculated : (n-1) / MaxNodesConn()
end
== Fill rate impact ==
note over IA: FillRate decreases.\nThe next BuildHeartbeatResponse\nwill include the updated FillRate.\nIf fillRate drops back below 80% :\n→ offload.inBatch and alreadyTried are reset.
== GC on the Indexer B side ==
note over IB: Same GC performed.\nN removed from StreamRecords[ProtocolHeartbeat].
== Possible node reconnection ==
N -> N: restart
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0\nHeartbeat{name, PeerID_N, IndexersBinded, need, record}
IA -> IA: HandleHeartbeat → UptimeTracker(FirstSeen=now)\nStreamRecords[ProtocolHeartbeat][N] recreated\nRepublish PeerRecord N to the DHT
note over IA: N is back with a fresh FirstSeen.\ndynamicMinScore stays high while age < 24h.\n(grace period : 2 ticks before scoring)
@enduml
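The indexer-side GC shown above is a plain ticker over a record map: expire on `lastHBTime + 2min`, then call the after-delete hook outside the lock. A sketch with assumed type names:

```go
package p2p

import (
	"sync"
	"time"
)

// streamRecord keeps what the GC needs: the last heartbeat and the derived
// expiry (lastHBTime + 2min in the diagram).
type streamRecord struct {
	LastSeen time.Time
	Expiry   time.Time
}

type recordStore struct {
	mu      sync.Mutex
	records map[string]streamRecord // keyed by PeerID
}

// startGC runs the 30s ticker from the diagram: expired peers are removed
// under the lock, and the after-delete hooks are invoked outside of it.
func (s *recordStore) startGC(interval time.Duration, afterDelete func(peerID string)) {
	ticker := time.NewTicker(interval)
	go func() {
		for range ticker.C {
			var expired []string
			s.mu.Lock()
			now := time.Now()
			for id, rec := range s.records {
				if now.After(rec.Expiry) {
					delete(s.records, id)
					expired = append(expired, id)
				}
			}
			s.mu.Unlock()
			for _, id := range expired {
				afterDelete(id) // called outside the lock, as in the diagram
			}
		}
	}()
}
```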

88
docs/diagrams/README.md Normal file
View File

@@ -0,0 +1,88 @@
# OC-Discovery — Architecture and sequence diagrams
All files are in [PlantUML](https://plantuml.com/) format.
They can be rendered with VS Code (PlantUML extension), IntelliJ, or [plantuml.com/plantuml](https://www.plantuml.com/plantuml/uml/).
> **Note:** Diagrams 06, 07, 12, 14-24 and several protocols below
> described the 3-tier architecture (node → indexer → native indexer),
> which was removed in the `feature/no_native_consortium` branch. Those files are
> kept for historical reference. The active diagrams are listed below.
## Active diagrams (2-tier architecture)
### Main sequences
| File | Description |
|------|-------------|
| `01_node_init.puml` | Node initialization: libp2p host + PSK + ConnectionGater + ConnectToIndexers + SendHeartbeat + proactive DHT |
| `02_node_claim.puml` | Node registration: `claimInfo` + `publishPeerRecord` → indexers → DHT |
| `03_indexer_heartbeat.puml` | Bidirectional heartbeat protocol: PeerID + DHT + witness challenges, 7-dimension scoring, suggestions, SuggestMigrate |
| `04_indexer_publish.puml` | Publication of a `PeerRecord` to the indexer → DHT (PutValue /node, /name, /pid) |
| `05_indexer_get.puml` | Peer resolution: `GetPeerRecord` → indexer → DHT if not found locally |
| `08_nats_create_resource.puml` | NATS `CREATE_RESOURCE` handler: on-demand partner propagation |
| `09_nats_propagation.puml` | NATS `PROPALGATION_EVENT` handler: delete, considers, planner, search |
| `10_pubsub_search.puml` | Global gossip search (GossipSub /opencloud/search/1.0) |
| `11_stream_search.puml` | Direct stream search (type `"known"` or `"partner"`) |
| `13_planner_flow.puml` | Planner session (open, exchange, close) |
### Resilience and pool management
| File | Description |
|------|-------------|
| `hb_failure_evict.puml` | HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish |
| `hb_last_indexer.puml` | Last-indexer protection → reconnectToSeeds → retryUntilSeedResponds |
| `dht_discovery.puml` | Proactive DHT discovery: Provide/FindProviders, SelectByFillRate, dhtCache |
| `connection_gater.puml` | ConnectionGater: DB blacklist → DHT sequential check (transport-error fallthrough) |
## Historical diagrams (3-tier architecture — obsolete)
These files document the old architecture. They no longer match
the production code.
| File | Description |
|------|-------------|
| `06_native_registration.puml` | Indexer registration with the Native (removed) |
| `07_native_get_consensus.puml` | `ConnectToNatives`: fetch pool + Phase 1 + Phase 2 (removed) |
| `12_partner_heartbeat.puml` | Permanent partner heartbeat (removed — on-demand connections) |
| `14_native_offload_gc.puml` | Native Indexer background loops (removed) |
| `15_archi_config_nominale.puml` | Nominal topology with natives (obsolete) |
| `16_archi_config_seed.puml` | Seed mode without natives (obsolete) |
| `17_startup_consensus_phase1_phase2.puml` | Startup with native consensus (removed) |
| `18_startup_seed_discovers_native.puml` | Seed → native upgrade (removed) |
| `19_failure_indexer_crash.puml` | F1 — replenish from a native (removed) |
| `20_failure_both_indexers_selfdelegate.puml` | F2 — native IsSelfFallback (removed) |
| `21_failure_native_one_down.puml` | F3 — one native down (removed) |
| `22_failure_both_natives.puml` | F4 — both natives down (removed) |
| `23_failure_native_plus_indexer.puml` | F5 — combined native + indexer failure (removed) |
| `24_failure_retry_lost_native.puml` | F6 — retryLostNative (removed) |
| `25_failure_node_gc.puml` | F7 — node GC on the indexer side (still valid) |
## Active libp2p protocols
| Protocol | Description |
|----------|-------------|
| `/opencloud/heartbeat/1.0` | Bidirectional node→indexer heartbeat (long-lived) |
| `/opencloud/probe/1.0` | Bandwidth probe (echo, measures latency + throughput) |
| `/opencloud/witness/1.0` | Witness query: "what is your score for indexer X?" |
| `/opencloud/record/publish/1.0` | `PeerRecord` publication to an indexer |
| `/opencloud/record/get/1.0` | `GetPeerRecord` request to an indexer |
| `/opencloud/resource/search/1.0` | Resource search between peers |
| `/opencloud/resource/create/1.0` | Resource creation propagation → partner |
| `/opencloud/resource/update/1.0` | Resource update propagation → partner |
| `/opencloud/resource/delete/1.0` | Resource deletion propagation → partner |
| `/opencloud/resource/planner/1.0` | Planner session (booking) |
| `/opencloud/resource/verify/1.0` | Resource signature verification |
| `/opencloud/resource/considers/1.0` | Transmission of an execution considers |
## Removed protocols (native architecture)
| Protocol | Reason |
|----------|--------|
| `/opencloud/native/subscribe/1.0` | Native tier removed |
| `/opencloud/native/unsubscribe/1.0` | Native tier removed |
| `/opencloud/native/indexers/1.0` | Replaced by DHT FindProviders |
| `/opencloud/native/consensus/1.0` | Replaced by lightweight TriggerConsensus |
| `/opencloud/native/peers/1.0` | Native tier removed |
| `/opencloud/indexer/natives/1.0` | Native tier removed |
| `/opencloud/indexer/consensus/1.0` | Replaced by TriggerConsensus |
| `/opencloud/resource/heartbeat/partner/1.0` | Partner heartbeat removed — on-demand |


@@ -0,0 +1,69 @@
@startuml connection_gater
title ConnectionGater: admission check (InterceptSecured)
participant "Remote Peer\n(inbound)" as Remote
participant "libp2p\nhost A" as Host
participant "OCConnectionGater" as Gater
participant "DB (oc-lib)" as DB
participant "Indexer X\n(reachable)" as IX
participant "Indexer Y\n(unreachable)" as IY
Remote -> Host: inbound connection (post-PSK, post-TLS)
Host -> Gater: InterceptSecured(dir=Inbound, id=RemotePeerID, conn)
alt dir == Outbound
Gater --> Host: true (outbound always allowed)
end
== Step 1: database check ==
Gater -> DB: NewRequestAdmin(PEER).Search(\n Filter: peer_id = RemotePeerID\n)
DB --> Gater: []peer.Peer
alt found AND relation == BLACKLIST
Gater --> Host: false (refused: blacklisted)
Host ->x Remote: connection closed
end
alt found AND relation != BLACKLIST
Gater --> Host: true (known and not blacklisted)
end
== Step 2: DHT check (peer unknown in DB) ==
note over Gater: Unknown peer → check that it exists\nin the DHT network
Gater -> Gater: getReq = GetValue{PeerID: RemotePeerID}
loop For each indexer (random order: Shuffle)
alt Indexer IY unreachable (transport error)
Gater -> IY: h.Connect(ctxTTL, IY_AddrInfo)
IY -->x Gater: connection failed
note over Gater: reachable=false\n→ try the next one
end
alt Indexer IX reachable
Gater -> IX: h.Connect(ctxTTL, IX_AddrInfo)
IX --> Gater: OK
Gater -> IX: TempStream /opencloud/record/get/1.0
Gater -> IX: stream.Encode(GetValue{PeerID: RemotePeerID})
IX -> IX: Local lookup + DHT if absent
IX --> Gater: GetResponse{Found: true/false, Records}
note over Gater: reachable=true → authoritative answer\n(distributed DHT: a single indexer is enough)
alt Found == true
Gater --> Host: true (peer known to the network)
else Found == false
Gater --> Host: false (refused: unknown to the network)
Host ->x Remote: connection closed
end
end
end
alt No indexer reachable
note over Gater: Nascent network or all indexers isolated.\nAllow by default.
Gater --> Host: true
end
@enduml
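The decision ladder above can be summarized as a small function. This is a hedged sketch, not the project's `OCConnectionGater`: the `dbRelation` type and the two callbacks are placeholders standing in for the oc-lib PEER search and the `/opencloud/record/get/1.0` round-trip.

```go
package gater

import (
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

type dbRelation int

const (
	relationUnknown   dbRelation = iota // peer not found in the local DB
	relationKnown                       // found with any relation other than BLACKLIST
	relationBlacklist                   // found and explicitly blacklisted
)

// interceptSecured mirrors the alt branches of the diagram: outbound always
// passes, a DB verdict is final when the peer is known, otherwise one reachable
// indexer answers for the whole DHT, and an empty network allows by default.
func interceptSecured(
	dir network.Direction,
	id peer.ID,
	lookupDB func(peer.ID) dbRelation, // stand-in for NewRequestAdmin(PEER).Search
	askIndexers func(peer.ID) (found, reachable bool), // stand-in for the DHT check
) bool {
	if dir == network.DirOutbound {
		return true // step 0: outbound connections are always allowed
	}
	switch lookupDB(id) {
	case relationBlacklist:
		return false // step 1: refused, blacklisted in DB
	case relationKnown:
		return true // step 1: known and not blacklisted
	}
	// Step 2: unknown in DB, ask the network through the first reachable indexer.
	if found, reachable := askIndexers(id); reachable {
		return found // authoritative answer (a single indexer suffices)
	}
	return true // nascent network or all indexers isolated: allow by default
}
```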


@@ -0,0 +1,56 @@
@startuml dht_discovery
title DHT discovery: Provide/FindProviders + SelectByFillRate + indexer dhtCache
participant "Indexer A\n(new)" as IA
participant "DHT Network" as DHT
participant "Node B\n(bootstrap)" as NodeB
participant "Indexer A\n(existing)" as IAexist
== Indexer registration in the DHT ==
note over IA: IndexerService startup\nstartDHTProvide(fillRateFn)
IA -> IA: Waits for a routable, non-loopback\naddress to be available (max 60s)
IA -> DHT: DHT.Bootstrap(ctx)\n→ routing table warmup
loop ticker RecommendedHeartbeatInterval (~20s)
IA -> DHT: DHT.Provide(IndexerCID, true)\n← IndexerCID = CID(sha256("/opencloud/indexers"))
note over DHT: The indexer is announced as a provider.\nTTL handled by libp2p-kad-dht.\nAuto-expires if Provide() stops.
end
== Passive DHT cache on the indexer ==
note over IA: startDHTCacheRefresh()\nbackground goroutine
IA -> IA: Initial delay 30s (routing table warmup)
loop ticker 2min
IA -> DHT: DiscoverIndexersFromDHT(h, dht, 30)\n← FindProviders(IndexerCID, max=30)
DHT --> IA: []AddrInfo (up to 30 candidates)
IA -> IA: Filter out self\nSelectByFillRate(filtered, nil, 10)\n→ /24 diversity, prior f=0.5 (unknown fill rates)
IA -> IA: dhtCache = selected (max 10)\n→ used for Suggestions in BuildHeartbeatResponse
end
== Node-side discovery at bootstrap ==
NodeB -> NodeB: ConnectToIndexers → seeds added\nSendHeartbeat started
NodeB -> NodeB: proactive goroutine (after 5s warmup)
alt discoveryDHT == nil (pure node, no IndexerService)
NodeB -> DHT: initNodeDHT(h, seeds)\n← DHT client mode, bootstrapped on the seeds
end
NodeB -> DHT: DiscoverIndexersFromDHT(h, discoveryDHT, need+extra)
DHT --> NodeB: []AddrInfo candidates
NodeB -> NodeB: Filter out self\nSelectByFillRate(candidates, fillRates, need)\n→ weighting w(F) = F×(1-F)\n F=0.2 → w=0.16 (less likely)\n F=0.5 → w=0.25 (max)\n F=0.8 → w=0.16 (less likely)\n→ /24 diversity filter
loop For each retained candidate
NodeB -> NodeB: Indexers.SetAddr(key, &addrInfo)\nNudgeIt() → immediate heartbeat
end
note over NodeB: Pool enriched beyond the seeds.\nScoring starts at the first heartbeat.\nSeeds keep IsSeed=true (stickiness).
@enduml
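The weighting step can be made concrete with a short sketch, assuming a plain map of fill rates keyed by candidate ID. `selectByFillRate` below is an illustrative stand-in for the project's SelectByFillRate: weight w(F) = F×(1-F), prior F = 0.5 when the fill rate is unknown, and a weighted draw without replacement; the /24 diversity filter is intentionally left out.

```go
package discovery

import "math/rand"

// weight peaks at F=0.5 (w=0.25) and falls off toward empty or full indexers.
func weight(fill float64) float64 {
	return fill * (1 - fill)
}

// selectByFillRate draws up to n candidates, each with probability proportional
// to w(F); candidates without a known fill rate use the prior F = 0.5.
func selectByFillRate(candidates []string, fillRates map[string]float64, n int) []string {
	type scored struct {
		id string
		w  float64
	}
	pool := make([]scored, 0, len(candidates))
	for _, id := range candidates {
		f, ok := fillRates[id]
		if !ok {
			f = 0.5 // prior for unknown fill rates, as in the diagram
		}
		pool = append(pool, scored{id, weight(f)})
	}
	selected := make([]string, 0, n)
	for len(selected) < n && len(pool) > 0 {
		total := 0.0
		for _, c := range pool {
			total += c.w
		}
		r := rand.Float64() * total
		for i, c := range pool {
			if r -= c.w; r <= 0 || i == len(pool)-1 {
				selected = append(selected, c.id)
				pool = append(pool[:i], pool[i+1:]...) // draw without replacement
				break
			}
		}
	}
	return selected
}
```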


@@ -0,0 +1,41 @@
@startuml hb_failure_evict
title HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish
participant "Node A" as NodeA
participant "Indexer X\n(failing)" as IX
participant "Indexer Y\n(voter)" as IY
participant "Indexer Z\n(voter)" as IZ
participant "DHT" as DHT
participant "Indexer NEW\n(candidate)" as INEW
note over NodeA: SendHeartbeat tick, Indexer X in the pool
NodeA -> IX: stream.Encode(Heartbeat{...})
IX -->x NodeA: timeout / transport error
NodeA -> NodeA: HeartbeatFailure(h, proto, dir, addr_X, info_X, isIndexerHB=true, maxPool)
NodeA -> NodeA: evictPeer(dir, addr_X, id_X, proto)\n→ Streams.Delete(proto, &id_X)\n→ DeleteAddr(addr_X)\n→ DeleteScore(addr_X)\n→ voters = remaining AddrInfos
NodeA -> NodeA: poolSize = len(dir.GetAddrs())
alt poolSize == 0
NodeA -> NodeA: reconnectToSeeds()\n→ re-injects IndexerAddresses (IsSeed=true)
alt seeds added
NodeA -> NodeA: need = maxPool\nNudgeIt() → immediate tick
else no seed configured or seeds unreachable
NodeA -> NodeA: go retryUntilSeedResponds()\n(backoff 10s→5min, panic if IndexerAddresses is empty)
end
else poolSize > 0 AND len(voters) > 0
NodeA -> NodeA: go TriggerConsensus(h, voters, need)
NodeA -> IY: stream GET → GetValue{Key: candidate_DID}
IY --> NodeA: GetResponse{Found, Records}
NodeA -> IZ: stream GET → GetValue{Key: candidate_DID}
IZ --> NodeA: GetResponse{Found, Records}
note over NodeA: Quorum check:\nfound=true AND lastSeen ≤ 2×interval\nAND lastScore ≥ 30\n→ majority → admit INEW
NodeA -> NodeA: Indexers.SetAddr(addr_NEW, &INEW_AddrInfo)\nIndexers.SetScore(addr_NEW, Score{IsSeed:false})\nNudgeIt()
else poolSize > 0 AND len(voters) == 0
NodeA -> DHT: go replenishIndexersFromDHT(h, need)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory
end
@enduml
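The quorum rule in the note above reads naturally as a small predicate. The `vote` struct, the threshold constant, and the function name below are assumptions made for this sketch, not types from the code base; only the rule itself (found, seen within two heartbeat intervals, score at least 30, strict majority) comes from the diagram.

```go
package consensus

import "time"

type vote struct {
	Found     bool      // candidate present in the voter's records
	LastSeen  time.Time // when the voter last saw the candidate
	LastScore int       // voter's last score for the candidate
}

const minAdmissionScore = 30 // threshold shown in the diagram

// admitCandidate counts a vote as positive only if the candidate was found,
// was seen within two heartbeat intervals, and scored at least the threshold;
// a strict majority of voters admits the candidate into the pool.
func admitCandidate(votes []vote, heartbeatInterval time.Duration, now time.Time) bool {
	positive := 0
	for _, v := range votes {
		fresh := now.Sub(v.LastSeen) <= 2*heartbeatInterval
		if v.Found && fresh && v.LastScore >= minAdmissionScore {
			positive++
		}
	}
	return positive*2 > len(votes)
}
```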


@@ -0,0 +1,46 @@
@startuml hb_last_indexer
title Last-indexer protection → reconnectToSeeds → retryUntilSeedResponds
participant "Node A" as NodeA
participant "Indexer LAST\n(only one left)" as IL
participant "Seed Indexer\n(config)" as SEED
participant "DHT" as DHT
note over NodeA: Pool = 1 indexer (LAST)\nIsSeed=false, score low for a long time
== Score-based eviction attempt ==
NodeA -> NodeA: score < minScore\nAND TotalOnline ≥ 2×interval\nAND !IsSeed\nAND len(pool) > 1 ← FALSE: pool == 1
note over NodeA: Active guard: len(pool) == 1\n→ score-based eviction BLOCKED\nLAST stays in the pool
== Network failure (heartbeat fail) ==
NodeA -> IL: stream.Encode(Heartbeat{...})
IL -->x NodeA: timeout
NodeA -> NodeA: HeartbeatFailure → evictPeer(LAST)\npoolSize = 0
NodeA -> NodeA: reconnectToSeeds()\n→ parse IndexerAddresses (conf)\n→ SetAddr + SetScore(IsSeed=true) for each seed
alt seeds added (IndexerAddresses not empty)
NodeA -> NodeA: NudgeIt() → immediate tick
NodeA -> SEED: Heartbeat{...} (via SendHeartbeat nudge)
SEED --> NodeA: HeartbeatResponse{fillRate, ...}
note over NodeA: Pool restored via the seeds.\nProactive DHT discovery resumes.
else IndexerAddresses empty
NodeA -> NodeA: go retryUntilSeedResponds()
note over NodeA: immediate panic:\n"pool is empty and no seed indexers configured"\n→ process stops
end
== retryUntilSeedResponds (if seeds do not respond) ==
loop exponential backoff (10s → 20s → ... → 5min)
NodeA -> NodeA: time.Sleep(backoff)
NodeA -> NodeA: len(Indexers.GetAddrs()) > 0?\n→ yes: return (someone refilled the pool)
NodeA -> NodeA: reconnectToSeeds()
alt pool > 0 after reconnect
NodeA -> NodeA: NudgeIt()\nDHT.Bootstrap(ctx, 15s)
note over NodeA: Exit the loop.\nNormal heartbeat resumes.
end
end
@enduml
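A minimal sketch of the retry loop described above, assuming two hypothetical callbacks for the current pool size and the seed reconnection; the 10s floor and 5min cap come from the diagram, everything else is illustrative.

```go
package heartbeat

import "time"

// retryUntilSeedResponds backs off exponentially from 10s up to a 5min cap,
// bails out as soon as the pool has been refilled by someone else (DHT
// replenish, consensus), and stops once reconnecting to the seeds yields a
// non-empty pool again.
func retryUntilSeedResponds(poolSize func() int, reconnectToSeeds func() int) {
	backoff := 10 * time.Second
	const maxBackoff = 5 * time.Minute
	for {
		time.Sleep(backoff)
		if poolSize() > 0 {
			return // pool already refilled elsewhere
		}
		if reconnectToSeeds() > 0 {
			return // seeds answered: normal heartbeat resumes
		}
		if backoff *= 2; backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}
```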

go.mod

@@ -1,64 +1,177 @@
module oc-discovery
go 1.22.0
go 1.25.0
require (
cloud.o-forge.io/core/oc-lib v0.0.0-20240902132116-fba1608edb70
github.com/beego/beego v1.12.13
github.com/beego/beego/v2 v2.3.0
github.com/go-redis/redis v6.15.9+incompatible
github.com/goraz/onion v0.1.3
github.com/smartystreets/goconvey v1.7.2
github.com/tidwall/gjson v1.17.3
cloud.o-forge.io/core/oc-lib v0.0.0-20260331181901-f3b5a54545ee
github.com/ipfs/go-cid v0.6.0
github.com/libp2p/go-libp2p v0.47.0
github.com/libp2p/go-libp2p-record v0.3.1
github.com/multiformats/go-multiaddr v0.16.1
github.com/multiformats/go-multihash v0.2.3
)
require (
github.com/beego/beego/v2 v2.3.8 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/boxo v0.35.2 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-log/v2 v2.9.1 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/miekg/dns v1.1.68 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.10.0 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v2 v2.2.12 // indirect
github.com/pion/dtls/v3 v3.0.6 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/interceptor v0.1.40 // indirect
github.com/pion/logging v0.2.3 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.15 // indirect
github.com/pion/rtp v1.8.19 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/sdp/v3 v3.0.13 // indirect
github.com/pion/srtp/v3 v3.0.6 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v2 v2.2.10 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.2 // indirect
github.com/pion/webrtc/v4 v4.1.2 // indirect
github.com/polydawn/refmt v0.89.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/quic-go/webtransport-go v0.10.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.uber.org/dig v1.19.0 // indirect
go.uber.org/fx v1.24.0 // indirect
go.uber.org/mock v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.41.0 // indirect
gonum.org/v1/gonum v0.17.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.35.1 // indirect
k8s.io/apimachinery v0.35.1 // indirect
k8s.io/client-go v0.35.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/biter777/countries v1.7.5 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.5 // indirect
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.22.0 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/golang/snappy v1.0.0 // indirect
github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.37.1
github.com/libp2p/go-libp2p-pubsub v0.15.0
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.37.0 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nats.go v1.43.0 // indirect
github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.20.2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.57.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rs/zerolog v1.33.0 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/rs/zerolog v1.34.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/assertions v1.2.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.mongodb.org/mongo-driver v1.16.1 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/text v0.17.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
)

go.sum

@@ -1,230 +1,345 @@
cloud.o-forge.io/core/oc-lib v0.0.0-20240830131445-af18dba5563c h1:4ZoM9ONJiaeLHSi0s8gsCe4lHuRHXkfK+eDSnTCspa0=
cloud.o-forge.io/core/oc-lib v0.0.0-20240830131445-af18dba5563c/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0=
cloud.o-forge.io/core/oc-lib v0.0.0-20240902132116-fba1608edb70 h1:xHxxRDtMG2/AAc7immArZfsnVF+KfJqoyUeUENmF6DA=
cloud.o-forge.io/core/oc-lib v0.0.0-20240902132116-fba1608edb70/go.mod h1:FIJD0taWLJ5pjQLJ6sfE2KlTkvbmk5SMcyrxdjsaVz0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa h1:1wCpI4dwN1pj6MlpJ7/WifhHVHmCE4RU+9klwqgo/bk=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908 h1:1jz3xI/u2FzCG8phY7ShqADrmCj0mlrdjbdNUosSwgs=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312073634-2c9c42dd516a h1:oCkb9l/Cvn0x6iicxIydrjfCNU+UHhKuklFgfzDa174=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312073634-2c9c42dd516a/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312141150-a335c905b3a2 h1:DuB6SDThFVJVQ0iI0pZnBqtCE0uW+SNI7R7ndKixu2k=
cloud.o-forge.io/core/oc-lib v0.0.0-20260312141150-a335c905b3a2/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260318143822-5976795d4406 h1:FN1EtRWn228JprAbnY5K863Fzj+SzMqQtKRtwvECbLw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260318143822-5976795d4406/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260325092016-4580200e8057 h1:pR+lZzcCWZ0kke2r2xXa7OpdbLpPW3gZSWZ8gGHh274=
cloud.o-forge.io/core/oc-lib v0.0.0-20260325092016-4580200e8057/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260331144112-c0722483b86c h1:wTIridvhud8zwMsMkwxgrQ+j+6UAo2IHDr3N80AA6zc=
cloud.o-forge.io/core/oc-lib v0.0.0-20260331144112-c0722483b86c/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260331181901-f3b5a54545ee h1:iJ1kgMbBOBIHwS4jHOVB5zFqOd7J9ZlweQBuchnmvT0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260331181901-f3b5a54545ee/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/beego/beego v1.12.11 h1:MWKcnpavb7iAIS0m6uuEq6pHKkYvGNw/5umIUKqL7jM=
github.com/beego/beego v1.12.11/go.mod h1:QURFL1HldOcCZAxnc1cZ7wrplsYR5dKPHFjmk6WkLAs=
github.com/beego/beego v1.12.13 h1:g39O1LGLTiPejWVqQKK/TFGrroW9BCZQz6/pf4S8IRM=
github.com/beego/beego v1.12.13/go.mod h1:QURFL1HldOcCZAxnc1cZ7wrplsYR5dKPHFjmk6WkLAs=
github.com/beego/beego/v2 v2.0.7 h1:9KNnUM40tn3pbCOFfe6SJ1oOL0oTi/oBS/C/wCEdAXA=
github.com/beego/beego/v2 v2.0.7/go.mod h1:f0uOEkmJWgAuDTlTxUdgJzwG3PDSIf3UWF3NpMohbFE=
github.com/beego/beego/v2 v2.3.0 h1:iECVwzm6egw6iw6tkWrEDqXG4NQtKLQ6QBSYqlM6T/I=
github.com/beego/beego/v2 v2.3.0/go.mod h1:Ob/5BJ9fIKZLd4s9ZV3o9J6odkkIyL83et+p98gyYXo=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/couchbase/go-couchbase v0.0.0-20201216133707-c04035124b17/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.1.2-0.20201224031647-c432ccf49f32/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo=
github.com/couchbase/goutils v0.0.0-20210118111533-e33d3ffb5401/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I=
github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4=
github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4=
github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0=
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk=
github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
github.com/libp2p/go-flow-metrics v0.3.0 h1:q31zcHUvHnwDO0SHaukewPYgwOBSxtt830uJtUx6784=
github.com/libp2p/go-flow-metrics v0.3.0/go.mod h1:nuhlreIwEguM1IvHAew3ij7A8BMlyHQJ279ao24eZZo=
github.com/libp2p/go-libp2p v0.47.0 h1:qQpBjSCWNQFF0hjBbKirMXE9RHLtSuzTDkTfr1rw0yc=
github.com/libp2p/go-libp2p v0.47.0/go.mod h1:s8HPh7mMV933OtXzONaGFseCg/BE//m1V34p3x4EUOY=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-kad-dht v0.37.1 h1:jtX8bQIXVCs6/allskNB4m5n95Xvwav7wHAhopGZfS0=
github.com/libp2p/go-libp2p-kad-dht v0.37.1/go.mod h1:Uwokdh232k9Y1uMy2yJOK5zb7hpMHn4P8uWS4s9i05Q=
github.com/libp2p/go-libp2p-kbucket v0.8.0 h1:QAK7RzKJpYe+EuSEATAaaHYMYLkPDGC18m9jxPLnU8s=
github.com/libp2p/go-libp2p-kbucket v0.8.0/go.mod h1:JMlxqcEyKwO6ox716eyC0hmiduSWZZl6JY93mGaaqc4=
github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o=
github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4=
github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY=
github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw=
github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc=
github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI=
github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.41.0 h1:npo01n6vUlRViIj5fgwiK8vlNIh8bnoxqh3gypKsyAw=
github.com/prometheus/common v0.41.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 h1:DAYUYH5869yV94zvCES9F51oYtN5oGlwjxJJz7ZCnik=
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw=
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
@@ -232,143 +347,213 @@ github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYl
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94=
github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/wendal/errors v0.0.0-20181209125328-7f31f4b264ec/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76 h1:tBiBTKHnIjovYoLX/TPkcf+OjqqKGQrPtGT3Foz+Pgo=
github.com/youmark/pkcs8 v0.0.0-20240424034433-3c2c7870ae76/go.mod h1:SQliXeA7Dhkt//vS29v3zpbEwoa+zb2Cn5xj5uO4K5U=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4=
go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

logs.txt Normal file

@@ -0,0 +1,865 @@
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Config file found : /etc/oc/discovery.json"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Connecting tomongodb://mongo:27017/"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Connecting mongo client to db DC_myDC"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Database is READY"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Config file found : /etc/oc/discovery.json"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"retrieving private key..."}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"retrieving psk file..."}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"open a host..."}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Host open on 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN"}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"generate opencloud node..."}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"connect to indexers..."}
{"level":"info","time":"2026-04-08T06:17:16Z","message":"claims my node..."}
{"level":"info","proto":"/opencloud/heartbeat/1.0","peers":1,"time":"2026-04-08T06:17:16Z","message":"heartbeat started"}
[location] granularity=2 raw=(43.6046,1.4451) fuzzed=(43.5226,-0.7667)
{"level":"info","time":"2026-04-08T06:17:17Z","message":"run garbage collector..."}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"connect to partners..."}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/update/1.0"}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/delete/1.0"}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/create/1.0"}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"subscribe to decentralized search flow..."}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"connect to NATS"}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Node is actually running."}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Listening to propalgation_event"}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Listening to peer_behavior_event"}
Published on create_resource
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:21Z","message":"[dht] node DHT client ready"}
{"level":"info","need":4,"time":"2026-04-08T06:17:21Z","message":"[dht] proactive indexer discovery from DHT"}
{"level":"info","need":4,"time":"2026-04-08T06:17:21Z","message":"[dht] replenishing indexer pool from DHT"}
{"level":"info","found":0,"time":"2026-04-08T06:17:21Z","message":"[dht] indexer discovery complete"}
{"level":"warn","time":"2026-04-08T06:17:21Z","message":"[dht] no indexers found in DHT for replenishment"}
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"New Stream engaged as Heartbeat /opencloud/heartbeat/1.0 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"}
{"level":"info","added":1,"from":"12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu","time":"2026-04-08T06:17:36Z","message":"added suggested indexers from heartbeat response"}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"nudge received, heartbeating new indexers immediately"}
{"level":"info","time":"2026-04-08T06:17:36Z","message":"New Stream engaged as Heartbeat /opencloud/heartbeat/1.0 12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:17:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:17:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629158 [] [23 185 233 217 252 222 167 147 185 9 46 210 169 26 129 42 84 220 137 75 23 42 46 164 85 169 193 238 54 52 179 3 4 189 34 95 188 69 145 100 214 183 69 106 248 20 58 176 61 17 208 250 147 187 154 221 86 213 99 74 6 127 10 3]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629158
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629163 [] [148 228 35 212 146 117 198 240 248 47 53 28 217 242 84 16 53 77 141 38 6 249 197 179 135 35 145 140 151 72 185 169 54 204 188 6 20 164 204 214 121 157 67 117 30 95 109 55 108 249 174 141 68 222 186 135 191 90 60 187 194 30 172 5]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629163
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629168 [] [48 136 252 209 83 105 251 34 193 50 235 55 55 191 89 233 40 236 174 129 200 65 63 74 57 229 66 155 94 151 91 120 115 163 230 136 81 161 88 35 170 229 126 33 45 134 79 111 7 114 18 209 200 103 43 75 18 170 194 108 197 253 112 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629168
{"level":"info","time":"2026-04-08T06:19:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:31Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629171 [] [216 81 114 23 19 203 244 153 11 214 139 99 54 104 11 30 130 152 134 82 91 69 227 136 79 160 150 92 56 191 14 150 252 100 235 150 93 199 139 179 59 97 131 99 88 55 131 145 246 243 235 31 55 41 42 32 215 75 15 63 31 131 73 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629171
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629173 [] [92 242 214 250 125 244 127 81 229 41 112 244 67 188 187 216 10 233 184 185 231 147 125 84 105 192 97 1 65 231 133 104 34 159 166 149 45 186 190 51 117 68 231 163 40 0 58 162 94 88 207 37 206 45 66 178 118 149 54 78 57 1 3 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629173
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629176 [] [54 239 156 204 25 72 110 223 95 214 112 163 168 169 130 166 57 102 104 232 85 72 119 22 58 182 122 44 2 157 192 190 89 249 35 93 183 96 120 183 23 88 104 83 77 93 3 233 50 231 190 217 162 109 134 212 198 85 206 252 139 156 218 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629176
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:19:38Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:38Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629181 [] [127 158 58 208 104 78 156 212 40 94 58 139 31 6 199 56 174 149 133 246 174 53 58 21 177 70 222 188 111 252 100 238 136 147 1 46 4 158 101 6 80 113 125 136 135 228 149 240 212 154 152 92 37 146 163 141 150 95 133 35 62 170 180 9]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629181
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:19:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:47Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629187 [] [151 223 225 231 72 46 42 81 219 141 98 116 110 73 2 184 17 222 237 90 217 214 53 151 144 3 88 16 241 119 18 173 3 150 226 167 148 111 83 87 225 120 78 69 38 135 253 164 97 128 66 87 219 207 190 235 196 92 50 89 116 246 17 5]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629187
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629192 [] [111 224 143 159 179 190 133 150 240 14 180 172 240 228 53 86 220 117 5 151 202 186 17 35 105 82 21 231 133 155 7 170 96 134 221 61 23 6 146 1 75 253 32 58 126 98 140 4 246 87 24 230 62 83 114 168 163 74 152 53 217 201 229 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629192
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629197 [] [59 20 147 21 143 23 216 117 191 182 95 226 35 78 36 207 4 147 88 202 120 62 22 123 80 205 240 83 239 70 176 59 211 78 229 145 234 44 172 196 61 193 28 121 145 215 214 169 165 82 57 233 164 87 239 32 6 86 196 18 218 74 227 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629197
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629202 [] [54 107 90 122 30 116 226 174 104 100 37 235 211 254 239 142 225 51 128 143 22 29 195 114 125 40 197 196 131 65 67 63 62 101 56 144 212 113 33 95 153 254 238 194 71 3 99 121 190 135 33 167 83 49 137 168 113 133 181 155 63 214 176 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629202
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629207 [] [57 134 104 85 240 207 200 242 9 59 251 115 73 171 5 39 138 118 184 194 201 211 132 32 254 74 163 221 220 79 102 201 107 3 87 49 213 235 164 31 174 104 209 246 196 64 85 15 248 99 69 165 153 175 40 77 115 61 129 112 36 53 47 13]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629207
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629212 [] [215 117 62 147 102 252 227 9 93 160 68 16 169 186 89 164 56 59 42 225 102 42 57 242 128 46 18 182 109 94 140 95 167 254 240 34 108 229 225 171 28 17 34 113 122 164 121 212 83 227 121 8 11 165 150 5 227 179 156 60 185 75 67 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629212
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629217 [] [122 210 113 143 218 90 178 12 159 83 141 245 210 208 198 0 73 80 127 229 163 221 103 110 98 102 228 198 152 201 57 180 167 246 85 75 85 203 223 139 131 99 54 151 42 36 243 202 241 32 178 202 63 198 104 92 210 26 248 230 115 28 120 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629217
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629222 [] [98 74 235 201 133 101 129 234 143 83 255 18 141 176 169 45 134 208 161 116 31 110 66 138 163 128 202 143 80 171 55 4 159 83 100 155 218 40 66 102 245 17 165 102 131 206 245 95 121 136 129 109 98 175 54 101 67 18 229 19 255 170 236 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629222
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629227 [] [156 122 36 54 136 116 3 84 110 253 41 115 42 125 114 202 247 54 2 16 150 129 247 202 165 131 236 228 103 244 233 202 31 61 247 47 108 182 244 136 187 187 51 2 2 212 167 180 28 138 87 1 255 253 5 52 40 206 211 89 171 106 46 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629227
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629232 [] [27 218 207 190 133 114 43 118 66 246 46 131 169 53 78 44 118 11 115 146 75 78 192 70 20 132 177 126 116 41 4 82 91 182 92 95 157 252 39 218 96 59 222 182 159 171 151 93 63 254 212 244 243 149 248 234 177 108 160 163 77 179 99 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629232
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629237 [] [66 73 1 165 227 98 102 241 100 114 13 76 157 152 186 255 91 225 92 239 119 226 131 110 63 17 118 163 218 216 82 77 159 127 112 189 232 58 185 236 186 247 44 208 20 57 214 123 0 44 238 132 124 168 240 210 43 74 29 58 252 224 140 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629237
{"level":"info","time":"2026-04-08T06:20:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:43Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:43Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629243 [] [187 179 204 44 46 42 148 223 7 92 219 164 86 92 179 130 108 225 153 122 216 227 255 58 75 154 36 41 149 225 159 57 71 142 104 244 194 61 123 52 129 98 103 234 232 42 181 251 19 101 190 168 128 101 227 32 135 240 54 16 194 185 153 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629243
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629248 [] [210 69 3 179 113 27 162 199 166 128 158 136 142 219 138 230 173 237 41 106 245 196 147 23 14 246 101 102 117 21 146 63 205 112 83 92 255 2 178 177 120 187 223 76 203 219 106 93 152 155 143 42 128 210 49 156 116 68 142 33 156 134 72 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629248
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629253 [] [98 113 231 230 60 162 99 109 134 79 56 165 55 34 199 155 193 206 14 147 77 204 30 202 204 132 175 84 98 196 126 38 127 234 144 60 176 102 123 90 23 253 63 94 102 85 33 245 228 39 49 203 81 105 59 58 78 215 9 146 196 129 214 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629253
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629258 [] [162 228 163 147 196 70 190 233 194 135 209 58 118 66 252 60 11 21 153 184 75 46 15 166 74 81 139 192 70 155 217 61 180 34 31 231 187 121 142 194 72 235 89 198 123 92 0 94 4 162 140 123 107 128 40 211 154 155 33 20 231 108 189 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629258
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629263 [] [20 112 147 99 13 17 196 30 106 208 129 192 89 88 89 29 13 237 42 200 147 35 51 171 245 211 141 82 162 241 75 242 52 123 186 189 210 57 228 251 188 71 136 13 244 154 120 85 17 120 2 195 170 105 95 66 197 156 141 201 193 225 101 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629263
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629268 [] [110 141 235 237 232 239 196 232 36 2 113 65 208 171 45 101 205 210 104 91 173 207 52 76 61 227 57 225 8 143 184 208 103 226 163 105 10 229 11 205 26 40 56 182 212 49 242 151 28 155 96 235 85 128 177 94 10 162 192 212 89 146 12 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629268
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629273 [] [156 56 90 120 217 145 134 236 179 121 242 238 104 51 219 222 64 187 30 164 122 148 93 163 211 8 96 24 81 202 77 128 1 231 61 106 5 222 78 83 107 132 231 180 37 46 27 249 38 163 100 1 20 194 164 79 250 184 67 128 245 183 33 11]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629273
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629278 [] [148 48 28 250 242 138 131 204 57 180 146 119 232 43 28 103 82 127 68 120 144 129 9 191 47 7 8 202 130 176 222 24 120 15 164 128 112 142 10 29 31 199 183 181 88 221 74 63 198 40 64 75 46 116 24 82 1 227 205 50 223 185 20 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629278
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629283 [] [127 128 124 54 133 149 56 98 160 205 142 167 223 216 219 41 136 215 118 172 104 237 75 1 225 217 135 30 40 43 152 175 85 33 249 226 136 173 48 227 188 201 209 97 164 71 109 153 173 177 238 1 48 5 74 240 155 80 44 7 191 217 182 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629283
{"level":"info","time":"2026-04-08T06:21:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:29Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629289 [] [30 247 110 61 160 25 49 192 200 184 72 44 244 40 97 148 226 113 239 127 100 253 83 95 168 108 64 62 138 78 126 36 223 108 76 232 5 254 202 116 234 179 14 196 31 193 211 116 74 201 236 164 69 24 52 42 215 61 128 101 0 39 32 1]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:21:29Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629289
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629294 [] [49 111 35 93 42 104 196 113 147 214 133 98 55 125 220 97 32 136 31 24 228 36 49 136 178 200 95 168 175 93 252 239 189 139 232 12 234 90 100 17 203 119 41 135 118 51 89 131 95 99 175 131 73 159 153 14 125 119 190 35 246 197 96 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629294
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629299 [] [22 128 127 139 144 129 120 137 46 77 249 144 78 154 135 105 172 70 0 188 41 99 232 139 111 193 17 70 128 119 139 221 92 230 34 230 82 234 71 108 11 11 40 34 211 126 80 63 188 67 226 171 7 120 181 168 42 118 92 160 128 209 199 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629299
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629304 [] [240 170 201 48 157 246 93 160 52 84 119 29 39 64 210 83 155 211 24 189 11 57 75 69 91 101 83 150 55 146 51 208 254 60 128 151 238 99 45 153 9 195 139 123 122 137 206 45 100 249 55 98 85 215 194 33 95 71 164 99 160 223 90 0]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629304
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629309 [] [179 172 109 115 79 55 94 106 15 237 19 131 151 43 10 7 72 77 2 34 176 107 173 191 240 81 63 217 10 154 172 119 161 82 9 153 70 208 120 46 207 44 43 71 177 103 23 105 220 70 61 67 95 149 155 214 48 94 163 119 137 80 224 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629309
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629314 [] [157 215 144 156 25 165 9 240 220 81 71 142 16 43 178 102 120 205 4 242 206 104 24 211 249 252 141 85 99 49 46 114 57 130 244 253 153 218 20 66 122 119 63 128 55 162 231 112 221 36 190 225 226 141 123 251 179 33 153 10 126 220 183 0]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629314
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629319 [] [60 10 119 166 57 198 44 211 37 69 210 60 151 227 116 48 167 100 105 231 170 221 65 184 148 189 136 19 169 39 34 7 37 208 1 83 93 0 228 219 223 53 217 129 90 48 204 117 198 150 228 38 176 211 173 125 120 251 96 135 166 205 21 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629319
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629324 [] [70 8 80 144 29 14 138 41 69 121 183 136 249 69 130 66 211 97 58 92 106 206 212 70 237 239 13 77 76 117 254 238 246 54 200 102 187 178 44 236 56 30 151 255 102 199 67 189 142 93 22 105 48 96 134 147 132 166 5 34 99 206 89 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629324
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629329 [] [34 176 30 255 58 157 174 41 8 33 88 101 25 86 132 135 160 119 211 45 145 130 83 234 66 160 58 3 81 135 9 81 179 111 163 85 47 200 180 172 172 159 200 27 230 110 158 250 1 21 74 78 91 47 12 92 100 101 114 216 179 56 6 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629329
{"level":"info","time":"2026-04-08T06:22:14Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:14Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:15Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:15Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629335 [] [170 58 162 2 109 219 82 212 97 254 26 88 22 167 86 86 139 214 119 179 237 60 203 12 193 126 20 227 116 140 67 87 241 250 24 93 122 93 97 145 204 45 154 175 162 96 127 220 180 2 34 234 154 204 242 114 174 88 222 135 53 214 93 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629335
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629340 [] [238 195 43 183 107 63 168 220 253 184 160 179 158 178 165 18 63 242 162 211 74 185 22 66 56 139 189 236 91 229 162 94 244 74 125 229 156 74 43 108 66 229 181 160 106 103 210 105 242 151 125 116 18 166 133 112 194 78 64 41 159 213 119 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629340
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629345 [] [3 253 149 166 87 72 223 116 17 174 119 244 178 113 199 143 95 24 132 160 245 22 104 98 161 183 22 219 187 58 98 27 57 165 159 130 132 178 53 13 165 200 94 31 47 53 199 131 61 24 128 177 212 196 170 189 253 49 214 144 120 221 99 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629345
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629350 [] [181 210 168 89 33 8 181 117 214 50 72 135 24 44 202 145 53 30 123 114 245 156 170 254 171 22 52 205 63 37 232 187 153 176 71 181 226 14 56 123 223 243 12 180 207 196 14 139 71 87 34 27 184 156 1 131 231 154 114 40 207 99 24 9]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629350
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629355 [] [206 59 19 160 32 126 18 156 87 174 206 32 59 177 100 105 90 226 227 101 156 201 16 46 46 26 125 91 87 146 186 183 156 169 151 96 96 26 24 110 149 82 157 99 228 235 168 74 239 244 91 241 143 124 229 6 30 197 49 233 48 208 80 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629355
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629360 [] [90 246 228 229 207 100 199 90 36 54 134 95 53 15 1 170 67 139 67 169 160 69 127 224 21 32 38 142 79 108 247 12 204 251 157 57 180 116 116 76 240 39 176 219 148 10 107 9 227 128 6 71 216 38 134 234 205 253 95 174 177 216 161 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629360
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629365 [] [63 199 217 15 130 99 177 48 78 10 36 251 150 42 115 110 100 247 20 74 254 171 89 52 92 230 211 148 104 143 107 230 209 68 95 224 15 57 122 163 53 46 159 191 92 175 118 241 157 14 62 2 101 66 66 211 67 124 208 117 101 1 225 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629365
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629370 [] [91 181 191 156 49 87 26 57 12 57 27 55 225 180 28 39 16 57 126 224 26 10 145 93 71 252 245 81 173 203 116 234 36 138 219 93 165 15 130 246 116 128 146 151 255 128 140 210 112 225 129 5 45 10 198 125 98 135 1 89 144 115 60 11]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629370
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629375 [] [168 228 101 110 246 181 204 230 69 13 111 254 247 246 184 225 72 223 92 222 179 241 100 165 195 74 14 198 45 136 87 171 93 156 118 186 49 2 58 187 123 117 79 124 61 11 119 124 53 2 95 121 29 251 73 217 212 191 200 176 139 228 215 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629375
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:00Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:00Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629381 [] [144 54 219 83 60 101 156 100 36 53 246 108 176 11 139 211 109 131 136 230 114 54 172 174 19 55 43 50 22 109 164 36 51 57 44 117 203 235 34 222 204 199 30 130 72 91 125 97 229 230 155 34 1 129 192 230 227 13 210 149 41 42 63 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629381
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629381 [] [144 54 219 83 60 101 156 100 36 53 246 108 176 11 139 211 109 131 136 230 114 54 172 174 19 55 43 50 22 109 164 36 51 57 44 117 203 235 34 222 204 199 30 130 72 91 125 97 229 230 155 34 1 129 192 230 227 13 210 149 41 42 63 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629381
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629386 [] [30 202 184 203 25 1 125 206 41 62 89 178 214 193 137 233 121 38 215 131 240 88 92 129 180 41 181 33 6 54 118 64 216 128 3 248 193 219 65 6 29 91 17 247 40 25 133 7 116 66 66 64 183 35 150 196 120 248 154 133 92 80 138 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629386
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629386 [] [30 202 184 203 25 1 125 206 41 62 89 178 214 193 137 233 121 38 215 131 240 88 92 129 180 41 181 33 6 54 118 64 216 128 3 248 193 219 65 6 29 91 17 247 40 25 133 7 116 66 66 64 183 35 150 196 120 248 154 133 92 80 138 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629386
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629391 [] [110 205 129 253 77 31 152 6 78 24 42 124 97 64 89 83 228 237 184 188 238 96 174 104 50 102 124 130 86 94 197 8 234 175 51 108 62 143 173 174 237 85 151 24 68 87 56 55 38 225 233 70 50 80 66 236 241 165 134 78 242 88 168 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629391
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:12Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629392 [] [75 116 113 253 77 139 233 209 46 112 155 197 247 65 52 60 137 142 105 1 205 163 117 18 158 109 93 196 162 238 246 20 224 91 203 238 197 189 191 221 154 16 149 245 45 66 27 211 84 170 9 125 137 77 171 165 232 188 216 242 164 215 76 11]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629392
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629396 [] [240 147 198 201 116 215 160 95 148 236 135 110 30 97 4 108 212 135 243 192 27 44 61 14 30 116 20 247 191 1 103 228 225 64 12 0 102 20 146 104 141 143 144 245 158 186 83 98 232 170 7 24 148 190 174 234 237 77 212 157 73 109 118 9]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629396
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629397 [] [144 164 56 237 29 110 226 88 72 232 64 152 15 204 134 122 155 214 86 191 104 104 233 231 117 60 0 46 236 113 98 65 83 58 9 168 130 155 188 250 239 51 192 170 242 103 173 236 91 177 224 13 161 196 219 48 139 206 90 53 98 20 239 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629397
{"level":"info","time":"2026-04-08T06:23:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629402 [] [160 52 205 176 18 24 15 168 91 245 68 74 111 206 79 223 168 65 78 51 228 33 168 160 119 90 93 100 197 188 182 5 78 203 114 164 154 38 38 109 105 5 1 164 176 177 80 180 246 148 224 27 197 112 27 163 148 255 141 147 34 188 76 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629402
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629407 [] [88 59 221 55 52 217 233 119 150 192 131 111 42 48 14 16 50 136 74 47 169 197 90 201 55 244 224 159 96 11 247 141 130 162 158 159 61 223 1 92 102 220 144 6 193 206 130 184 122 35 192 144 233 156 4 162 32 148 54 38 138 173 20 3]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629407
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629412 [] [182 88 119 236 225 31 252 201 108 139 33 209 96 200 95 175 113 90 100 161 245 96 236 77 177 94 27 202 73 29 24 144 53 105 98 119 246 73 194 77 161 49 76 129 106 24 21 143 253 80 129 124 120 252 135 88 134 135 207 157 192 2 219 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629412
{"level":"info","time":"2026-04-08T06:23:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:36Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629416 [] [10 244 75 241 90 17 201 160 103 126 22 191 185 54 29 248 245 177 134 52 150 112 55 146 241 110 87 175 229 109 156 206 245 219 178 64 104 78 24 182 65 254 218 15 86 33 182 25 92 108 83 228 153 76 244 150 108 234 21 39 87 196 146 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629416
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629421 [] [245 147 119 148 73 234 140 156 142 197 2 195 72 36 158 115 145 114 155 37 71 197 182 145 61 187 183 194 224 168 39 133 73 122 207 153 51 18 253 144 63 88 138 225 211 61 26 10 239 110 143 97 150 52 228 80 245 159 248 225 56 187 75 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629421
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629426 [] [141 72 153 198 16 160 45 156 136 192 30 121 181 17 191 121 164 1 21 178 18 144 234 140 250 86 200 10 239 124 55 214 17 37 197 65 192 47 211 6 77 242 33 240 223 228 13 22 34 198 107 128 182 220 178 184 54 133 169 166 101 156 102 5]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629426
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629431 [] [12 54 17 250 235 119 198 198 176 220 73 25 17 129 109 77 21 5 79 143 223 236 101 253 67 236 125 152 154 33 35 141 10 94 10 208 18 2 133 244 145 2 255 82 255 205 44 161 50 254 199 50 91 112 80 182 206 2 185 114 253 92 92 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629431
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629436 [] [23 83 186 19 183 6 46 206 59 164 212 169 55 102 13 73 224 1 161 53 196 143 252 186 57 92 58 65 207 137 186 81 172 118 242 143 197 229 171 7 29 127 80 235 182 193 92 104 137 158 182 102 18 242 121 191 29 230 30 197 136 144 135 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629436
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629441 [] [254 13 94 37 111 178 40 255 29 162 65 35 169 202 46 185 196 213 241 189 215 208 215 53 142 226 11 120 36 151 74 60 48 255 214 188 71 71 113 85 96 110 211 213 22 24 115 27 172 118 188 154 89 147 143 149 202 193 240 123 139 19 87 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629441
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629446 [] [246 176 91 73 160 106 186 232 90 254 21 33 188 36 82 137 87 157 180 212 249 215 0 194 143 19 45 211 65 107 64 50 225 228 242 113 78 152 136 187 19 134 178 53 244 118 233 41 188 102 161 95 32 120 87 154 4 111 222 114 195 6 255 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629446
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629451 [] [86 184 139 175 162 65 132 180 192 217 216 94 173 156 155 61 160 135 254 150 36 163 185 29 90 201 55 156 154 112 215 85 170 73 89 44 48 50 216 26 48 176 49 251 55 59 41 110 255 205 249 121 252 13 178 191 238 221 153 87 167 160 109 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629451
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629456 [] [155 167 250 33 26 113 255 90 195 252 218 51 87 156 236 27 33 235 173 205 214 91 71 182 173 208 4 34 56 187 252 51 242 25 188 172 144 173 139 254 206 169 130 128 76 124 221 225 175 107 51 5 195 217 190 89 183 197 214 233 225 144 22 9]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629456
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629461 [] [223 59 241 174 47 85 114 207 253 22 199 23 113 236 175 59 178 219 243 5 86 61 120 204 45 48 23 18 133 213 233 207 215 112 60 211 172 117 141 207 212 195 38 176 99 210 36 167 4 173 3 172 118 230 68 15 115 1 34 117 29 43 17 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629461
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629466 [] [78 233 88 195 228 51 61 74 210 130 161 187 123 96 35 249 102 247 231 145 230 218 95 156 156 57 196 239 51 100 156 207 145 191 177 198 148 187 45 255 188 135 168 246 45 232 145 167 16 227 212 162 240 83 53 227 19 50 4 155 89 251 173 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629466
{"level":"info","time":"2026-04-08T06:24:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629472 [] [248 73 18 53 38 161 4 235 67 154 166 207 190 24 82 159 151 63 131 28 138 151 149 208 177 1 148 50 52 20 130 170 147 166 102 151 65 33 160 85 94 206 117 136 17 45 102 242 151 27 234 24 220 119 242 246 63 118 124 180 251 201 150 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629472
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629477 [] [18 59 128 116 59 142 226 74 128 34 172 184 42 157 167 147 209 86 3 76 58 71 29 198 127 217 243 106 0 8 233 13 28 54 16 255 138 41 114 125 164 222 219 216 190 57 22 197 217 128 65 197 209 136 44 83 82 207 68 42 252 246 13 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629477
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629482 [] [215 29 21 255 33 54 189 86 117 184 2 113 112 242 70 247 207 146 67 145 195 86 28 111 185 44 127 169 88 232 89 178 126 185 103 154 58 53 235 247 245 64 28 23 33 6 145 58 32 230 73 234 168 227 137 107 191 80 91 197 194 61 166 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629482
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629487 [] [5 82 17 18 126 200 221 134 181 2 49 50 91 164 41 106 249 127 229 134 172 39 47 89 85 69 10 130 162 118 200 8 236 130 234 199 170 129 48 35 152 51 170 110 224 157 113 148 82 30 50 142 147 145 248 90 246 214 192 216 202 228 209 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629487
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629492 [] [131 245 52 32 139 115 185 77 202 252 164 122 78 64 57 34 31 59 111 170 69 186 52 182 219 102 57 5 93 169 20 201 204 107 19 124 212 239 19 134 62 200 86 206 54 214 124 41 155 206 243 91 91 215 19 73 230 132 139 220 91 43 196 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629492
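
The trace above repeats one cycle roughly every five seconds: handleEvent fires, sendPlanner builds an event for /opencloud/resource/planner/1.0 carrying the sender peer ID, a data type of -1, a Unix timestamp and a 64-byte signature, then PublishCommon and SEND EVENT forward it towards 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN, while witness reports and the current indexer map are printed periodically. As a rough illustration only, the following minimal Go sketch reproduces the shape of such a signed periodic announcement loop; the names (plannerEvent, the publish comment) and the use of crypto/ed25519 with a plain ticker are assumptions, not code from this repository.

package main

import (
	"crypto/ed25519"
	"encoding/json"
	"fmt"
	"time"
)

// plannerEvent mirrors the fields visible in the sendPlanner dump above:
// protocol, sender peer ID, data type (-1), Unix timestamp and a 64-byte signature.
type plannerEvent struct {
	Protocol  string `json:"protocol"`
	From      string `json:"from"`
	DataType  int64  `json:"datatype"`
	Timestamp int64  `json:"ts"`
	Signature []byte `json:"sig"`
}

func main() {
	_, priv, _ := ed25519.GenerateKey(nil) // stand-in identity key (assumption)
	self := "12D3KooW...local-peer-id"     // placeholder, not a real peer ID

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		evt := plannerEvent{
			Protocol:  "/opencloud/resource/planner/1.0",
			From:      self,
			DataType:  -1,
			Timestamp: time.Now().UTC().Unix(),
		}
		body, _ := json.Marshal(evt)
		evt.Signature = ed25519.Sign(priv, body) // 64 bytes, matching the dump
		fmt.Println("sendPlanner", evt.Protocol, evt.Timestamp)
		// a publish step (pubsub or a direct libp2p stream) would send evt here
	}
}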

65
main.go

@@ -1,44 +1,55 @@
package main
import (
"oc-discovery/models"
_ "oc-discovery/routers"
"context"
"log"
"oc-discovery/conf"
"oc-discovery/daemons/node"
"os"
"os/signal"
"strings"
"syscall"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/logs"
"cloud.o-forge.io/core/oc-lib/tools"
beego "github.com/beego/beego/v2/server/web"
)
const appname = "oc-discovery"
func main() {
// Init the oc-lib
oclib.Init(appname, "", "")
oclib.InitDaemon(appname)
// get the right config file
o := tools.GetConfLoader()
o := oclib.GetConfLoader(appname)
models.GetConfig().Port = o.GetIntDefault("port", 8080)
models.GetConfig().LokiUrl = o.GetStringDefault("lokiurl", "")
models.GetConfig().RedisUrl = o.GetStringDefault("redisurl", "localhost:6379")
models.GetConfig().RedisPassword = o.GetStringDefault("redispassword", "")
models.GetConfig().ZincUrl = o.GetStringDefault("zincurl", "http://localhost:4080")
models.GetConfig().ZincLogin = o.GetStringDefault("zinclogin", "admin")
models.GetConfig().ZincPassword = o.GetStringDefault("zincpassword", "admin")
models.GetConfig().IdentityFile = o.GetStringDefault("identityfile", "./identity.json")
models.GetConfig().Defaultpeers = o.GetStringDefault("defaultpeers", "./peers.json")
conf.GetConfig().Name = o.GetStringDefault("NAME", "opencloud-demo")
conf.GetConfig().Hostname = o.GetStringDefault("HOSTNAME", "127.0.0.1")
conf.GetConfig().PSKPath = o.GetStringDefault("PSK_PATH", "./psk/psk.key")
conf.GetConfig().NodeEndpointPort = o.GetInt64Default("NODE_ENDPOINT_PORT", 4001)
conf.GetConfig().IndexerAddresses = o.GetStringDefault("INDEXER_ADDRESSES", "")
// set oc-lib logger
if models.GetConfig().LokiUrl != "" {
logs.CreateLogger(appname, models.GetConfig().LokiUrl)
conf.GetConfig().PeerIDS = o.GetStringDefault("PEER_IDS", "")
conf.GetConfig().NodeMode = o.GetStringDefault("NODE_MODE", "node")
conf.GetConfig().MinIndexer = o.GetIntDefault("MIN_INDEXER", 1)
conf.GetConfig().MaxIndexer = o.GetIntDefault("MAX_INDEXER", 5)
conf.GetConfig().LocationGranularity = o.GetIntDefault("LOCATION_GRANULARITY", 2)
ctx, stop := signal.NotifyContext(
context.Background(),
os.Interrupt,
syscall.SIGTERM,
)
defer stop()
isNode := strings.Contains(conf.GetConfig().NodeMode, "node")
isIndexer := strings.Contains(conf.GetConfig().NodeMode, "indexer")
if n, err := node.InitNode(isNode, isIndexer); err != nil {
panic(err)
} else {
<-ctx.Done() // the only blocking point
log.Println("shutting down")
n.Close()
}
// Normal beego init
beego.BConfig.AppName = appname
beego.BConfig.Listen.HTTPPort = models.GetConfig().Port
beego.BConfig.WebConfig.DirectoryIndex = true
beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
beego.Run()
}
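
The new main.go above wires the conf.GetConfig() keys (NAME, HOSTNAME, PSK_PATH, NODE_ENDPOINT_PORT, INDEXER_ADDRESSES, PEER_IDS, NODE_MODE, MIN_INDEXER, MAX_INDEXER, LOCATION_GRANULARITY), starts the daemon through node.InitNode and then blocks on a signal-aware context until shutdown. Because NODE_MODE is tested with strings.Contains, a single value can enable one or both roles. A small self-contained sketch of that role detection (parseMode is an illustrative helper, not part of the repository):

package main

import (
	"fmt"
	"strings"
)

// parseMode reproduces the role detection used in main.go: any value
// containing "node" enables the node role, any value containing
// "indexer" enables the indexer role.
func parseMode(mode string) (isNode, isIndexer bool) {
	return strings.Contains(mode, "node"), strings.Contains(mode, "indexer")
}

func main() {
	for _, m := range []string{"node", "indexer", "node-indexer"} {
		n, i := parseMode(m)
		fmt.Printf("NODE_MODE=%-12s -> node=%t indexer=%t\n", m, n, i)
	}
}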


@@ -1,25 +0,0 @@
package models
import "sync"
type Config struct {
Port int
LokiUrl string
ZincUrl string
ZincLogin string
ZincPassword string
RedisUrl string
RedisPassword string
IdentityFile string
Defaultpeers string
}
var instance *Config
var once sync.Once
func GetConfig() *Config {
once.Do(func() {
instance = &Config{}
})
return instance
}

56
models/event.go Normal file

@@ -0,0 +1,56 @@
package models
import (
"encoding/json"
"time"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/libp2p/go-libp2p/core/crypto"
)
type Event struct {
Type string `json:"type"`
From string `json:"from"` // peerID
User string
DataType int64 `json:"datatype"`
Timestamp int64 `json:"ts"`
Payload []byte `json:"payload"`
Signature []byte `json:"sig"`
}
func NewEvent(name string, from string, dt *tools.DataType, user string, payload []byte, priv crypto.PrivKey) *Event {
evt := &Event{
Type: name,
From: from,
User: user,
Timestamp: time.Now().UTC().Unix(),
Payload: payload,
}
if dt != nil {
evt.DataType = int64(dt.EnumIndex())
} else {
evt.DataType = -1
}
body, _ := json.Marshal(evt)
sig, _ := priv.Sign(body)
evt.Signature = sig
return evt
}
func (e *Event) RawEvent() *Event {
return &Event{
Type: e.Type,
From: e.From,
User: e.User,
DataType: e.DataType,
Timestamp: e.Timestamp,
Payload: e.Payload,
}
}
func (e *Event) ToRawByte() ([]byte, error) {
return json.Marshal(e.RawEvent())
}
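
models/event.go signs the JSON of the event before the signature field is filled, and ToRawByte reproduces exactly that unsigned form, so a receiver can validate an event by re-serialising the raw event and checking the signature against the sender's public key. The following hedged sketch shows that round trip with the same go-libp2p crypto package the file imports; the local event type and the field subset are illustrative, not repository code.

package main

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/crypto"
)

// event copies the JSON shape of models.Event; only the fields needed for
// the signature round trip are kept here.
type event struct {
	Type      string `json:"type"`
	From      string `json:"from"`
	DataType  int64  `json:"datatype"`
	Timestamp int64  `json:"ts"`
	Payload   []byte `json:"payload"`
	Signature []byte `json:"sig"`
}

func main() {
	priv, pub, _ := crypto.GenerateEd25519Key(rand.Reader)

	// Sender side, mirroring NewEvent: marshal the unsigned event,
	// sign the bytes, then attach the signature.
	evt := event{Type: "planner", From: "12D3KooW...sender", DataType: -1,
		Timestamp: time.Now().UTC().Unix(), Payload: []byte("payload")}
	raw, _ := json.Marshal(evt)
	evt.Signature, _ = priv.Sign(raw)

	// Receiver side, mirroring RawEvent/ToRawByte: rebuild the unsigned
	// form and verify it with the sender's public key.
	unsigned := evt
	unsigned.Signature = nil
	check, _ := json.Marshal(unsigned)
	ok, _ := pub.Verify(check, evt.Signature)
	fmt.Println("signature valid:", ok)
}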


@@ -1,44 +0,0 @@
package models
import (
"encoding/json"
"os"
"github.com/beego/beego/logs"
)
var (
Me Identity
)
func init() {
content, err := os.ReadFile("./identity.json")
if err != nil {
logs.Error("Error when opening file: ", err)
}
err = json.Unmarshal(content, &Me)
if err != nil {
logs.Error("Error during Unmarshal(): ", err)
}
}
type Identity struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
PrivateKey string `json:"private_key,omitempty"`
PublicAttributes Peer `json:"public_attributes,omitempty"`
}
func GetIdentity() (u *Identity) {
return &Me
}
func UpdateIdentity(uu *Identity) error {
Me = *uu
jsonBytes, err := json.Marshal(uu)
if err != nil {
return err
}
os.WriteFile("./identity.json", jsonBytes, 0600)
return nil
}


@@ -1,88 +0,0 @@
package models
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/beego/beego/logs"
)
var (
Peers []Peer
Store Storage
)
type Peer struct {
PeerId string `json:"peer_id,omitempty"`
Name string `json:"name,omitempty"`
EntityName string `json:"entity_name,omitempty"`
EntityType string `json:"entity_type,omitempty"`
Description string `json:"description,omitempty"`
Website string `json:"website,omitempty"`
Address string `json:"address,omitempty"`
Postcode string `json:"postcode,omitempty"`
City string `json:"city,omitempty"`
Country string `json:"country,omitempty"`
Phone string `json:"phone,omitempty"`
Email string `json:"email,omitempty"`
Activity string `json:"activity,omitempty"`
Keywords []string `json:"keywords,omitempty"`
ApiUrl string `json:"api_url,omitempty"`
PublicKey string `json:"public_key,omitempty"`
// internal use
Score int64 `json:"score,omitempty"`
LastSeenOnline time.Time `json:"last_seen_online,omitempty"`
ApiVersion string `json:"api_version,omitempty"`
}
func init() {
c := GetConfig()
Store = Storage{c.ZincUrl, c.ZincLogin, c.ZincPassword, c.RedisUrl, c.RedisPassword}
Store = Storage{"http://localhost:4080", "admin", "admin", "localhost:6379", ""}
//p := Peer{uuid.New().String(), 0, []string{"car", "highway", "images", "video"}, time.Now(), "1", "asf", ""}
// pa := []Peer{p}
// byteArray, err := json.Marshal(pa)
// if err != nil {
// log.Fatal(err)
// }
// ioutil.WriteFile("./peers.json", byteArray, 0644)
content, err := ioutil.ReadFile("./peers.json")
if err != nil {
logs.Error("Error when opening file: ", err)
}
err = json.Unmarshal(content, &Peers)
if err != nil {
logs.Error("Error during Unmarshal(): ", err)
}
Store.ImportData(LoadPeersJson("./peers.json"))
}
func AddPeers(peers []Peer) (status string) {
err := Store.ImportData(peers)
if err != nil {
logs.Error("Error during Unmarshal(): ", err)
return "error"
}
return "ok"
}
func FindPeers(query string) (peers []Peer, err error) {
result, err := Store.FindPeers(query)
if err != nil {
return nil, err
}
return result, nil
}
func GetPeer(uid string) (*Peer, error) {
return Store.GetPeer(uid)
}
func Delete(PeerId string) error {
err := Store.DeletePeer(PeerId)
if err != nil {
return err
}
return nil
}


@@ -1,206 +0,0 @@
package models
import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"github.com/beego/beego/logs"
"github.com/go-redis/redis"
"github.com/tidwall/gjson"
)
type Storage struct {
ZincUrl string
ZincLogin string
ZincPassword string
RedisUrl string
RedisPassword string
}
func LoadPeersJson(filename string) []Peer {
var peers []Peer
content, err := os.ReadFile("./peers.json")
if err != nil {
logs.Error("Error when opening file: ", err)
}
err = json.Unmarshal(content, &peers)
if err != nil {
logs.Error("Error during Unmarshal(): ", err)
}
return peers
}
func (s *Storage) ImportData(peers []Peer) error {
rdb := redis.NewClient(&redis.Options{
Addr: s.RedisUrl,
Password: s.RedisPassword, // no password set
DB: 0, // use default DB
})
var indexedPeers []map[string]interface{}
for _, p := range peers {
// Creating data block for indexing
indexedPeer := make(map[string]interface{})
indexedPeer["_id"] = p.PeerId
indexedPeer["name"] = p.Name
indexedPeer["keywords"] = p.Keywords
indexedPeer["name"] = p.Name
indexedPeer["entityname"] = p.EntityName
indexedPeer["entitytype"] = p.EntityType
indexedPeer["activity"] = p.Activity
indexedPeer["address"] = p.Address
indexedPeer["postcode"] = p.Postcode
indexedPeer["city"] = p.City
indexedPeer["country"] = p.Country
indexedPeer["description"] = p.Description
indexedPeer["apiurl"] = p.ApiUrl
indexedPeer["website"] = p.Website
indexedPeers = append(indexedPeers, indexedPeer)
// Adding peer to Redis (fast retieval and status updates)
jsonp, err := json.Marshal(p)
if err != nil {
return err
}
err = rdb.Set("peer:"+p.PeerId, jsonp, 0).Err()
if err != nil {
return err
}
}
bulk := map[string]interface{}{"index": "peers", "records": indexedPeers}
raw, err := json.Marshal(bulk)
if err != nil {
return err
}
req, err := http.NewRequest("POST", s.ZincUrl+"/api/_bulkv2", strings.NewReader(string(raw)))
if err != nil {
return err
}
req.SetBasicAuth(s.ZincLogin, s.ZincPassword)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
log.Println(resp.StatusCode)
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
fmt.Println(string(body))
return nil
}
func (s *Storage) FindPeers(queryString string) ([]Peer, error) {
var peers []Peer
query := `{
"search_type": "match",
"query":
{
"term": "` + queryString + `",
"start_time": "2020-06-02T14:28:31.894Z",
"end_time": "2029-12-02T15:28:31.894Z"
},
"from": 0,
"max_results": 100,
"_source": []
}`
req, err := http.NewRequest("POST", s.ZincUrl+"/api/peers/_search", strings.NewReader(query))
if err != nil {
log.Fatal(err)
}
req.SetBasicAuth(s.ZincLogin, s.ZincPassword)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36")
resp, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
log.Println(resp.StatusCode)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
value := gjson.Get(string(body), "hits.hits")
rdb := redis.NewClient(&redis.Options{
Addr: s.RedisUrl,
Password: s.RedisPassword, // no password set
DB: 0, // use default DB
})
for _, v := range value.Array() {
peerBytes, err := rdb.Get("peer:" + v.Get("_id").Str).Bytes()
if err != nil {
logs.Error(err)
} else {
var p Peer
err = json.Unmarshal(peerBytes, &p)
if err != nil {
return nil, err
}
peers = append(peers, p)
}
}
return peers, nil
}
func (s *Storage) GetPeer(uid string) (*Peer, error) {
var peer Peer
rdb := redis.NewClient(&redis.Options{
Addr: s.RedisUrl,
Password: s.RedisPassword, // no password set
DB: 0, // use default DB
})
peerBytes, err := rdb.Get("peer:" + uid).Bytes()
if err != nil {
return nil, err
} else {
err = json.Unmarshal(peerBytes, &peer)
if err != nil {
return nil, err
}
return &peer, nil
}
}
func (s *Storage) DeletePeer(uid string) error {
// Removing from Redis
rdb := redis.NewClient(&redis.Options{
Addr: s.RedisUrl,
Password: s.RedisPassword, // no password set
DB: 0, // use default DB
})
err := rdb.Unlink("peer:" + uid).Err()
if err != nil {
return err
}
// Removing from Index
req, err := http.NewRequest("DELETE", s.ZincUrl+"/api/peers/_doc"+uid, nil)
if err != nil {
return err
}
req.SetBasicAuth(s.ZincLogin, s.ZincPassword)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
log.Println(resp.StatusCode)
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
fmt.Println(string(body))
return nil
}

BIN
oc-k8s Executable file

Binary file not shown.


@@ -1,54 +0,0 @@
[
{
"peer_id": "a50d3697-7ede-4fe5-a385-e9d01ebc1002",
"name": "ASF",
"keywords": [
"car",
"highway",
"images",
"video"
],
"last_seen_online": "2023-03-07T11:57:13.378707853+01:00",
"api_version": "1",
"api_url": "http://127.0.0.1:49618/v1"
},
{
"peer_id": "a50d3697-7ede-4fe5-a385-e9d01ebc1003",
"name": "IT",
"keywords": [
"car",
"highway",
"images",
"video"
],
"last_seen_online": "2023-03-07T11:57:13.378707853+01:00",
"api_version": "1",
"api_url": "https://it.irtse.com/oc"
},
{
"peer_id": "a50d3697-7ede-4fe5-a385-e9d01ebc1004",
"name": "Centre de traitement des amendes",
"keywords": [
"car",
"highway",
"images",
"video"
],
"last_seen_online": "2023-03-07T11:57:13.378707853+01:00",
"api_version": "1",
"api_url": "https://impots.irtse.com/oc"
},
{
"peer_id": "a50d3697-7ede-4fe5-a385-e9d01ebc1005",
"name": "Douanes",
"keywords": [
"car",
"highway",
"images",
"video"
],
"last_seen_online": "2023-03-07T11:57:13.378707853+01:00",
"api_version": "1",
"api_url": "https://douanes.irtse.com/oc"
}
]

3
pem/private1.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIK2oBaOtGNchE09MBRtPd5oEOUcVUQG2ndym5wKExj7R
-----END PRIVATE KEY-----

3
pem/private2.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIE58GDazCyF1jp796ivSmHiCepbkC8TpzliIaQ7eGEpu
-----END PRIVATE KEY-----

3
pem/private3.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIAeX4O7ldwehRSnPkbzuE6csyo63vjvqAcNNujENOKUC
-----END PRIVATE KEY-----

3
pem/private4.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIEkgqINXDLnxIJZs2LEK9O4vdsqk43dwbULGUE25AWuR
-----END PRIVATE KEY-----

3
pem/public1.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAZ2nLJBL8a5opfa8nFeVj0SZToW8pl4+zgcSUkeZFRO4=
-----END PUBLIC KEY-----

3
pem/public2.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAIQVeSGwsjPjyepPTnzzYqVxIxviSEjZXU7C7zuNTui4=
-----END PUBLIC KEY-----

3
pem/public3.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAG95Ettl3jTi41HM8le1A9WDmOEq0ANEqpLF7zTZrfXA=
-----END PUBLIC KEY-----

3
pem/public4.pem Normal file

@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA/ymOIb0sJ0qCWrf3mKz7ACCvsMXLog/EK533JfNXZTM=
-----END PUBLIC KEY-----

3
psk/psk.key Normal file

@@ -0,0 +1,3 @@
/key/swarm/psk/1.0.0/
/base16/
9018b627b4d0f123d1cfcd16f33b538688ed7588cde9da8c60d587b7022399c1
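
psk/psk.key is a standard libp2p swarm key: a version line, the encoding marker /base16/ and a 32-byte key in hex; PSK_PATH in main.go defaults to ./psk/psk.key. A rough sketch of how such a file is usually loaded and applied as a private-network key, assuming the go-libp2p pnet helpers (DecodeV1PSK, PrivateNetwork); this is not taken from the oc-discovery sources:

package main

import (
	"fmt"
	"os"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/pnet"
)

func main() {
	f, err := os.Open("./psk/psk.key")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// DecodeV1PSK parses the /key/swarm/psk/1.0.0/ header plus the
	// base16-encoded key shown above into a 32-byte pre-shared key.
	psk, err := pnet.DecodeV1PSK(f)
	if err != nil {
		panic(err)
	}

	// Hosts built with the same PSK form a private network; peers
	// without the key cannot complete the connection handshake.
	h, err := libp2p.New(libp2p.PrivateNetwork(psk))
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("host", h.ID(), "is running inside the pre-shared-key network")
}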


@@ -1,73 +0,0 @@
package routers
import (
beego "github.com/beego/beego/v2/server/web"
"github.com/beego/beego/v2/server/web/context/param"
)
func init() {
beego.GlobalControllerRouter["oc-discovery/controllers:IdentityController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:IdentityController"],
beego.ControllerComments{
Method: "Post",
Router: `/`,
AllowHTTPMethods: []string{"post"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:IdentityController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:IdentityController"],
beego.ControllerComments{
Method: "GetAll",
Router: `/`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"],
beego.ControllerComments{
Method: "Post",
Router: `/`,
AllowHTTPMethods: []string{"post"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"],
beego.ControllerComments{
Method: "Get",
Router: `/:peerId`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"],
beego.ControllerComments{
Method: "Delete",
Router: `/:peerId`,
AllowHTTPMethods: []string{"delete"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:PeerController"],
beego.ControllerComments{
Method: "Find",
Router: `/find/:query`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
beego.GlobalControllerRouter["oc-discovery/controllers:VersionController"] = append(beego.GlobalControllerRouter["oc-discovery/controllers:VersionController"],
beego.ControllerComments{
Method: "GetAll",
Router: `/`,
AllowHTTPMethods: []string{"get"},
MethodParams: param.Make(),
Filters: nil,
Params: nil})
}


@@ -1,36 +0,0 @@
// @APIVersion 1.0.0
// @Title beego Test API
// @Description beego has a very cool tools to autogenerate documents for your API
// @Contact astaxie@gmail.com
// @TermsOfServiceUrl http://beego.me/
// @License AGPL
// @LicenseUrl https://www.gnu.org/licenses/agpl-3.0.html
package routers
import (
"oc-discovery/controllers"
beego "github.com/beego/beego/v2/server/web"
)
func init() {
ns := beego.NewNamespace("/oc",
beego.NSNamespace("/peer",
beego.NSInclude(
&controllers.PeerController{},
),
),
beego.NSNamespace("/identity",
beego.NSInclude(
&controllers.IdentityController{},
),
),
beego.NSNamespace("/version",
beego.NSInclude(
&controllers.VersionController{},
),
),
)
beego.AddNamespace(ns)
}


@@ -1,39 +0,0 @@
package test
import (
"net/http"
"net/http/httptest"
"testing"
"runtime"
"path/filepath"
_ "oc-discovery/routers"
beego "github.com/beego/beego/v2/server/web"
"github.com/beego/beego/v2/core/logs"
. "github.com/smartystreets/goconvey/convey"
)
func init() {
_, file, _, _ := runtime.Caller(0)
apppath, _ := filepath.Abs(filepath.Dir(filepath.Join(file, ".." + string(filepath.Separator))))
beego.TestBeegoInit(apppath)
}
// TestGet is a sample to run an endpoint test
func TestGet(t *testing.T) {
r, _ := http.NewRequest("GET", "/v1/object", nil)
w := httptest.NewRecorder()
beego.BeeApp.Handlers.ServeHTTP(w, r)
logs.Info("testing", "TestGet", "Code[%d]\n%s", w.Code, w.Body.String())
Convey("Subject: Test Station Endpoint\n", t, func() {
Convey("Status Code Should Be 200", func() {
So(w.Code, ShouldEqual, 200)
})
Convey("The Result Should Not Be Empty", func() {
So(w.Body.Len(), ShouldBeGreaterThan, 0)
})
})
}