Files
oc-discovery/daemons/node/node.go

338 lines
10 KiB
Go
Raw Normal View History

2026-01-30 16:57:36 +01:00
package node
import (
"context"
"encoding/json"
"errors"
"fmt"
2026-03-03 16:38:24 +01:00
"maps"
2026-01-30 16:57:36 +01:00
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/indexer"
"oc-discovery/daemons/node/pubsub"
"oc-discovery/daemons/node/stream"
"sync"
2026-01-30 16:57:36 +01:00
"time"
oclib "cloud.o-forge.io/core/oc-lib"
2026-03-03 16:38:24 +01:00
"cloud.o-forge.io/core/oc-lib/dbs"
2026-01-30 16:57:36 +01:00
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
2026-02-03 15:25:15 +01:00
"github.com/google/uuid"
2026-01-30 16:57:36 +01:00
"github.com/libp2p/go-libp2p"
pubsubs "github.com/libp2p/go-libp2p-pubsub"
2026-02-03 15:25:15 +01:00
"github.com/libp2p/go-libp2p/core/crypto"
2026-01-30 16:57:36 +01:00
pp "github.com/libp2p/go-libp2p/core/peer"
2026-02-24 14:31:37 +01:00
"github.com/libp2p/go-libp2p/core/protocol"
2026-01-30 16:57:36 +01:00
)
// Node is the runtime handle for an OpenCloud discovery process. It embeds a
// long-lived stream service and optionally carries the pubsub / indexer /
// stream sub-services, depending on the roles enabled in InitNode.
type Node struct {
	*common.LongLivedStreamRecordedService[interface{}] // change type of stream
	PS             *pubsubs.PubSub         // gossipsub instance; set only in the isNode branch of InitNode
	IndexerService *indexer.IndexerService // set only when running as an indexer
	PubSubService  *pubsub.PubSubService   // set only in the isNode branch of InitNode
	StreamService  *stream.StreamService   // set only in the isNode branch of InitNode
	PeerID         pp.ID                   // this host's libp2p peer ID
	isIndexer      bool                    // whether this process also runs the indexer role
	// peerRecord is the last signed record produced by claimInfo; nil until
	// claimInfo has run. It is read concurrently by the heartbeat's
	// buildRecord closure — Mu is the intended guard.
	peerRecord *indexer.PeerRecord
	Mu         sync.RWMutex
}
func InitNode(isNode bool, isIndexer bool, isNativeIndexer bool) (*Node, error) {
2026-01-30 16:57:36 +01:00
if !isNode && !isIndexer {
return nil, errors.New("wait... what ? your node need to at least something. Retry we can't be friend in that case")
}
logger := oclib.GetLogger()
logger.Info().Msg("retrieving private key...")
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, err
}
logger.Info().Msg("retrieving psk file...")
psk, err := common.LoadPSKFromFile() // network common private Network. Public OC PSK is Public Network
if err != nil {
return nil, nil
}
logger.Info().Msg("open a host...")
h, err := libp2p.New(
libp2p.PrivateNetwork(psk),
libp2p.Identity(priv),
libp2p.ListenAddrStrings(
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", conf.GetConfig().NodeEndpointPort),
),
)
2026-02-03 15:25:15 +01:00
logger.Info().Msg("Host open on " + h.ID().String())
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, errors.New("no host no node")
}
node := &Node{
PeerID: h.ID(),
2026-02-02 09:05:58 +01:00
isIndexer: isIndexer,
2026-02-20 12:42:18 +01:00
LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000),
2026-01-30 16:57:36 +01:00
}
2026-03-03 16:38:24 +01:00
// Register the bandwidth probe handler so any peer measuring this node's
// throughput can open a dedicated probe stream and read the echo.
h.SetStreamHandler(common.ProtocolBandwidthProbe, common.HandleBandwidthProbe)
2026-01-30 16:57:36 +01:00
var ps *pubsubs.PubSub
if isNode {
logger.Info().Msg("generate opencloud node...")
ps, err = pubsubs.NewGossipSub(context.Background(), node.Host)
if err != nil {
panic(err) // can't run your node without a propalgation pubsub, of state of node.
}
node.PS = ps
2026-03-03 16:38:24 +01:00
// buildRecord returns a fresh signed PeerRecord as JSON, embedded in each
// heartbeat so the receiving indexer can republish it to the DHT directly.
// peerRecord is nil until claimInfo runs, so the first ~20s heartbeats carry
// no record — that's fine, claimInfo publishes once synchronously at startup.
buildRecord := func() json.RawMessage {
if node.peerRecord == nil {
return nil
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return nil
}
fresh := *node.peerRecord
fresh.PeerRecordPayload.ExpiryDate = time.Now().UTC().Add(2 * time.Minute)
payload, _ := json.Marshal(fresh.PeerRecordPayload)
fresh.Signature, err = priv.Sign(payload)
if err != nil {
return nil
}
b, _ := json.Marshal(fresh)
return json.RawMessage(b)
}
2026-02-02 09:05:58 +01:00
logger.Info().Msg("connect to indexers...")
2026-03-03 16:38:24 +01:00
common.ConnectToIndexers(node.Host, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, node.PeerID, buildRecord)
2026-02-02 09:05:58 +01:00
logger.Info().Msg("claims my node...")
2026-02-03 15:25:15 +01:00
if _, err := node.claimInfo(conf.GetConfig().Name, conf.GetConfig().Hostname); err != nil {
panic(err)
}
2026-02-02 09:05:58 +01:00
logger.Info().Msg("subscribe to decentralized search flow...")
logger.Info().Msg("run garbage collector...")
2026-01-30 16:57:36 +01:00
node.StartGC(30 * time.Second)
if node.StreamService, err = stream.InitStream(context.Background(), node.Host, node.PeerID, 1000, node); err != nil {
panic(err)
}
if node.PubSubService, err = pubsub.InitPubSub(context.Background(), node.Host, node.PS, node, node.StreamService); err != nil {
panic(err)
}
2026-02-04 11:35:19 +01:00
f := func(ctx context.Context, evt common.Event, topic string) {
if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
node.StreamService.SendResponse(p[0], &evt)
}
}
node.SubscribeToSearch(node.PS, &f)
2026-03-03 16:38:24 +01:00
logger.Info().Msg("connect to NATS")
go ListenNATS(node)
logger.Info().Msg("Node is actually running.")
2026-01-30 16:57:36 +01:00
}
if isIndexer {
logger.Info().Msg("generate opencloud indexer...")
2026-03-03 16:38:24 +01:00
node.IndexerService = indexer.NewIndexerService(node.Host, ps, 500, isNativeIndexer)
2026-01-30 16:57:36 +01:00
}
return node, nil
}
func (d *Node) Close() {
2026-02-04 11:35:19 +01:00
if d.isIndexer && d.IndexerService != nil {
2026-02-02 09:05:58 +01:00
d.IndexerService.Close()
}
2026-01-30 16:57:36 +01:00
d.PubSubService.Close()
d.StreamService.Close()
d.Host.Close()
}
func (d *Node) publishPeerRecord(
rec *indexer.PeerRecord,
) error {
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
2026-01-30 16:57:36 +01:00
if err != nil {
return err
}
2026-03-03 16:38:24 +01:00
common.StreamMuIndexes.RLock()
indexerSnapshot := make([]*pp.AddrInfo, 0, len(common.StaticIndexers))
2026-01-30 16:57:36 +01:00
for _, ad := range common.StaticIndexers {
2026-03-03 16:38:24 +01:00
indexerSnapshot = append(indexerSnapshot, ad)
}
common.StreamMuIndexes.RUnlock()
for _, ad := range indexerSnapshot {
var err error
2026-02-24 14:31:37 +01:00
if common.StreamIndexers, err = common.TempStream(d.Host, *ad, common.ProtocolPublish, "", common.StreamIndexers, map[protocol.ID]*common.ProtocolInfo{},
&common.StreamMuIndexes); err != nil {
continue
2026-01-30 16:57:36 +01:00
}
stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
2026-03-03 16:38:24 +01:00
base := indexer.PeerRecordPayload{
2026-01-30 16:57:36 +01:00
Name: rec.Name,
DID: rec.DID,
PubKey: rec.PubKey,
ExpiryDate: time.Now().UTC().Add(2 * time.Minute),
}
payload, _ := json.Marshal(base)
2026-03-03 16:38:24 +01:00
rec.PeerRecordPayload = base
rec.Signature, err = priv.Sign(payload)
2026-01-30 16:57:36 +01:00
if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
return err
}
}
return nil
}
func (d *Node) GetPeerRecord(
ctx context.Context,
pidOrdid string,
2026-02-04 11:35:19 +01:00
) ([]*peer.Peer, error) {
2026-01-30 16:57:36 +01:00
var err error
2026-02-04 11:35:19 +01:00
var info map[string]indexer.PeerRecord
2026-03-03 16:38:24 +01:00
common.StreamMuIndexes.RLock()
indexerSnapshot2 := make([]*pp.AddrInfo, 0, len(common.StaticIndexers))
2026-01-30 16:57:36 +01:00
for _, ad := range common.StaticIndexers {
2026-03-03 16:38:24 +01:00
indexerSnapshot2 = append(indexerSnapshot2, ad)
}
common.StreamMuIndexes.RUnlock()
// Build the GetValue request: if pidOrdid is neither a UUID DID nor a libp2p
// PeerID, treat it as a human-readable name and let the indexer resolve it.
getReq := indexer.GetValue{Key: pidOrdid}
isNameSearch := false
if pidR, pidErr := pp.Decode(pidOrdid); pidErr == nil {
getReq.PeerID = pidR
} else if _, uuidErr := uuid.Parse(pidOrdid); uuidErr != nil {
// Not a UUID DID → treat pidOrdid as a name substring search.
getReq.Name = pidOrdid
getReq.Key = ""
isNameSearch = true
}
for _, ad := range indexerSnapshot2 {
if common.StreamIndexers, err = common.TempStream(d.Host, *ad, common.ProtocolGet, "",
2026-02-24 14:31:37 +01:00
common.StreamIndexers, map[protocol.ID]*common.ProtocolInfo{}, &common.StreamMuIndexes); err != nil {
continue
2026-01-30 16:57:36 +01:00
}
2026-03-03 16:38:24 +01:00
stream := common.StreamIndexers[common.ProtocolGet][ad.ID]
if err := json.NewEncoder(stream.Stream).Encode(getReq); err != nil {
continue
}
2026-03-03 16:38:24 +01:00
var resp indexer.GetResponse
if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
continue
2026-01-30 16:57:36 +01:00
}
2026-03-03 16:38:24 +01:00
if resp.Found {
if info == nil {
2026-02-04 11:35:19 +01:00
info = resp.Records
2026-03-03 16:38:24 +01:00
} else {
// Aggregate results from all indexers for name searches.
maps.Copy(info, resp.Records)
}
// For exact lookups (PeerID / DID) stop at the first hit.
if !isNameSearch {
2026-02-04 11:35:19 +01:00
break
}
2026-01-30 16:57:36 +01:00
}
}
2026-02-04 11:35:19 +01:00
var ps []*peer.Peer
for _, pr := range info {
if pk, err := pr.Verify(); err != nil {
2026-01-30 16:57:36 +01:00
return nil, err
2026-03-03 16:38:24 +01:00
} else if ok, p, err := pr.ExtractPeer(d.PeerID.String(), pr.PeerID, pk); err != nil {
2026-01-30 16:57:36 +01:00
return nil, err
} else {
if ok {
2026-02-04 11:35:19 +01:00
d.publishPeerRecord(&pr)
2026-01-30 16:57:36 +01:00
}
2026-02-04 11:35:19 +01:00
ps = append(ps, p)
2026-01-30 16:57:36 +01:00
}
}
2026-02-04 11:35:19 +01:00
return ps, err
2026-01-30 16:57:36 +01:00
}
func (d *Node) claimInfo(
name string,
endPoint string, // TODO : endpoint is not necesserry StreamAddress
) (*peer.Peer, error) {
if endPoint == "" {
return nil, errors.New("no endpoint found for peer")
}
2026-02-20 15:01:01 +01:00
did := uuid.New().String()
2026-03-03 16:38:24 +01:00
peers := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // search by name if no filters are provided
"peer_id": {{Operator: dbs.EQUAL.String(), Value: d.Host.ID().String()}},
},
}, "", false)
2026-02-20 15:01:01 +01:00
if len(peers.Data) > 0 {
did = peers.Data[0].GetID() // if already existing set up did as made
}
priv, err := tools.LoadKeyFromFilePrivate()
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, err
}
pub, err := tools.LoadKeyFromFilePublic()
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, err
}
2026-02-03 15:25:15 +01:00
pubBytes, err := crypto.MarshalPublicKey(pub)
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, err
}
2026-02-03 15:25:15 +01:00
now := time.Now().UTC()
2026-01-30 16:57:36 +01:00
expiry := now.Add(150 * time.Second)
2026-03-03 16:38:24 +01:00
pRec := indexer.PeerRecordPayload{
Name: name,
DID: did, // REAL PEER ID
PubKey: pubBytes,
ExpiryDate: expiry,
2026-01-30 16:57:36 +01:00
}
d.PeerID = d.Host.ID()
2026-03-03 16:38:24 +01:00
payload, _ := json.Marshal(pRec)
2026-01-30 16:57:36 +01:00
2026-03-03 16:38:24 +01:00
rec := &indexer.PeerRecord{
PeerRecordPayload: pRec,
}
rec.Signature, err = priv.Sign(payload)
2026-01-30 16:57:36 +01:00
if err != nil {
return nil, err
}
2026-03-03 16:38:24 +01:00
rec.PeerID = d.Host.ID().String()
2026-01-30 16:57:36 +01:00
rec.APIUrl = endPoint
2026-02-05 15:47:29 +01:00
rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + rec.PeerID
2026-01-30 16:57:36 +01:00
rec.NATSAddress = oclib.GetConfig().NATSUrl
rec.WalletAddress = "my-wallet"
if err := d.publishPeerRecord(rec); err != nil {
return nil, err
}
2026-03-03 16:38:24 +01:00
d.peerRecord = rec
if _, err := rec.Verify(); err != nil {
2026-01-30 16:57:36 +01:00
return nil, err
2026-03-03 16:38:24 +01:00
} else {
_, p, err := rec.ExtractPeer(did, did, pub)
return p, err
}
2026-01-30 16:57:36 +01:00
}
/*
TODO:
- Booking is a new decentralized flow:
  we check, wait for a response, validate; it goes through discovery, and we relay it.
- The shared workspace is a matter of decentralization:
  we communicate the movements to the shared peers.
- A "shared" replaces the notion of partnership at the partnershipping scale
  -> when we share a workspace we become temporary partners,
     whether we originally were partners or not.
  -> we then have the same privileges.
- Admiralty orchestrations work the same way.
  An event then triggers the creation of a service key.
  We must be able to CRUD a DBObject with signature verification.
*/