Skip to content

Commit

Permalink
Merge branch 'master' into rest-store-v3
Browse files Browse the repository at this point in the history
  • Loading branch information
chaitanyaprem authored May 14, 2024
2 parents d416e08 + e8dc887 commit c731def
Show file tree
Hide file tree
Showing 8 changed files with 241 additions and 66 deletions.
1 change: 0 additions & 1 deletion go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -564,7 +564,6 @@ github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2C
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
Expand Down
2 changes: 0 additions & 2 deletions waku/v2/node/wakuoptions.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p"
mplex "github.com/libp2p/go-libp2p-mplex"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/config"
"github.com/libp2p/go-libp2p/core/crypto"
Expand Down Expand Up @@ -559,7 +558,6 @@ var DefaultLibP2POptions = []libp2p.Option{
libp2p.UserAgent(UserAgent),
libp2p.ChainOptions(
libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport),
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
),
libp2p.EnableNATService(),
libp2p.ConnectionManager(newConnManager(200, 300, connmgr.WithGracePeriod(0))),
Expand Down
172 changes: 172 additions & 0 deletions waku/v2/peermanager/fastest_peer_selector.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
package peermanager

import (
"context"
"errors"
"sort"
"sync"
"time"

"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/protocol/ping"
"github.com/waku-org/go-waku/logging"
"go.uber.org/zap"
)

// FastestPeerSelector picks peers based on measured round-trip time,
// preferring the peer that answers pings fastest.
type FastestPeerSelector struct {
	sync.RWMutex

	// host is the libp2p host used to ping peers and query latency/connectedness.
	host host.Host

	logger *zap.Logger
}

// NewFastestPeerSelector creates a selector whose log entries are scoped
// under the "rtt-cache" namespace. SetHost must be called before use.
func NewFastestPeerSelector(logger *zap.Logger) *FastestPeerSelector {
	fps := &FastestPeerSelector{}
	fps.logger = logger.Named("rtt-cache")
	return fps
}

// SetHost injects the libp2p host used for pinging and peerstore lookups.
// NOTE(review): r.host is written here without taking the embedded lock —
// presumably SetHost runs during setup before any concurrent use; confirm
// with callers.
func (r *FastestPeerSelector) SetHost(h host.Host) {
	r.host = h
}

// PingPeer pings the given peer and returns the measured round-trip time.
// Pinging the local host is rejected outright. The operation is bounded by
// a 7-second timeout layered on top of the caller's context.
func (r *FastestPeerSelector) PingPeer(ctx context.Context, peer peer.ID) (time.Duration, error) {
	if peer == r.host.ID() {
		return 0, errors.New("can't ping yourself")
	}

	ctx, cancel := context.WithTimeout(ctx, 7*time.Second)
	defer cancel()

	select {
	case result := <-ping.Ping(ctx, r.host, peer):
		r.Lock()
		defer r.Unlock()

		if result.Error != nil {
			r.logger.Debug("could not ping", logging.HostID("peer", peer), zap.Error(result.Error))
			return 0, result.Error
		}
		return result.RTT, nil

	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

// FastestPeer returns the peer from peers with the lowest known latency.
// Peers with a cached LatencyEWMA in the peerstore use that value directly;
// the rest are pinged concurrently. Candidates are then sorted (see
// pingSort) and the best one that is confirmed reachable is returned, or
// ErrNoPeersAvailable if none qualifies.
func (r *FastestPeerSelector) FastestPeer(ctx context.Context, peers peer.IDSlice) (peer.ID, error) {
	var peerRTT []pingResult
	var peerRTTMutex sync.Mutex

	wg := sync.WaitGroup{}
	pingCh := make(chan peer.ID)

	// Tracks which peers were pinged in this call, to skip re-pinging below.
	pinged := make(map[peer.ID]struct{})

	go func() {
		// Ping any peer with no latency recorded
		for peerToPing := range pingCh {
			go func(p peer.ID) {
				defer wg.Done()
				// time.Hour is the sentinel for "ping failed".
				rtt := time.Hour
				result, err := r.PingPeer(ctx, p)
				if err == nil {
					rtt = result
				}

				peerRTTMutex.Lock()
				peerRTT = append(peerRTT, pingResult{
					peerID:        p,
					rtt:           rtt,
					connectedness: r.host.Network().Connectedness(p),
				})
				peerRTTMutex.Unlock()
			}(peerToPing)
		}
	}()

	for _, p := range peers {
		latency := r.host.Peerstore().LatencyEWMA(p)
		if latency == 0 {
			// No cached latency: queue a ping. wg.Add happens before the send
			// so wg.Wait below cannot miss this peer.
			wg.Add(1)
			pinged[p] = struct{}{} // To avoid double pings
			pingCh <- p
		} else {
			// Cached latency available; record it without pinging.
			peerRTTMutex.Lock()
			peerRTT = append(peerRTT, pingResult{
				peerID:        p,
				rtt:           latency,
				connectedness: r.host.Network().Connectedness(p),
			})
			peerRTTMutex.Unlock()
		}
	}

	// Wait for pings to be done (if any)
	wg.Wait()
	close(pingCh)

	sort.Sort(pingSort(peerRTT))

	for _, p := range peerRTT {
		if p.rtt == time.Hour {
			// Sentinel hit: this peer's ping failed. Assumes failed entries
			// sort after usable ones — depends on pingSort's ordering.
			break
		}

		// Make sure peer is reachable
		_, exists := pinged[p.peerID] // Did we just ping the peer?
		if !exists {
			// Latency came from the peerstore cache; ping now to confirm
			// the peer is actually reachable before returning it.
			_, err := r.PingPeer(ctx, p.peerID)
			if err != nil {
				continue
			} else {
				if p.rtt != time.Hour {
					return p.peerID, nil
				}
			}
		} else {
			if p.rtt != time.Hour {
				return p.peerID, nil
			}
		}
	}

	return "", ErrNoPeersAvailable
}

// pingResult holds the measured (or cached) latency for a single peer
// together with its connection state at the time it was recorded.
type pingResult struct {
	peerID        peer.ID
	rtt           time.Duration // time.Hour acts as a "ping failed" sentinel
	connectedness network.Connectedness
}

// pingSort implements sort.Interface over ping results so the most
// attractive peer ends up first.
type pingSort []pingResult

// Len reports the number of recorded results.
func (ps pingSort) Len() int { return len(ps) }

// Swap exchanges the results at positions i and j.
func (ps pingSort) Swap(i, j int) { ps[i], ps[j] = ps[j], ps[i] }

// connectednessPriority ranks connection states for peer selection.
// Closer to 0 is preferred. Initialized directly instead of via init():
// plain data needs no init function.
var connectednessPriority = map[network.Connectedness]int{
	network.CanConnect:    1,
	network.Connected:     1,
	network.NotConnected:  2,
	network.CannotConnect: 3,
}

// Less orders results primarily by connectedness priority and, within the
// same priority class, by ascending RTT.
//
// The previous implementation combined both criteria with &&, which is not
// a strict weak ordering as required by sort.Interface: a peer with a
// better connection state but a larger RTT compared as "not less" in both
// directions, so the sort produced an effectively arbitrary order and
// FastestPeer could return a slower peer.
func (a pingSort) Less(i, j int) bool {
	pi := connectednessPriority[a[i].connectedness]
	pj := connectednessPriority[a[j].connectedness]
	if pi != pj {
		return pi < pj
	}
	return a[i].rtt < a[j].rtt
}
48 changes: 48 additions & 0 deletions waku/v2/peermanager/fastest_peer_selector_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package peermanager

import (
"context"
"crypto/rand"
"testing"
"time"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/stretchr/testify/require"
"github.com/waku-org/go-waku/tests"
"github.com/waku-org/go-waku/waku/v2/utils"
)

// TestRTT exercises FastestPeerSelector against three in-process hosts:
// a fastest peer must be found while hosts are reachable, and
// ErrNoPeersAvailable must be returned once the only candidate is down.
// Fix over the original: errors from MakeHost and Close were silently
// discarded, which could turn a setup failure into a confusing nil-pointer
// panic later in the test.
func TestRTT(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	h1, err := tests.MakeHost(ctx, 0, rand.Reader)
	require.NoError(t, err)
	h2, err := tests.MakeHost(ctx, 0, rand.Reader)
	require.NoError(t, err)
	h3, err := tests.MakeHost(ctx, 0, rand.Reader)
	require.NoError(t, err)

	h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL)
	h1.Peerstore().AddAddrs(h3.ID(), h3.Addrs(), peerstore.PermanentAddrTTL)

	rtt := NewFastestPeerSelector(utils.Logger())
	rtt.SetHost(h1)

	_, err = rtt.FastestPeer(ctx, peer.IDSlice{h2.ID(), h3.ID()})
	require.NoError(t, err)

	// Simulate H3 being no longer available
	require.NoError(t, h3.Close())

	_, err = rtt.FastestPeer(ctx, peer.IDSlice{h3.ID()})
	require.ErrorIs(t, err, ErrNoPeersAvailable)

	// H3 should never return
	for i := 0; i < 100; i++ {
		p, err := rtt.FastestPeer(ctx, peer.IDSlice{h2.ID(), h3.ID()})
		if err != nil {
			require.ErrorIs(t, err, ErrNoPeersAvailable)
		} else {
			require.NotEqual(t, h3.ID(), p)
		}
	}
}
20 changes: 15 additions & 5 deletions waku/v2/peermanager/peer_connector.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
c.mux.Lock()
defer c.mux.Unlock()
val, ok := c.cache.Get(pi.ID)
var cachedPeer *connCacheData
if ok {
tv := val.(*connCacheData)
now := time.Now()
Expand All @@ -204,15 +203,25 @@ func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
}
c.logger.Debug("Proceeding with connecting to peer",
zap.Time("currentTime", now), zap.Time("nextTry", tv.nextTry))
tv.nextTry = now.Add(tv.strat.Delay())
}
return true
}

func (c *PeerConnectionStrategy) addConnectionBackoff(peerID peer.ID) {
c.mux.Lock()
defer c.mux.Unlock()
val, ok := c.cache.Get(peerID)
var cachedPeer *connCacheData
if ok {
tv := val.(*connCacheData)
tv.nextTry = time.Now().Add(tv.strat.Delay())
} else {
cachedPeer = &connCacheData{strat: c.backoff()}
cachedPeer.nextTry = time.Now().Add(cachedPeer.strat.Delay())
c.logger.Debug("Initializing connectionCache for peer ",
logging.HostID("peerID", pi.ID), zap.Time("until", cachedPeer.nextTry))
c.cache.Add(pi.ID, cachedPeer)
logging.HostID("peerID", peerID), zap.Time("until", cachedPeer.nextTry))
c.cache.Add(peerID, cachedPeer)
}
return true
}

func (c *PeerConnectionStrategy) dialPeers() {
Expand Down Expand Up @@ -255,6 +264,7 @@ func (c *PeerConnectionStrategy) dialPeer(pi peer.AddrInfo, sem chan struct{}) {
defer cancel()
err := c.host.Connect(ctx, pi)
if err != nil && !errors.Is(err, context.Canceled) {
c.addConnectionBackoff(pi.ID)
c.host.Peerstore().(wps.WakuPeerstore).AddConnFailure(pi)
c.logger.Warn("connecting to peer", logging.HostID("peerID", pi.ID), zap.Error(err))
}
Expand Down
4 changes: 3 additions & 1 deletion waku/v2/peermanager/peer_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ type PeerManager struct {
discoveryService *discv5.DiscoveryV5
wakuprotoToENRFieldMap map[protocol.ID]WakuProtoInfo
TopicHealthNotifCh chan<- TopicHealthStatus
rttCache *FastestPeerSelector
}

// PeerSelection provides various options based on which Peer is selected from a list of peers.
Expand Down Expand Up @@ -188,6 +189,7 @@ func NewPeerManager(maxConnections int, maxPeers int, metadata *metadata.WakuMet
subRelayTopics: make(map[string]*NodeTopicDetails),
maxPeers: maxPeers,
wakuprotoToENRFieldMap: map[protocol.ID]WakuProtoInfo{},
rttCache: NewFastestPeerSelector(logger),
}
logger.Info("PeerManager init values", zap.Int("maxConnections", maxConnections),
zap.Int("maxRelayPeers", maxRelayPeers),
Expand All @@ -206,6 +208,7 @@ func (pm *PeerManager) SetDiscv5(discv5 *discv5.DiscoveryV5) {
// SetHost sets the host to be used in order to access the peerStore.
func (pm *PeerManager) SetHost(host host.Host) {
pm.host = host
pm.rttCache.SetHost(host)
}

// SetPeerConnector sets the peer connector to be used for establishing relay connections.
Expand All @@ -215,7 +218,6 @@ func (pm *PeerManager) SetPeerConnector(pc *PeerConnectionStrategy) {

// Start starts the processing to be done by peer manager.
func (pm *PeerManager) Start(ctx context.Context) {

pm.RegisterWakuProtocol(relay.WakuRelayID_v200, relay.WakuRelayENRField)

pm.ctx = ctx
Expand Down
Loading

0 comments on commit c731def

Please sign in to comment.