Compare commits

...

6 Commits

Author SHA1 Message Date
Yury Gargay
026958c22a Lock by PeerID when CreateChannel 2023-10-09 14:16:08 +02:00
Yury Gargay
9bfab103a0 Merge remote-tracking branch 'upstream/main' into yury/use-sync-map-in-updatechannel 2023-10-09 13:54:21 +02:00
Yury Gargay
dece311076 Merge remote-tracking branch 'upstream/main' into yury/use-sync-map-in-updatechannel 2023-10-04 14:03:34 +02:00
Yury Gargay
206d903de5 Update error message 2023-09-25 10:34:10 +02:00
Yury Gargay
3e20f23646 Rework Len() method via atomic counter 2023-09-25 10:34:10 +02:00
Yury Gargay
025fefc6bd Use sync.Map in PeersUpdateManager 2023-09-25 10:34:10 +02:00
3 changed files with 75 additions and 40 deletions

View File

@@ -61,7 +61,7 @@ func NewServer(config *Config, accountManager AccountManager, peersUpdateManager
if appMetrics != nil {
// update gauge based on number of connected peers which is equal to open gRPC streams
err = appMetrics.GRPCMetrics().RegisterConnectedStreams(func() int64 {
return int64(len(peersUpdateManager.peerChannels))
return peersUpdateManager.Len()
})
if err != nil {
return nil, err

View File

@@ -2,6 +2,7 @@ package server
import (
"sync"
"sync/atomic"
log "github.com/sirupsen/logrus"
@@ -14,25 +15,29 @@ type UpdateMessage struct {
Update *proto.SyncResponse
}
type UpdateChannel chan *UpdateMessage
type PeersUpdateManager struct {
// peerChannels is an update channel indexed by Peer.ID
peerChannels map[string]chan *UpdateMessage
channelsMux *sync.Mutex
peerChannels sync.Map
// peerChannelLocks keeps the peer locks to organize channel creations
peerChannelLocks sync.Map
// len is the length of peerChannels
len atomic.Int64
}
// NewPeersUpdateManager returns a new instance of PeersUpdateManager
func NewPeersUpdateManager() *PeersUpdateManager {
return &PeersUpdateManager{
peerChannels: make(map[string]chan *UpdateMessage),
channelsMux: &sync.Mutex{},
}
return &PeersUpdateManager{}
}
// SendUpdate sends update message to the peer's channel
func (p *PeersUpdateManager) SendUpdate(peerID string, update *UpdateMessage) {
p.channelsMux.Lock()
defer p.channelsMux.Unlock()
if channel, ok := p.peerChannels[peerID]; ok {
if ch, ok := p.peerChannels.Load(peerID); ok {
channel, ok := ch.(UpdateChannel)
if !ok {
log.Warnf("could not cast to UpdateChannel")
}
select {
case channel <- update:
log.Debugf("update was sent to channel for peer %s", peerID)
@@ -45,35 +50,48 @@ func (p *PeersUpdateManager) SendUpdate(peerID string, update *UpdateMessage) {
}
// CreateChannel creates a go channel for a given peer used to deliver updates relevant to the peer.
func (p *PeersUpdateManager) CreateChannel(peerID string) chan *UpdateMessage {
p.channelsMux.Lock()
defer p.channelsMux.Unlock()
func (p *PeersUpdateManager) CreateChannel(peerID string) UpdateChannel {
// we have to lock the whole operation by peerID as we do two non atomic operations:
// - closeChannel()
// - Store
value, _ := p.peerChannelLocks.LoadOrStore(peerID, &sync.Mutex{})
mtx := value.(*sync.Mutex)
mtx.Lock()
defer mtx.Unlock()
p.closeChannel(peerID)
if channel, ok := p.peerChannels[peerID]; ok {
delete(p.peerChannels, peerID)
close(channel)
}
// mbragin: todo shouldn't it be more? or configurable?
channel := make(chan *UpdateMessage, channelBufferSize)
p.peerChannels[peerID] = channel
channel := make(UpdateChannel, channelBufferSize)
p.peerChannels.Store(peerID, channel)
p.len.Add(1)
log.Debugf("opened updates channel for a peer %s", peerID)
return channel
}
func (p *PeersUpdateManager) closeChannel(peerID string) {
if channel, ok := p.peerChannels[peerID]; ok {
delete(p.peerChannels, peerID)
close(channel)
// GetChannel returns the update channel registered for the given peer ID,
// or nil if no channel exists for that peer.
func (p *PeersUpdateManager) GetChannel(peerID string) UpdateChannel {
	if ch, ok := p.peerChannels.Load(peerID); ok {
		// use the comma-ok form so a bad stored value is logged instead of panicking,
		// consistent with SendUpdate and closeChannel
		if channel, ok := ch.(UpdateChannel); ok {
			return channel
		}
		log.Errorf("could not cast to UpdateChannel")
	}
	return nil
}
log.Debugf("closed updates channel of a peer %s", peerID)
// closeChannel removes the given peer's channel from the map and closes it,
// keeping the atomic length counter in sync. It is a no-op if the peer has
// no registered channel.
func (p *PeersUpdateManager) closeChannel(peerID string) {
	if ch, ok := p.peerChannels.LoadAndDelete(peerID); ok {
		// the entry is already removed from the map, so decrement the
		// counter regardless of whether the stored value is usable
		p.len.Add(-1)
		channel, ok := ch.(UpdateChannel)
		if !ok {
			// must return here: closing a nil channel would panic
			log.Errorf("could not cast to UpdateChannel")
			return
		}
		close(channel)
		log.Debugf("closed updates channel of a peer %s", peerID)
	}
}
// CloseChannels closes updates channel for each given peer
func (p *PeersUpdateManager) CloseChannels(peerIDs []string) {
p.channelsMux.Lock()
defer p.channelsMux.Unlock()
for _, id := range peerIDs {
p.closeChannel(id)
}
@@ -81,18 +99,22 @@ func (p *PeersUpdateManager) CloseChannels(peerIDs []string) {
// CloseChannel closes updates channel of a given peer
func (p *PeersUpdateManager) CloseChannel(peerID string) {
p.channelsMux.Lock()
defer p.channelsMux.Unlock()
p.closeChannel(peerID)
}
// GetAllConnectedPeers returns a copy of the connected peers map
func (p *PeersUpdateManager) GetAllConnectedPeers() map[string]struct{} {
p.channelsMux.Lock()
defer p.channelsMux.Unlock()
m := make(map[string]struct{})
for ID := range p.peerChannels {
m[ID] = struct{}{}
}
p.peerChannels.Range(func(key any, value any) bool {
if ID, ok := key.(string); ok {
m[ID] = struct{}{}
}
return true
})
return m
}
// Len returns the number of currently open peer update channels.
// It reads an atomic counter maintained by CreateChannel/closeChannel,
// so it is safe for concurrent use.
func (p *PeersUpdateManager) Len() int64 {
	return p.len.Load()
}

View File

@@ -1,9 +1,10 @@
package server
import (
"github.com/netbirdio/netbird/management/proto"
"testing"
"time"
"github.com/netbirdio/netbird/management/proto"
)
//var peersUpdater *PeersUpdateManager
@@ -13,10 +14,22 @@ func TestCreateChannel(t *testing.T) {
peersUpdater := NewPeersUpdateManager()
defer peersUpdater.CloseChannel(peer)
if peersUpdater.Len() != 0 {
t.Error("peersUpdated should not have any channels yet")
}
if ch := peersUpdater.GetChannel(peer); ch != nil {
t.Errorf("We should not have channel for %s yet", peer)
}
_ = peersUpdater.CreateChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; !ok {
if ch := peersUpdater.GetChannel(peer); ch == nil {
t.Error("Error creating the channel")
}
if peersUpdater.Len() != 1 {
t.Error("peersUpdated should have 1 channel")
}
}
func TestSendUpdate(t *testing.T) {
@@ -28,12 +41,12 @@ func TestSendUpdate(t *testing.T) {
},
}}
_ = peersUpdater.CreateChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; !ok {
if ch := peersUpdater.GetChannel(peer); ch == nil {
t.Error("Error creating the channel")
}
peersUpdater.SendUpdate(peer, update1)
select {
case <-peersUpdater.peerChannels[peer]:
case <-peersUpdater.GetChannel(peer):
default:
t.Error("Update wasn't send")
}
@@ -54,7 +67,7 @@ func TestSendUpdate(t *testing.T) {
select {
case <-timeout:
t.Error("timed out reading previously sent updates")
case updateReader := <-peersUpdater.peerChannels[peer]:
case updateReader := <-peersUpdater.GetChannel(peer):
if updateReader.Update.NetworkMap.Serial == update2.Update.NetworkMap.Serial {
t.Error("got the update that shouldn't have been sent")
}
@@ -67,11 +80,11 @@ func TestCloseChannel(t *testing.T) {
peer := "test-close"
peersUpdater := NewPeersUpdateManager()
_ = peersUpdater.CreateChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; !ok {
if ch := peersUpdater.GetChannel(peer); ch == nil {
t.Error("Error creating the channel")
}
peersUpdater.CloseChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; ok {
if ch := peersUpdater.GetChannel(peer); ch != nil {
t.Error("Error closing the channel")
}
}