Compare commits

...

4 Commits

Author SHA1 Message Date
Zoltán Papp
2ea7fb7b21 add support for disabling relay via environment variable 2026-02-18 18:59:56 +01:00
Zoltán Papp
93f530637d log duration of updateChecksIfNew execution 2026-02-18 18:43:48 +01:00
Zoltán Papp
3a84475d14 add test for adding multiple allowed IPs to userspace interface 2026-02-18 18:39:27 +01:00
Zoltán Papp
3d7368e51f set readyChan in Engine to signal when first sync is received 2026-02-18 15:58:05 +01:00
5 changed files with 142 additions and 7 deletions

View File

@@ -589,6 +589,101 @@ func Test_ConnectPeers(t *testing.T) {
}
// Test_UserSpaceAddAllowedIPs verifies that a large number of allowed IPs can
// be added one-by-one to an already-configured peer on a userspace WireGuard
// interface, and that the resulting peer configuration contains the initial
// prefix plus every added prefix exactly once.
func Test_UserSpaceAddAllowedIPs(t *testing.T) {
	ifaceName := fmt.Sprintf("utun%d", WgIntNumber+5)
	wgIP := "10.99.99.21/30"
	wgPort := 33105
	newNet, err := stdnet.NewNet(context.Background(), nil)
	if err != nil {
		t.Fatal(err)
	}
	opts := WGIFaceOpts{
		IFaceName:    ifaceName,
		Address:      wgIP,
		WGPort:       wgPort,
		WGPrivKey:    key,
		MTU:          DefaultMTU,
		TransportNet: newNet,
	}
	iface, err := NewWGIFace(opts)
	if err != nil {
		t.Fatal(err)
	}
	err = iface.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Ensure the interface is torn down even if an assertion below fails.
	defer func() {
		if err := iface.Close(); err != nil {
			t.Error(err)
		}
	}()
	_, err = iface.Up()
	if err != nil {
		t.Fatal(err)
	}
	keepAlive := 15 * time.Second
	initialAllowedIP := netip.MustParsePrefix("10.99.99.22/32")
	endpoint, err := net.ResolveUDPAddr("udp", "127.0.0.1:9905")
	if err != nil {
		t.Fatal(err)
	}
	// Add peer with initial endpoint and first allowed IP.
	err = iface.UpdatePeer(peerPubKey, []netip.Prefix{initialAllowedIP}, keepAlive, endpoint, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Phase 1: generate 500 allowed IPs into a list.
	const extraIPs = 500
	addedPrefixes := make([]netip.Prefix, 0, extraIPs)
	for i := 0; i < extraIPs; i++ {
		// Use 172.16.x.y/32 range: i encoded as two octets (i/256, i%256),
		// which yields 500 distinct /32 prefixes with no collisions.
		prefix := netip.MustParsePrefix(fmt.Sprintf("172.16.%d.%d/32", i/256, i%256))
		addedPrefixes = append(addedPrefixes, prefix)
	}
	// Phase 2: iterate over the list and add each allowed IP to the peer,
	// logging the total duration so regressions in AddAllowedIP show up in test output.
	phase2Start := time.Now()
	for _, prefix := range addedPrefixes {
		if addErr := iface.AddAllowedIP(peerPubKey, prefix); addErr != nil {
			t.Fatalf("failed to add allowed IP %s: %v", prefix, addErr)
		}
	}
	t.Logf("Phase 2 (add %d IPs to peer): %s", extraIPs, time.Since(phase2Start))
	// Verify the peer has all 501 allowed IPs (1 initial + 500 added).
	peer, err := getPeer(ifaceName, peerPubKey)
	if err != nil {
		t.Fatal(err)
	}
	if peer.Endpoint.String() != endpoint.String() {
		t.Fatalf("expected endpoint %s, got %s", endpoint, peer.Endpoint)
	}
	allExpected := append([]netip.Prefix{initialAllowedIP}, addedPrefixes...)
	if len(peer.AllowedIPs) != len(allExpected) {
		t.Fatalf("expected %d allowed IPs, got %d", len(allExpected), len(peer.AllowedIPs))
	}
	// Compare as a set: kernel/userspace implementations do not guarantee
	// the order of AllowedIPs in the reported configuration.
	allowedIPSet := make(map[string]struct{}, len(peer.AllowedIPs))
	for _, aip := range peer.AllowedIPs {
		allowedIPSet[aip.String()] = struct{}{}
	}
	for _, expected := range allExpected {
		if _, found := allowedIPSet[expected.String()]; !found {
			t.Errorf("expected allowed IP %s not found in peer config", expected)
		}
	}
}
func getPeer(ifaceName, peerPubKey string) (wgtypes.Peer, error) {
wg, err := wgctrl.New()
if err != nil {

View File

@@ -290,6 +290,10 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
return wrapErr(err)
}
if relayClient.IsDisableRelay() {
relayURLs = []string{}
}
relayManager := relayClient.NewManager(engineCtx, relayURLs, myPrivateKey.PublicKey().String(), engineConfig.MTU)
c.statusRecorder.SetRelayMgr(relayManager)
if len(relayURLs) > 0 {
@@ -310,6 +314,8 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
c.engineMutex.Lock()
engine := NewEngine(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, checks, stateManager)
engine.SetSyncResponsePersistence(c.persistSyncResponse)
engine.SetReadyChan(runningChan)
runningChan = nil
c.engine = engine
c.engineMutex.Unlock()
@@ -330,11 +336,6 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress())
state.Set(StatusConnected)
if runningChan != nil {
close(runningChan)
runningChan = nil
}
<-engineCtx.Done()
c.engineMutex.Lock()

View File

@@ -28,8 +28,8 @@ import (
"github.com/netbirdio/netbird/client/firewall"
firewallManager "github.com/netbirdio/netbird/client/firewall/manager"
"github.com/netbirdio/netbird/client/iface"
nbnetstack "github.com/netbirdio/netbird/client/iface/netstack"
"github.com/netbirdio/netbird/client/iface/device"
nbnetstack "github.com/netbirdio/netbird/client/iface/netstack"
"github.com/netbirdio/netbird/client/iface/udpmux"
"github.com/netbirdio/netbird/client/internal/acl"
"github.com/netbirdio/netbird/client/internal/debug"
@@ -217,6 +217,10 @@ type Engine struct {
// WireGuard interface monitor
wgIfaceMonitor *WGIfaceMonitor
// readyChan is closed when the first sync message is received from management
readyChan chan struct{}
readyChanOnce sync.Once
// shutdownWg tracks all long-running goroutines to ensure clean shutdown
shutdownWg sync.WaitGroup
@@ -275,6 +279,10 @@ func NewEngine(
return engine
}
func (e *Engine) SetReadyChan(ch chan struct{}) {
e.readyChan = ch
}
func (e *Engine) Stop() error {
if e == nil {
// this seems to be a very odd case but there was the possibility if the netbird down command comes before the engine is fully started
@@ -834,6 +842,13 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
defer func() {
log.Infof("sync finished in %s", time.Since(started))
}()
e.readyChanOnce.Do(func() {
if e.readyChan != nil {
close(e.readyChan)
}
})
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
@@ -880,9 +895,11 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
// todo update signal
}
uCheckTime := time.Now()
if err := e.updateChecksIfNew(update.Checks); err != nil {
return err
}
log.Infof("update check finished in %s", time.Since(uCheckTime))
nm := update.GetNetworkMap()
if nm == nil {
@@ -925,7 +942,9 @@ func (e *Engine) handleRelayUpdate(update *mgmProto.RelayConfig) error {
return fmt.Errorf("update relay token: %w", err)
}
e.relayManager.UpdateServerURLs(update.Urls)
if !relayClient.IsDisableRelay() {
e.relayManager.UpdateServerURLs(update.Urls)
}
// Just in case the agent started with an MGM server where the relay was disabled but was later enabled.
// We can ignore all errors because the guard will manage the reconnection retries.

View File

@@ -12,6 +12,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal/stdnet"
relayClient "github.com/netbirdio/netbird/shared/relay/client"
)
const (
@@ -125,6 +126,10 @@ func GenerateICECredentials() (string, string, error) {
}
func CandidateTypes() []ice.CandidateType {
if relayClient.IsDisableRelay() {
return []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive, ice.CandidateTypeRelay}
}
if hasICEForceRelayConn() {
return []ice.CandidateType{ice.CandidateTypeRelay}
}

View File

@@ -0,0 +1,15 @@
package client
import (
"os"
"strconv"
)
const (
	// envKeyNBDebugDisableRelay names the environment variable that, when set
	// to a truthy value, disables relay usage for debugging purposes.
	envKeyNBDebugDisableRelay = "NB_DEBUG_DISABLE_RELAY"
)

// IsDisableRelay reports whether relay usage has been disabled via the
// NB_DEBUG_DISABLE_RELAY environment variable. An unset or unparsable
// value means relay stays enabled (returns false).
func IsDisableRelay() bool {
	disabled, err := strconv.ParseBool(os.Getenv(envKeyNBDebugDisableRelay))
	if err != nil {
		// strconv.ParseBool rejects empty and malformed input; treat both
		// as "relay enabled" rather than failing.
		return false
	}
	return disabled
}