Compare commits


1 Commit

Author: braginini
SHA1: b78ec082a3
Message: ebpf test
Date: 2021-12-05 18:17:13 +01:00
253 changed files with 5086 additions and 31652 deletions

View File

@@ -1,27 +0,0 @@
name: Test Code Darwin
on: [push,pull_request]
jobs:
test:
runs-on: macos-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Checkout code
uses: actions/checkout@v2
- name: Cache Go modules
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: macos-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
macos-go-
- name: Install modules
run: go mod tidy
- name: Test
run: go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...
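The `-exec 'sudo --preserve-env=CI'` flag runs each compiled test binary under sudo while keeping the `CI` environment variable visible to it. As a minimal, hypothetical sketch (the package and test names below are illustrative, not taken from this repository), a privileged test can use that variable to skip itself outside CI:

```go
package iface_test

import (
	"os"
	"testing"
)

// TestCreateInterface is a hypothetical privileged test: it relies on the CI
// variable surviving the sudo re-exec configured in the workflow above.
func TestCreateInterface(t *testing.T) {
	if os.Getenv("CI") != "true" {
		t.Skip("needs root and the CI environment; skipping locally")
	}
	// ...exercise code that needs elevated privileges, e.g. creating a WireGuard interface.
}
```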

View File

@@ -1,35 +0,0 @@
name: Test Code Linux
on: [push,pull_request]
jobs:
test:
strategy:
matrix:
arch: ['386','amd64']
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Cache Go modules
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Checkout code
uses: actions/checkout@v2
- name: Install dependencies
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libappindicator3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev
- name: Install modules
run: go mod tidy
- name: Test
run: GOARCH=${{ matrix.arch }} go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...

View File

@@ -1,48 +0,0 @@
name: Test Code Windows
on: [push,pull_request]
jobs:
pre:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- run: bash -x wireguard_nt.sh
working-directory: client
- uses: actions/upload-artifact@v2
with:
name: syso
path: client/*.syso
retention-days: 1
test:
needs: pre
runs-on: windows-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- uses: actions/cache@v2
with:
path: |
%LocalAppData%\go-build
~\go\pkg\mod
~\AppData\Local\go-build
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- uses: actions/download-artifact@v2
with:
name: syso
path: iface\
- name: Test
run: go test -tags=load_wgnt_from_rsrc -timeout 5m -p 1 ./...
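`-tags=load_wgnt_from_rsrc` turns on the `load_wgnt_from_rsrc` build tag, so files constrained to it are included in the test build; this is presumably how the `.syso` resource produced in the `pre` job is picked up on Windows. A rough sketch of the mechanism, with a hypothetical constant name that is not taken from the repository:

```go
//go:build load_wgnt_from_rsrc
// +build load_wgnt_from_rsrc

package iface

// loadFromRsrc is a hypothetical switch: when the build tag is set, code can
// prefer the wireguard-nt driver embedded via the generated .syso resource.
const loadFromRsrc = true
```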

59
.github/workflows/golang-test.yml vendored Normal file
View File

@@ -0,0 +1,59 @@
on:
push:
branches:
- main
pull_request:
name: Test
jobs:
test:
strategy:
matrix:
go-version: [1.16.x]
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: Test
run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test -p 1 ./...
test_build:
strategy:
matrix:
os: [ windows, linux, darwin ]
go-version: [1.16.x]
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Cache Go modules
uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Install modules
run: go mod tidy
- name: run build client
run: GOOS=${{ matrix.os }} go build .
working-directory: client
- name: run build management
run: GOOS=${{ matrix.os }} go build .
working-directory: management
- name: run build signal
run: GOOS=${{ matrix.os }} go build .
working-directory: signal

View File

@@ -1,21 +1,16 @@
name: golangci-lint
on: [pull_request]
on:
push:
branches:
- main
pull_request:
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Install dependencies
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libappindicator3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
# SA1019: "io/ioutil" has been deprecated since Go 1.16
args: --timeout=6m -e SA1019

View File

@@ -4,13 +4,6 @@ on:
push:
tags:
- 'v*'
branches:
- main
pull_request:
env:
SIGN_PIPE_VER: "v0.0.3"
GORELEASER_VER: "v1.6.3"
jobs:
release:
@@ -21,15 +14,11 @@ jobs:
uses: actions/checkout@v2
with:
fetch-depth: 0 # It is required for GoReleaser to work properly
- name: Generate syso with DLL
run: bash -x wireguard_nt.sh
working-directory: client
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
go-version: 1.16
-
name: Cache Go modules
uses: actions/cache@v1
@@ -41,9 +30,6 @@ jobs:
-
name: Install modules
run: go mod tidy
-
name: check git status
run: git --no-pager diff --exit-code
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
@@ -52,145 +38,28 @@ jobs:
uses: docker/setup-buildx-action@v1
-
name: Login to Docker hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: netbirdio
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_TOKEN }}
-
name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: ${{ env.GORELEASER_VER }}
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
-
name: upload non tags for debug purposes
uses: actions/upload-artifact@v2
with:
name: release
path: dist/
retention-days: 3
release_ui:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0 # It is required for GoReleaser to work properly
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
- name: Cache Go modules
uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-ui-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-ui-go-
- name: Install modules
run: go mod tidy
- name: check git status
run: git --no-pager diff --exit-code
- name: Install dependencies
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libappindicator3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-mingw-w64-x86-64
- name: Install rsrc
run: go install github.com/akavel/rsrc@v0.10.2
- name: Generate windows rsrc
run: rsrc -arch amd64 -ico client/ui/netbird.ico -manifest client/ui/manifest.xml -o client/ui/resources_windows_amd64.syso
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: ${{ env.GORELEASER_VER }}
args: release --config .goreleaser_ui.yaml --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
- name: upload non tags for debug purposes
uses: actions/upload-artifact@v2
with:
name: release-ui
path: dist/
retention-days: 3
release_ui_darwin:
runs-on: macos-latest
steps:
-
name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0 # It is required for GoReleaser to work properly
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.18
-
name: Cache Go modules
uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-ui-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-ui-go-
-
name: Install modules
run: go mod tidy
-
name: Run GoReleaser
id: goreleaser
uses: goreleaser/goreleaser-action@v2
with:
version: ${{ env.GORELEASER_VER }}
args: release --config .goreleaser_ui_darwin.yaml --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
name: upload non tags for debug purposes
uses: actions/upload-artifact@v2
with:
name: release-ui-darwin
path: dist/
retention-days: 3
trigger_windows_signer:
runs-on: ubuntu-latest
needs: [release,release_ui]
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Trigger Windows binaries sign pipeline
name: Trigger Windows binaries sign pipeline
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Sign windows bin and installer
repo: netbirdio/sign-pipelines
ref: ${{ env.SIGN_PIPE_VER }}
token: ${{ secrets.SIGN_GITHUB_TOKEN }}
inputs: '{ "tag": "${{ github.ref }}" }'
trigger_darwin_signer:
runs-on: ubuntu-latest
needs: release_ui_darwin
if: startsWith(github.ref, 'refs/tags/')
steps:
- name: Trigger Darwin App binaries sign pipeline
uses: benc-uk/workflow-dispatch@v1
with:
workflow: Sign darwin ui app with dispatch
repo: netbirdio/sign-pipelines
ref: ${{ env.SIGN_PIPE_VER }}
repo: wiretrustee/windows-sign-pipeline
ref: v0.0.1
token: ${{ secrets.SIGN_GITHUB_TOKEN }}
inputs: '{ "tag": "${{ github.ref }}" }'

View File

@@ -1,72 +0,0 @@
name: Test Docker Compose Linux
on: [push,pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Install jq
run: sudo apt-get install -y jq
- name: Install curl
run: sudo apt-get install -y curl
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.18.x
- name: Cache Go modules
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Checkout code
uses: actions/checkout@v2
- name: cp setup.env
run: cp infrastructure_files/tests/setup.env infrastructure_files/
- name: run configure
working-directory: infrastructure_files
run: bash -x configure.sh
env:
CI_NETBIRD_AUTH_CLIENT_ID: ${{ secrets.CI_NETBIRD_AUTH_CLIENT_ID }}
CI_NETBIRD_AUTH_AUDIENCE: testing.ci
CI_NETBIRD_AUTH_OIDC_CONFIGURATION_ENDPOINT: https://example.eu.auth0.com/.well-known/openid-configuration
CI_NETBIRD_USE_AUTH0: true
- name: check values
working-directory: infrastructure_files
env:
CI_NETBIRD_AUTH_CLIENT_ID: ${{ secrets.CI_NETBIRD_AUTH_CLIENT_ID }}
CI_NETBIRD_AUTH_AUDIENCE: testing.ci
CI_NETBIRD_AUTH_OIDC_CONFIGURATION_ENDPOINT: https://example.eu.auth0.com/.well-known/openid-configuration
CI_NETBIRD_USE_AUTH0: true
CI_NETBIRD_AUTH_SUPPORTED_SCOPES: "openid profile email offline_access api email_verified"
CI_NETBIRD_AUTH_AUTHORITY: https://example.eu.auth0.com/
CI_NETBIRD_AUTH_JWT_CERTS: https://example.eu.auth0.com/.well-known/jwks.json
CI_NETBIRD_AUTH_TOKEN_ENDPOINT: https://example.eu.auth0.com/oauth/token
CI_NETBIRD_AUTH_DEVICE_AUTH_ENDPOINT: https://example.eu.auth0.com/oauth/device/code
run: |
grep AUTH_CLIENT_ID docker-compose.yml | grep $CI_NETBIRD_AUTH_CLIENT_ID
grep AUTH_AUTHORITY docker-compose.yml | grep $CI_NETBIRD_AUTH_AUTHORITY
grep AUTH_AUDIENCE docker-compose.yml | grep $CI_NETBIRD_AUTH_AUDIENCE
grep AUTH_SUPPORTED_SCOPES docker-compose.yml | grep "$CI_NETBIRD_AUTH_SUPPORTED_SCOPES"
grep USE_AUTH0 docker-compose.yml | grep $CI_NETBIRD_USE_AUTH0
grep NETBIRD_MGMT_API_ENDPOINT docker-compose.yml | grep "http://localhost:33073"
- name: run docker compose up
working-directory: infrastructure_files
run: |
docker-compose up -d
sleep 5
- name: test running containers
run: |
count=$(docker compose ps --format json | jq '.[] | select(.Project | contains("infrastructure_files")) | .State' | grep -c running)
test $count -eq 4
working-directory: infrastructure_files

7
.gitignore vendored
View File

@@ -1,13 +1,8 @@
.idea
.run
*.iml
dist/
bin/
.env
conf.json
http-cmds.sh
infrastructure_files/management.json
infrastructure_files/docker-compose.yml
*.syso
client/.distfiles/
infrastructure_files/setup.env
infrastructure_files/docker-compose.yml

View File

@@ -1,9 +1,10 @@
project_name: netbird
project_name: wiretrustee
builds:
- id: netbird
- id: wiretrustee
dir: client
binary: netbird
binary: wiretrustee
env: [CGO_ENABLED=0]
goos:
- linux
- darwin
@@ -13,7 +14,6 @@ builds:
- amd64
- arm64
- mips
- 386
gomips:
- hardfloat
- softfloat
@@ -22,18 +22,16 @@ builds:
goarch: arm64
- goos: windows
goarch: arm
- goos: windows
goarch: 386
ldflags:
- -s -w -X github.com/netbirdio/netbird/client/system.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
mod_timestamp: '{{ .CommitTimestamp }}'
tags:
- load_wgnt_from_rsrc
- load_wintun_from_rsrc
- id: netbird-mgmt
- id: wiretrustee-mgmt
dir: management
env: [CGO_ENABLED=0]
binary: netbird-mgmt
binary: wiretrustee-mgmt
goos:
- linux
goarch:
@@ -44,10 +42,10 @@ builds:
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
mod_timestamp: '{{ .CommitTimestamp }}'
- id: netbird-signal
- id: wiretrustee-signal
dir: signal
env: [CGO_ENABLED=0]
binary: netbird-signal
binary: wiretrustee-signal
goos:
- linux
goarch:
@@ -57,56 +55,40 @@ builds:
ldflags:
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
mod_timestamp: '{{ .CommitTimestamp }}'
archives:
- builds:
- netbird
- wiretrustee
nfpms:
- maintainer: Netbird <dev@netbird.io>
description: Netbird client.
homepage: https://netbird.io/
id: netbird-deb
bindir: /usr/bin
- maintainer: Wiretrustee <dev@wiretrustee.com>
description: Wiretrustee client.
homepage: https://wiretrustee.com/
id: deb
builds:
- netbird
- wiretrustee
formats:
- deb
replaces:
- wiretrustee
conflicts:
- wiretrustee
scripts:
postinstall: "release_files/post_install.sh"
preremove: "release_files/pre_remove.sh"
- maintainer: Netbird <dev@netbird.io>
description: Netbird client.
homepage: https://netbird.io/
id: netbird-rpm
bindir: /usr/bin
- maintainer: Wiretrustee <dev@wiretrustee.com>
description: Wiretrustee client.
homepage: https://wiretrustee.com/
id: rpm
builds:
- netbird
- wiretrustee
formats:
- rpm
replaces:
- wiretrustee
conflicts:
- wiretrustee
scripts:
postinstall: "release_files/post_install.sh"
preremove: "release_files/pre_remove.sh"
dockers:
- image_templates:
- netbirdio/netbird:{{ .Version }}-amd64
- wiretrustee/wiretrustee:{{ .Version }}-amd64
ids:
- netbird
- wiretrustee
goarch: amd64
use: buildx
dockerfile: client/Dockerfile
@@ -117,11 +99,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/netbird:{{ .Version }}-arm64v8
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
ids:
- netbird
- wiretrustee
goarch: arm64
use: buildx
dockerfile: client/Dockerfile
@@ -132,11 +114,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/netbird:{{ .Version }}-arm
- wiretrustee/wiretrustee:{{ .Version }}-arm
ids:
- netbird
- wiretrustee
goarch: arm
goarm: 6
use: buildx
@@ -148,11 +130,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/signal:{{ .Version }}-amd64
- wiretrustee/signal:{{ .Version }}-amd64
ids:
- netbird-signal
- wiretrustee-signal
goarch: amd64
use: buildx
dockerfile: signal/Dockerfile
@@ -163,11 +145,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/signal:{{ .Version }}-arm64v8
- wiretrustee/signal:{{ .Version }}-arm64v8
ids:
- netbird-signal
- wiretrustee-signal
goarch: arm64
use: buildx
dockerfile: signal/Dockerfile
@@ -178,11 +160,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/signal:{{ .Version }}-arm
- wiretrustee/signal:{{ .Version }}-arm
ids:
- netbird-signal
- wiretrustee-signal
goarch: arm
goarm: 6
use: buildx
@@ -194,11 +176,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-amd64
- wiretrustee/management:{{ .Version }}-amd64
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: amd64
use: buildx
dockerfile: management/Dockerfile
@@ -209,11 +191,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-arm64v8
- wiretrustee/management:{{ .Version }}-arm64v8
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: arm64
use: buildx
dockerfile: management/Dockerfile
@@ -224,11 +206,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-arm
- wiretrustee/management:{{ .Version }}-arm
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: arm
goarm: 6
use: buildx
@@ -240,11 +222,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-debug-amd64
- wiretrustee/management:{{ .Version }}-debug-amd64
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: amd64
use: buildx
dockerfile: management/Dockerfile.debug
@@ -255,11 +237,11 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-debug-arm64v8
- wiretrustee/management:{{ .Version }}-debug-arm64v8
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: arm64
use: buildx
dockerfile: management/Dockerfile.debug
@@ -270,12 +252,12 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
- image_templates:
- netbirdio/management:{{ .Version }}-debug-arm
- wiretrustee/management:{{ .Version }}-debug-arm
ids:
- netbird-mgmt
- wiretrustee-mgmt
goarch: arm
goarm: 6
use: buildx
@@ -287,83 +269,78 @@ dockers:
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
- "--label=org.opencontainers.image.version={{.Version}}"
- "--label=maintainer=dev@netbird.io"
- "--label=maintainer=wiretrustee@wiretrustee.com"
docker_manifests:
- name_template: netbirdio/netbird:{{ .Version }}
- name_template: wiretrustee/wiretrustee:{{ .Version }}
image_templates:
- netbirdio/netbird:{{ .Version }}-arm64v8
- netbirdio/netbird:{{ .Version }}-arm
- netbirdio/netbird:{{ .Version }}-amd64
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
- wiretrustee/wiretrustee:{{ .Version }}-arm
- wiretrustee/wiretrustee:{{ .Version }}-amd64
- name_template: netbirdio/netbird:latest
- name_template: wiretrustee/wiretrustee:latest
image_templates:
- netbirdio/netbird:{{ .Version }}-arm64v8
- netbirdio/netbird:{{ .Version }}-arm
- netbirdio/netbird:{{ .Version }}-amd64
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
- wiretrustee/wiretrustee:{{ .Version }}-arm
- wiretrustee/wiretrustee:{{ .Version }}-amd64
- name_template: netbirdio/signal:{{ .Version }}
- name_template: wiretrustee/signal:{{ .Version }}
image_templates:
- netbirdio/signal:{{ .Version }}-arm64v8
- netbirdio/signal:{{ .Version }}-arm
- netbirdio/signal:{{ .Version }}-amd64
- wiretrustee/signal:{{ .Version }}-arm64v8
- wiretrustee/signal:{{ .Version }}-arm
- wiretrustee/signal:{{ .Version }}-amd64
- name_template: netbirdio/signal:latest
- name_template: wiretrustee/signal:latest
image_templates:
- netbirdio/signal:{{ .Version }}-arm64v8
- netbirdio/signal:{{ .Version }}-arm
- netbirdio/signal:{{ .Version }}-amd64
- wiretrustee/signal:{{ .Version }}-arm64v8
- wiretrustee/signal:{{ .Version }}-arm
- wiretrustee/signal:{{ .Version }}-amd64
- name_template: netbirdio/management:{{ .Version }}
- name_template: wiretrustee/management:{{ .Version }}
image_templates:
- netbirdio/management:{{ .Version }}-arm64v8
- netbirdio/management:{{ .Version }}-arm
- netbirdio/management:{{ .Version }}-amd64
- wiretrustee/management:{{ .Version }}-arm64v8
- wiretrustee/management:{{ .Version }}-arm
- wiretrustee/management:{{ .Version }}-amd64
- name_template: netbirdio/management:latest
- name_template: wiretrustee/management:latest
image_templates:
- netbirdio/management:{{ .Version }}-arm64v8
- netbirdio/management:{{ .Version }}-arm
- netbirdio/management:{{ .Version }}-amd64
- wiretrustee/management:{{ .Version }}-arm64v8
- wiretrustee/management:{{ .Version }}-arm
- wiretrustee/management:{{ .Version }}-amd64
- name_template: netbirdio/management:debug-latest
- name_template: wiretrustee/management:debug-latest
image_templates:
- netbirdio/management:{{ .Version }}-debug-arm64v8
- netbirdio/management:{{ .Version }}-debug-arm
- netbirdio/management:{{ .Version }}-debug-amd64
- wiretrustee/management:{{ .Version }}-debug-arm64v8
- wiretrustee/management:{{ .Version }}-debug-arm
- wiretrustee/management:{{ .Version }}-debug-amd64
brews:
-
ids:
- default
tap:
owner: netbirdio
name: homebrew-tap
owner: wiretrustee
name: homebrew-client
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
commit_author:
name: Netbird
email: dev@netbird.io
description: Netbird project.
name: Wiretrustee
email: wiretrustee@wiretrustee.com
description: Wiretrustee project.
download_strategy: CurlDownloadStrategy
homepage: https://netbird.io/
homepage: https://wiretrustee.com/
license: "BSD3"
test: |
system "#{bin}/{{ .ProjectName }} version"
conflicts:
- wiretrustee
system "#{bin}/{{ .ProjectName }} -h"
uploads:
- name: debian
ids:
- netbird-deb
- deb
mode: archive
target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package=
username: dev@wiretrustee.com
method: PUT
- name: yum
ids:
- netbird-rpm
- rpm
mode: archive
target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }}
username: dev@wiretrustee.com
method: PUT
method: PUT

View File

@@ -1,98 +0,0 @@
project_name: netbird-ui
builds:
- id: netbird-ui
dir: client/ui
binary: netbird-ui
env:
- CGO_ENABLED=1
goos:
- linux
goarch:
- amd64
ldflags:
- -s -w -X github.com/netbirdio/netbird/client/system.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
mod_timestamp: '{{ .CommitTimestamp }}'
- id: netbird-ui-windows
dir: client/ui
binary: netbird-ui
env:
- CGO_ENABLED=1
- CC=x86_64-w64-mingw32-gcc
goos:
- windows
goarch:
- amd64
ldflags:
- -s -w -X github.com/netbirdio/netbird/client/system.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
- -H windowsgui
mod_timestamp: '{{ .CommitTimestamp }}'
archives:
- id: linux-arch
name_template: "{{ .ProjectName }}-linux_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
builds:
- netbird-ui
- id: windows-arch
name_template: "{{ .ProjectName }}-windows_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
builds:
- netbird-ui-windows
nfpms:
- maintainer: Netbird <dev@netbird.io>
description: Netbird client UI.
homepage: https://netbird.io/
id: netbird-ui-deb
package_name: netbird-ui
builds:
- netbird-ui
formats:
- deb
contents:
- src: client/ui/netbird.desktop
dst: /usr/share/applications/netbird.desktop
- src: client/ui/disconnected.png
dst: /usr/share/pixmaps/netbird.png
dependencies:
- libayatana-appindicator3-1
- libgtk-3-dev
- libappindicator3-dev
- netbird
- maintainer: Netbird <dev@netbird.io>
description: Netbird client UI.
homepage: https://netbird.io/
id: netbird-ui-rpm
package_name: netbird-ui
builds:
- netbird-ui
formats:
- rpm
contents:
- src: client/ui/netbird.desktop
dst: /usr/share/applications/netbird.desktop
- src: client/ui/disconnected.png
dst: /usr/share/pixmaps/netbird.png
dependencies:
- libayatana-appindicator3-1
- libgtk-3-dev
- libappindicator3-dev
- netbird
uploads:
- name: debian
ids:
- netbird-ui-deb
mode: archive
target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package=
username: dev@wiretrustee.com
method: PUT
- name: yum
ids:
- netbird-ui-rpm
mode: archive
target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }}
username: dev@wiretrustee.com
method: PUT

View File

@@ -1,29 +0,0 @@
project_name: netbird-ui
builds:
- id: netbird-ui-darwin
dir: client/ui
binary: netbird-ui
env: [CGO_ENABLED=1]
goos:
- darwin
goarch:
- amd64
- arm64
gomips:
- hardfloat
- softfloat
ldflags:
- -s -w -X github.com/netbirdio/netbird/client/system.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
mod_timestamp: '{{ .CommitTimestamp }}'
tags:
- load_wgnt_from_rsrc
archives:
- builds:
- netbird-ui-darwin
checksum:
name_template: "{{ .ProjectName }}_darwin_checksums.txt"
changelog:
skip: true

View File

@@ -1,3 +1,2 @@
Mikhail Bragin (https://github.com/braginini)
Maycon Santos (https://github.com/mlsmaycon)
Wiretrustee UG (haftungsbeschränkt)

View File

@@ -1,132 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
dev@wiretrustee.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

View File

@@ -1,6 +1,6 @@
BSD 3-Clause License
Copyright (c) 2022 Wiretrustee UG (haftungsbeschränkt) & AUTHORS
Copyright (c) 2021 Wiretrustee AUTHORS
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

256
README.md
View File

@@ -1,34 +1,23 @@
<p align="center">
<strong>:hatching_chick: New release! NetBird Easy SSH</strong>.
<a href="https://github.com/netbirdio/netbird/releases/tag/v0.8.0">
Learn more
</a>
</p>
<br/>
<div align="center">
<p align="center">
<img width="234" src="docs/media/logo-full.png"/>
<img width="250" src="docs/media/logo-full.png"/>
</p>
<p>
<a href="https://github.com/netbirdio/netbird/blob/main/LICENSE">
<img src="https://img.shields.io/badge/license-BSD--3-blue" />
</a>
<a href="https://www.codacy.com/gh/netbirdio/netbird/dashboard?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=netbirdio/netbird&amp;utm_campaign=Badge_Grade"><img src="https://app.codacy.com/project/badge/Grade/e3013d046aec44cdb7462c8673b00976"/></a>
<br>
<a href="https://join.slack.com/t/netbirdio/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">
<img src="https://img.shields.io/badge/slack-@wiretrustee-red.svg?logo=slack"/>
</a>
<img src="https://img.shields.io/badge/license-BSD--3-blue" />
<img src="https://img.shields.io/docker/pulls/wiretrustee/management" />
<img src="https://badgen.net/badge/Open%20Source%3F/Yes%21/blue?icon=github" />
</p>
</div>
<p align="center">
<strong>
Start using NetBird at <a href="https://app.netbird.io/">app.netbird.io</a>
Start using Wiretrustee at <a href="https://app.wiretrustee.com/">app.wiretrustee.com</a>
<br/>
See <a href="https://netbird.io/docs/">Documentation</a>
See <a href="https://docs.wiretrustee.com">Documentation</a>
<br/>
Join our <a href="https://join.slack.com/t/netbirdio/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">Slack channel</a>
Join our <a href="https://join.slack.com/t/wiretrustee/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">Slack channel</a>
<br/>
</strong>
@@ -36,73 +25,198 @@
<br>
**NetBird is an open-source VPN management platform built on top of WireGuard® making it easy to create secure private networks for your organization or home.**
**Wiretrustee is an open-source VPN platform built on top of WireGuard® making it easy to create secure private networks for your organization or home.**
It requires zero configuration effort leaving behind the hassle of opening ports, complex firewall rules, VPN gateways, and so forth.
It requires zero configuration effort leaving behind the hassle of opening ports, complex firewall rules, vpn gateways, and so forth.
NetBird creates an overlay peer-to-peer network connecting machines automatically regardless of their location (home, office, datacenter, container, cloud or edge environments) unifying virtual private network management experience.
**Wiretrustee automates Wireguard-based networks, offering a management layer with:**
* Centralized Peer IP management with a UI dashboard.
* Encrypted peer-to-peer connections without a centralized VPN gateway.
* Automatic Peer discovery and configuration.
* UDP hole punching to establish peer-to-peer connections behind NAT, firewall, and without a public static IP.
* Connection relay fallback in case a peer-to-peer connection is not possible.
* Multitenancy (coming soon).
* Client application SSO with MFA (coming soon).
* Access Controls (coming soon).
* Activity Monitoring (coming soon).
* Private DNS (coming soon).
**Key features:**
- \[x] Automatic IP allocation and network management with a Web UI ([separate repo](https://github.com/netbirdio/dashboard))
- \[x] Automatic WireGuard peer (machine) discovery and configuration.
- \[x] Encrypted peer-to-peer connections without a central VPN gateway.
- \[x] Connection relay fallback in case a peer-to-peer connection is not possible.
- \[x] Desktop client applications for Linux, MacOS, and Windows (systray).
- \[x] Multiuser support - sharing network between multiple users.
- \[x] SSO and MFA support.
- \[x] Multicloud and hybrid-cloud support.
- \[x] Kernel WireGuard usage when possible.
- \[x] Access Controls - groups & rules.
- \[x] Remote SSH access without managing SSH keys.
**Coming soon:**
- \[ ] Network Routes.
- \[ ] Private DNS.
- \[ ] Mobile clients.
- \[ ] Network Activity Monitoring.
### Secure peer-to-peer VPN with SSO and MFA in minutes
### Secure peer-to-peer VPN in minutes
<p float="left" align="middle">
<img src="docs/media/peerA.gif" width="400"/>
<img src="docs/media/peerB.gif" width="400"/>
</p>
**Note**: The `main` branch may be in an *unstable or even broken state* during development.
For stable versions, see [releases](https://github.com/netbirdio/netbird/releases).
**Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases](https://github.com/wiretrustee/wiretrustee/releases).
### Start using NetBird
- Hosted version: [https://app.netbird.io/](https://app.netbird.io/).
- See our documentation for [Quickstart Guide](https://netbird.io/docs/getting-started/quickstart).
- If you are looking to self-host NetBird, check our [Self-Hosting Guide](https://netbird.io/docs/getting-started/self-hosting).
- Step-by-step [Installation Guide](https://netbird.io/docs/getting-started/installation) for different platforms.
- Web UI [repository](https://github.com/netbirdio/dashboard).
- 5 min [demo video](https://youtu.be/Tu9tPsUWaY0) on YouTube.
Hosted demo version:
[https://app.wiretrustee.com/](https://app.wiretrustee.com/peers).
[UI Dashboard Repo](https://github.com/wiretrustee/wiretrustee-dashboard)
### A bit on NetBird internals
- Every machine in the network runs [NetBird Agent (or Client)](client/) that manages WireGuard.
- Every agent connects to [Management Service](management/) that holds network state, manages peer IPs, and distributes network updates to agents (peers).
- NetBird agent uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between machines.
- Connection candidates are discovered with the help of [STUN](https://en.wikipedia.org/wiki/STUN) servers.
- Agents negotiate a connection through [Signal Service](signal/) passing p2p encrypted messages with candidates.
- Sometimes the NAT traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT) and p2p connection isn't possible. When this occurs the system falls back to a relay server called [TURN](https://en.wikipedia.org/wiki/Traversal_Using_Relays_around_NAT), and a secure WireGuard tunnel is established via the TURN server.
[Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in NetBird setups.
### A bit on Wiretrustee internals
* Wiretrustee features a Management Service that offers peer IP management and network updates distribution (e.g. when new peer joins the network).
* Wiretrustee uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between devices.
* Peers negotiate connection through [Signal Service](signal/).
* Signal Service uses public Wireguard keys to route messages between peers.
Contents of the messages sent between peers through the signaling server are encrypted with Wireguard keys, making it impossible to inspect them.
* Occasionally, the NAT-traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT).
When this occurs the system falls back to relay server (TURN), and a secure Wireguard tunnel is established via TURN server.
[Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in Wiretrustee setups.
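A rough sketch of the message encryption described above, assuming NaCl `box` over Curve25519 key pairs (WireGuard keys are also Curve25519, so the peers' existing WireGuard keys can play this role); this is illustrative and not the project's actual encryption code:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	// Each peer holds a Curve25519 key pair; here we generate fresh ones,
	// standing in for the peers' WireGuard keys.
	senderPub, senderPriv, _ := box.GenerateKey(rand.Reader)
	recipientPub, recipientPriv, _ := box.GenerateKey(rand.Reader)

	// Seal a signalling payload so the Signal Service can route it by public
	// key but never read its contents.
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}
	sealed := box.Seal(nonce[:], []byte("ICE candidates ..."), &nonce, recipientPub, senderPriv)

	// The recipient opens it with its private key and the sender's public key.
	var n [24]byte
	copy(n[:], sealed[:24])
	plain, ok := box.Open(nil, sealed[24:], &n, senderPub, recipientPriv)
	fmt.Println(ok, string(plain))
}
```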
<p float="left" align="middle">
<img src="https://netbird.io/docs/img/architecture/high-level-dia.png" width="700"/>
</p>
### Product Roadmap
- [Public Roadmap](https://github.com/wiretrustee/wiretrustee/projects/2)
- [Public Roadmap Progress Tracking](https://github.com/wiretrustee/wiretrustee/projects/1)
See a complete [architecture overview](https://netbird.io/docs/overview/architecture) for details.
### Client Installation
#### Linux
### Roadmap
- [Public Roadmap](https://github.com/netbirdio/netbird/projects/2)
**APT/Debian**
1. Add the repository:
```shell
sudo apt-get update
sudo apt-get install ca-certificates curl gnupg -y
curl -L https://pkgs.wiretrustee.com/debian/public.key | sudo apt-key add -
echo 'deb https://pkgs.wiretrustee.com/debian stable main' | sudo tee /etc/apt/sources.list.d/wiretrustee.list
```
2. Install the package
```shell
sudo apt-get update
sudo apt-get install wiretrustee
```
**RPM/Red hat**
1. Add the repository:
```shell
cat <<EOF | sudo tee /etc/yum.repos.d/wiretrustee.repo
[Wiretrustee]
name=Wiretrustee
baseurl=https://pkgs.wiretrustee.com/yum/
enabled=1
gpgcheck=0
gpgkey=https://pkgs.wiretrustee.com/yum/repodata/repomd.xml.key
repo_gpgcheck=1
EOF
```
2. Install the package
```shell
sudo yum install wiretrustee
```
#### MACOS
**Brew install**
1. Download and install Brew at https://brew.sh/
2. Install the client
```shell
brew install wiretrustee/client/wiretrustee
```
**Installation from binary**
1. Checkout Wiretrustee [releases](https://github.com/wiretrustee/wiretrustee/releases/latest)
2. Download the latest release (**Switch VERSION to the latest**):
```shell
curl -o ./wiretrustee_<VERSION>_darwin_amd64.tar.gz https://github.com/wiretrustee/wiretrustee/releases/download/v<VERSION>/wiretrustee_<VERSION>_darwin_amd64.tar.gz
```
3. Decompress
```shell
tar xzf ./wiretrustee_<VERSION>_darwin_amd64.tar.gz
sudo mv wiretrustee /usr/local/bin/wiretrustee
chmod +x /usr/local/bin/wiretrustee
```
After that you may need to add /usr/local/bin to your Mac's PATH environment variable:
````shell
export PATH=$PATH:/usr/local/bin
````
### Community projects
- [NetBird on OpenWRT](https://github.com/messense/openwrt-netbird)
#### Windows
1. Checkout Wiretrustee [releases](https://github.com/wiretrustee/wiretrustee/releases/latest)
2. Download the latest Windows release installer ```wiretrustee_installer_<VERSION>_windows_amd64.exe``` (**Switch VERSION to the latest**):
3. Proceed with installation steps
4. This will install the client under C:\\Program Files\\Wiretrustee and add the client service
5. After installing, you can follow the [Client Configuration](#Client-Configuration) steps.
> To uninstall the client and service, you can use Add/Remove programs
### Client Configuration
1. Login to the Management Service. You need to have a `setup key` in hand (see ).
For **Unix** systems:
```shell
sudo wiretrustee up --setup-key <SETUP KEY>
```
For **Windows** systems, start powershell as administrator and:
```shell
wiretrustee up --setup-key <SETUP KEY>
```
For **Docker**, you can run with the following command:
```shell
docker run --network host --privileged --rm -d -e WT_SETUP_KEY=<SETUP KEY> -v wiretrustee-client:/etc/wiretrustee wiretrustee/wiretrustee:<TAG>
```
> TAG > 0.3.0 version
Alternatively, if you are hosting your own Management Service, provide the `--management-url` property pointing to your Management Service:
```shell
sudo wiretrustee up --setup-key <SETUP KEY> --management-url https://localhost:33073
```
> You could also omit the `--setup-key` property. In this case the tool will prompt for the key.
2. Check your IP:
For **macOS** systems:
````shell
sudo ipconfig getifaddr utun100
````
For **Linux** systems:
```shell
ip addr show wt0
```
For **Windows** systems:
```shell
netsh interface ip show config name="wt0"
```
3. Repeat on other machines.
### Running Dashboard, Management, Signal and Coturn
Wiretrustee uses [Auth0](https://auth0.com) for user authentication and authorization; therefore, you will need to create a free account
and configure the Auth0 variables in the compose file (dashboard) and in the management config file.
We chose Auth0 to "outsource" the user management part of our platform because we believe that implementing proper user auth is not a trivial task and requires a significant amount of time to get right. We focused on connectivity instead.
It is worth mentioning that the Auth0 dependency is the only one that cannot be self-hosted.
Configuring Wiretrustee Auth0 integration:
- check [How to run](https://github.com/wiretrustee/wiretrustee-dashboard#how-to-run) to obtain Auth0 environment variables for UI Dashboard
- set these variables in the [environment section of the docker-compose file](https://github.com/wiretrustee/wiretrustee/blob/main/infrastructure_files/docker-compose.yml)
- check [Auth0 Golang API Guide](https://auth0.com/docs/quickstart/backend/golang) to obtain ```AuthIssuer```, ```AuthAudience```, and ```AuthKeysLocation```
- set these properties in the [management config files](https://github.com/wiretrustee/wiretrustee/blob/main/infrastructure_files/management.json#L33)
Under infrastructure_files we have a docker-compose example to run the Dashboard, Wiretrustee Management and Signal services, plus an instance of [Coturn](https://github.com/coturn/coturn). It also provides a turnserver.conf file as a simple example of Coturn configuration.
You can edit the turnserver.conf file and change its Realm setting (defaults to wiretrustee.com) to your own domain and its user setting (defaults to username1:password1) to **proper credentials**.
The example is set to use the official images from Wiretrustee and Coturn. You can find our documentation to run the signal server in Docker in [Running the Signal service](#running-the-signal-service), the management in [Management](./management/README.md), and the official Coturn documentation [here](https://hub.docker.com/r/coturn/coturn).
> Run Coturn at your own risk; we are just providing an example. Be sure to follow security best practices and to configure proper credentials, as this service can be exploited and you may face large data transfer charges.
Also, if you have an SSL certificate for Coturn, you can modify the docker-compose.yml file to point to its files on your host machine, then switch the domainname to your own SSL domain. If you don't already have an SSL certificate, you can follow [Certbot's](https://certbot.eff.org/docs/intro.html) official documentation
to generate one from [Let's Encrypt](https://letsencrypt.org/), or, we found that the example provided by [BigBlueButton](https://docs.bigbluebutton.org/2.2/setup-turn-server.html#generating-tls-certificates) covers the basics of configuring Coturn with Let's Encrypt certs.
> The Wiretrustee Management service can generate and maintain the certificates automatically; all you need to do is run the service on a host with a public IP, configure a valid DNS record pointing to that IP, and uncomment the 443 ports and command lines in the docker-compose.yml file.
Simple docker-compose execution:
````shell
cd infrastructure_files
docker-compose up -d
````
You can check logs by running:
````shell
cd infrastructure_files
docker-compose logs signal
docker-compose logs management
docker-compose logs coturn
````
If you need to stop the services, run the following:
````shell
cd infrastructure_files
docker-compose down
````
### Testimonials
We use open-source technologies like [WireGuard®](https://www.wireguard.com/), [Pion ICE (WebRTC)](https://github.com/pion/ice), and [Coturn](https://github.com/coturn/coturn). We very much appreciate the work behind these projects and would be grateful if you could support them in any way (e.g. by giving a star or a contribution).
### Legal
[WireGuard](https://wireguard.com/) is a registered trademark of Jason A. Donenfeld.

View File

@@ -1,7 +1,4 @@
FROM gcr.io/distroless/base:debug
ENV WT_LOG_FILE=console
ENV PATH=/sbin:/usr/sbin:/bin:/usr/bin:/busybox
SHELL ["/busybox/sh","-c"]
RUN sed -i -E 's/(^root:.+)\/sbin\/nologin/\1\/busybox\/sh/g' /etc/passwd
ENTRYPOINT [ "/go/bin/netbird","up"]
COPY netbird /go/bin/netbird
ENTRYPOINT [ "/go/bin/wiretrustee","up"]
COPY wiretrustee /go/bin/wiretrustee

View File

@@ -1,46 +0,0 @@
package cmd
import (
"context"
"github.com/netbirdio/netbird/util"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/proto"
)
var downCmd = &cobra.Command{
Use: "down",
Short: "down netbird connections",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := util.InitLog(logLevel, "console")
if err != nil {
log.Errorf("failed initializing log %v", err)
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
defer cancel()
conn, err := DialClientGRPCServer(ctx, daemonAddr)
if err != nil {
log.Errorf("failed to connect to service CLI interface %v", err)
return err
}
defer conn.Close()
daemonClient := proto.NewDaemonServiceClient(conn)
if _, err := daemonClient.Down(ctx, &proto.DownRequest{}); err != nil {
log.Errorf("call service down method: %v", err)
return err
}
return nil
},
}

View File

@@ -1,203 +1,147 @@
package cmd
import (
"bufio"
"context"
"fmt"
"github.com/skratchdot/open-golang/open"
"google.golang.org/grpc/codes"
gstatus "google.golang.org/grpc/status"
"time"
"github.com/netbirdio/netbird/util"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/proto"
"github.com/wiretrustee/wiretrustee/client/internal"
mgm "github.com/wiretrustee/wiretrustee/management/client"
mgmProto "github.com/wiretrustee/wiretrustee/management/proto"
"github.com/wiretrustee/wiretrustee/util"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"os"
)
var loginCmd = &cobra.Command{
Use: "login",
Short: "login to the Netbird Management Service (first run)",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
var (
loginCmd = &cobra.Command{
Use: "login",
Short: "login to the Wiretrustee Management Service (first run)",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := util.InitLog(logLevel, "console")
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
ctx := internal.CtxInitState(context.Background())
// workaround to run without service
if logFile == "console" {
err = handleRebrand(cmd)
err := util.InitLog(logLevel, logFile)
if err != nil {
log.Errorf("failed initializing log %v", err)
return err
}
config, err := internal.GetConfig(managementURL, adminURL, configPath, preSharedKey)
config, err := internal.GetConfig(managementURL, configPath, preSharedKey)
if err != nil {
return fmt.Errorf("get config file: %v", err)
log.Errorf("failed getting config %s %v", configPath, err)
return err
}
config, _ = internal.UpdateOldManagementPort(ctx, config, configPath)
err = foregroundLogin(ctx, cmd, config, setupKey)
//validate our peer's Wireguard PRIVATE key
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
return fmt.Errorf("foreground login failed: %v", err)
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
return err
}
cmd.Println("Logging successfully")
return nil
}
conn, err := DialClientGRPCServer(ctx, daemonAddr)
if err != nil {
return fmt.Errorf("failed to connect to daemon error: %v\n"+
"If the daemon is not running please run: "+
"\nnetbird service install \nnetbird service start\n", err)
}
defer conn.Close()
ctx := context.Background()
client := proto.NewDaemonServiceClient(conn)
loginRequest := proto.LoginRequest{
SetupKey: setupKey,
PreSharedKey: preSharedKey,
ManagementUrl: managementURL,
}
var loginErr error
var loginResp *proto.LoginResponse
err = WithBackOff(func() error {
var backOffErr error
loginResp, backOffErr = client.Login(ctx, &loginRequest)
if s, ok := gstatus.FromError(backOffErr); ok && (s.Code() == codes.InvalidArgument ||
s.Code() == codes.PermissionDenied ||
s.Code() == codes.NotFound ||
s.Code() == codes.Unimplemented) {
loginErr = backOffErr
return nil
mgmTlsEnabled := false
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
return backOffErr
})
if err != nil {
return fmt.Errorf("login backoff cycle failed: %v", err)
}
if loginErr != nil {
return fmt.Errorf("login failed: %v", loginErr)
}
if loginResp.NeedsSSOLogin {
openURL(cmd, loginResp.VerificationURIComplete)
_, err = client.WaitSSOLogin(ctx, &proto.WaitSSOLoginRequest{UserCode: loginResp.UserCode})
log.Debugf("connecting to Management Service %s", config.ManagementURL.String())
mgmClient, err := mgm.NewClient(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
if err != nil {
return fmt.Errorf("waiting sso login failed with: %v", err)
log.Errorf("failed connecting to Management Service %s %v", config.ManagementURL.String(), err)
return err
}
}
log.Debugf("connected to anagement Service %s", config.ManagementURL.String())
cmd.Println("Logging successfully")
return nil
},
}
func foregroundLogin(ctx context.Context, cmd *cobra.Command, config *internal.Config, setupKey string) error {
needsLogin := false
err := WithBackOff(func() error {
err := internal.Login(ctx, config, "", "")
if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
needsLogin = true
return nil
}
return err
})
if err != nil {
return fmt.Errorf("backoff cycle failed: %v", err)
}
jwtToken := ""
if setupKey == "" && needsLogin {
tokenInfo, err := foregroundGetTokenInfo(ctx, cmd, config)
if err != nil {
return fmt.Errorf("interactive sso login failed: %v", err)
}
jwtToken = tokenInfo.AccessToken
}
err = WithBackOff(func() error {
err := internal.Login(ctx, config, setupKey, jwtToken)
if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
return nil
}
return err
})
if err != nil {
return fmt.Errorf("backoff cycle failed: %v", err)
}
return nil
}
func foregroundGetTokenInfo(ctx context.Context, cmd *cobra.Command, config *internal.Config) (*internal.TokenInfo, error) {
providerConfig, err := internal.GetDeviceAuthorizationFlowInfo(ctx, config)
if err != nil {
s, ok := gstatus.FromError(err)
if ok && s.Code() == codes.NotFound {
return nil, fmt.Errorf("no SSO provider returned from management. " +
"If you are using hosting Netbird see documentation at " +
"https://github.com/netbirdio/netbird/tree/main/management for details")
} else if ok && s.Code() == codes.Unimplemented {
mgmtURL := managementURL
if mgmtURL == "" {
mgmtURL = internal.ManagementURLDefault().String()
serverKey, err := mgmClient.GetServerPublicKey()
if err != nil {
log.Errorf("failed while getting Management Service public key: %v", err)
return err
}
return nil, fmt.Errorf("the management server, %s, does not support SSO providers, "+
"please update your servver or use Setup Keys to login", mgmtURL)
_, err = loginPeer(*serverKey, mgmClient, setupKey)
if err != nil {
log.Errorf("failed logging-in peer on Management Service : %v", err)
return err
}
err = mgmClient.Close()
if err != nil {
log.Errorf("failed closing Management Service client: %v", err)
return err
}
return nil
},
}
)
// loginPeer attempts to login to Management Service. If peer wasn't registered, tries the registration flow.
func loginPeer(serverPublicKey wgtypes.Key, client *mgm.Client, setupKey string) (*mgmProto.LoginResponse, error) {
loginResp, err := client.Login(serverPublicKey)
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.PermissionDenied {
log.Debugf("peer registration required")
return registerPeer(serverPublicKey, client, setupKey)
} else {
return nil, fmt.Errorf("getting device authorization flow info failed with error: %v", err)
return nil, err
}
}
hostedClient := internal.NewHostedDeviceFlow(
providerConfig.ProviderConfig.Audience,
providerConfig.ProviderConfig.ClientID,
providerConfig.ProviderConfig.TokenEndpoint,
providerConfig.ProviderConfig.DeviceAuthEndpoint,
)
log.Info("peer has successfully logged-in to Management Service")
flowInfo, err := hostedClient.RequestDeviceCode(context.TODO())
if err != nil {
return nil, fmt.Errorf("getting a request device code failed: %v", err)
}
openURL(cmd, flowInfo.VerificationURIComplete)
waitTimeout := time.Duration(flowInfo.ExpiresIn)
waitCTX, c := context.WithTimeout(context.TODO(), waitTimeout*time.Second)
defer c()
tokenInfo, err := hostedClient.WaitToken(waitCTX, flowInfo)
if err != nil {
return nil, fmt.Errorf("waiting for browser login failed: %v", err)
}
return &tokenInfo, nil
return loginResp, nil
}
func openURL(cmd *cobra.Command, verificationURIComplete string) {
err := open.Run(verificationURIComplete)
cmd.Printf("Please do the SSO login in your browser. \n" +
"If your browser didn't open automatically, use this URL to log in:\n\n" +
" " + verificationURIComplete + " \n\n")
if err != nil {
cmd.Printf("Alternatively, you may want to use a setup key, see:\n\n https://www.netbird.io/docs/overview/setup-keys\n")
// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key.
// Otherwise tries to register with the provided setupKey via command line.
func registerPeer(serverPublicKey wgtypes.Key, client *mgm.Client, setupKey string) (*mgmProto.LoginResponse, error) {
var err error
if setupKey == "" {
setupKey, err = promptPeerSetupKey()
if err != nil {
log.Errorf("failed getting setup key from user: %s", err)
return nil, err
}
}
validSetupKey, err := uuid.Parse(setupKey)
if err != nil {
return nil, err
}
log.Debugf("sending peer registration request to Management Service")
loginResp, err := client.Register(serverPublicKey, validSetupKey.String())
if err != nil {
log.Errorf("failed registering peer %v", err)
return nil, err
}
log.Infof("peer has been successfully registered on Management Service")
return loginResp, nil
}
// promptPeerSetupKey prompts user to enter Setup Key
func promptPeerSetupKey() (string, error) {
fmt.Print("Enter setup key: ")
s := bufio.NewScanner(os.Stdin)
for s.Scan() {
input := s.Text()
if input != "" {
return input, nil
}
fmt.Println("Specified key is empty, try again:")
}
return "", s.Err()
}
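
registerPeer expects the setup key to be a UUID; uuid.Parse both validates it and yields a canonical form. A small sketch of that check (assuming the uuid package here is github.com/google/uuid):

// normalizeSetupKey rejects anything that is not a UUID and returns the
// canonical lower-case representation.
func normalizeSetupKey(setupKey string) (string, error) {
    k, err := uuid.Parse(setupKey)
    if err != nil {
        return "", fmt.Errorf("invalid setup key %q: %v", setupKey, err)
    }
    return k.String(), nil
}

// normalizeSetupKey("A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
// -> "a2c8e62b-38f5-4553-b31e-dd66c696cebb", nil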


@@ -2,16 +2,34 @@ package cmd
import (
"fmt"
"github.com/wiretrustee/wiretrustee/client/internal"
"github.com/wiretrustee/wiretrustee/iface"
mgmt "github.com/wiretrustee/wiretrustee/management/server"
"github.com/wiretrustee/wiretrustee/util"
"path/filepath"
"strings"
"testing"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/iface"
"github.com/netbirdio/netbird/util"
)
var mgmAddr string
func TestLogin_Start(t *testing.T) {
config := &mgmt.Config{}
_, err := util.ReadJson("../testdata/management.json", config)
if err != nil {
t.Fatal(err)
}
testDir := t.TempDir()
config.Datadir = testDir
err = util.CopyFileContents("../testdata/store.json", filepath.Join(testDir, "store.json"))
if err != nil {
t.Fatal(err)
}
_, listener := startManagement(config, t)
mgmAddr = listener.Addr().String()
}
func TestLogin(t *testing.T) {
mgmAddr := startTestingServices(t)
tempDir := t.TempDir()
confPath := tempDir + "/config.json"
@@ -20,8 +38,6 @@ func TestLogin(t *testing.T) {
"login",
"--config",
confPath,
"--log-file",
"console",
"--setup-key",
strings.ToUpper("a2c8e62b-38f5-4553-b31e-dd66c696cebb"),
"--management-url",
@@ -44,7 +60,7 @@ func TestLogin(t *testing.T) {
}
if actualConf.WgIface != iface.WgInterfaceDefault {
t.Errorf("expected WgIfaceName %s got %s", iface.WgInterfaceDefault, actualConf.WgIface)
t.Errorf("expected WgIface %s got %s", iface.WgInterfaceDefault, actualConf.WgIface)
}
if len(actualConf.PrivateKey) == 0 {


@@ -1,117 +1,77 @@
package cmd
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"os/signal"
"path"
"runtime"
"strings"
"syscall"
"time"
"github.com/cenkalti/backoff/v4"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/netbirdio/netbird/client/internal"
"github.com/wiretrustee/wiretrustee/client/internal"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
)
var (
configPath string
defaultConfigPathDir string
defaultConfigPath string
oldDefaultConfigPathDir string
oldDefaultConfigPath string
logLevel string
defaultLogFileDir string
defaultLogFile string
oldDefaultLogFileDir string
oldDefaultLogFile string
logFile string
daemonAddr string
managementURL string
adminURL string
setupKey string
preSharedKey string
rootCmd = &cobra.Command{
Use: "netbird",
Short: "",
Long: "",
SilenceUsage: true,
configPath string
defaultConfigPath string
logLevel string
defaultLogFile string
logFile string
managementURL string
setupKey string
preSharedKey string
rootCmd = &cobra.Command{
Use: "wiretrustee",
Short: "",
Long: "",
}
// Execution control channel for stopCh signal
stopCh chan int
cleanupCh chan struct{}
)
// Execute executes the root command.
func Execute() error {
return rootCmd.Execute()
}
func init() {
defaultConfigPathDir = "/etc/netbird/"
defaultLogFileDir = "/var/log/netbird/"
oldDefaultConfigPathDir = "/etc/wiretrustee/"
oldDefaultLogFileDir = "/var/log/wiretrustee/"
stopCh = make(chan int)
cleanupCh = make(chan struct{})
defaultConfigPath = "/etc/wiretrustee/config.json"
defaultLogFile = "/var/log/wiretrustee/client.log"
if runtime.GOOS == "windows" {
defaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
defaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
oldDefaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
oldDefaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
defaultConfigPath = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\" + "config.json"
defaultLogFile = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\" + "client.log"
}
defaultConfigPath = defaultConfigPathDir + "config.json"
defaultLogFile = defaultLogFileDir + "client.log"
oldDefaultConfigPath = oldDefaultConfigPathDir + "config.json"
oldDefaultLogFile = oldDefaultLogFileDir + "client.log"
defaultDaemonAddr := "unix:///var/run/netbird.sock"
if runtime.GOOS == "windows" {
defaultDaemonAddr = "tcp://127.0.0.1:41731"
}
rootCmd.PersistentFlags().StringVar(&daemonAddr, "daemon-addr", defaultDaemonAddr, "Daemon service address to serve CLI requests [unix|tcp]://[path|host:port]")
rootCmd.PersistentFlags().StringVar(&managementURL, "management-url", "", fmt.Sprintf("Management Service URL [http|https]://[host]:[port] (default \"%s\")", internal.ManagementURLDefault().String()))
rootCmd.PersistentFlags().StringVar(&adminURL, "admin-url", "https://app.netbird.io", "Admin Panel URL [http|https]://[host]:[port]")
rootCmd.PersistentFlags().StringVar(&configPath, "config", defaultConfigPath, "Netbird config file location")
rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "sets Netbird log level")
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Netbird log path. If console is specified the log will be output to stdout")
rootCmd.PersistentFlags().StringVar(&configPath, "config", defaultConfigPath, "Wiretrustee config file location")
rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "sets Wiretrustee log level")
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Wiretrustee log path. If console is specified the log will be output to stdout")
rootCmd.PersistentFlags().StringVar(&setupKey, "setup-key", "", "Setup key obtained from the Management Service Dashboard (used to register peer)")
rootCmd.PersistentFlags().StringVar(&preSharedKey, "preshared-key", "", "Sets Wireguard PreSharedKey property. If set, then only peers that have the same key can communicate.")
rootCmd.AddCommand(serviceCmd)
rootCmd.AddCommand(upCmd)
rootCmd.AddCommand(downCmd)
rootCmd.AddCommand(statusCmd)
rootCmd.AddCommand(loginCmd)
rootCmd.AddCommand(versionCmd)
rootCmd.AddCommand(sshCmd)
serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd) // service control commands are subcommands of service
serviceCmd.AddCommand(installCmd, uninstallCmd) // service installer commands are subcommands of service
}
// SetupCloseHandler handles SIGTERM signal and exits with success
func SetupCloseHandler(ctx context.Context, cancel context.CancelFunc) {
termCh := make(chan os.Signal, 1)
signal.Notify(termCh, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
func SetupCloseHandler() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
go func() {
done := ctx.Done()
select {
case <-done:
case <-termCh:
for range c {
log.Info("shutdown signal received")
stopCh <- 0
}
log.Info("shutdown signal received")
cancel()
}()
}
@@ -119,171 +79,23 @@ func SetupCloseHandler(ctx context.Context, cancel context.CancelFunc) {
func SetFlagsFromEnvVars() {
flags := rootCmd.PersistentFlags()
flags.VisitAll(func(f *pflag.Flag) {
oldEnvVar := FlagNameToEnvVar(f.Name, "WT_")
if value, present := os.LookupEnv(oldEnvVar); present {
envVar := FlagNameToEnvVar(f.Name)
if value, present := os.LookupEnv(envVar); present {
err := flags.Set(f.Name, value)
if err != nil {
log.Infof("unable to configure flag %s using variable %s, err: %v", f.Name, oldEnvVar, err)
}
}
newEnvVar := FlagNameToEnvVar(f.Name, "NB_")
if value, present := os.LookupEnv(newEnvVar); present {
err := flags.Set(f.Name, value)
if err != nil {
log.Infof("unable to configure flag %s using variable %s, err: %v", f.Name, newEnvVar, err)
log.Infof("unable to configure flag %s using variable %s, err: %v", f.Name, envVar, err)
}
}
})
}
// FlagNameToEnvVar converts flag name to environment var name adding a prefix,
// replacing dashes and making all uppercase (e.g. setup-keys is converted to NB_SETUP_KEYS according to the input prefix)
func FlagNameToEnvVar(cmdFlag string, prefix string) string {
parsed := strings.ReplaceAll(cmdFlag, "-", "_")
// replacing dashes and making all uppercase (e.g. setup-keys is converted to WT_SETUP_KEYS)
func FlagNameToEnvVar(f string) string {
prefix := "WT_"
parsed := strings.ReplaceAll(f, "-", "_")
upper := strings.ToUpper(parsed)
return prefix + upper
}
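
The conversion is purely mechanical: dashes become underscores, the result is upper-cased, and the prefix is prepended. A sketch of how this could be exercised in a test (the two-argument form is the new one; the old form hard-codes WT_):

func TestFlagNameToEnvVar(t *testing.T) {
    // setup-key -> SETUP_KEY, then the chosen prefix is prepended
    if got := FlagNameToEnvVar("setup-key", "NB_"); got != "NB_SETUP_KEY" {
        t.Errorf("expected NB_SETUP_KEY, got %s", got)
    }
    if got := FlagNameToEnvVar("management-url", "WT_"); got != "WT_MANAGEMENT_URL" {
        t.Errorf("expected WT_MANAGEMENT_URL, got %s", got)
    }
}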
// DialClientGRPCServer returns a client connection to the daemon server.
func DialClientGRPCServer(ctx context.Context, addr string) (*grpc.ClientConn, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second*3)
defer cancel()
return grpc.DialContext(
ctx,
strings.TrimPrefix(addr, "tcp://"),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithBlock(),
)
}
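
Every CLI subcommand that talks to the daemon follows the same pattern: dial with the 3-second timeout above, defer closing the connection, and wrap it in a DaemonService client. A minimal sketch using the proto package shown elsewhere in this change:

// daemonStatus asks the local daemon for its status; daemonAddr defaults to
// unix:///var/run/netbird.sock (tcp://127.0.0.1:41731 on Windows).
func daemonStatus(ctx context.Context, daemonAddr string) (string, error) {
    conn, err := DialClientGRPCServer(ctx, daemonAddr)
    if err != nil {
        return "", fmt.Errorf("connect to daemon: %v", err)
    }
    defer conn.Close()

    resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{})
    if err != nil {
        return "", err
    }
    return resp.GetStatus(), nil
}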
// WithBackOff execute function in backoff cycle.
func WithBackOff(bf func() error) error {
return backoff.RetryNotify(bf, CLIBackOffSettings, func(err error, duration time.Duration) {
log.Warnf("retrying Login to the Management service in %v due to error %v", duration, err)
})
}
// CLIBackOffSettings is default backoff settings for CLI commands.
var CLIBackOffSettings = &backoff.ExponentialBackOff{
InitialInterval: time.Second,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: 10 * time.Second,
MaxElapsedTime: 30 * time.Second,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
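
With these settings a retried call waits from 1s up to 10s between attempts and gives up after roughly 30s in total. One way to stop retrying earlier on a clearly permanent failure is backoff.Permanent, sketched here (the grpc status/codes packages are imported elsewhere in this package):

// callWithRetries retries op under CLIBackOffSettings; wrapping an error in
// backoff.Permanent aborts the cycle immediately instead of waiting out the
// full MaxElapsedTime.
func callWithRetries(op func() error) error {
    return WithBackOff(func() error {
        err := op()
        if s, ok := gstatus.FromError(err); ok && s.Code() == codes.PermissionDenied {
            return backoff.Permanent(err) // an auth failure will not fix itself
        }
        return err
    })
}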
func handleRebrand(cmd *cobra.Command) error {
var err error
if logFile == defaultLogFile {
if migrateToNetbird(oldDefaultLogFile, defaultLogFile) {
cmd.Printf("will copy Log dir %s and its content to %s\n", oldDefaultLogFileDir, defaultLogFileDir)
err = cpDir(oldDefaultLogFileDir, defaultLogFileDir)
if err != nil {
return err
}
}
}
if configPath == defaultConfigPath {
if migrateToNetbird(oldDefaultConfigPath, defaultConfigPath) {
cmd.Printf("will copy Config dir %s and its content to %s\n", oldDefaultConfigPathDir, defaultConfigPathDir)
err = cpDir(oldDefaultConfigPathDir, defaultConfigPathDir)
if err != nil {
return err
}
}
}
return nil
}
func cpFile(src, dst string) error {
var err error
var srcfd *os.File
var dstfd *os.File
var srcinfo os.FileInfo
if srcfd, err = os.Open(src); err != nil {
return err
}
defer srcfd.Close()
if dstfd, err = os.Create(dst); err != nil {
return err
}
defer dstfd.Close()
if _, err = io.Copy(dstfd, srcfd); err != nil {
return err
}
if srcinfo, err = os.Stat(src); err != nil {
return err
}
return os.Chmod(dst, srcinfo.Mode())
}
func copySymLink(source, dest string) error {
link, err := os.Readlink(source)
if err != nil {
return err
}
return os.Symlink(link, dest)
}
func cpDir(src string, dst string) error {
var err error
var fds []os.FileInfo
var srcinfo os.FileInfo
if srcinfo, err = os.Stat(src); err != nil {
return err
}
if err = os.MkdirAll(dst, srcinfo.Mode()); err != nil {
return err
}
if fds, err = ioutil.ReadDir(src); err != nil {
return err
}
for _, fd := range fds {
srcfp := path.Join(src, fd.Name())
dstfp := path.Join(dst, fd.Name())
fileInfo, err := os.Stat(srcfp)
if err != nil {
return fmt.Errorf("fouldn't get fileInfo; %v", err)
}
switch fileInfo.Mode() & os.ModeType {
case os.ModeSymlink:
if err = copySymLink(srcfp, dstfp); err != nil {
return fmt.Errorf("failed to copy from %s to %s; %v", srcfp, dstfp, err)
}
case os.ModeDir:
if err = cpDir(srcfp, dstfp); err != nil {
return fmt.Errorf("failed to copy from %s to %s; %v", srcfp, dstfp, err)
}
default:
if err = cpFile(srcfp, dstfp); err != nil {
return fmt.Errorf("failed to copy from %s to %s; %v", srcfp, dstfp, err)
}
}
}
return nil
}
func migrateToNetbird(oldPath, newPath string) bool {
_, errOld := os.Stat(oldPath)
_, errNew := os.Stat(newPath)
if errors.Is(errOld, fs.ErrNotExist) || errNew == nil {
return false
}
return true
}
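
In other words, the rebrand migration only fires when the old Wiretrustee file is present and the Netbird one has not been created yet; an already-migrated installation is left untouched. The same decision written as a stand-alone sketch:

// needsNetbirdMigration reports whether old Wiretrustee data should be copied
// over: only when the old file exists and the new file does not yet exist.
func needsNetbirdMigration(oldPath, newPath string) bool {
    if _, err := os.Stat(oldPath); errors.Is(err, fs.ErrNotExist) {
        return false // nothing to migrate
    }
    if _, err := os.Stat(newPath); err == nil {
        return false // already migrated
    }
    return true
}

// e.g. needsNetbirdMigration("/etc/wiretrustee/config.json", "/etc/netbird/config.json")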


@@ -1,36 +1,20 @@
package cmd
import (
"context"
"runtime"
"github.com/kardianos/service"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"github.com/netbirdio/netbird/client/internal"
)
type program struct {
ctx context.Context
cancel context.CancelFunc
serv *grpc.Server
}
func newProgram(ctx context.Context, cancel context.CancelFunc) *program {
ctx = internal.CtxInitState(ctx)
return &program{ctx: ctx, cancel: cancel}
cmd *cobra.Command
args []string
}
func newSVCConfig() *service.Config {
name := "netbird"
if runtime.GOOS == "windows" {
name = "Netbird"
}
return &service.Config{
Name: name,
DisplayName: "Netbird",
Name: "wiretrustee",
DisplayName: "Wiretrustee",
Description: "A WireGuard-based mesh network that connects your devices into a single private network.",
}
}
@@ -44,7 +28,9 @@ func newSVC(prg *program, conf *service.Config) (service.Service, error) {
return s, nil
}
var serviceCmd = &cobra.Command{
Use: "service",
Short: "manages Netbird service",
}
var (
serviceCmd = &cobra.Command{
Use: "service",
Short: "manages wiretrustee service",
}
)


@@ -1,216 +1,152 @@
package cmd
import (
"context"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/kardianos/service"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/proto"
"github.com/netbirdio/netbird/client/server"
"github.com/netbirdio/netbird/util"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"github.com/wiretrustee/wiretrustee/util"
"time"
)
func (p *program) Start(svc service.Service) error {
func (p *program) Start(service.Service) error {
// Start should not block. Do the actual work async.
log.Info("starting Netbird service") //nolint
// in any case, even if the configuration does not exist, we run the daemon to serve the CLI gRPC API.
p.serv = grpc.NewServer()
split := strings.Split(daemonAddr, "://")
switch split[0] {
case "unix":
// cleanup failed close
stat, err := os.Stat(split[1])
if err == nil && !stat.IsDir() {
if err := os.Remove(split[1]); err != nil {
log.Debugf("remove socket file: %v", err)
}
}
case "tcp":
default:
return fmt.Errorf("unsupported daemon address protocol: %v", split[0])
}
listen, err := net.Listen(split[0], split[1])
if err != nil {
return fmt.Errorf("failed to listen daemon interface: %w", err)
}
log.Info("starting service") //nolint
go func() {
defer listen.Close()
if split[0] == "unix" {
err = os.Chmod(split[1], 0666)
if err != nil {
log.Errorf("failed setting daemon permissions: %v", split[1])
return
}
}
serverInstance := server.New(p.ctx, managementURL, adminURL, configPath, logFile)
if err := serverInstance.Start(); err != nil {
log.Fatalf("failed to start daemon: %v", err)
}
proto.RegisterDaemonServiceServer(p.serv, serverInstance)
log.Printf("started daemon server: %v", split[1])
if err := p.serv.Serve(listen); err != nil {
log.Errorf("failed to serve daemon requests: %v", err)
err := runClient()
if err != nil {
log.Errorf("stopped Wiretrustee client app due to error: %v", err)
return
}
}()
return nil
}
func (p *program) Stop(srv service.Service) error {
p.cancel()
func (p *program) Stop(service.Service) error {
go func() {
stopCh <- 1
}()
if p.serv != nil {
p.serv.Stop()
select {
case <-cleanupCh:
case <-time.After(time.Second * 10):
log.Warnf("failed waiting for service cleanup, terminating")
}
time.Sleep(time.Second * 2)
log.Info("stopped Netbird service") //nolint
log.Info("stopped Wiretrustee service") //nolint
return nil
}
var runCmd = &cobra.Command{
Use: "run",
Short: "runs Netbird as service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
var (
runCmd = &cobra.Command{
Use: "run",
Short: "runs wiretrustee as service",
Run: func(cmd *cobra.Command, args []string) {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := util.InitLog(logLevel, logFile)
if err != nil {
log.Errorf("failed initializing log %v", err)
return
}
err := handleRebrand(cmd)
if err != nil {
return err
}
SetupCloseHandler()
err = util.InitLog(logLevel, logFile)
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
prg := &program{
cmd: cmd,
args: args,
}
ctx, cancel := context.WithCancel(cmd.Context())
SetupCloseHandler(ctx, cancel)
s, err := newSVC(prg, newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return
}
err = s.Run()
if err != nil {
cmd.PrintErrln(err)
return
}
cmd.Printf("Wiretrustee service is running")
},
}
)
s, err := newSVC(newProgram(ctx, cancel), newSVCConfig())
if err != nil {
return err
}
err = s.Run()
if err != nil {
return err
}
cmd.Printf("Netbird service is running")
return nil
},
}
var (
startCmd = &cobra.Command{
Use: "start",
Short: "starts wiretrustee service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
var startCmd = &cobra.Command{
Use: "start",
Short: "starts Netbird service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
err := util.InitLog(logLevel, logFile)
if err != nil {
log.Errorf("failed initializing log %v", err)
return err
}
s, err := newSVC(&program{}, newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return err
}
err = s.Start()
if err != nil {
cmd.PrintErrln(err)
return err
}
cmd.Println("Wiretrustee service has been started")
return nil
},
}
)
cmd.SetOut(cmd.OutOrStdout())
var (
stopCmd = &cobra.Command{
Use: "stop",
Short: "stops wiretrustee service",
Run: func(cmd *cobra.Command, args []string) {
SetFlagsFromEnvVars()
err := handleRebrand(cmd)
if err != nil {
return err
}
err := util.InitLog(logLevel, logFile)
if err != nil {
log.Errorf("failed initializing log %v", err)
}
s, err := newSVC(&program{}, newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return
}
err = s.Stop()
if err != nil {
cmd.PrintErrln(err)
return
}
cmd.Println("Wiretrustee service has been stopped")
},
}
)
err = util.InitLog(logLevel, logFile)
if err != nil {
return err
}
var (
restartCmd = &cobra.Command{
Use: "restart",
Short: "restarts wiretrustee service",
Run: func(cmd *cobra.Command, args []string) {
SetFlagsFromEnvVars()
ctx, cancel := context.WithCancel(cmd.Context())
s, err := newSVC(newProgram(ctx, cancel), newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return err
}
err = s.Start()
if err != nil {
cmd.PrintErrln(err)
return err
}
cmd.Println("Netbird service has been started")
return nil
},
}
var stopCmd = &cobra.Command{
Use: "stop",
Short: "stops Netbird service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := handleRebrand(cmd)
if err != nil {
return err
}
err = util.InitLog(logLevel, logFile)
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
ctx, cancel := context.WithCancel(cmd.Context())
s, err := newSVC(newProgram(ctx, cancel), newSVCConfig())
if err != nil {
return err
}
err = s.Stop()
if err != nil {
return err
}
cmd.Println("Netbird service has been stopped")
return nil
},
}
var restartCmd = &cobra.Command{
Use: "restart",
Short: "restarts Netbird service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := handleRebrand(cmd)
if err != nil {
return err
}
err = util.InitLog(logLevel, logFile)
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
ctx, cancel := context.WithCancel(cmd.Context())
s, err := newSVC(newProgram(ctx, cancel), newSVCConfig())
if err != nil {
return err
}
err = s.Restart()
if err != nil {
return err
}
cmd.Println("Netbird service has been restarted")
return nil
},
}
err := util.InitLog(logLevel, logFile)
if err != nil {
log.Errorf("failed initializing log %v", err)
}
s, err := newSVC(&program{}, newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return
}
err = s.Restart()
if err != nil {
cmd.PrintErrln(err)
return
}
cmd.Println("Wiretrustee service has been restarted")
},
}
)


@@ -1,89 +1,69 @@
package cmd
import (
"context"
"runtime"
"github.com/spf13/cobra"
"runtime"
)
var installCmd = &cobra.Command{
Use: "install",
Short: "installs Netbird service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
var (
installCmd = &cobra.Command{
Use: "install",
Short: "installs wiretrustee service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
svcConfig := newSVCConfig()
err := handleRebrand(cmd)
if err != nil {
return err
}
svcConfig.Arguments = []string{
"service",
"run",
"--config",
configPath,
"--log-level",
logLevel,
}
svcConfig := newSVCConfig()
if runtime.GOOS == "linux" {
// Respected only by systemd systems
svcConfig.Dependencies = []string{"After=network.target syslog.target"}
}
svcConfig.Arguments = []string{
"service",
"run",
"--config",
configPath,
"--log-level",
logLevel,
}
s, err := newSVC(&program{}, svcConfig)
if err != nil {
cmd.PrintErrln(err)
return err
}
if managementURL != "" {
svcConfig.Arguments = append(svcConfig.Arguments, "--management-url")
svcConfig.Arguments = append(svcConfig.Arguments, managementURL)
}
err = s.Install()
if err != nil {
cmd.PrintErrln(err)
return err
}
cmd.Println("Wiretrustee service has been installed")
return nil
},
}
)
if runtime.GOOS == "linux" {
// Respected only by systemd systems
svcConfig.Dependencies = []string{"After=network.target syslog.target"}
}
var (
uninstallCmd = &cobra.Command{
Use: "uninstall",
Short: "uninstalls wiretrustee service from system",
Run: func(cmd *cobra.Command, args []string) {
SetFlagsFromEnvVars()
ctx, cancel := context.WithCancel(cmd.Context())
s, err := newSVC(&program{}, newSVCConfig())
if err != nil {
cmd.PrintErrln(err)
return
}
s, err := newSVC(newProgram(ctx, cancel), svcConfig)
if err != nil {
cmd.PrintErrln(err)
return err
}
err = s.Install()
if err != nil {
cmd.PrintErrln(err)
return err
}
cmd.Println("Netbird service has been installed")
return nil
},
}
var uninstallCmd = &cobra.Command{
Use: "uninstall",
Short: "uninstalls Netbird service from system",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := handleRebrand(cmd)
if err != nil {
return err
}
ctx, cancel := context.WithCancel(cmd.Context())
s, err := newSVC(newProgram(ctx, cancel), newSVCConfig())
if err != nil {
return err
}
err = s.Uninstall()
if err != nil {
return err
}
cmd.Println("Netbird has been uninstalled")
return nil
},
}
err = s.Uninstall()
if err != nil {
cmd.PrintErrln(err)
return
}
cmd.Println("Wiretrustee has been uninstalled")
},
}
)


@@ -1,115 +0,0 @@
package cmd
import (
"context"
"errors"
"fmt"
"github.com/netbirdio/netbird/client/internal"
nbssh "github.com/netbirdio/netbird/client/ssh"
"github.com/netbirdio/netbird/util"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"os"
"os/signal"
"strings"
"syscall"
)
var (
port int
user = "root"
host string
)
var sshCmd = &cobra.Command{
Use: "ssh",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errors.New("requires a host argument")
}
split := strings.Split(args[0], "@")
if len(split) == 2 {
user = split[0]
host = split[1]
} else {
host = args[0]
}
return nil
},
Short: "connect to a remote SSH server",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := util.InitLog(logLevel, "console")
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
if !util.IsAdmin() {
cmd.Printf("error: you must have Administrator privileges to run this command\n")
return nil
}
ctx := internal.CtxInitState(cmd.Context())
config, err := internal.ReadConfig("", "", configPath, nil)
if err != nil {
return err
}
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
sshctx, cancel := context.WithCancel(ctx)
go func() {
// blocking
if err := runSSH(sshctx, host, []byte(config.SSHKey), cmd); err != nil {
log.Print(err)
}
cancel()
}()
select {
case <-sig:
cancel()
case <-sshctx.Done():
}
return nil
},
}
func runSSH(ctx context.Context, addr string, pemKey []byte, cmd *cobra.Command) error {
c, err := nbssh.DialWithKey(fmt.Sprintf("%s:%d", addr, port), user, pemKey)
if err != nil {
cmd.Printf("Error: %v\n", err)
cmd.Printf("Couldn't connect. " +
"You might be disconnected from the NetBird network, or the NetBird agent isn't running.\n" +
"Run the status command: \n\n" +
" netbird status\n\n" +
"It might also be that the SSH server is disabled on the agent you are trying to connect to.\n")
return nil
}
go func() {
<-ctx.Done()
err = c.Close()
if err != nil {
return
}
}()
err = c.OpenTerminal()
if err != nil {
return err
}
return nil
}
func init() {
sshCmd.PersistentFlags().IntVarP(&port, "port", "p", nbssh.DefaultSSHPort, "Sets remote SSH port. Defaults to "+fmt.Sprint(nbssh.DefaultSSHPort))
}
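
The ssh command accepts either a bare host or user@host and defaults the user to root; the same argument handling isolated as a sketch:

// parseSSHTarget splits an "[user@]host" argument, defaulting the user to root.
func parseSSHTarget(arg string) (user, host string) {
    user, host = "root", arg
    if parts := strings.Split(arg, "@"); len(parts) == 2 {
        user, host = parts[0], parts[1]
    }
    return user, host
}

// parseSSHTarget("100.64.0.5")       -> ("root", "100.64.0.5")
// parseSSHTarget("admin@100.64.0.5") -> ("admin", "100.64.0.5")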


@@ -1,301 +0,0 @@
package cmd
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/proto"
nbStatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/util"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"net/netip"
"sort"
"strings"
)
var (
detailFlag bool
ipsFilter []string
statusFilter string
ipsFilterMap map[string]struct{}
)
var statusCmd = &cobra.Command{
Use: "status",
Short: "status of the Netbird Service",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := parseFilters()
if err != nil {
return err
}
err = util.InitLog(logLevel, "console")
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
ctx := internal.CtxInitState(context.Background())
conn, err := DialClientGRPCServer(ctx, daemonAddr)
if err != nil {
return fmt.Errorf("failed to connect to daemon error: %v\n"+
"If the daemon is not running please run: "+
"\nnetbird service install \nnetbird service start\n", err)
}
defer conn.Close()
resp, err := proto.NewDaemonServiceClient(conn).Status(cmd.Context(), &proto.StatusRequest{GetFullPeerStatus: true})
if err != nil {
return fmt.Errorf("status failed: %v", status.Convert(err).Message())
}
daemonStatus := fmt.Sprintf("Daemon status: %s\n", resp.GetStatus())
if resp.GetStatus() == string(internal.StatusNeedsLogin) || resp.GetStatus() == string(internal.StatusLoginFailed) {
cmd.Printf("%s\n"+
"Run UP command to log in with SSO (interactive login):\n\n"+
" netbird up \n\n"+
"If you are running a self-hosted version and no SSO provider has been configured in your Management Server,\n"+
"you can use a setup-key:\n\n netbird up --management-url <YOUR_MANAGEMENT_URL> --setup-key <YOUR_SETUP_KEY>\n\n"+
"More info: https://www.netbird.io/docs/overview/setup-keys\n\n",
daemonStatus,
)
return nil
}
pbFullStatus := resp.GetFullStatus()
fullStatus := fromProtoFullStatus(pbFullStatus)
cmd.Print(parseFullStatus(fullStatus, detailFlag, daemonStatus, resp.GetDaemonVersion()))
return nil
},
}
func init() {
ipsFilterMap = make(map[string]struct{})
statusCmd.PersistentFlags().BoolVarP(&detailFlag, "detail", "d", false, "display detailed status information")
statusCmd.PersistentFlags().StringSliceVar(&ipsFilter, "filter-by-ips", []string{}, "filters the detailed output by a list of one or more IPs, e.g. --filter-by-ips 100.64.0.100,100.64.0.200")
statusCmd.PersistentFlags().StringVar(&statusFilter, "filter-by-status", "", "filters the detailed output by connection status(connected|disconnected), e.g. --filter-by-status connected")
}
func parseFilters() error {
switch strings.ToLower(statusFilter) {
case "", "disconnected", "connected":
default:
return fmt.Errorf("wrong status filter, should be one of connected|disconnected, got: %s", statusFilter)
}
if len(ipsFilter) > 0 {
for _, addr := range ipsFilter {
_, err := netip.ParseAddr(addr)
if err != nil {
return fmt.Errorf("got an invalid IP address in the filter: address %s, error %s", addr, err)
}
ipsFilterMap[addr] = struct{}{}
}
}
return nil
}
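
The IP filter values are validated with net/netip, which only accepts literal addresses, so malformed input fails before the daemon is even queried. A minimal sketch of that validation on its own:

// validateIPFilter mirrors the per-address check above: every entry must be a
// literal IPv4 or IPv6 address, and valid entries become a lookup set.
func validateIPFilter(ips []string) (map[string]struct{}, error) {
    set := make(map[string]struct{}, len(ips))
    for _, addr := range ips {
        if _, err := netip.ParseAddr(addr); err != nil {
            return nil, fmt.Errorf("invalid IP %q in filter: %v", addr, err)
        }
        set[addr] = struct{}{}
    }
    return set, nil
}

// validateIPFilter([]string{"100.64.0.100", "100.64.0.200"}) succeeds,
// while "100.64.0.0/24" is rejected because CIDR notation is not a plain address.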
func fromProtoFullStatus(pbFullStatus *proto.FullStatus) nbStatus.FullStatus {
var fullStatus nbStatus.FullStatus
managementState := pbFullStatus.GetManagementState()
fullStatus.ManagementState.URL = managementState.GetURL()
fullStatus.ManagementState.Connected = managementState.GetConnected()
signalState := pbFullStatus.GetSignalState()
fullStatus.SignalState.URL = signalState.GetURL()
fullStatus.SignalState.Connected = signalState.GetConnected()
localPeerState := pbFullStatus.GetLocalPeerState()
fullStatus.LocalPeerState.IP = localPeerState.GetIP()
fullStatus.LocalPeerState.PubKey = localPeerState.GetPubKey()
fullStatus.LocalPeerState.KernelInterface = localPeerState.GetKernelInterface()
var peersState []nbStatus.PeerState
for _, pbPeerState := range pbFullStatus.GetPeers() {
timeLocal := pbPeerState.GetConnStatusUpdate().AsTime().Local()
peerState := nbStatus.PeerState{
IP: pbPeerState.GetIP(),
PubKey: pbPeerState.GetPubKey(),
ConnStatus: pbPeerState.GetConnStatus(),
ConnStatusUpdate: timeLocal,
Relayed: pbPeerState.GetRelayed(),
Direct: pbPeerState.GetDirect(),
LocalIceCandidateType: pbPeerState.GetLocalIceCandidateType(),
RemoteIceCandidateType: pbPeerState.GetRemoteIceCandidateType(),
}
peersState = append(peersState, peerState)
}
fullStatus.Peers = peersState
return fullStatus
}
func parseFullStatus(fullStatus nbStatus.FullStatus, printDetail bool, daemonStatus string, daemonVersion string) string {
var (
managementStatusURL = ""
signalStatusURL = ""
managementConnString = "Disconnected"
signalConnString = "Disconnected"
interfaceTypeString = "Userspace"
)
if printDetail {
managementStatusURL = fmt.Sprintf(" to %s", fullStatus.ManagementState.URL)
signalStatusURL = fmt.Sprintf(" to %s", fullStatus.SignalState.URL)
}
if fullStatus.ManagementState.Connected {
managementConnString = "Connected"
}
if fullStatus.SignalState.Connected {
signalConnString = "Connected"
}
interfaceIP := fullStatus.LocalPeerState.IP
if fullStatus.LocalPeerState.KernelInterface {
interfaceTypeString = "Kernel"
} else if fullStatus.LocalPeerState.IP == "" {
interfaceTypeString = "N/A"
interfaceIP = "N/A"
}
parsedPeersString, peersConnected := parsePeers(fullStatus.Peers, printDetail)
peersCountString := fmt.Sprintf("%d/%d Connected", peersConnected, len(fullStatus.Peers))
summary := fmt.Sprintf(
"Daemon version: %s\n"+
"CLI version: %s\n"+
"%s"+ // daemon status
"Management: %s%s\n"+
"Signal: %s%s\n"+
"NetBird IP: %s\n"+
"Interface type: %s\n"+
"Peers count: %s\n",
daemonVersion,
system.NetbirdVersion(),
daemonStatus,
managementConnString,
managementStatusURL,
signalConnString,
signalStatusURL,
interfaceIP,
interfaceTypeString,
peersCountString,
)
if printDetail {
return fmt.Sprintf(
"Peers detail:"+
"%s\n"+
"%s",
parsedPeersString,
summary,
)
}
return summary
}
func parsePeers(peers []nbStatus.PeerState, printDetail bool) (string, int) {
var (
peersString = ""
peersConnected = 0
)
if len(peers) > 0 {
sort.SliceStable(peers, func(i, j int) bool {
iAddr, _ := netip.ParseAddr(peers[i].IP)
jAddr, _ := netip.ParseAddr(peers[j].IP)
return iAddr.Compare(jAddr) == -1
})
}
connectedStatusString := peer.StatusConnected.String()
for _, peerState := range peers {
peerConnectionStatus := false
if peerState.ConnStatus == connectedStatusString {
peersConnected = peersConnected + 1
peerConnectionStatus = true
}
if printDetail {
if skipDetailByFilters(peerState, peerConnectionStatus) {
continue
}
localICE := "-"
remoteICE := "-"
connType := "-"
if peerConnectionStatus {
localICE = peerState.LocalIceCandidateType
remoteICE = peerState.RemoteIceCandidateType
connType = "P2P"
if peerState.Relayed {
connType = "Relayed"
}
}
peerString := fmt.Sprintf(
"\n Peer:\n"+
" NetBird IP: %s\n"+
" Public key: %s\n"+
" Status: %s\n"+
" -- detail --\n"+
" Connection type: %s\n"+
" Direct: %t\n"+
" ICE candidate (Local/Remote): %s/%s\n"+
" Last connection update: %s\n",
peerState.IP,
peerState.PubKey,
peerState.ConnStatus,
connType,
peerState.Direct,
localICE,
remoteICE,
peerState.ConnStatusUpdate.Format("2006-01-02 15:04:05"),
)
peersString = peersString + peerString
}
}
return peersString, peersConnected
}
func skipDetailByFilters(peerState nbStatus.PeerState, isConnected bool) bool {
statusEval := false
ipEval := false
if statusFilter != "" {
lowerStatusFilter := strings.ToLower(statusFilter)
if lowerStatusFilter == "disconnected" && isConnected {
statusEval = true
} else if lowerStatusFilter == "connected" && !isConnected {
statusEval = true
}
}
if len(ipsFilter) > 0 {
_, ok := ipsFilterMap[peerState.IP]
if !ok {
ipEval = true
}
}
return statusEval || ipEval
}


@@ -1,45 +1,15 @@
package cmd
import (
"context"
"net"
"path/filepath"
"testing"
"time"
"github.com/netbirdio/netbird/util"
clientProto "github.com/netbirdio/netbird/client/proto"
client "github.com/netbirdio/netbird/client/server"
mgmtProto "github.com/netbirdio/netbird/management/proto"
mgmt "github.com/netbirdio/netbird/management/server"
sigProto "github.com/netbirdio/netbird/signal/proto"
sig "github.com/netbirdio/netbird/signal/server"
mgmtProto "github.com/wiretrustee/wiretrustee/management/proto"
mgmt "github.com/wiretrustee/wiretrustee/management/server"
sigProto "github.com/wiretrustee/wiretrustee/signal/proto"
sig "github.com/wiretrustee/wiretrustee/signal/server"
"google.golang.org/grpc"
"net"
"testing"
)
func startTestingServices(t *testing.T) string {
config := &mgmt.Config{}
_, err := util.ReadJson("../testdata/management.json", config)
if err != nil {
t.Fatal(err)
}
testDir := t.TempDir()
config.Datadir = testDir
err = util.CopyFileContents("../testdata/store.json", filepath.Join(testDir, "store.json"))
if err != nil {
t.Fatal(err)
}
_, signalLis := startSignal(t)
signalAddr := signalLis.Addr().String()
config.Signal.URI = signalAddr
_, mgmLis := startManagement(t, config)
mgmAddr := mgmLis.Addr().String()
return mgmAddr
}
func startSignal(t *testing.T) (*grpc.Server, net.Listener) {
lis, err := net.Listen("tcp", ":0")
if err != nil {
@@ -56,7 +26,7 @@ func startSignal(t *testing.T) (*grpc.Server, net.Listener) {
return s, lis
}
func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Listener) {
func startManagement(config *mgmt.Config, t *testing.T) (*grpc.Server, net.Listener) {
lis, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatal(err)
@@ -68,10 +38,7 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
}
peersUpdateManager := mgmt.NewPeersUpdateManager()
accountManager, err := mgmt.BuildManager(store, peersUpdateManager, nil)
if err != nil {
t.Fatal(err)
}
accountManager := mgmt.NewManager(store, peersUpdateManager)
turnManager := mgmt.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig)
mgmtServer, err := mgmt.NewServer(config, accountManager, peersUpdateManager, turnManager)
if err != nil {
@@ -81,33 +48,9 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
go func() {
if err := s.Serve(lis); err != nil {
t.Error(err)
return
}
}()
return s, lis
}
func startClientDaemon(
t *testing.T, ctx context.Context, managementURL, configPath string,
) (*grpc.Server, net.Listener) {
lis, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
s := grpc.NewServer()
server := client.New(ctx, managementURL, adminURL, configPath, "")
if err := server.Start(); err != nil {
t.Fatal(err)
}
clientProto.RegisterDaemonServiceServer(s, server)
go func() {
if err := s.Serve(lis); err != nil {
t.Error(err)
}
}()
time.Sleep(time.Second)
return s, lis
}


@@ -2,127 +2,234 @@ package cmd
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/proto"
nbStatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/util"
"github.com/cenkalti/backoff/v4"
"github.com/kardianos/service"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/wiretrustee/wiretrustee/client/internal"
mgm "github.com/wiretrustee/wiretrustee/management/client"
mgmProto "github.com/wiretrustee/wiretrustee/management/proto"
signal "github.com/wiretrustee/wiretrustee/signal/client"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc/codes"
gstatus "google.golang.org/grpc/status"
"google.golang.org/grpc/status"
"time"
)
var upCmd = &cobra.Command{
Use: "up",
Short: "install, login and start Netbird client",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
cmd.SetOut(cmd.OutOrStdout())
err := util.InitLog(logLevel, "console")
if err != nil {
return fmt.Errorf("failed initializing log %v", err)
}
ctx := internal.CtxInitState(cmd.Context())
// workaround to run without service
if logFile == "console" {
err = handleRebrand(cmd)
var (
upCmd = &cobra.Command{
Use: "up",
Short: "install, login and start wiretrustee client",
RunE: func(cmd *cobra.Command, args []string) error {
SetFlagsFromEnvVars()
err := loginCmd.RunE(cmd, args)
if err != nil {
return err
}
config, err := internal.GetConfig(managementURL, adminURL, configPath, preSharedKey)
if err != nil {
return fmt.Errorf("get config file: %v", err)
if logFile == "console" {
return runClient()
}
config, _ = internal.UpdateOldManagementPort(ctx, config, configPath)
err = foregroundLogin(ctx, cmd, config, setupKey)
s, err := newSVC(&program{}, newSVCConfig())
if err != nil {
return fmt.Errorf("foreground login failed: %v", err)
cmd.PrintErrln(err)
return err
}
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
SetupCloseHandler(ctx, cancel)
return internal.RunClient(ctx, config, nbStatus.NewRecorder())
}
srvStatus, err := s.Status()
if err != nil {
if err == service.ErrNotInstalled {
log.Infof("%s. Installing it now", err.Error())
e := installCmd.RunE(cmd, args)
if e != nil {
return e
}
} else {
log.Warnf("failed retrieving service status: %v", err)
}
}
if srvStatus == service.StatusRunning {
stopCmd.Run(cmd, args)
}
return startCmd.RunE(cmd, args)
},
}
)
conn, err := DialClientGRPCServer(ctx, daemonAddr)
// createEngineConfig converts configuration received from Management Service to EngineConfig
func createEngineConfig(key wgtypes.Key, config *internal.Config, peerConfig *mgmProto.PeerConfig) (*internal.EngineConfig, error) {
iFaceBlackList := make(map[string]struct{})
for i := 0; i < len(config.IFaceBlackList); i += 2 {
iFaceBlackList[config.IFaceBlackList[i]] = struct{}{}
}
engineConf := &internal.EngineConfig{
WgIface: config.WgIface,
WgAddr: peerConfig.Address,
IFaceBlackList: iFaceBlackList,
WgPrivateKey: key,
}
if config.PreSharedKey != "" {
preSharedKey, err := wgtypes.ParseKey(config.PreSharedKey)
if err != nil {
return fmt.Errorf("failed to connect to daemon error: %v\n"+
"If the daemon is not running please run: "+
"\nnetbird service install \nnetbird service start\n", err)
return nil, err
}
defer func() {
err := conn.Close()
if err != nil {
log.Warnf("failed closing dameon gRPC client connection %v", err)
return
}
engineConf.PreSharedKey = &preSharedKey
}
return engineConf, nil
}
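
The optional pre-shared key is a regular 32-byte WireGuard key in base64; wgtypes.ParseKey converts the config string into the form the engine expects. A small sketch of that step in isolation:

// parsePreSharedKey turns an optional base64 pre-shared key from the config
// into a *wgtypes.Key; an empty string means the feature is disabled.
func parsePreSharedKey(s string) (*wgtypes.Key, error) {
    if s == "" {
        return nil, nil
    }
    k, err := wgtypes.ParseKey(s)
    if err != nil {
        return nil, fmt.Errorf("invalid WireGuard pre-shared key: %v", err)
    }
    return &k, nil
}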
// connectToSignal creates Signal Service client and established a connection
func connectToSignal(ctx context.Context, wtConfig *mgmProto.WiretrusteeConfig, ourPrivateKey wgtypes.Key) (*signal.Client, error) {
var sigTLSEnabled bool
if wtConfig.Signal.Protocol == mgmProto.HostConfig_HTTPS {
sigTLSEnabled = true
} else {
sigTLSEnabled = false
}
signalClient, err := signal.NewClient(ctx, wtConfig.Signal.Uri, ourPrivateKey, sigTLSEnabled)
if err != nil {
log.Errorf("error while connecting to the Signal Exchange Service %s: %s", wtConfig.Signal.Uri, err)
return nil, status.Errorf(codes.FailedPrecondition, "failed connecting to Signal Service : %s", err)
}
return signalClient, nil
}
// connectToManagement creates Management Services client, establishes a connection, logs-in and gets a global Wiretrustee config (signal, turn, stun hosts, etc)
func connectToManagement(ctx context.Context, managementAddr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*mgm.Client, *mgmProto.LoginResponse, error) {
log.Debugf("connecting to management server %s", managementAddr)
client, err := mgm.NewClient(ctx, managementAddr, ourPrivateKey, tlsEnabled)
if err != nil {
return nil, nil, status.Errorf(codes.FailedPrecondition, "failed connecting to Management Service : %s", err)
}
log.Debugf("connected to management server %s", managementAddr)
serverPublicKey, err := client.GetServerPublicKey()
if err != nil {
return nil, nil, status.Errorf(codes.FailedPrecondition, "failed while getting Management Service public key: %s", err)
}
loginResp, err := client.Login(*serverPublicKey)
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.PermissionDenied {
log.Error("peer registration required. Please run wiretrustee login command first")
return nil, nil, err
} else {
return nil, nil, err
}
}
log.Debugf("peer logged in to Management Service %s", managementAddr)
return client, loginResp, nil
}
func runClient() error {
var backOff = &backoff.ExponentialBackOff{
InitialInterval: time.Second,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: 10 * time.Second,
MaxElapsedTime: 24 * 3 * time.Hour, // stop the client after 3 days of retrying (indicates a persistent problem, e.g. permission denied)
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
operation := func() error {
config, err := internal.ReadConfig(managementURL, configPath)
if err != nil {
log.Errorf("failed reading config %s %v", configPath, err)
return err
}
//validate our peer's Wireguard PRIVATE key
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
return err
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mgmTlsEnabled := false
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
// connect (just a connection, no stream yet) and login to Management Service to get an initial global Wiretrustee config
mgmClient, loginResp, err := connectToManagement(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
if err != nil {
log.Warn(err)
return err
}
// with the global Wiretrustee config in hand connect (just a connection, no stream yet) Signal
signalClient, err := connectToSignal(ctx, loginResp.GetWiretrusteeConfig(), myPrivateKey)
if err != nil {
log.Error(err)
return err
}
peerConfig := loginResp.GetPeerConfig()
engineConfig, err := createEngineConfig(myPrivateKey, config, peerConfig)
if err != nil {
log.Error(err)
return err
}
// create and start the Wiretrustee Engine that will connect to the Signal and Management streams and manage connections to remote peers.
engine := internal.NewEngine(signalClient, mgmClient, engineConfig, cancel, ctx)
err = engine.Start()
if err != nil {
log.Errorf("error while starting Wiretrustee Connection Engine: %s", err)
return err
}
log.Print("Wiretrustee engine started, my IP is: ", peerConfig.Address)
select {
case <-stopCh:
case <-ctx.Done():
}
backOff.Reset()
err = mgmClient.Close()
if err != nil {
log.Errorf("failed closing Management Service client %v", err)
return err
}
err = signalClient.Close()
if err != nil {
log.Errorf("failed closing Signal Service client %v", err)
return err
}
err = engine.Stop()
if err != nil {
log.Errorf("failed stopping engine %v", err)
return err
}
go func() {
cleanupCh <- struct{}{}
}()
client := proto.NewDaemonServiceClient(conn)
log.Info("stopped Wiretrustee client")
status, err := client.Status(ctx, &proto.StatusRequest{})
if err != nil {
return fmt.Errorf("unable to get daemon status: %v", err)
}
return ctx.Err()
}
if status.Status == string(internal.StatusConnected) {
cmd.Println("Already connected")
return nil
}
loginRequest := proto.LoginRequest{
SetupKey: setupKey,
PreSharedKey: preSharedKey,
ManagementUrl: managementURL,
}
var loginErr error
var loginResp *proto.LoginResponse
err = WithBackOff(func() error {
var backOffErr error
loginResp, backOffErr = client.Login(ctx, &loginRequest)
if s, ok := gstatus.FromError(backOffErr); ok && (s.Code() == codes.InvalidArgument ||
s.Code() == codes.PermissionDenied ||
s.Code() == codes.NotFound ||
s.Code() == codes.Unimplemented) {
loginErr = backOffErr
return nil
}
return backOffErr
})
if err != nil {
return fmt.Errorf("login backoff cycle failed: %v", err)
}
if loginErr != nil {
return fmt.Errorf("login failed: %v", loginErr)
}
if loginResp.NeedsSSOLogin {
openURL(cmd, loginResp.VerificationURIComplete)
_, err = client.WaitSSOLogin(ctx, &proto.WaitSSOLoginRequest{UserCode: loginResp.UserCode})
if err != nil {
return fmt.Errorf("waiting sso login failed with: %v", err)
}
}
if _, err := client.Up(ctx, &proto.UpRequest{}); err != nil {
return fmt.Errorf("call service up method: %v", err)
}
cmd.Println("Connected")
return nil
},
err := backoff.Retry(operation, backOff)
if err != nil {
log.Errorf("exiting client retry loop due to unrecoverable error: %s", err)
return err
}
return nil
}


@@ -1,75 +0,0 @@
package cmd
import (
"context"
"testing"
"time"
"github.com/netbirdio/netbird/client/internal"
)
var cliAddr string
func TestUpDaemon(t *testing.T) {
mgmAddr := startTestingServices(t)
tempDir := t.TempDir()
confPath := tempDir + "/config.json"
ctx := internal.CtxInitState(context.Background())
state := internal.CtxGetState(ctx)
_, cliLis := startClientDaemon(t, ctx, "http://"+mgmAddr, confPath)
cliAddr = cliLis.Addr().String()
daemonAddr = "tcp://" + cliAddr
rootCmd.SetArgs([]string{
"login",
"--daemon-addr", "tcp://" + cliAddr,
"--setup-key", "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
"--log-file", "",
})
if err := rootCmd.Execute(); err != nil {
t.Errorf("expected no error while running up command, got %v", err)
return
}
time.Sleep(time.Second * 3)
if status, err := state.Status(); err != nil && status != internal.StatusIdle {
t.Errorf("wrong status after login: %s, %v", internal.StatusIdle, err)
return
}
rootCmd.SetArgs([]string{
"up",
"--daemon-addr", "tcp://" + cliAddr,
"--log-file", "",
})
if err := rootCmd.Execute(); err != nil {
t.Errorf("expected no error while running up command, got %v", err)
return
}
time.Sleep(time.Second * 3)
if status, err := state.Status(); err != nil && status != internal.StatusConnected {
t.Errorf("wrong status after connect: %s, %v", status, err)
return
}
rootCmd.SetArgs([]string{
"status",
"--daemon-addr", "tcp://" + cliAddr,
})
if err := rootCmd.Execute(); err != nil {
t.Errorf("expected no error while running up command, got %v", err)
return
}
time.Sleep(time.Second * 3)
rootCmd.SetErr(nil)
rootCmd.SetArgs([]string{"down", "--daemon-addr", "tcp://" + cliAddr})
if err := rootCmd.Execute(); err != nil {
t.Errorf("expected no error while running up command, got %v", err)
return
}
// we can't check status here, because context already canceled
}

client/cmd/up_test.go

@@ -0,0 +1,82 @@
package cmd
import (
"github.com/wiretrustee/wiretrustee/iface"
mgmt "github.com/wiretrustee/wiretrustee/management/server"
"github.com/wiretrustee/wiretrustee/util"
"net/url"
"path/filepath"
"testing"
"time"
)
var signalAddr string
func TestUp_Start(t *testing.T) {
config := &mgmt.Config{}
_, err := util.ReadJson("../testdata/management.json", config)
if err != nil {
t.Fatal(err)
}
testDir := t.TempDir()
config.Datadir = testDir
err = util.CopyFileContents("../testdata/store.json", filepath.Join(testDir, "store.json"))
if err != nil {
t.Fatal(err)
}
_, signalLis := startSignal(t)
signalAddr = signalLis.Addr().String()
config.Signal.URI = signalAddr
_, mgmLis := startManagement(config, t)
mgmAddr = mgmLis.Addr().String()
}
func TestUp(t *testing.T) {
defer iface.Close()
tempDir := t.TempDir()
confPath := tempDir + "/config.json"
mgmtURL, err := url.Parse("http://" + mgmAddr)
if err != nil {
t.Fatal(err)
}
rootCmd.SetArgs([]string{
"up",
"--config",
confPath,
"--setup-key",
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
"--management-url",
mgmtURL.String(),
"--log-file",
"console",
})
go func() {
err = rootCmd.Execute()
if err != nil {
t.Errorf("expected no error while running up command, got %v", err)
}
}()
exists := false
for start := time.Now(); time.Since(start) < 15*time.Second; {
e, err := iface.Exists(iface.WgInterfaceDefault)
if err != nil {
continue
}
if *e {
exists = true
break
}
}
if !exists {
t.Errorf("expected wireguard interface %s to be created", iface.WgInterfaceDefault)
}
}


@@ -1,17 +1,14 @@
package cmd
import (
"github.com/netbirdio/netbird/client/system"
"github.com/spf13/cobra"
)
import "github.com/spf13/cobra"
var (
Version string
versionCmd = &cobra.Command{
Use: "version",
Short: "prints Netbird version",
Short: "prints wiretrustee version",
Run: func(cmd *cobra.Command, args []string) {
cmd.SetOut(cmd.OutOrStdout())
cmd.Println(system.NetbirdVersion())
cmd.Println(Version)
},
}
)


@@ -1,98 +0,0 @@
package main
/*
import (
"flag"
"github.com/netbirdio/netbird/iface"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"net"
"net/http"
_ "net/http/pprof"
"time"
)
var name = flag.String("name", "wg0", "WireGuard interface name")
var addr = flag.String("addr", "100.64.0.1/24", "interface WireGuard IP addr")
var key = flag.String("key", "100.64.0.1/24", "WireGuard private key")
var port = flag.Int("port", 51820, "WireGuard port")
var remoteKey = flag.String("remote-key", "", "remote WireGuard public key")
var remoteAddr = flag.String("remote-addr", "100.64.0.2/32", "remote WireGuard IP addr")
var remoteEndpoint = flag.String("remote-endpoint", "127.0.0.1:51820", "remote WireGuard endpoint")
func fff() {
flag.Parse()
go func() {
log.Println(http.ListenAndServe("localhost:6060", nil))
}()
myKey, err := wgtypes.ParseKey(*key)
if err != nil {
log.Error(err)
return
}
log.Infof("public key and addr [%s] [%s] ", myKey.PublicKey().String(), *addr)
wgIFace, err := iface.NewWGIFace(*name, *addr, 1280)
if err != nil {
log.Error(err)
return
}
defer wgIFace.Close()
// todo wrap into UDPMux
sharedSock, _, err := listenNet("udp4", *port)
if err != nil {
log.Error(err)
return
}
defer sharedSock.Close()
// err = wgIFace.Create()
err = wgIFace.CreateNew(sharedSock)
if err != nil {
log.Errorf("failed to create interface %s %v", *name, err)
return
}
err = wgIFace.Configure(*key, *port)
if err != nil {
log.Errorf("failed to configure interface %s %v", *name, err)
return
}
ip, err := net.ResolveUDPAddr("udp4", *remoteEndpoint)
if err != nil {
// handle error
}
err = wgIFace.UpdatePeer(*remoteKey, *remoteAddr, 20*time.Second, ip, nil)
if err != nil {
log.Errorf("failed to configure remote peer %s %v", *remoteKey, err)
return
}
select {}
}
func listenNet(network string, port int) (*net.UDPConn, int, error) {
conn, err := net.ListenUDP(network, &net.UDPAddr{Port: port})
if err != nil {
return nil, 0, err
}
// Retrieve port.
laddr := conn.LocalAddr()
uaddr, err := net.ResolveUDPAddr(
laddr.Network(),
laddr.String(),
)
if err != nil {
return nil, 0, err
}
return conn, uaddr.Port, nil
}*/


@@ -1,12 +1,12 @@
!define APP_NAME "Netbird"
!define COMP_NAME "Netbird"
!define WEB_SITE "Netbird.io"
!define APP_NAME "Wiretrustee"
!define COMP_NAME "Wiretrustee"
!define WEB_SITE "wiretrustee.com"
!define VERSION $%APPVER%
!define COPYRIGHT "Netbird Authors, 2022"
!define COPYRIGHT "Wiretrustee Authors, 2021"
!define DESCRIPTION "A WireGuard®-based mesh network that connects your devices into a single private network"
!define INSTALLER_NAME "netbird-installer.exe"
!define MAIN_APP_EXE "Netbird"
!define ICON "ui\\netbird.ico"
!define INSTALLER_NAME "wiretrustee-installer.exe"
!define MAIN_APP_EXE "Wiretrustee"
!define ICON "ui\\wiretrustee.ico"
!define BANNER "ui\\banner.bmp"
!define LICENSE_DATA "..\\LICENSE"
@@ -15,13 +15,6 @@
!define REG_ROOT "HKLM"
!define REG_APP_PATH "Software\Microsoft\Windows\CurrentVersion\App Paths\${MAIN_APP_EXE}"
!define UNINSTALL_PATH "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APP_NAME}"
!define UI_APP_NAME "Netbird UI"
!define UI_APP_EXE "Netbird-ui"
!define UI_REG_APP_PATH "Software\Microsoft\Windows\CurrentVersion\App Paths\${UI_APP_EXE}"
!define UI_UNINSTALL_PATH "Software\Microsoft\Windows\CurrentVersion\Uninstall\${UI_APP_NAME}"
Unicode True
######################################################################
@@ -51,13 +44,10 @@ ShowInstDetails Show
!define MUI_UNICON "${ICON}"
!define MUI_WELCOMEFINISHPAGE_BITMAP "${BANNER}"
!define MUI_UNWELCOMEFINISHPAGE_BITMAP "${BANNER}"
!define MUI_FINISHPAGE_RUN
!define MUI_FINISHPAGE_RUN_TEXT "Start ${UI_APP_NAME}"
!define MUI_FINISHPAGE_RUN_FUNCTION "LaunchLink"
######################################################################
!include "MUI2.nsh"
!include LogicLib.nsh
!define MUI_ABORTWARNING
!define MUI_UNABORTWARNING
@@ -82,71 +72,11 @@ ShowInstDetails Show
######################################################################
Function GetAppFromCommand
Exch $1
Push $2
StrCpy $2 $1 1 0
StrCmp $2 '"' 0 done
Push $3
StrCpy $3 ""
loop:
IntOp $3 $3 + 1
StrCpy $2 $1 1 $3
StrCmp $2 '' +2
StrCmp $2 '"' 0 loop
StrCpy $1 $1 $3
StrCpy $1 $1 "" 1 ; Remove starting quote
Pop $3
done:
Pop $2
Exch $1
FunctionEnd
!macro GetAppFromCommand in out
Push "${in}"
Call GetAppFromCommand
Pop ${out}
!macroend
!macro UninstallPreviousNSIS UninstCommand CustomParameters
Push $0
Push $1
Push $2
Push '${CustomParameters}'
Push '${UninstCommand}'
Call GetAppFromCommand ; Remove quotes and parameters from UninstCommand
Pop $0
Pop $1
GetFullPathName $2 "$0\.."
ExecWait '"$0" $1 _?=$2'
Delete "$0" ; Extra cleanup because we used _?=
RMDir "$2"
Pop $2
Pop $1
Pop $0
!macroend
Function .onInit
ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\Wiretrustee" "UninstallString"
${If} $R0 != ""
MessageBox MB_YESNO|MB_ICONQUESTION "Wiretrustee is installed. We must remove it before installing Netbird. Procced?" IDNO noWTUninstOld
!insertmacro UninstallPreviousNSIS $R0 "/NoMsgBox"
noWTUninstOld:
${EndIf}
ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\$(^NAME)" "UninstallString"
${If} $R0 != ""
MessageBox MB_YESNO|MB_ICONQUESTION "$(^NAME) is already installed. Do you want to remove the previous version?" IDNO noUninstOld
!insertmacro UninstallPreviousNSIS $R0 "/NoMsgBox"
noUninstOld:
${EndIf}
FunctionEnd
######################################################################
Section -MainProgram
${INSTALL_TYPE}
SetOverwrite ifnewer
SetOutPath "$INSTDIR"
File /r "..\\dist\\netbird_windows_amd64\\"
File /r "..\\dist\\wiretrustee_windows_amd64\\"
SectionEnd
@@ -154,27 +84,19 @@ SectionEnd
Section -Icons_Reg
SetOutPath "$INSTDIR"
WriteUninstaller "$INSTDIR\netbird_uninstall.exe"
WriteUninstaller "$INSTDIR\wiretrustee_uninstall.exe"
WriteRegStr ${REG_ROOT} "${REG_APP_PATH}" "" "$INSTDIR\${MAIN_APP_EXE}"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "DisplayName" "${APP_NAME}"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "UninstallString" "$INSTDIR\netbird_uninstall.exe"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "UninstallString" "$INSTDIR\wiretrustee_uninstall.exe"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "DisplayIcon" "$INSTDIR\${MAIN_APP_EXE}"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "DisplayVersion" "${VERSION}"
WriteRegStr ${REG_ROOT} "${UNINSTALL_PATH}" "Publisher" "${COMP_NAME}"
WriteRegStr ${REG_ROOT} "${UI_REG_APP_PATH}" "" "$INSTDIR\${UI_APP_EXE}"
EnVar::SetHKLM
EnVar::AddValueEx "path" "$INSTDIR"
SetShellVarContext current
CreateShortCut "$SMPROGRAMS\${APP_NAME}.lnk" "$INSTDIR\${UI_APP_EXE}"
CreateShortCut "$DESKTOP\${APP_NAME}.lnk" "$INSTDIR\${UI_APP_EXE}"
SetShellVarContext all
ExecWait '"$INSTDIR\${MAIN_APP_EXE}" service install'
Exec '"$INSTDIR\${MAIN_APP_EXE}" service start'
Exec '"$INSTDIR\${MAIN_APP_EXE}" service install'
# sleep a bit for visibility
Sleep 1000
SectionEnd
@@ -184,29 +106,14 @@ SectionEnd
Section Uninstall
${INSTALL_TYPE}
ExecWait '"$INSTDIR\${MAIN_APP_EXE}" service stop'
Exec '"$INSTDIR\${MAIN_APP_EXE}" service stop'
Exec '"$INSTDIR\${MAIN_APP_EXE}" service uninstall'
# kill ui client
ExecWait `taskkill /im ${UI_APP_EXE}.exe`
# wait for the service uninstall to unblock the executable
Sleep 3000
RmDir /r "$INSTDIR"
SetShellVarContext current
Delete "$DESKTOP\${APP_NAME}.lnk"
Delete "$SMPROGRAMS\${APP_NAME}.lnk"
SetShellVarContext all
DeleteRegKey ${REG_ROOT} "${REG_APP_PATH}"
DeleteRegKey ${REG_ROOT} "${UNINSTALL_PATH}"
EnVar::SetHKLM
EnVar::DeleteValue "path" "$INSTDIR"
SectionEnd
Function LaunchLink
SetShellVarContext current
SetOutPath $INSTDIR
ShellExecAsUser::ShellExecAsUser "" "$DESKTOP\${APP_NAME}.lnk"
SetShellVarContext all
FunctionEnd
SectionEnd

32
client/internal/cond.go Normal file
View File

@@ -0,0 +1,32 @@
package internal
import "sync"
// A Cond is a condition variable like sync.Cond, but using a channel so we can use select.
type Cond struct {
once sync.Once
C chan struct{}
}
// NewCond creates a new condition variable.
func NewCond() *Cond {
return &Cond{C: make(chan struct{})}
}
// Do runs f if the condition hasn't been signaled yet. Afterwards it will be signaled.
func (c *Cond) Do(f func()) {
c.once.Do(func() {
f()
close(c.C)
})
}
// Signal closes the condition variable channel.
func (c *Cond) Signal() {
c.Do(func() {})
}
// Wait waits for the condition variable channel to close.
func (c *Cond) Wait() {
<-c.C
}
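As an aside, the channel-backed Cond composes with select, which sync.Cond does not; below is a minimal usage sketch under the assumption that it lives in the same internal package and that time is imported. It is illustrative only, not part of this change.
func waitWithTimeout(c *Cond, timeout time.Duration) bool {
	// waitWithTimeout is a hypothetical helper: it waits for the condition
	// to be signaled or gives up after the given timeout.
	select {
	case <-c.C: // closed by Signal() or Do()
		return true
	case <-time.After(timeout): // timed out while waiting
		return false
	}
}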

View File

@@ -1,16 +1,11 @@
package internal
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/ssh"
"github.com/netbirdio/netbird/iface"
mgm "github.com/netbirdio/netbird/management/client"
"github.com/netbirdio/netbird/util"
log "github.com/sirupsen/logrus"
"github.com/wiretrustee/wiretrustee/iface"
"github.com/wiretrustee/wiretrustee/util"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"net/url"
"os"
)
@@ -22,7 +17,7 @@ func ManagementURLDefault() *url.URL {
}
func init() {
managementURL, err := ParseURL("Management URL", "https://api.wiretrustee.com:443")
managementURL, err := parseManagementURL("https://api.wiretrustee.com:33073")
if err != nil {
panic(err)
}
@@ -35,30 +30,16 @@ type Config struct {
PrivateKey string
PreSharedKey string
ManagementURL *url.URL
AdminURL *url.URL
WgIface string
WgPort int
IFaceBlackList []string
// SSHKey is a private SSH key in a PEM format
SSHKey string
}
// createNewConfig creates a new config generating a new Wireguard key and saving to file
func createNewConfig(managementURL, adminURL, configPath, preSharedKey string) (*Config, error) {
//createNewConfig creates a new config generating a new Wireguard key and saving to file
func createNewConfig(managementURL string, configPath string, preSharedKey string) (*Config, error) {
wgKey := generateKey()
pem, err := ssh.GeneratePrivateKey(ssh.ED25519)
if err != nil {
return nil, err
}
config := &Config{
SSHKey: string(pem),
PrivateKey: wgKey,
WgIface: iface.WgInterfaceDefault,
WgPort: iface.DefaultWgPort,
IFaceBlackList: []string{},
}
config := &Config{PrivateKey: wgKey, WgIface: iface.WgInterfaceDefault, IFaceBlackList: []string{}}
if managementURL != "" {
URL, err := ParseURL("Management URL", managementURL)
URL, err := parseManagementURL(managementURL)
if err != nil {
return nil, err
}
@@ -71,18 +52,9 @@ func createNewConfig(managementURL, adminURL, configPath, preSharedKey string) (
config.PreSharedKey = preSharedKey
}
if adminURL != "" {
newURL, err := ParseURL("Admin Panel URL", adminURL)
if err != nil {
return nil, err
}
config.AdminURL = newURL
}
config.IFaceBlackList = []string{iface.WgInterfaceDefault, "tun0"}
config.IFaceBlackList = []string{iface.WgInterfaceDefault, "wt", "utun", "tun0", "zt", "ZeroTier", "utun", "wg", "ts",
"Tailscale", "tailscale", "docker", "vet"}
err = util.WriteJson(configPath, config)
err := util.WriteJson(configPath, config)
if err != nil {
return nil, err
}
@@ -90,189 +62,57 @@ func createNewConfig(managementURL, adminURL, configPath, preSharedKey string) (
return config, nil
}
// ParseURL parses and validates management URL
func ParseURL(serviceName, managementURL string) (*url.URL, error) {
func parseManagementURL(managementURL string) (*url.URL, error) {
parsedMgmtURL, err := url.ParseRequestURI(managementURL)
if err != nil {
log.Errorf("failed parsing management URL %s: [%s]", managementURL, err.Error())
return nil, err
}
if parsedMgmtURL.Scheme != "https" && parsedMgmtURL.Scheme != "http" {
return nil, fmt.Errorf(
"invalid %s URL provided %s. Supported format [http|https]://[host]:[port]",
serviceName, managementURL)
if !(parsedMgmtURL.Scheme == "https" || parsedMgmtURL.Scheme == "http") {
return nil, fmt.Errorf("invalid Management Service URL provided %s. Supported format [http|https]://[host]:[port]", managementURL)
}
return parsedMgmtURL, err
}
// ReadConfig reads an existing config. If the provided managementURL is not empty, it overrides the value read from the file.
func ReadConfig(managementURL, adminURL, configPath string, preSharedKey *string) (*Config, error) {
func ReadConfig(managementURL string, configPath string) (*Config, error) {
config := &Config{}
if _, err := os.Stat(configPath); os.IsNotExist(err) {
return nil, status.Errorf(codes.NotFound, "config file doesn't exist")
}
if _, err := util.ReadJson(configPath, config); err != nil {
_, err := util.ReadJson(configPath, config)
if err != nil {
return nil, err
}
refresh := false
if managementURL != "" && config.ManagementURL.String() != managementURL {
log.Infof("new Management URL provided, updated to %s (old value %s)",
managementURL, config.ManagementURL)
newURL, err := ParseURL("Management URL", managementURL)
if managementURL != "" {
URL, err := parseManagementURL(managementURL)
if err != nil {
return nil, err
}
config.ManagementURL = newURL
refresh = true
config.ManagementURL = URL
}
if adminURL != "" && (config.AdminURL == nil || config.AdminURL.String() != adminURL) {
log.Infof("new Admin Panel URL provided, updated to %s (old value %s)",
adminURL, config.AdminURL)
newURL, err := ParseURL("Admin Panel URL", adminURL)
if err != nil {
return nil, err
}
config.AdminURL = newURL
refresh = true
}
if preSharedKey != nil && config.PreSharedKey != *preSharedKey {
log.Infof("new pre-shared key provided, updated to %s (old value %s)",
*preSharedKey, config.PreSharedKey)
config.PreSharedKey = *preSharedKey
refresh = true
}
if config.SSHKey == "" {
pem, err := ssh.GeneratePrivateKey(ssh.ED25519)
if err != nil {
return nil, err
}
config.SSHKey = string(pem)
refresh = true
}
if config.WgPort == 0 {
config.WgPort = iface.DefaultWgPort
refresh = true
}
if refresh {
// one of the config fields has changed, so we need to update the config file
if err := util.WriteJson(configPath, config); err != nil {
return nil, err
}
}
return config, nil
return config, err
}
// GetConfig reads existing config or generates a new one
func GetConfig(managementURL, adminURL, configPath, preSharedKey string) (*Config, error) {
func GetConfig(managementURL string, configPath string, preSharedKey string) (*Config, error) {
if _, err := os.Stat(configPath); os.IsNotExist(err) {
log.Infof("generating new config %s", configPath)
return createNewConfig(managementURL, adminURL, configPath, preSharedKey)
return createNewConfig(managementURL, configPath, preSharedKey)
} else {
// don't overwrite pre-shared key if we receive asterisks from UI
pk := &preSharedKey
if preSharedKey == "**********" {
pk = nil
}
return ReadConfig(managementURL, adminURL, configPath, pk)
return ReadConfig(managementURL, configPath)
}
}
// generateKey generates a new Wireguard private key
func generateKey() string {
key, err := wgtypes.GeneratePrivateKey()
key, err := wgtypes.GenerateKey()
if err != nil {
panic(err)
}
return key.String()
}
// DeviceAuthorizationFlow represents Device Authorization Flow information
type DeviceAuthorizationFlow struct {
Provider string
ProviderConfig ProviderConfig
}
// ProviderConfig has all attributes needed to initiate a device authorization flow
type ProviderConfig struct {
// ClientID An IDP application client id
ClientID string
// ClientSecret An IDP application client secret
ClientSecret string
// Domain An IDP API domain
// Deprecated. Use OIDCConfigEndpoint instead
Domain string
// Audience is an audience used for authorization validation
Audience string
// TokenEndpoint is the endpoint of an IDP manager where clients can obtain access token
TokenEndpoint string
// DeviceAuthEndpoint is the endpoint of an IDP manager where clients can obtain device authorization code
DeviceAuthEndpoint string
}
func GetDeviceAuthorizationFlowInfo(ctx context.Context, config *Config) (DeviceAuthorizationFlow, error) {
// validate our peer's Wireguard PRIVATE key
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
return DeviceAuthorizationFlow{}, err
}
var mgmTlsEnabled bool
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
log.Debugf("connecting to Management Service %s", config.ManagementURL.String())
mgmClient, err := mgm.NewClient(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
if err != nil {
log.Errorf("failed connecting to Management Service %s %v", config.ManagementURL.String(), err)
return DeviceAuthorizationFlow{}, err
}
log.Debugf("connected to the Management service %s", config.ManagementURL.String())
defer func() {
err = mgmClient.Close()
if err != nil {
log.Warnf("failed to close the Management service client %v", err)
}
}()
serverKey, err := mgmClient.GetServerPublicKey()
if err != nil {
log.Errorf("failed while getting Management Service public key: %v", err)
return DeviceAuthorizationFlow{}, err
}
protoDeviceAuthorizationFlow, err := mgmClient.GetDeviceAuthorizationFlow(*serverKey)
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
log.Warnf("server couldn't find device flow, contact admin: %v", err)
return DeviceAuthorizationFlow{}, err
} else {
log.Errorf("failed to retrieve device flow: %v", err)
return DeviceAuthorizationFlow{}, err
}
}
return DeviceAuthorizationFlow{
Provider: protoDeviceAuthorizationFlow.Provider.String(),
ProviderConfig: ProviderConfig{
Audience: protoDeviceAuthorizationFlow.GetProviderConfig().GetAudience(),
ClientID: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientID(),
ClientSecret: protoDeviceAuthorizationFlow.GetProviderConfig().GetClientSecret(),
Domain: protoDeviceAuthorizationFlow.GetProviderConfig().Domain,
TokenEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetTokenEndpoint(),
DeviceAuthEndpoint: protoDeviceAuthorizationFlow.GetProviderConfig().GetDeviceAuthEndpoint(),
},
}, nil
}
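For orientation, a hedged sketch of how a caller might use the four-argument GetConfig shown above; the config path and the wrapper name are assumptions, not taken from this diff.
func loadClientConfig() (*Config, error) {
	// loadClientConfig is a hypothetical wrapper around GetConfig: it creates the
	// config on first run and reuses (and, if needed, updates) it afterwards.
	configPath := "/etc/netbird/config.json" // assumed location, not from this diff
	cfg, err := GetConfig(
		"https://api.wiretrustee.com:443", // management URL, matching the default in this file
		"",                                // admin URL; empty keeps the stored value
		configPath,
		"", // no pre-shared key
	)
	if err != nil {
		return nil, fmt.Errorf("loading config: %w", err)
	}
	return cfg, nil
}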

View File

@@ -1,60 +0,0 @@
package internal
import (
"errors"
"os"
"path/filepath"
"testing"
"github.com/netbirdio/netbird/util"
"github.com/stretchr/testify/assert"
)
func TestReadConfig(t *testing.T) {
}
func TestGetConfig(t *testing.T) {
managementURL := "https://test.management.url:33071"
adminURL := "https://app.admin.url"
path := filepath.Join(t.TempDir(), "config.json")
preSharedKey := "preSharedKey"
// case 1: new config has to be generated
config, err := GetConfig(managementURL, adminURL, path, preSharedKey)
if err != nil {
return
}
assert.Equal(t, config.ManagementURL.String(), managementURL)
assert.Equal(t, config.PreSharedKey, preSharedKey)
if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
t.Errorf("config file was expected to be created under path %s", path)
}
// case 2: existing config -> fetch it
config, err = GetConfig(managementURL, adminURL, path, preSharedKey)
if err != nil {
return
}
assert.Equal(t, config.ManagementURL.String(), managementURL)
assert.Equal(t, config.PreSharedKey, preSharedKey)
// case 3: existing config, but new managementURL has been provided -> update config
newManagementURL := "https://test.newManagement.url:33071"
config, err = GetConfig(newManagementURL, adminURL, path, preSharedKey)
if err != nil {
return
}
assert.Equal(t, config.ManagementURL.String(), newManagementURL)
assert.Equal(t, config.PreSharedKey, preSharedKey)
// read once more to make sure that config file has been updated with the new management URL
readConf, err := util.ReadJson(path, config)
if err != nil {
return
}
assert.Equal(t, readConf.(*Config).ManagementURL.String(), newManagementURL)
}

View File

@@ -1,313 +0,0 @@
package internal
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/ssh"
nbStatus "github.com/netbirdio/netbird/client/status"
"strings"
"time"
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/iface"
mgm "github.com/netbirdio/netbird/management/client"
mgmProto "github.com/netbirdio/netbird/management/proto"
signal "github.com/netbirdio/netbird/signal/client"
log "github.com/sirupsen/logrus"
"github.com/cenkalti/backoff/v4"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc/codes"
gstatus "google.golang.org/grpc/status"
)
// RunClient runs the client's main logic.
func RunClient(ctx context.Context, config *Config, statusRecorder *nbStatus.Status) error {
backOff := &backoff.ExponentialBackOff{
InitialInterval: time.Second,
RandomizationFactor: 1,
Multiplier: 1.7,
MaxInterval: 15 * time.Second,
MaxElapsedTime: 3 * 30 * 24 * time.Hour, // 3 months
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
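// Illustrative note (added, not from the original): with these settings the base delay
// grows roughly 1s -> 1.7s -> 2.9s -> 4.9s -> 8.4s -> 14.3s and is then capped at 15s;
// RandomizationFactor of 1 spreads each actual delay uniformly over [0, 2*base], and
// retries continue for up to ~3 months before the backoff gives up.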
state := CtxGetState(ctx)
defer func() {
s, err := state.Status()
if err != nil || s != StatusNeedsLogin {
state.Set(StatusIdle)
}
}()
wrapErr := state.Wrap
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
return wrapErr(err)
}
var mgmTlsEnabled bool
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
publicSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey))
if err != nil {
return err
}
managementURL := config.ManagementURL.String()
statusRecorder.MarkManagementDisconnected(managementURL)
operation := func() error {
// if the context has been cancelled, do not start a new backoff cycle
select {
case <-ctx.Done():
return nil
default:
}
state.Set(StatusConnecting)
engineCtx, cancel := context.WithCancel(ctx)
defer func() {
statusRecorder.MarkManagementDisconnected(managementURL)
statusRecorder.CleanLocalPeerState()
cancel()
}()
log.Debugf("conecting to the Management service %s", config.ManagementURL.Host)
mgmClient, err := mgm.NewClient(engineCtx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
if err != nil {
return wrapErr(gstatus.Errorf(codes.FailedPrecondition, "failed connecting to Management Service : %s", err))
}
log.Debugf("connected to the Management service %s", config.ManagementURL.Host)
defer func() {
err = mgmClient.Close()
if err != nil {
log.Warnf("failed to close the Management service client %v", err)
}
}()
// connect (just a connection, no stream yet) and log in to the Management Service to get an initial global Wiretrustee config
loginResp, err := loginToManagement(engineCtx, mgmClient, publicSSHKey)
if err != nil {
log.Debug(err)
if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) {
state.Set(StatusNeedsLogin)
return backoff.Permanent(wrapErr(err)) // unrecoverable error
}
return wrapErr(err)
}
statusRecorder.MarkManagementConnected(managementURL)
localPeerState := nbStatus.LocalPeerState{
IP: loginResp.GetPeerConfig().GetAddress(),
PubKey: myPrivateKey.PublicKey().String(),
KernelInterface: iface.WireguardModExists(),
}
statusRecorder.UpdateLocalPeerState(localPeerState)
signalURL := fmt.Sprintf("%s://%s",
strings.ToLower(loginResp.GetWiretrusteeConfig().GetSignal().GetProtocol().String()),
loginResp.GetWiretrusteeConfig().GetSignal().GetUri(),
)
statusRecorder.MarkSignalDisconnected(signalURL)
defer statusRecorder.MarkSignalDisconnected(signalURL)
// with the global Wiretrustee config in hand connect (just a connection, no stream yet) Signal
signalClient, err := connectToSignal(engineCtx, loginResp.GetWiretrusteeConfig(), myPrivateKey)
if err != nil {
log.Error(err)
return wrapErr(err)
}
defer func() {
err = signalClient.Close()
if err != nil {
log.Warnf("failed closing Signal service client %v", err)
}
}()
statusRecorder.MarkSignalConnected(signalURL)
peerConfig := loginResp.GetPeerConfig()
engineConfig, err := createEngineConfig(myPrivateKey, config, peerConfig)
if err != nil {
log.Error(err)
return wrapErr(err)
}
engine := NewEngine(engineCtx, cancel, signalClient, mgmClient, engineConfig, statusRecorder)
err = engine.Start()
if err != nil {
log.Errorf("error while starting Netbird Connection Engine: %s", err)
return wrapErr(err)
}
log.Print("Netbird engine started, my IP is: ", peerConfig.Address)
state.Set(StatusConnected)
<-engineCtx.Done()
backOff.Reset()
err = engine.Stop()
if err != nil {
log.Errorf("failed stopping engine %v", err)
return wrapErr(err)
}
log.Info("stopped NetBird client")
if _, err := state.Status(); err == ErrResetConnection {
return err
}
return nil
}
err = backoff.Retry(operation, backOff)
if err != nil {
log.Debugf("exiting client retry loop due to unrecoverable error: %s", err)
return err
}
return nil
}
// createEngineConfig converts configuration received from Management Service to EngineConfig
func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) {
engineConf := &EngineConfig{
WgIfaceName: config.WgIface,
WgAddr: peerConfig.Address,
IFaceBlackList: config.IFaceBlackList,
WgPrivateKey: key,
WgPort: config.WgPort,
SSHKey: []byte(config.SSHKey),
}
if config.PreSharedKey != "" {
preSharedKey, err := wgtypes.ParseKey(config.PreSharedKey)
if err != nil {
return nil, err
}
engineConf.PreSharedKey = &preSharedKey
}
return engineConf, nil
}
// connectToSignal creates a Signal Service client and establishes a connection
func connectToSignal(ctx context.Context, wtConfig *mgmProto.WiretrusteeConfig, ourPrivateKey wgtypes.Key) (*signal.GrpcClient, error) {
var sigTLSEnabled bool
if wtConfig.Signal.Protocol == mgmProto.HostConfig_HTTPS {
sigTLSEnabled = true
} else {
sigTLSEnabled = false
}
signalClient, err := signal.NewClient(ctx, wtConfig.Signal.Uri, ourPrivateKey, sigTLSEnabled)
if err != nil {
log.Errorf("error while connecting to the Signal Exchange Service %s: %s", wtConfig.Signal.Uri, err)
return nil, gstatus.Errorf(codes.FailedPrecondition, "failed connecting to Signal Service : %s", err)
}
return signalClient, nil
}
// loginToManagement creates a Management Service client, establishes a connection, logs in and gets a global Wiretrustee config (signal, turn, stun hosts, etc.)
func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte) (*mgmProto.LoginResponse, error) {
serverPublicKey, err := client.GetServerPublicKey()
if err != nil {
return nil, gstatus.Errorf(codes.FailedPrecondition, "failed while getting Management Service public key: %s", err)
}
sysInfo := system.GetInfo(ctx)
loginResp, err := client.Login(*serverPublicKey, sysInfo, pubSSHKey)
if err != nil {
return nil, err
}
return loginResp, nil
}
// ManagementLegacyPort is the port that was used before by the Management gRPC server.
// It is used for backward compatibility now.
// NB: hardcoded from github.com/netbirdio/netbird/management/cmd to avoid import
const ManagementLegacyPort = 33073
// UpdateOldManagementPort checks whether client can switch to the new Management port 443.
// If it can switch, then it updates the config and returns a new one. Otherwise, it returns the provided config.
// The check is performed only for the NetBird's managed version.
func UpdateOldManagementPort(ctx context.Context, config *Config, configPath string) (*Config, error) {
if config.ManagementURL.Hostname() != ManagementURLDefault().Hostname() {
// only do the check for the NetBird's managed version
return config, nil
}
var mgmTlsEnabled bool
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
if !mgmTlsEnabled {
// only do the check for the HTTPS scheme (the hosted version of the Management service is always HTTPS)
return config, nil
}
if mgmTlsEnabled && config.ManagementURL.Port() == fmt.Sprintf("%d", ManagementLegacyPort) {
newURL, err := ParseURL("Management URL", fmt.Sprintf("%s://%s:%d",
config.ManagementURL.Scheme, config.ManagementURL.Hostname(), 443))
if err != nil {
return nil, err
}
// here we check whether we could switch from the legacy 33073 port to the new 443
log.Infof("attempting to switch from the legacy Management URL %s to the new one %s",
config.ManagementURL.String(), newURL.String())
key, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
log.Infof("couldn't switch to the new Management %s", newURL.String())
return config, err
}
client, err := mgm.NewClient(ctx, newURL.Host, key, mgmTlsEnabled)
if err != nil {
log.Infof("couldn't switch to the new Management %s", newURL.String())
return config, err
}
defer func() {
err = client.Close()
if err != nil {
log.Warnf("failed to close the Management service client %v", err)
}
}()
// gRPC check
_, err = client.GetServerPublicKey()
if err != nil {
log.Infof("couldn't switch to the new Management %s", newURL.String())
return nil, err
}
// everything is alright => update the config
newConfig, err := ReadConfig(newURL.String(), "", configPath, nil)
if err != nil {
log.Infof("couldn't switch to the new Management %s", newURL.String())
return config, fmt.Errorf("failed updating config file: %v", err)
}
log.Infof("successfully switched to the new Management URL: %s", newURL.String())
return newConfig, nil
}
return config, nil
}

View File

@@ -0,0 +1,425 @@
package internal
import (
"context"
"fmt"
ice "github.com/pion/ice/v2"
log "github.com/sirupsen/logrus"
"github.com/wiretrustee/wiretrustee/iface"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"net"
"sync"
"time"
)
var (
// DefaultWgKeepAlive default Wireguard keep alive constant
DefaultWgKeepAlive = 20 * time.Second
privateIPBlocks []*net.IPNet
)
type Status string
const (
StatusConnected Status = "Connected"
StatusConnecting Status = "Connecting"
StatusDisconnected Status = "Disconnected"
)
func init() {
for _, cidr := range []string{
"127.0.0.0/8", // IPv4 loopback
"10.0.0.0/8", // RFC1918
"172.16.0.0/12", // RFC1918
"192.168.0.0/16", // RFC1918
"169.254.0.0/16", // RFC3927 link-local
"::1/128", // IPv6 loopback
"fe80::/10", // IPv6 link-local
"fc00::/7", // IPv6 unique local addr
} {
_, block, err := net.ParseCIDR(cidr)
if err != nil {
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
}
privateIPBlocks = append(privateIPBlocks, block)
}
}
// ConnConfig Connection configuration struct
type ConnConfig struct {
// Local Wireguard listening address e.g. 127.0.0.1:51820
WgListenAddr string
// A Local Wireguard Peer IP address in CIDR notation e.g. 10.30.30.1/24
WgPeerIP string
// Local Wireguard Interface name (e.g. wg0)
WgIface string
// Wireguard allowed IPs (e.g. 10.30.30.2/32)
WgAllowedIPs string
// Local Wireguard private key
WgKey wgtypes.Key
// Remote Wireguard public key
RemoteWgKey wgtypes.Key
PreSharedKey *wgtypes.Key
StunTurnURLS []*ice.URL
iFaceBlackList map[string]struct{}
}
// IceCredentials ICE protocol credentials struct
type IceCredentials struct {
uFrag string
pwd string
}
// Connection Holds information about a connection and handles signal protocol
type Connection struct {
Config ConnConfig
// signalCandidate is a handler function to signal remote peer about local connection candidate
signalCandidate func(candidate ice.Candidate) error
// signalOffer is a handler function to signal remote peer our connection offer (credentials)
signalOffer func(uFrag string, pwd string) error
// signalAnswer is a handler function to signal remote peer our connection answer (credentials)
signalAnswer func(uFrag string, pwd string) error
// remoteAuthChannel is a channel used to wait for remote credentials to proceed with the connection
remoteAuthChannel chan IceCredentials
// agent is an actual ice.Agent that is used to negotiate and maintain a connection to a remote peer
agent *ice.Agent
wgProxy *WgProxy
connected *Cond
closeCond *Cond
remoteAuthCond sync.Once
Status Status
}
// NewConnection creates a new connection and sets handling functions for the signal protocol
func NewConnection(config ConnConfig,
signalCandidate func(candidate ice.Candidate) error,
signalOffer func(uFrag string, pwd string) error,
signalAnswer func(uFrag string, pwd string) error,
) *Connection {
return &Connection{
Config: config,
signalCandidate: signalCandidate,
signalOffer: signalOffer,
signalAnswer: signalAnswer,
remoteAuthChannel: make(chan IceCredentials, 1),
closeCond: NewCond(),
connected: NewCond(),
agent: nil,
wgProxy: NewWgProxy(config.WgIface, config.RemoteWgKey.String(), config.WgAllowedIPs, config.WgListenAddr, config.PreSharedKey),
Status: StatusDisconnected,
}
}
// Open opens connection to a remote peer.
// It blocks until the connection has been successfully established.
func (conn *Connection) Open(timeout time.Duration) error {
// create an ice.Agent that will be responsible for negotiating and establishing actual peer-to-peer connection
a, err := ice.NewAgent(&ice.AgentConfig{
// MulticastDNSMode: ice.MulticastDNSModeQueryAndGather,
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4},
Urls: conn.Config.StunTurnURLS,
CandidateTypes: []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive, ice.CandidateTypeRelay},
InterfaceFilter: func(s string) bool {
if conn.Config.iFaceBlackList == nil {
return true
}
_, ok := conn.Config.iFaceBlackList[s]
return !ok
},
})
if err != nil {
return err
}
conn.agent = a
defer func() {
err := conn.agent.Close()
if err != nil {
return
}
}()
err = conn.listenOnLocalCandidates()
if err != nil {
return err
}
err = conn.listenOnConnectionStateChanges()
if err != nil {
return err
}
err = conn.signalCredentials()
if err != nil {
return err
}
conn.Status = StatusConnecting
log.Debugf("trying to connect to peer %s", conn.Config.RemoteWgKey.String())
// wait until credentials have been sent from the remote peer (will arrive via a signal server)
select {
case remoteAuth := <-conn.remoteAuthChannel:
log.Debugf("got a connection confirmation from peer %s", conn.Config.RemoteWgKey.String())
err = conn.agent.GatherCandidates()
if err != nil {
return err
}
isControlling := conn.Config.WgKey.PublicKey().String() > conn.Config.RemoteWgKey.String()
var remoteConn *ice.Conn
remoteConn, err = conn.openConnectionToRemote(isControlling, remoteAuth)
if err != nil {
log.Errorf("failed establishing connection with the remote peer %s %s", conn.Config.RemoteWgKey.String(), err)
return err
}
var pair *ice.CandidatePair
pair, err = conn.agent.GetSelectedCandidatePair()
if err != nil {
return err
}
useProxy := useProxy(pair)
// if the remote peer is in the local network or one of the peers has a public static IP -> no need for a Wireguard proxy, direct communication is possible.
if !useProxy {
log.Debugf("it is possible to establish a direct connection (without proxy) to peer %s - my addr: %s, remote addr: %s", conn.Config.RemoteWgKey.String(), pair.Local, pair.Remote)
err = conn.wgProxy.StartLocal(fmt.Sprintf("%s:%d", pair.Remote.Address(), iface.WgPort))
if err != nil {
return err
}
} else {
log.Debugf("establishing secure tunnel to peer %s via selected candidate pair %s", conn.Config.RemoteWgKey.String(), pair)
err = conn.wgProxy.Start(remoteConn)
if err != nil {
return err
}
}
relayed := pair.Remote.Type() == ice.CandidateTypeRelay || pair.Local.Type() == ice.CandidateTypeRelay
conn.Status = StatusConnected
log.Infof("opened connection to peer %s [localProxy=%v, relayed=%v]", conn.Config.RemoteWgKey.String(), useProxy, relayed)
case <-conn.closeCond.C:
conn.Status = StatusDisconnected
return fmt.Errorf("connection to peer %s has been closed", conn.Config.RemoteWgKey.String())
case <-time.After(timeout):
err = conn.Close()
if err != nil {
log.Warnf("error while closing connection to peer %s -> %s", conn.Config.RemoteWgKey.String(), err.Error())
}
conn.Status = StatusDisconnected
return fmt.Errorf("timeout of %vs exceeded while waiting for the remote peer %s", timeout.Seconds(), conn.Config.RemoteWgKey.String())
}
// wait until connection has been closed
<-conn.closeCond.C
conn.Status = StatusDisconnected
return fmt.Errorf("connection to peer %s has been closed", conn.Config.RemoteWgKey.String())
}
func isPublicIP(ip net.IP) bool {
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return false
}
for _, block := range privateIPBlocks {
if block.Contains(ip) {
return false
}
}
return true
}
// useProxy determines whether a direct connection (without a Go proxy) is possible.
// There are 3 cases: one of the two peers has a public IP (2 cases), or both peers are in the same private network.
// Please note that this check only happens after the peers were already able to ping each other at the ICE layer.
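// Illustrative examples (added note, not from the original), using documentation IPs:
//   local host 192.168.1.10, remote host 203.0.113.5 (public)  -> direct, returns false
//   local host 192.168.1.10, remote host 192.168.1.11          -> direct, returns false
//   local srflx 198.51.100.7, remote relay 203.0.113.80        -> proxied, returns true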
func useProxy(pair *ice.CandidatePair) bool {
remoteIP := net.ParseIP(pair.Remote.Address())
myIp := net.ParseIP(pair.Local.Address())
remoteIsPublic := isPublicIP(remoteIP)
myIsPublic := isPublicIP(myIp)
//one of the hosts has a public IP
if remoteIsPublic && pair.Remote.Type() == ice.CandidateTypeHost {
return false
}
if myIsPublic && pair.Local.Type() == ice.CandidateTypeHost {
return false
}
if pair.Local.Type() == ice.CandidateTypeHost && pair.Remote.Type() == ice.CandidateTypeHost {
if !remoteIsPublic && !myIsPublic {
//both hosts are in the same private network
return false
}
}
return true
}
// Close Closes a peer connection
func (conn *Connection) Close() error {
var err error
conn.closeCond.Do(func() {
log.Debugf("closing connection to peer %s", conn.Config.RemoteWgKey.String())
if a := conn.agent; a != nil {
e := a.Close()
if e != nil {
log.Warnf("error while closing ICE agent of peer connection %s", conn.Config.RemoteWgKey.String())
err = e
}
}
if c := conn.wgProxy; c != nil {
e := c.Close()
if e != nil {
log.Warnf("error while closingWireguard proxy connection of peer connection %s", conn.Config.RemoteWgKey.String())
err = e
}
}
})
return err
}
// OnAnswer Handles the answer from the other peer
func (conn *Connection) OnAnswer(remoteAuth IceCredentials) error {
conn.remoteAuthCond.Do(func() {
log.Debugf("OnAnswer from peer %s", conn.Config.RemoteWgKey.String())
conn.remoteAuthChannel <- remoteAuth
})
return nil
}
// OnOffer Handles the offer from the other peer
func (conn *Connection) OnOffer(remoteAuth IceCredentials) error {
conn.remoteAuthCond.Do(func() {
log.Debugf("OnOffer from peer %s", conn.Config.RemoteWgKey.String())
conn.remoteAuthChannel <- remoteAuth
uFrag, pwd, err := conn.agent.GetLocalUserCredentials()
if err != nil { //nolint
}
err = conn.signalAnswer(uFrag, pwd)
if err != nil { //nolint
}
})
return nil
}
// OnRemoteCandidate Handles remote candidate provided by the peer.
func (conn *Connection) OnRemoteCandidate(candidate ice.Candidate) error {
log.Debugf("onRemoteCandidate from peer %s -> %s", conn.Config.RemoteWgKey.String(), candidate.String())
err := conn.agent.AddRemoteCandidate(candidate)
if err != nil {
return err
}
return nil
}
// openConnectionToRemote opens an ice.Conn to the remote peer. This is a real peer-to-peer connection
// blocks until connection has been established
func (conn *Connection) openConnectionToRemote(isControlling bool, credentials IceCredentials) (*ice.Conn, error) {
var realConn *ice.Conn
var err error
if isControlling {
realConn, err = conn.agent.Dial(context.TODO(), credentials.uFrag, credentials.pwd)
} else {
realConn, err = conn.agent.Accept(context.TODO(), credentials.uFrag, credentials.pwd)
}
if err != nil {
return nil, err
}
return realConn, err
}
// signalCredentials prepares local user credentials and signals them to the remote peer
func (conn *Connection) signalCredentials() error {
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
if err != nil {
return err
}
err = conn.signalOffer(localUFrag, localPwd)
if err != nil {
return err
}
return nil
}
// listenOnLocalCandidates registers callback of an ICE Agent to receive new local connection candidates and then
// signals them to the remote peer
func (conn *Connection) listenOnLocalCandidates() error {
err := conn.agent.OnCandidate(func(candidate ice.Candidate) {
if candidate != nil {
log.Debugf("discovered local candidate %s", candidate.String())
err := conn.signalCandidate(candidate)
if err != nil {
log.Errorf("failed signaling candidate to the remote peer %s %s", conn.Config.RemoteWgKey.String(), err)
//todo ??
return
}
}
})
if err != nil {
return err
}
return nil
}
// listenOnConnectionStateChanges registers callback of an ICE Agent to track connection state
func (conn *Connection) listenOnConnectionStateChanges() error {
err := conn.agent.OnConnectionStateChange(func(state ice.ConnectionState) {
log.Debugf("ICE Connection State has changed for peer %s -> %s", conn.Config.RemoteWgKey.String(), state.String())
if state == ice.ConnectionStateConnected {
// once the connection has been established we can check the selected candidate pair
pair, err := conn.agent.GetSelectedCandidatePair()
if err != nil {
log.Errorf("failed selecting active ICE candidate pair %s", err)
return
}
log.Debugf("ICE connected to peer %s via a selected connnection candidate pair %s", conn.Config.RemoteWgKey.String(), pair)
} else if state == ice.ConnectionStateDisconnected || state == ice.ConnectionStateFailed {
err := conn.Close()
if err != nil {
log.Warnf("error while closing connection to peer %s -> %s", conn.Config.RemoteWgKey.String(), err.Error())
}
}
})
if err != nil {
return err
}
return nil
}
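To show how the pieces above fit together, a hedged usage sketch; the helper name and the no-op signaling callbacks are made up, while a real caller (the engine) signals offers, answers and candidates through the Signal service.
func dialPeerSketch(cfg ConnConfig) error {
	// dialPeerSketch is a hypothetical example of wiring up a Connection.
	conn := NewConnection(cfg,
		func(candidate ice.Candidate) error { return nil }, // would signal the local candidate to the remote peer
		func(uFrag string, pwd string) error { return nil }, // would signal our offer credentials
		func(uFrag string, pwd string) error { return nil }, // would signal our answer credentials
	)
	// Open blocks until the tunnel is up, the connection is closed, or the timeout fires.
	return conn.Open(40 * time.Second) // mirrors PeerConnectionTimeout used by the engine
}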

View File

@@ -3,80 +3,56 @@ package internal
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal/routemanager"
nbssh "github.com/netbirdio/netbird/client/ssh"
nbstatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/route"
"math/rand"
"reflect"
"runtime"
"github.com/cenkalti/backoff/v4"
"github.com/pion/ice/v2"
log "github.com/sirupsen/logrus"
"github.com/wiretrustee/wiretrustee/iface"
mgm "github.com/wiretrustee/wiretrustee/management/client"
mgmProto "github.com/wiretrustee/wiretrustee/management/proto"
signal "github.com/wiretrustee/wiretrustee/signal/client"
sProto "github.com/wiretrustee/wiretrustee/signal/proto"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"strings"
"sync"
"time"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/proxy"
"github.com/netbirdio/netbird/iface"
mgm "github.com/netbirdio/netbird/management/client"
mgmProto "github.com/netbirdio/netbird/management/proto"
signal "github.com/netbirdio/netbird/signal/client"
sProto "github.com/netbirdio/netbird/signal/proto"
"github.com/netbirdio/netbird/util"
"github.com/pion/ice/v2"
log "github.com/sirupsen/logrus"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
// PeerConnectionTimeoutMax is a timeout of an initial connection attempt to a remote peer.
// E.g. this peer will wait PeerConnectionTimeoutMax for the remote peer to respond,
// if not successful then it will retry the connection attempt.
// TODO: pass the timeout via EngineConfig
const (
PeerConnectionTimeoutMax = 45000 // ms
PeerConnectionTimeoutMin = 30000 // ms
)
var ErrResetConnection = fmt.Errorf("reset connection")
// PeerConnectionTimeout is a timeout of an initial connection attempt to a remote peer.
// E.g. this peer will wait PeerConnectionTimeout for the remote peer to respond, if not successful then it will retry the connection attempt.
const PeerConnectionTimeout = 40 * time.Second
// EngineConfig is a config for the Engine
type EngineConfig struct {
WgPort int
WgIfaceName string
// WgAddr is a Wireguard local address (Netbird Network IP)
WgIface string
// WgAddr is a Wireguard local address (Wiretrustee Network IP)
WgAddr string
// WgPrivateKey is a Wireguard private key of our peer (it MUST never leave the machine)
WgPrivateKey wgtypes.Key
// IFaceBlackList is a list of network interfaces to ignore when discovering connection candidates (ICE related)
IFaceBlackList []string
IFaceBlackList map[string]struct{}
PreSharedKey *wgtypes.Key
// UDPMuxPort default value 0 - the system will pick an available port
UDPMuxPort int
// UDPMuxSrflxPort default value 0 - the system will pick an available port
UDPMuxSrflxPort int
// SSHKey is a private SSH key in a PEM format
SSHKey []byte
}
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
type Engine struct {
// signal is a Signal Service client
signal signal.Client
signal *signal.Client
// mgmClient is a Management Service client
mgmClient mgm.Client
// peerConns is a map that holds all the peers that are known to this peer
peerConns map[string]*peer.Conn
mgmClient *mgm.Client
// conns is a collection of remote peer connections indexed by the public key of the remote peer
conns map[string]*Connection
// peerMux is used to sync peer operations (e.g. open connection, peer removal)
peerMux *sync.Mutex
// syncMsgMux is used to guarantee sequential Management Service message processing
syncMsgMux *sync.Mutex
config *EngineConfig
// wgPort is a Wireguard local listen port
wgPort int
// STUNs is a list of STUN servers used by ICE
STUNs []*ice.URL
// TURNs is a list of TURN servers used by ICE
@@ -85,20 +61,6 @@ type Engine struct {
cancel context.CancelFunc
ctx context.Context
wgInterface *iface.WGIface
iceMux ice.UniversalUDPMux
// networkSerial is the latest CurrentSerial (state ID) of the network sent by the Management service
networkSerial uint64
sshServerFunc func(hostKeyPEM []byte, addr string) (nbssh.Server, error)
sshServer nbssh.Server
statusRecorder *nbstatus.Status
routeManager routemanager.Manager
}
// Peer is an instance of the Connection Peer
@@ -108,61 +70,35 @@ type Peer struct {
}
// NewEngine creates a new Connection Engine
func NewEngine(
ctx context.Context, cancel context.CancelFunc,
signalClient signal.Client, mgmClient mgm.Client,
config *EngineConfig, statusRecorder *nbstatus.Status,
) *Engine {
func NewEngine(signalClient *signal.Client, mgmClient *mgm.Client, config *EngineConfig, cancel context.CancelFunc, ctx context.Context) *Engine {
return &Engine{
ctx: ctx,
cancel: cancel,
signal: signalClient,
mgmClient: mgmClient,
peerConns: map[string]*peer.Conn{},
syncMsgMux: &sync.Mutex{},
config: config,
STUNs: []*ice.URL{},
TURNs: []*ice.URL{},
networkSerial: 0,
sshServerFunc: nbssh.DefaultSSHServer,
statusRecorder: statusRecorder,
signal: signalClient,
mgmClient: mgmClient,
conns: map[string]*Connection{},
peerMux: &sync.Mutex{},
syncMsgMux: &sync.Mutex{},
config: config,
STUNs: []*ice.URL{},
TURNs: []*ice.URL{},
cancel: cancel,
ctx: ctx,
}
}
func (e *Engine) Stop() error {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
err := e.removeAllPeers()
err := e.removeAllPeerConnections()
if err != nil {
return err
}
// very ugly, but we want to remove peers from the WireGuard interface before removing the interface itself.
// Removing peers happens asynchronously in conn.Close()
time.Sleep(500 * time.Millisecond)
log.Debugf("removing Netbird interface %s", e.config.WgIfaceName)
if e.wgInterface.Interface != nil {
err = e.wgInterface.Close()
if err != nil {
log.Errorf("failed closing Netbird interface %s %v", e.config.WgIfaceName, err)
return err
}
log.Debugf("removing Wiretrustee interface %s", e.config.WgIface)
err = iface.Close()
if err != nil {
log.Errorf("failed closing Wiretrustee interface %s %v", e.config.WgIface, err)
return err
}
if !isNil(e.sshServer) {
err := e.sshServer.Stop()
if err != nil {
log.Warnf("failed stopping the SSH server: %v", err)
}
}
if e.routeManager != nil {
e.routeManager.Stop()
}
log.Infof("stopped Netbird Engine")
log.Infof("stopped Wiretrustee Engine")
return nil
}
@@ -171,48 +107,29 @@ func (e *Engine) Stop() error {
// Connections to remote peers are not established here.
// However, they will be established once an event with a list of peers to connect to is received from the Management Service
func (e *Engine) Start() error {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
wgIfaceName := e.config.WgIfaceName
wgIface := e.config.WgIface
wgAddr := e.config.WgAddr
myPrivateKey := e.config.WgPrivateKey
var err error
e.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, iface.DefaultMTU)
err := iface.Create(wgIface, wgAddr)
if err != nil {
log.Errorf("failed creating wireguard interface instance %s: [%s]", wgIfaceName, err.Error())
log.Errorf("failed creating interface %s: [%s]", wgIface, err.Error())
return err
}
bind := &iface.ICEBind{}
err = e.wgInterface.CreateNew(bind)
err = iface.Configure(wgIface, myPrivateKey.String())
if err != nil {
log.Errorf("failed creating tunnel interface %s: [%s]", wgIfaceName, err.Error())
log.Errorf("failed configuring Wireguard interface [%s]: %s", wgIface, err.Error())
return err
}
port, err := e.wgInterface.GetListenPort()
port, err := iface.GetListenPort(wgIface)
if err != nil {
log.Errorf("failed getting Wireguard listen port [%s]: %s", wgIface, err.Error())
return err
}
err = e.wgInterface.Configure(myPrivateKey.String(), *port)
if err != nil {
log.Errorf("failed configuring Wireguard interface [%s]: %s", wgIfaceName, err.Error())
return err
}
iceMux, err := bind.GetICEMux()
if err != nil {
return err
}
e.iceMux = iceMux
log.Infof("NetBird Engine started listening on WireGuard port %d", *port)
e.routeManager = routemanager.NewManager(e.ctx, e.config.WgPrivateKey.PublicKey().String(), e.wgInterface, e.statusRecorder)
e.config.WgPort = *port
e.wgPort = *port
e.receiveSignalEvents()
e.receiveManagementEvents()
@@ -220,30 +137,51 @@ func (e *Engine) Start() error {
return nil
}
// modifyPeers updates peers that have been modified (e.g. IP address has been changed).
// It closes the existing connection, removes it from the peerConns map, and creates a new one.
func (e *Engine) modifyPeers(peersUpdate []*mgmProto.RemotePeerConfig) error {
// initializePeer starts the peer agent's attempt to open a connection
func (e *Engine) initializePeer(peer Peer) {
var backOff = backoff.WithContext(&backoff.ExponentialBackOff{
InitialInterval: backoff.DefaultInitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: 5 * time.Second,
MaxElapsedTime: 0, //never stop
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}, e.ctx)
// first, check if peers have been modified
var modified []*mgmProto.RemotePeerConfig
for _, p := range peersUpdate {
if peerConn, ok := e.peerConns[p.GetWgPubKey()]; ok {
if peerConn.GetConf().ProxyConfig.AllowedIps != strings.Join(p.AllowedIps, ",") {
modified = append(modified, p)
}
operation := func() error {
if e.signal.GetStatus() != signal.StreamConnected {
return fmt.Errorf("not opening connection to peer because Signal is unavailable")
}
_, err := e.openPeerConnection(e.wgPort, e.config.WgPrivateKey, peer)
e.peerMux.Lock()
defer e.peerMux.Unlock()
if _, ok := e.conns[peer.WgPubKey]; !ok {
log.Debugf("removed connection attempt to peer: %v, not retrying", peer.WgPubKey)
return nil
}
}
// second, close all modified connections and remove them from the state map
for _, p := range modified {
err := e.removePeer(p.GetWgPubKey())
if err != nil {
log.Debugf("retrying connection because of error: %s", err.Error())
return err
}
return nil
}
// third, add the peer connections again
for _, p := range modified {
err := e.addNewPeer(p)
err := backoff.Retry(operation, backOff)
if err != nil {
// should actually never happen
panic(err)
}
}
func (e *Engine) removePeerConnections(peers []string) error {
e.peerMux.Lock()
defer e.peerMux.Unlock()
for _, peer := range peers {
err := e.removePeerConnection(peer)
if err != nil {
return err
}
@@ -251,35 +189,12 @@ func (e *Engine) modifyPeers(peersUpdate []*mgmProto.RemotePeerConfig) error {
return nil
}
// removePeers finds and removes peers that do not exist anymore in the network map received from the Management Service.
// It also removes peers that have been modified (e.g. change of IP address). They will be added again in addPeers method.
func (e *Engine) removePeers(peersUpdate []*mgmProto.RemotePeerConfig) error {
currentPeers := make([]string, 0, len(e.peerConns))
for p := range e.peerConns {
currentPeers = append(currentPeers, p)
}
newPeers := make([]string, 0, len(peersUpdate))
for _, p := range peersUpdate {
newPeers = append(newPeers, p.GetWgPubKey())
}
toRemove := util.SliceDiff(currentPeers, newPeers)
for _, p := range toRemove {
err := e.removePeer(p)
if err != nil {
return err
}
log.Infof("removed peer %s", p)
}
return nil
}
func (e *Engine) removeAllPeers() error {
func (e *Engine) removeAllPeerConnections() error {
log.Debugf("removing all peer connections")
for p := range e.peerConns {
err := e.removePeer(p)
e.peerMux.Lock()
defer e.peerMux.Unlock()
for peer := range e.conns {
err := e.removePeerConnection(peer)
if err != nil {
return err
}
@@ -287,74 +202,70 @@ func (e *Engine) removeAllPeers() error {
return nil
}
// removePeer closes an existing peer connection, removes a peer, and clears authorized key of the SSH server
func (e *Engine) removePeer(peerKey string) error {
log.Debugf("removing peer from engine %s", peerKey)
if !isNil(e.sshServer) {
e.sshServer.RemoveAuthorizedKey(peerKey)
}
defer func() {
err := e.statusRecorder.RemovePeer(peerKey)
if err != nil {
log.Warnf("received error when removing peer %s from status recorder: %v", peerKey, err)
}
}()
conn, exists := e.peerConns[peerKey]
if exists {
delete(e.peerConns, peerKey)
err := conn.Close()
if err != nil {
switch err.(type) {
case *peer.ConnectionAlreadyClosedError:
return nil
default:
return err
}
}
// removePeerConnection closes existing peer connection and removes peer
func (e *Engine) removePeerConnection(peerKey string) error {
conn, exists := e.conns[peerKey]
if exists && conn != nil {
delete(e.conns, peerKey)
return conn.Close()
}
log.Infof("removed connection to peer %s", peerKey)
return nil
}
// GetPeerConnectionStatus returns a connection Status or nil if peer connection wasn't found
func (e *Engine) GetPeerConnectionStatus(peerKey string) peer.ConnStatus {
conn, exists := e.peerConns[peerKey]
func (e *Engine) GetPeerConnectionStatus(peerKey string) *Status {
e.peerMux.Lock()
defer e.peerMux.Unlock()
conn, exists := e.conns[peerKey]
if exists && conn != nil {
return conn.Status()
return &conn.Status
}
return -1
return nil
}
func (e *Engine) GetPeers() []string {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
// openPeerConnection opens a new remote peer connection
func (e *Engine) openPeerConnection(wgPort int, myKey wgtypes.Key, peer Peer) (*Connection, error) {
e.peerMux.Lock()
peers := []string{}
for s := range e.peerConns {
peers = append(peers, s)
}
return peers
}
// GetConnectedPeers returns a connection Status or nil if peer connection wasn't found
func (e *Engine) GetConnectedPeers() []string {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
peers := []string{}
for s, conn := range e.peerConns {
if conn.Status() == peer.StatusConnected {
peers = append(peers, s)
}
remoteKey, _ := wgtypes.ParseKey(peer.WgPubKey)
connConfig := &ConnConfig{
WgListenAddr: fmt.Sprintf("127.0.0.1:%d", wgPort),
WgPeerIP: e.config.WgAddr,
WgIface: e.config.WgIface,
WgAllowedIPs: peer.WgAllowedIps,
WgKey: myKey,
RemoteWgKey: remoteKey,
StunTurnURLS: append(e.STUNs, e.TURNs...),
iFaceBlackList: e.config.IFaceBlackList,
PreSharedKey: e.config.PreSharedKey,
}
return peers
signalOffer := func(uFrag string, pwd string) error {
return signalAuth(uFrag, pwd, myKey, remoteKey, e.signal, false)
}
signalAnswer := func(uFrag string, pwd string) error {
return signalAuth(uFrag, pwd, myKey, remoteKey, e.signal, true)
}
signalCandidate := func(candidate ice.Candidate) error {
return signalCandidate(candidate, myKey, remoteKey, e.signal)
}
conn := NewConnection(*connConfig, signalCandidate, signalOffer, signalAnswer)
e.conns[remoteKey.String()] = conn
e.peerMux.Unlock()
// blocks until the connection is open (or timeout)
err := conn.Open(PeerConnectionTimeout)
if err != nil {
return nil, err
}
return conn, nil
}
func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtypes.Key, s signal.Client) error {
func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtypes.Key, s *signal.Client) error {
err := s.Send(&sProto.Message{
Key: myKey.PublicKey().String(),
RemoteKey: remoteKey.String(),
@@ -364,14 +275,16 @@ func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtyp
},
})
if err != nil {
log.Errorf("failed signaling candidate to the remote peer %s %s", remoteKey.String(), err)
//todo ??
return err
}
return nil
}
// SignalOfferAnswer signals either an offer or an answer to remote peer
func SignalOfferAnswer(offerAnswer peer.OfferAnswer, myKey wgtypes.Key, remoteKey wgtypes.Key, s signal.Client, isAnswer bool) error {
func signalAuth(uFrag string, pwd string, myKey wgtypes.Key, remoteKey wgtypes.Key, s *signal.Client, isAnswer bool) error {
var t sProto.Body_Type
if isAnswer {
t = sProto.Body_ANSWER
@@ -379,10 +292,9 @@ func SignalOfferAnswer(offerAnswer peer.OfferAnswer, myKey wgtypes.Key, remoteKe
t = sProto.Body_OFFER
}
msg, err := signal.MarshalCredential(myKey, offerAnswer.WgListenPort, remoteKey, &signal.Credential{
UFrag: offerAnswer.IceCredentials.UFrag,
Pwd: offerAnswer.IceCredentials.Pwd,
}, t)
msg, err := signal.MarshalCredential(myKey, remoteKey, &signal.Credential{
UFrag: uFrag,
Pwd: pwd}, t)
if err != nil {
return err
}
@@ -394,115 +306,41 @@ func SignalOfferAnswer(offerAnswer peer.OfferAnswer, myKey wgtypes.Key, remoteKe
return nil
}
func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
if update.GetWiretrusteeConfig() != nil {
err := e.updateTURNs(update.GetWiretrusteeConfig().GetTurns())
if err != nil {
return err
}
err = e.updateSTUNs(update.GetWiretrusteeConfig().GetStuns())
if err != nil {
return err
}
// todo update signal
}
if update.GetNetworkMap() != nil {
// only apply new changes and ignore old ones
err := e.updateNetworkMap(update.GetNetworkMap())
if err != nil {
return err
}
}
return nil
}
func isNil(server nbssh.Server) bool {
return server == nil || reflect.ValueOf(server).IsNil()
}
func (e *Engine) updateSSH(sshConf *mgmProto.SSHConfig) error {
if sshConf.GetSshEnabled() {
if runtime.GOOS == "windows" {
log.Warnf("running SSH server on Windows is not supported")
return nil
}
// start SSH server if it wasn't running
if isNil(e.sshServer) {
//nil sshServer means it has not yet been started
var err error
e.sshServer, err = e.sshServerFunc(e.config.SSHKey,
fmt.Sprintf("%s:%d", e.wgInterface.Address.IP.String(), nbssh.DefaultSSHPort))
if err != nil {
return err
}
go func() {
// blocking
err = e.sshServer.Start()
if err != nil {
// will throw error when we stop it even if it is a graceful stop
log.Debugf("stopped SSH server with error %v", err)
}
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
e.sshServer = nil
log.Infof("stopped SSH server")
}()
} else {
log.Debugf("SSH server is already running")
}
} else {
// a disable-SSH-server request, so stop the server if it was running
if !isNil(e.sshServer) {
err := e.sshServer.Stop()
if err != nil {
log.Warnf("failed to stop SSH server %v", err)
}
e.sshServer = nil
}
}
return nil
}
func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error {
if e.wgInterface.Address.String() != conf.Address {
oldAddr := e.wgInterface.Address.String()
log.Debugf("updating peer address from %s to %s", oldAddr, conf.Address)
err := e.wgInterface.UpdateAddr(conf.Address)
if err != nil {
return err
}
e.config.WgAddr = conf.Address
log.Infof("updated peer address from %s to %s", oldAddr, conf.Address)
}
if conf.GetSshConfig() != nil {
err := e.updateSSH(conf.GetSshConfig())
if err != nil {
log.Warnf("failed handling SSH server setup %v", e)
}
}
return nil
}
// receiveManagementEvents connects to the Management Service event stream to receive updates from the management service
// E.g. when a new peer has been registered and we are allowed to connect to it.
func (e *Engine) receiveManagementEvents() {
go func() {
err := e.mgmClient.Sync(func(update *mgmProto.SyncResponse) error {
return e.handleSync(update)
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
if update.GetWiretrusteeConfig() != nil {
err := e.updateTURNs(update.GetWiretrusteeConfig().GetTurns())
if err != nil {
return err
}
err = e.updateSTUNs(update.GetWiretrusteeConfig().GetStuns())
if err != nil {
return err
}
//todo update signal
}
if update.GetRemotePeers() != nil || update.GetRemotePeersIsEmpty() {
// empty arrays are serialized by protobuf to null, but for our case empty array is a valid state.
err := e.updatePeers(update.GetRemotePeers())
if err != nil {
return err
}
}
return nil
})
if err != nil {
// happens if management is unavailable for a long time.
// We want to cancel the operation of the whole client
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
e.cancel()
return
}
@@ -549,271 +387,102 @@ func (e *Engine) updateTURNs(turns []*mgmProto.ProtectedHostConfig) error {
return nil
}
func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
// intentionally handle the peer config before checking the serial, because for now the peer IP can change while the serial doesn't
if networkMap.GetPeerConfig() != nil {
err := e.updateConfig(networkMap.GetPeerConfig())
if err != nil {
return err
}
func (e *Engine) updatePeers(remotePeers []*mgmProto.RemotePeerConfig) error {
log.Debugf("got peers update from Management Service, updating")
remotePeerMap := make(map[string]struct{})
for _, peer := range remotePeers {
remotePeerMap[peer.GetWgPubKey()] = struct{}{}
}
serial := networkMap.GetSerial()
if e.networkSerial > serial {
log.Debugf("received outdated NetworkMap with serial %d, ignoring", serial)
return nil
}
log.Debugf("got peers update from Management Service, total peers to connect to = %d", len(networkMap.GetRemotePeers()))
// cleanup request, most likely our peer has been deleted
if networkMap.GetRemotePeersIsEmpty() {
err := e.removeAllPeers()
if err != nil {
return err
}
} else {
err := e.removePeers(networkMap.GetRemotePeers())
if err != nil {
return err
}
err = e.modifyPeers(networkMap.GetRemotePeers())
if err != nil {
return err
}
err = e.addNewPeers(networkMap.GetRemotePeers())
if err != nil {
return err
}
// update SSHServer by adding remote peer SSH keys
if !isNil(e.sshServer) {
for _, config := range networkMap.GetRemotePeers() {
if config.GetSshConfig() != nil && config.GetSshConfig().GetSshPubKey() != nil {
err := e.sshServer.AddAuthorizedKey(config.WgPubKey, string(config.GetSshConfig().GetSshPubKey()))
if err != nil {
log.Warnf("failed adding authroized key to SSH DefaultServer %v", err)
}
}
}
// remove peers that are no longer available to us
toRemove := []string{}
for p := range e.conns {
if _, ok := remotePeerMap[p]; !ok {
toRemove = append(toRemove, p)
}
}
protoRoutes := networkMap.GetRoutes()
if protoRoutes == nil {
protoRoutes = []*mgmProto.Route{}
}
err := e.routeManager.UpdateRoutes(serial, toRoutes(protoRoutes))
err := e.removePeerConnections(toRemove)
if err != nil {
log.Errorf("failed to update routes, err: %v", err)
return err
}
e.networkSerial = serial
return nil
}
func toRoutes(protoRoutes []*mgmProto.Route) []*route.Route {
routes := make([]*route.Route, 0)
for _, protoRoute := range protoRoutes {
_, prefix, _ := route.ParseNetwork(protoRoute.Network)
convertedRoute := &route.Route{
ID: protoRoute.ID,
Network: prefix,
NetID: protoRoute.NetID,
NetworkType: route.NetworkType(protoRoute.NetworkType),
Peer: protoRoute.Peer,
Metric: int(protoRoute.Metric),
Masquerade: protoRoute.Masquerade,
// add new peers
for _, peer := range remotePeers {
peerKey := peer.GetWgPubKey()
peerIPs := peer.GetAllowedIps()
if _, ok := e.conns[peerKey]; !ok {
go e.initializePeer(Peer{
WgPubKey: peerKey,
WgAllowedIps: strings.Join(peerIPs, ","),
})
}
routes = append(routes, convertedRoute)
}
return routes
}
// addNewPeers adds peers that were not known before but arrived from the Management service with the update
func (e *Engine) addNewPeers(peersUpdate []*mgmProto.RemotePeerConfig) error {
for _, p := range peersUpdate {
err := e.addNewPeer(p)
if err != nil {
return err
}
}
return nil
}
// addNewPeer adds a peer if a connection to it doesn't exist yet
func (e *Engine) addNewPeer(peerConfig *mgmProto.RemotePeerConfig) error {
peerKey := peerConfig.GetWgPubKey()
peerIPs := peerConfig.GetAllowedIps()
if _, ok := e.peerConns[peerKey]; !ok {
conn, err := e.createPeerConn(peerKey, strings.Join(peerIPs, ","))
if err != nil {
return err
}
e.peerConns[peerKey] = conn
err = e.statusRecorder.AddPeer(peerKey)
if err != nil {
log.Warnf("error adding peer %s to status recorder, got error: %v", peerKey, err)
}
go e.connWorker(conn, peerKey)
}
return nil
}
func (e *Engine) connWorker(conn *peer.Conn, peerKey string) {
for {
// randomize starting time a bit
min := 500
max := 2000
time.Sleep(time.Duration(rand.Intn(max-min)+min) * time.Millisecond)
// if peer has been removed -> give up
if !e.peerExists(peerKey) {
log.Debugf("peer %s doesn't exist anymore, won't retry connection", peerKey)
return
}
if !e.signal.Ready() {
log.Infof("signal client isn't ready, skipping connection attempt %s", peerKey)
continue
}
// we might have received new STUN and TURN servers meanwhile, so update them
e.syncMsgMux.Lock()
conf := conn.GetConf()
conf.StunTurn = append(e.STUNs, e.TURNs...)
conn.UpdateConf(conf)
e.syncMsgMux.Unlock()
err := conn.Open()
if err != nil {
log.Debugf("connection to peer %s failed: %v", peerKey, err)
switch err.(type) {
case *peer.ConnectionClosedError:
// conn has been forced to close, so we exit the loop
return
default:
}
}
}
}
func (e Engine) peerExists(peerKey string) bool {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
_, ok := e.peerConns[peerKey]
return ok
}
func (e Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, error) {
log.Debugf("creating peer connection %s", pubKey)
var stunTurn []*ice.URL
stunTurn = append(stunTurn, e.STUNs...)
stunTurn = append(stunTurn, e.TURNs...)
proxyConfig := proxy.Config{
RemoteKey: pubKey,
WgListenAddr: fmt.Sprintf("127.0.0.1:%d", e.config.WgPort),
WgInterface: e.wgInterface,
AllowedIps: allowedIPs,
PreSharedKey: e.config.PreSharedKey,
}
// randomize connection timeout
timeout := time.Duration(rand.Intn(PeerConnectionTimeoutMax-PeerConnectionTimeoutMin)+PeerConnectionTimeoutMin) * time.Millisecond
config := peer.ConnConfig{
Key: pubKey,
LocalKey: e.config.WgPrivateKey.PublicKey().String(),
StunTurn: stunTurn,
InterfaceBlackList: e.config.IFaceBlackList,
Timeout: timeout,
UDPMux: e.iceMux,
UDPMuxSrflx: e.iceMux,
ProxyConfig: proxyConfig,
LocalWgPort: e.config.WgPort,
}
peerConn, err := peer.NewConn(config, e.statusRecorder)
if err != nil {
return nil, err
}
wgPubKey, err := wgtypes.ParseKey(pubKey)
if err != nil {
return nil, err
}
signalOffer := func(offerAnswer peer.OfferAnswer) error {
return SignalOfferAnswer(offerAnswer, e.config.WgPrivateKey, wgPubKey, e.signal, false)
}
signalCandidate := func(candidate ice.Candidate) error {
return signalCandidate(candidate, e.config.WgPrivateKey, wgPubKey, e.signal)
}
signalAnswer := func(offerAnswer peer.OfferAnswer) error {
return SignalOfferAnswer(offerAnswer, e.config.WgPrivateKey, wgPubKey, e.signal, true)
}
peerConn.SetSignalCandidate(signalCandidate)
peerConn.SetSignalOffer(signalOffer)
peerConn.SetSignalAnswer(signalAnswer)
return peerConn, nil
}
// receiveSignalEvents connects to the Signal Service event stream to negotiate connection with remote peers
func (e *Engine) receiveSignalEvents() {
go func() {
// connect to a stream of messages coming from the signal server
err := e.signal.Receive(func(msg *sProto.Message) error {
e.syncMsgMux.Lock()
defer e.syncMsgMux.Unlock()
conn := e.peerConns[msg.Key]
if conn == nil {
return fmt.Errorf("wrongly addressed message %s", msg.Key)
}
switch msg.GetBody().Type {
case sProto.Body_OFFER:
remoteCred, err := signal.UnMarshalCredential(msg)
if err != nil {
return err
}
conn.OnRemoteOffer(peer.OfferAnswer{
IceCredentials: peer.IceCredentials{
UFrag: remoteCred.UFrag,
Pwd: remoteCred.Pwd,
},
WgListenPort: int(msg.GetBody().GetWgListenPort()),
Version: msg.GetBody().GetNetBirdVersion(),
})
return nil
case sProto.Body_ANSWER:
remoteCred, err := signal.UnMarshalCredential(msg)
if err != nil {
return err
}
conn.OnRemoteAnswer(peer.OfferAnswer{
IceCredentials: peer.IceCredentials{
UFrag: remoteCred.UFrag,
Pwd: remoteCred.Pwd,
},
WgListenPort: int(msg.GetBody().GetWgListenPort()),
Version: msg.GetBody().GetNetBirdVersion(),
})
case sProto.Body_CANDIDATE:
candidate, err := ice.UnmarshalCandidate(msg.GetBody().Payload)
if err != nil {
log.Errorf("failed on parsing remote candidate %s -> %s", candidate, err)
return err
}
conn.OnRemoteCandidate(candidate)
}
return nil
@@ -821,7 +490,6 @@ func (e *Engine) receiveSignalEvents() {
if err != nil {
// happens if signal is unavailable for a long time.
// We want to cancel the operation of the whole client
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
e.cancel()
return
}
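The update path above only acts on network maps whose Serial is at least as new as the last one applied; the "input with outdated (old) update to ignore" test case below depends on that. A self-contained sketch of the guard, with simplified names rather than the real Engine fields (illustrative only, the exact comparison is inferred from the test cases):

package main

import "fmt"

type engine struct {
	networkSerial uint64
}

// applyUpdate ignores any network map whose serial is older than the one
// already applied and records the serial otherwise.
func (e *engine) applyUpdate(serial uint64) bool {
	if serial < e.networkSerial {
		return false // older than what we already applied, ignore it
	}
	e.networkSerial = serial
	return true
}

func main() {
	e := &engine{networkSerial: 2}
	fmt.Println(e.applyUpdate(1)) // false: older than the current serial
	fmt.Println(e.applyUpdate(2)) // true: same serial is re-applied
	fmt.Println(e.applyUpdate(3)) // true
}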

View File

@@ -1,781 +0,0 @@
package internal
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal/routemanager"
"github.com/netbirdio/netbird/client/ssh"
nbstatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/iface"
"github.com/netbirdio/netbird/route"
"github.com/stretchr/testify/assert"
"net"
"net/netip"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
"github.com/netbirdio/netbird/client/system"
mgmt "github.com/netbirdio/netbird/management/client"
mgmtProto "github.com/netbirdio/netbird/management/proto"
"github.com/netbirdio/netbird/management/server"
signal "github.com/netbirdio/netbird/signal/client"
"github.com/netbirdio/netbird/signal/proto"
signalServer "github.com/netbirdio/netbird/signal/server"
"github.com/netbirdio/netbird/util"
log "github.com/sirupsen/logrus"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
var (
kaep = keepalive.EnforcementPolicy{
MinTime: 15 * time.Second,
PermitWithoutStream: true,
}
kasp = keepalive.ServerParameters{
MaxConnectionIdle: 15 * time.Second,
MaxConnectionAgeGrace: 5 * time.Second,
Time: 5 * time.Second,
Timeout: 2 * time.Second,
}
)
func TestEngine_SSH(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping TestEngine_SSH on Windows")
}
key, err := wgtypes.GeneratePrivateKey()
if err != nil {
t.Fatal(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
WgIfaceName: "utun101",
WgAddr: "100.64.0.1/24",
WgPrivateKey: key,
WgPort: 33100,
}, nbstatus.NewRecorder())
var sshKeysAdded []string
var sshPeersRemoved []string
sshCtx, cancel := context.WithCancel(context.Background())
engine.sshServerFunc = func(hostKeyPEM []byte, addr string) (ssh.Server, error) {
return &ssh.MockServer{
Ctx: sshCtx,
StopFunc: func() error {
cancel()
return nil
},
StartFunc: func() error {
<-ctx.Done()
return ctx.Err()
},
AddAuthorizedKeyFunc: func(peer, newKey string) error {
sshKeysAdded = append(sshKeysAdded, newKey)
return nil
},
RemoveAuthorizedKeyFunc: func(peer string) {
sshPeersRemoved = append(sshPeersRemoved, peer)
},
}, nil
}
err = engine.Start()
if err != nil {
t.Fatal(err)
}
defer func() {
err := engine.Stop()
if err != nil {
return
}
}()
peerWithSSH := &mgmtProto.RemotePeerConfig{
WgPubKey: "MNHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.21/24"},
SshConfig: &mgmtProto.SSHConfig{
SshPubKey: []byte("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFATYCqaQw/9id1Qkq3n16JYhDhXraI6Pc1fgB8ynEfQ"),
},
}
// SSH server is not enabled so SSH config of a remote peer should be ignored
networkMap := &mgmtProto.NetworkMap{
Serial: 6,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{peerWithSSH},
RemotePeersIsEmpty: false,
}
err = engine.updateNetworkMap(networkMap)
if err != nil {
t.Fatal(err)
}
assert.Nil(t, engine.sshServer)
// SSH server is enabled, therefore SSH config should be applied
networkMap = &mgmtProto.NetworkMap{
Serial: 7,
PeerConfig: &mgmtProto.PeerConfig{Address: "100.64.0.1/24",
SshConfig: &mgmtProto.SSHConfig{SshEnabled: true}},
RemotePeers: []*mgmtProto.RemotePeerConfig{peerWithSSH},
RemotePeersIsEmpty: false,
}
err = engine.updateNetworkMap(networkMap)
if err != nil {
t.Fatal(err)
}
time.Sleep(250 * time.Millisecond)
assert.NotNil(t, engine.sshServer)
assert.Contains(t, sshKeysAdded, "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFATYCqaQw/9id1Qkq3n16JYhDhXraI6Pc1fgB8ynEfQ")
// now remove peer
networkMap = &mgmtProto.NetworkMap{
Serial: 8,
RemotePeers: []*mgmtProto.RemotePeerConfig{},
RemotePeersIsEmpty: false,
}
err = engine.updateNetworkMap(networkMap)
if err != nil {
t.Fatal(err)
}
//time.Sleep(250 * time.Millisecond)
assert.NotNil(t, engine.sshServer)
assert.Contains(t, sshPeersRemoved, "MNHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=")
// now disable SSH server
networkMap = &mgmtProto.NetworkMap{
Serial: 9,
PeerConfig: &mgmtProto.PeerConfig{Address: "100.64.0.1/24",
SshConfig: &mgmtProto.SSHConfig{SshEnabled: false}},
RemotePeers: []*mgmtProto.RemotePeerConfig{peerWithSSH},
RemotePeersIsEmpty: false,
}
err = engine.updateNetworkMap(networkMap)
if err != nil {
t.Fatal(err)
}
assert.Nil(t, engine.sshServer)
}
func TestEngine_UpdateNetworkMap(t *testing.T) {
// test setup
key, err := wgtypes.GeneratePrivateKey()
if err != nil {
t.Fatal(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
WgIfaceName: "utun102",
WgAddr: "100.64.0.1/24",
WgPrivateKey: key,
WgPort: 33100,
}, nbstatus.NewRecorder())
engine.wgInterface, err = iface.NewWGIFace("utun102", "100.64.0.1/24", iface.DefaultMTU)
engine.routeManager = routemanager.NewManager(ctx, key.PublicKey().String(), engine.wgInterface, engine.statusRecorder)
type testCase struct {
name string
networkMap *mgmtProto.NetworkMap
expectedLen int
expectedPeers []*mgmtProto.RemotePeerConfig
expectedSerial uint64
}
peer1 := &mgmtProto.RemotePeerConfig{
WgPubKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.10/24"},
}
peer2 := &mgmtProto.RemotePeerConfig{
WgPubKey: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.11/24"},
}
peer3 := &mgmtProto.RemotePeerConfig{
WgPubKey: "GGHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.12/24"},
}
modifiedPeer3 := &mgmtProto.RemotePeerConfig{
WgPubKey: "GGHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.20/24"},
}
case1 := testCase{
name: "input with a new peer to add",
networkMap: &mgmtProto.NetworkMap{
Serial: 1,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{
peer1,
},
RemotePeersIsEmpty: false,
},
expectedLen: 1,
expectedPeers: []*mgmtProto.RemotePeerConfig{peer1},
expectedSerial: 1,
}
// 2nd case - one extra peer added and network map has CurrentSerial greater than local => apply the update
case2 := testCase{
name: "input with an old peer and a new peer to add",
networkMap: &mgmtProto.NetworkMap{
Serial: 2,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{
peer1, peer2,
},
RemotePeersIsEmpty: false,
},
expectedLen: 2,
expectedPeers: []*mgmtProto.RemotePeerConfig{peer1, peer2},
expectedSerial: 2,
}
case3 := testCase{
name: "input with outdated (old) update to ignore",
networkMap: &mgmtProto.NetworkMap{
Serial: 0,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{
peer1, peer2, peer3,
},
RemotePeersIsEmpty: false,
},
expectedLen: 2,
expectedPeers: []*mgmtProto.RemotePeerConfig{peer1, peer2},
expectedSerial: 2,
}
case4 := testCase{
name: "input with one peer to remove and one new to add",
networkMap: &mgmtProto.NetworkMap{
Serial: 4,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{
peer2, peer3,
},
RemotePeersIsEmpty: false,
},
expectedLen: 2,
expectedPeers: []*mgmtProto.RemotePeerConfig{peer2, peer3},
expectedSerial: 4,
}
case5 := testCase{
name: "input with one peer to modify",
networkMap: &mgmtProto.NetworkMap{
Serial: 4,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{
modifiedPeer3, peer2,
},
RemotePeersIsEmpty: false,
},
expectedLen: 2,
expectedPeers: []*mgmtProto.RemotePeerConfig{peer2, modifiedPeer3},
expectedSerial: 4,
}
case6 := testCase{
name: "input with all peers to remove",
networkMap: &mgmtProto.NetworkMap{
Serial: 5,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{},
RemotePeersIsEmpty: true,
},
expectedLen: 0,
expectedPeers: nil,
expectedSerial: 5,
}
for _, c := range []testCase{case1, case2, case3, case4, case5, case6} {
t.Run(c.name, func(t *testing.T) {
err = engine.updateNetworkMap(c.networkMap)
if err != nil {
t.Fatal(err)
return
}
if len(engine.peerConns) != c.expectedLen {
t.Errorf("expecting Engine.peerConns to be of size %d, got %d", c.expectedLen, len(engine.peerConns))
}
if engine.networkSerial != c.expectedSerial {
t.Errorf("expecting Engine.networkSerial to be equal to %d, actual %d", c.expectedSerial, engine.networkSerial)
}
for _, p := range c.expectedPeers {
conn, ok := engine.peerConns[p.GetWgPubKey()]
if !ok {
t.Errorf("expecting Engine.peerConns to contain peer %s", p)
}
expectedAllowedIPs := strings.Join(p.AllowedIps, ",")
if conn.GetConf().ProxyConfig.AllowedIps != expectedAllowedIPs {
t.Errorf("expecting peer %s to have AllowedIPs= %s, got %s", p.GetWgPubKey(),
expectedAllowedIPs, conn.GetConf().ProxyConfig.AllowedIps)
}
}
})
}
}
func TestEngine_Sync(t *testing.T) {
key, err := wgtypes.GeneratePrivateKey()
if err != nil {
t.Fatal(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// feed updates to Engine via mocked Management client
updates := make(chan *mgmtProto.SyncResponse)
defer close(updates)
syncFunc := func(msgHandler func(msg *mgmtProto.SyncResponse) error) error {
for msg := range updates {
err := msgHandler(msg)
if err != nil {
t.Fatal(err)
}
}
return nil
}
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, &EngineConfig{
WgIfaceName: "utun103",
WgAddr: "100.64.0.1/24",
WgPrivateKey: key,
WgPort: 33100,
}, nbstatus.NewRecorder())
defer func() {
err := engine.Stop()
if err != nil {
return
}
}()
err = engine.Start()
if err != nil {
t.Fatal(err)
return
}
peer1 := &mgmtProto.RemotePeerConfig{
WgPubKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
AllowedIps: []string{"100.64.0.10/24"},
}
peer2 := &mgmtProto.RemotePeerConfig{
WgPubKey: "LLHf3Ma6z6mdLbriAJbqhX9+nM/B71lgw2+91q3LlhU=",
AllowedIps: []string{"100.64.0.11/24"},
}
peer3 := &mgmtProto.RemotePeerConfig{
WgPubKey: "GGHf3Ma6z6mdLbriAJbqhX9+nM/B71lgw2+91q3LlhU=",
AllowedIps: []string{"100.64.0.12/24"},
}
// 1st update with 3 peers and serial larger than the current serial of the engine => apply update
updates <- &mgmtProto.SyncResponse{
NetworkMap: &mgmtProto.NetworkMap{
Serial: 10,
PeerConfig: nil,
RemotePeers: []*mgmtProto.RemotePeerConfig{peer1, peer2, peer3},
RemotePeersIsEmpty: false,
},
}
timeout := time.After(time.Second * 2)
for {
select {
case <-timeout:
t.Fatalf("timeout while waiting for test to finish")
return
default:
}
if len(engine.GetPeers()) == 3 && engine.networkSerial == 10 {
break
}
}
}
func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
testCases := []struct {
name string
inputErr error
networkMap *mgmtProto.NetworkMap
expectedLen int
expectedRoutes []*route.Route
expectedSerial uint64
}{
{
name: "Routes Update Should Be Passed To Manager",
networkMap: &mgmtProto.NetworkMap{
Serial: 1,
PeerConfig: nil,
RemotePeersIsEmpty: false,
Routes: []*mgmtProto.Route{
{
ID: "a",
Network: "192.168.0.0/24",
NetID: "n1",
Peer: "p1",
NetworkType: 1,
Masquerade: false,
},
{
ID: "b",
Network: "192.168.1.0/24",
NetID: "n2",
Peer: "p1",
NetworkType: 1,
Masquerade: false,
},
},
},
expectedLen: 2,
expectedRoutes: []*route.Route{
{
ID: "a",
Network: netip.MustParsePrefix("192.168.0.0/24"),
NetID: "n1",
Peer: "p1",
NetworkType: 1,
Masquerade: false,
},
{
ID: "b",
Network: netip.MustParsePrefix("192.168.1.0/24"),
NetID: "n2",
Peer: "p1",
NetworkType: 1,
Masquerade: false,
},
},
expectedSerial: 1,
},
{
name: "Empty Routes Update Should Be Passed",
networkMap: &mgmtProto.NetworkMap{
Serial: 1,
PeerConfig: nil,
RemotePeersIsEmpty: false,
Routes: nil,
},
expectedLen: 0,
expectedRoutes: []*route.Route{},
expectedSerial: 1,
},
{
name: "Error Shouldn't Break Engine",
inputErr: fmt.Errorf("mocking error"),
networkMap: &mgmtProto.NetworkMap{
Serial: 1,
PeerConfig: nil,
RemotePeersIsEmpty: false,
Routes: nil,
},
expectedLen: 0,
expectedRoutes: []*route.Route{},
expectedSerial: 1,
},
}
for n, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
// test setup
key, err := wgtypes.GeneratePrivateKey()
if err != nil {
t.Fatal(err)
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wgIfaceName := fmt.Sprintf("utun%d", 104+n)
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
WgIfaceName: wgIfaceName,
WgAddr: wgAddr,
WgPrivateKey: key,
WgPort: 33100,
}, nbstatus.NewRecorder())
engine.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, iface.DefaultMTU)
assert.NoError(t, err, "shouldn't return error")
input := struct {
inputSerial uint64
inputRoutes []*route.Route
}{}
mockRouteManager := &routemanager.MockManager{
UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) error {
input.inputSerial = updateSerial
input.inputRoutes = newRoutes
return testCase.inputErr
},
}
engine.routeManager = mockRouteManager
defer func() {
exitErr := engine.Stop()
if exitErr != nil {
return
}
}()
err = engine.updateNetworkMap(testCase.networkMap)
assert.NoError(t, err, "shouldn't return error")
assert.Equal(t, testCase.expectedSerial, input.inputSerial, "serial should match")
assert.Len(t, input.inputRoutes, testCase.expectedLen, "routes len should match")
assert.Equal(t, testCase.expectedRoutes, input.inputRoutes, "routes should match")
})
}
}
func TestEngine_MultiplePeers(t *testing.T) {
// log.SetLevel(log.DebugLevel)
dir := t.TempDir()
err := util.CopyFileContents("../testdata/store.json", filepath.Join(dir, "store.json"))
if err != nil {
t.Fatal(err)
}
defer func() {
err = os.Remove(filepath.Join(dir, "store.json")) //nolint
if err != nil {
t.Fatal(err)
return
}
}()
ctx, cancel := context.WithCancel(CtxInitState(context.Background()))
defer cancel()
sport := 10010
sigServer, err := startSignal(sport)
if err != nil {
t.Fatal(err)
return
}
defer sigServer.Stop()
mport := 33081
mgmtServer, err := startManagement(mport, dir)
if err != nil {
t.Fatal(err)
return
}
defer mgmtServer.GracefulStop()
setupKey := "A2C8E62B-38F5-4553-B31E-DD66C696CEBB"
mu := sync.Mutex{}
engines := []*Engine{}
numPeers := 10
wg := sync.WaitGroup{}
wg.Add(numPeers)
// create and start peers
for i := 0; i < numPeers; i++ {
j := i
go func() {
engine, err := createEngine(ctx, cancel, setupKey, j, mport, sport)
if err != nil {
wg.Done()
t.Errorf("unable to create the engine for peer %d with error %v", j, err)
return
}
mu.Lock()
defer mu.Unlock()
err = engine.Start()
if err != nil {
t.Errorf("unable to start engine for peer %d with error %v", j, err)
wg.Done()
return
}
engines = append(engines, engine)
wg.Done()
}()
}
// wait until all have been created and started
wg.Wait()
if len(engines) != numPeers {
t.Fatal("not all peers was started")
}
// check whether all the peers have the expected number of connected peers
expectedConnected := numPeers * (numPeers - 1)
// adjust according to timeouts
timeout := 50 * time.Second
timeoutChan := time.After(timeout)
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
loop:
for {
select {
case <-timeoutChan:
t.Fatalf("waiting for expected connections timeout after %s", timeout.String())
break loop
case <-ticker.C:
totalConnected := 0
for _, engine := range engines {
totalConnected = totalConnected + len(engine.GetConnectedPeers())
}
if totalConnected == expectedConnected {
log.Infof("total connected=%d", totalConnected)
break loop
}
log.Infof("total connected=%d", totalConnected)
}
}
// cleanup test
for n, peerEngine := range engines {
t.Logf("stopping peer with interface %s from multipeer test, loopIndex %d", peerEngine.wgInterface.Name, n)
errStop := peerEngine.mgmClient.Close()
if errStop != nil {
log.Infoln("got error trying to close management clients from engine: ", errStop)
}
errStop = peerEngine.Stop()
if errStop != nil {
log.Infoln("got error trying to close testing peers engine: ", errStop)
}
}
}
func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey string, i int, mport int, sport int) (*Engine, error) {
key, err := wgtypes.GeneratePrivateKey()
if err != nil {
return nil, err
}
mgmtClient, err := mgmt.NewClient(ctx, fmt.Sprintf("localhost:%d", mport), key, false)
if err != nil {
return nil, err
}
signalClient, err := signal.NewClient(ctx, fmt.Sprintf("localhost:%d", sport), key, false)
if err != nil {
return nil, err
}
publicKey, err := mgmtClient.GetServerPublicKey()
if err != nil {
return nil, err
}
info := system.GetInfo(ctx)
resp, err := mgmtClient.Register(*publicKey, setupKey, "", info, nil)
if err != nil {
return nil, err
}
var ifaceName string
if runtime.GOOS == "darwin" {
ifaceName = fmt.Sprintf("utun1%d", i)
} else {
ifaceName = fmt.Sprintf("wt%d", i)
}
wgPort := 33100 + i
conf := &EngineConfig{
WgIfaceName: ifaceName,
WgAddr: resp.PeerConfig.Address,
WgPrivateKey: key,
WgPort: wgPort,
}
return NewEngine(ctx, cancel, signalClient, mgmtClient, conf, nbstatus.NewRecorder()), nil
}
func startSignal(port int) (*grpc.Server, error) {
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
proto.RegisterSignalExchangeServer(s, signalServer.NewServer())
go func() {
if err = s.Serve(lis); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}()
return s, nil
}
func startManagement(port int, dataDir string) (*grpc.Server, error) {
config := &server.Config{
Stuns: []*server.Host{},
TURNConfig: &server.TURNConfig{},
Signal: &server.Host{
Proto: "http",
URI: "localhost:10000",
},
Datadir: dataDir,
HttpConfig: nil,
}
lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
if err != nil {
return nil, err
}
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
store, err := server.NewStore(config.Datadir)
if err != nil {
log.Fatalf("failed creating a store: %s: %v", config.Datadir, err)
}
peersUpdateManager := server.NewPeersUpdateManager()
accountManager, err := server.BuildManager(store, peersUpdateManager, nil)
if err != nil {
return nil, err
}
turnManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig)
mgmtServer, err := server.NewServer(config, accountManager, peersUpdateManager, turnManager)
if err != nil {
return nil, err
}
mgmtProto.RegisterManagementServiceServer(s, mgmtServer)
go func() {
if err = s.Serve(lis); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}()
return s, nil
}
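TestEngine_Sync above drives the engine through a mocked Sync stream: the mock just drains a channel and hands every message to the engine's handler. The same pattern, reduced to a runnable sketch with a hypothetical message type (not the real SyncResponse):

package main

import "fmt"

type syncResponse struct{ serial uint64 }

func main() {
	updates := make(chan *syncResponse)
	done := make(chan struct{})

	// stand-in for the engine's network map handler
	handler := func(msg *syncResponse) error {
		fmt.Println("applied network map serial", msg.serial)
		return nil
	}

	// the mocked Sync call: block on the channel and forward each update
	go func() {
		defer close(done)
		for msg := range updates {
			if err := handler(msg); err != nil {
				return
			}
		}
	}()

	updates <- &syncResponse{serial: 10}
	close(updates)
	<-done
}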

View File

@@ -1,104 +0,0 @@
package internal
import (
"context"
"github.com/google/uuid"
"github.com/netbirdio/netbird/client/ssh"
"github.com/netbirdio/netbird/client/system"
mgm "github.com/netbirdio/netbird/management/client"
mgmProto "github.com/netbirdio/netbird/management/proto"
log "github.com/sirupsen/logrus"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func Login(ctx context.Context, config *Config, setupKey string, jwtToken string) error {
// validate our peer's Wireguard PRIVATE key
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
if err != nil {
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
return err
}
var mgmTlsEnabled bool
if config.ManagementURL.Scheme == "https" {
mgmTlsEnabled = true
}
log.Debugf("connecting to the Management service %s", config.ManagementURL.String())
mgmClient, err := mgm.NewClient(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
if err != nil {
log.Errorf("failed connecting to the Management service %s %v", config.ManagementURL.String(), err)
return err
}
log.Debugf("connected to the Management service %s", config.ManagementURL.String())
defer func() {
err = mgmClient.Close()
if err != nil {
log.Warnf("failed to close the Management service client %v", err)
}
}()
serverKey, err := mgmClient.GetServerPublicKey()
if err != nil {
log.Errorf("failed while getting Management Service public key: %v", err)
return err
}
pubSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey))
if err != nil {
return err
}
_, err = loginPeer(ctx, *serverKey, mgmClient, setupKey, jwtToken, pubSSHKey)
if err != nil {
log.Errorf("failed logging-in peer on Management Service : %v", err)
return err
}
log.Infof("peer has successfully logged-in to the Management service %s", config.ManagementURL.String())
err = mgmClient.Close()
if err != nil {
log.Errorf("failed to close the Management service client: %v", err)
return err
}
return nil
}
// loginPeer attempts to log in to the Management Service. If the peer wasn't registered, it tries the registration flow.
func loginPeer(ctx context.Context, serverPublicKey wgtypes.Key, client *mgm.GrpcClient, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) {
sysInfo := system.GetInfo(ctx)
loginResp, err := client.Login(serverPublicKey, sysInfo, pubSSHKey)
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.PermissionDenied {
log.Debugf("peer registration required")
return registerPeer(ctx, serverPublicKey, client, setupKey, jwtToken, pubSSHKey)
} else {
return nil, err
}
}
return loginResp, nil
}
// registerPeer validates the setup key provided via the command line (or relies on a JWT token if no key was given)
// and registers the peer with the Management Service.
func registerPeer(ctx context.Context, serverPublicKey wgtypes.Key, client *mgm.GrpcClient, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) {
validSetupKey, err := uuid.Parse(setupKey)
if err != nil && jwtToken == "" {
return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err)
}
log.Debugf("sending peer registration request to Management Service")
info := system.GetInfo(ctx)
loginResp, err := client.Register(serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey)
if err != nil {
log.Errorf("failed registering peer %v,%s", err, validSetupKey.String())
return nil, err
}
log.Infof("peer has been successfully registered on Management Service")
return loginResp, nil
}
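loginPeer above only falls back to registration when the Management Service rejects the login with codes.PermissionDenied; every other error is returned as-is. A minimal sketch of that decision with placeholder client calls (not the real gRPC client):

package main

import (
	"errors"
	"fmt"
)

// errPermissionDenied stands in for a gRPC status with codes.PermissionDenied.
var errPermissionDenied = errors.New("permission denied")

func login() error    { return errPermissionDenied }
func register() error { return nil }

// loginOrRegister mirrors the loginPeer flow: a denied login means the peer
// is not registered yet, so the registration flow is attempted instead.
func loginOrRegister() error {
	err := login()
	if errors.Is(err, errPermissionDenied) {
		fmt.Println("peer registration required")
		return register()
	}
	return err
}

func main() {
	if err := loginOrRegister(); err != nil {
		fmt.Println("login failed:", err)
		return
	}
	fmt.Println("peer logged in")
}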

View File

@@ -1,278 +0,0 @@
package internal
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"strings"
"time"
)
// OAuthClient is an OAuth client interface for various IdP providers
type OAuthClient interface {
RequestDeviceCode(ctx context.Context) (DeviceAuthInfo, error)
WaitToken(ctx context.Context, info DeviceAuthInfo) (TokenInfo, error)
GetClientID(ctx context.Context) string
}
// HTTPClient http client interface for API calls
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
// DeviceAuthInfo holds information for the OAuth device login flow
type DeviceAuthInfo struct {
DeviceCode string `json:"device_code"`
UserCode string `json:"user_code"`
VerificationURI string `json:"verification_uri"`
VerificationURIComplete string `json:"verification_uri_complete"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
}
// TokenInfo holds information of issued access token
type TokenInfo struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
IDToken string `json:"id_token"`
TokenType string `json:"token_type"`
ExpiresIn int `json:"expires_in"`
}
// HostedGrantType grant type for device flow on Hosted
const (
HostedGrantType = "urn:ietf:params:oauth:grant-type:device_code"
HostedRefreshGrant = "refresh_token"
)
// Hosted client
type Hosted struct {
// Hosted API Audience for validation
Audience string
// Hosted Native application client id
ClientID string
// TokenEndpoint to request access token
TokenEndpoint string
// DeviceAuthEndpoint to request device authorization code
DeviceAuthEndpoint string
HTTPClient HTTPClient
}
// RequestDeviceCodePayload is the request payload for the auth0 device code endpoint
type RequestDeviceCodePayload struct {
Audience string `json:"audience"`
ClientID string `json:"client_id"`
}
// TokenRequestPayload used for requesting the auth0 token
type TokenRequestPayload struct {
GrantType string `json:"grant_type"`
DeviceCode string `json:"device_code,omitempty"`
ClientID string `json:"client_id"`
RefreshToken string `json:"refresh_token,omitempty"`
}
// TokenRequestResponse used for parsing Hosted token's response
type TokenRequestResponse struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
TokenInfo
}
// Claims used when validating the access token
type Claims struct {
Audience interface{} `json:"aud"`
}
// NewHostedDeviceFlow returns a Hosted OAuth client
func NewHostedDeviceFlow(audience string, clientID string, tokenEndpoint string, deviceAuthEndpoint string) *Hosted {
httpTransport := http.DefaultTransport.(*http.Transport).Clone()
httpTransport.MaxIdleConns = 5
httpClient := &http.Client{
Timeout: 10 * time.Second,
Transport: httpTransport,
}
return &Hosted{
Audience: audience,
ClientID: clientID,
TokenEndpoint: tokenEndpoint,
HTTPClient: httpClient,
DeviceAuthEndpoint: deviceAuthEndpoint,
}
}
// GetClientID returns the provider client id
func (h *Hosted) GetClientID(ctx context.Context) string {
return h.ClientID
}
// RequestDeviceCode requests a device code login flow information from Hosted
func (h *Hosted) RequestDeviceCode(ctx context.Context) (DeviceAuthInfo, error) {
form := url.Values{}
form.Add("client_id", h.ClientID)
form.Add("audience", h.Audience)
req, err := http.NewRequest("POST", h.DeviceAuthEndpoint,
strings.NewReader(form.Encode()))
if err != nil {
return DeviceAuthInfo{}, fmt.Errorf("creating request failed with error: %v", err)
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
res, err := h.HTTPClient.Do(req)
if err != nil {
return DeviceAuthInfo{}, fmt.Errorf("doing request failed with error: %v", err)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return DeviceAuthInfo{}, fmt.Errorf("reading body failed with error: %v", err)
}
if res.StatusCode != 200 {
return DeviceAuthInfo{}, fmt.Errorf("request device code returned status %d error: %s", res.StatusCode, string(body))
}
deviceCode := DeviceAuthInfo{}
err = json.Unmarshal(body, &deviceCode)
if err != nil {
return DeviceAuthInfo{}, fmt.Errorf("unmarshaling response failed with error: %v", err)
}
return deviceCode, err
}
func (h *Hosted) requestToken(info DeviceAuthInfo) (TokenRequestResponse, error) {
form := url.Values{}
form.Add("client_id", h.ClientID)
form.Add("grant_type", HostedGrantType)
form.Add("device_code", info.DeviceCode)
req, err := http.NewRequest("POST", h.TokenEndpoint, strings.NewReader(form.Encode()))
if err != nil {
return TokenRequestResponse{}, fmt.Errorf("failed to create request access token: %v", err)
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
res, err := h.HTTPClient.Do(req)
if err != nil {
return TokenRequestResponse{}, fmt.Errorf("failed to request access token with error: %v", err)
}
defer func() {
err := res.Body.Close()
if err != nil {
return
}
}()
body, err := io.ReadAll(res.Body)
if err != nil {
return TokenRequestResponse{}, fmt.Errorf("failed reading access token response body with error: %v", err)
}
if res.StatusCode > 499 {
return TokenRequestResponse{}, fmt.Errorf("access token response returned code: %s", string(body))
}
tokenResponse := TokenRequestResponse{}
err = json.Unmarshal(body, &tokenResponse)
if err != nil {
return TokenRequestResponse{}, fmt.Errorf("parsing token response failed with error: %v", err)
}
return tokenResponse, nil
}
// WaitToken waits for the user to log in and authorize the app. Once the user authorizes it,
// it retrieves the access token from Hosted's endpoint and validates it before returning
func (h *Hosted) WaitToken(ctx context.Context, info DeviceAuthInfo) (TokenInfo, error) {
interval := time.Duration(info.Interval) * time.Second
ticker := time.NewTicker(interval)
for {
select {
case <-ctx.Done():
return TokenInfo{}, ctx.Err()
case <-ticker.C:
tokenResponse, err := h.requestToken(info)
if err != nil {
return TokenInfo{}, fmt.Errorf("parsing token response failed with error: %v", err)
}
if tokenResponse.Error != "" {
if tokenResponse.Error == "authorization_pending" {
continue
} else if tokenResponse.Error == "slow_down" {
interval = interval + (3 * time.Second)
ticker.Reset(interval)
continue
}
return TokenInfo{}, fmt.Errorf(tokenResponse.ErrorDescription)
}
err = isValidAccessToken(tokenResponse.AccessToken, h.Audience)
if err != nil {
return TokenInfo{}, fmt.Errorf("validate access token failed with error: %v", err)
}
tokenInfo := TokenInfo{
AccessToken: tokenResponse.AccessToken,
TokenType: tokenResponse.TokenType,
RefreshToken: tokenResponse.RefreshToken,
IDToken: tokenResponse.IDToken,
ExpiresIn: tokenResponse.ExpiresIn,
}
return tokenInfo, err
}
}
}
// isValidAccessToken is a simple validation of the access token
func isValidAccessToken(token string, audience string) error {
if token == "" {
return fmt.Errorf("token received is empty")
}
encodedClaims := strings.Split(token, ".")[1]
claimsString, err := base64.RawURLEncoding.DecodeString(encodedClaims)
if err != nil {
return err
}
claims := Claims{}
err = json.Unmarshal(claimsString, &claims)
if err != nil {
return err
}
if claims.Audience == nil {
return fmt.Errorf("required token field audience is absent")
}
// Audience claim of JWT can be a string or an array of strings
typ := reflect.TypeOf(claims.Audience)
switch typ.Kind() {
case reflect.String:
if claims.Audience == audience {
return nil
}
case reflect.Slice:
for _, aud := range claims.Audience.([]interface{}) {
if audience == aud {
return nil
}
}
}
return fmt.Errorf("invalid JWT token audience field")
}
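WaitToken above follows the usual OAuth device-flow polling rules: keep polling while the response says authorization_pending, stretch the interval on slow_down, and stop on a token or any other error. A compressed, runnable sketch against a canned sequence of responses (millisecond intervals only to keep the example fast):

package main

import (
	"fmt"
	"time"
)

type tokenResult struct {
	Err   string
	Token string
}

func main() {
	// canned responses from a hypothetical token endpoint
	responses := []tokenResult{
		{Err: "authorization_pending"},
		{Err: "slow_down"},
		{Token: "token-123"},
	}
	i := 0
	requestToken := func() tokenResult { r := responses[i]; i++; return r }

	interval := 10 * time.Millisecond
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for range ticker.C {
		res := requestToken()
		switch res.Err {
		case "authorization_pending":
			continue // the user hasn't approved yet, keep polling
		case "slow_down":
			interval += 3 * time.Millisecond // the provider asked us to back off
			ticker.Reset(interval)
			continue
		}
		fmt.Println("received", res.Token)
		return
	}
}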

View File

@@ -1,295 +0,0 @@
package internal
import (
"context"
"fmt"
"github.com/golang-jwt/jwt"
"github.com/stretchr/testify/require"
"io"
"net/http"
"net/url"
"strings"
"testing"
"time"
)
type mockHTTPClient struct {
code int
resBody string
reqBody string
MaxReqs int
count int
countResBody string
err error
}
func (c *mockHTTPClient) Do(req *http.Request) (*http.Response, error) {
body, err := io.ReadAll(req.Body)
if err == nil {
c.reqBody = string(body)
}
if c.MaxReqs > c.count {
c.count++
return &http.Response{
StatusCode: c.code,
Body: io.NopCloser(strings.NewReader(c.countResBody)),
}, c.err
}
return &http.Response{
StatusCode: c.code,
Body: io.NopCloser(strings.NewReader(c.resBody)),
}, c.err
}
func TestHosted_RequestDeviceCode(t *testing.T) {
type test struct {
name string
inputResBody string
inputReqCode int
inputReqError error
testingErrFunc require.ErrorAssertionFunc
expectedErrorMSG string
testingFunc require.ComparisonAssertionFunc
expectedOut DeviceAuthInfo
expectedMSG string
expectPayload string
}
expectedAudience := "ok"
expectedClientID := "bla"
form := url.Values{}
form.Add("audience", expectedAudience)
form.Add("client_id", expectedClientID)
expectPayload := form.Encode()
testCase1 := test{
name: "Payload Is Valid",
expectPayload: expectPayload,
inputReqCode: 200,
testingErrFunc: require.Error,
testingFunc: require.EqualValues,
}
testCase2 := test{
name: "Exit On Network Error",
inputReqError: fmt.Errorf("error"),
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
expectPayload: expectPayload,
}
testCase3 := test{
name: "Exit On Exit Code",
inputReqCode: 400,
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
expectPayload: expectPayload,
}
testCase4Out := DeviceAuthInfo{ExpiresIn: 10}
testCase4 := test{
name: "Got Device Code",
inputResBody: fmt.Sprintf("{\"expires_in\":%d}", testCase4Out.ExpiresIn),
expectPayload: expectPayload,
inputReqCode: 200,
testingErrFunc: require.NoError,
testingFunc: require.EqualValues,
expectedOut: testCase4Out,
expectedMSG: "out should match",
}
for _, testCase := range []test{testCase1, testCase2, testCase3, testCase4} {
t.Run(testCase.name, func(t *testing.T) {
httpClient := mockHTTPClient{
resBody: testCase.inputResBody,
code: testCase.inputReqCode,
err: testCase.inputReqError,
}
hosted := Hosted{
Audience: expectedAudience,
ClientID: expectedClientID,
TokenEndpoint: "test.hosted.com/token",
DeviceAuthEndpoint: "test.hosted.com/device/auth",
HTTPClient: &httpClient,
}
authInfo, err := hosted.RequestDeviceCode(context.TODO())
testCase.testingErrFunc(t, err, testCase.expectedErrorMSG)
require.EqualValues(t, expectPayload, httpClient.reqBody, "payload should match")
testCase.testingFunc(t, testCase.expectedOut, authInfo, testCase.expectedMSG)
})
}
}
func TestHosted_WaitToken(t *testing.T) {
type test struct {
name string
inputResBody string
inputReqCode int
inputReqError error
inputMaxReqs int
inputCountResBody string
inputTimeout time.Duration
inputInfo DeviceAuthInfo
inputAudience string
testingErrFunc require.ErrorAssertionFunc
expectedErrorMSG string
testingFunc require.ComparisonAssertionFunc
expectedOut TokenInfo
expectedMSG string
expectPayload string
}
defaultInfo := DeviceAuthInfo{
DeviceCode: "test",
ExpiresIn: 10,
Interval: 1,
}
clientID := "test"
form := url.Values{}
form.Add("grant_type", HostedGrantType)
form.Add("device_code", defaultInfo.DeviceCode)
form.Add("client_id", clientID)
tokenReqPayload := form.Encode()
testCase1 := test{
name: "Payload Is Valid",
inputInfo: defaultInfo,
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputReqCode: 200,
testingErrFunc: require.Error,
testingFunc: require.EqualValues,
expectPayload: tokenReqPayload,
}
testCase2 := test{
name: "Exit On Network Error",
inputInfo: defaultInfo,
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
expectPayload: tokenReqPayload,
inputReqError: fmt.Errorf("error"),
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
}
testCase3 := test{
name: "Exit On 4XX When Not Pending",
inputInfo: defaultInfo,
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputReqCode: 400,
expectPayload: tokenReqPayload,
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
}
testCase4 := test{
name: "Exit On Exit Code 5XX",
inputInfo: defaultInfo,
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputReqCode: 500,
expectPayload: tokenReqPayload,
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
}
testCase5 := test{
name: "Exit On Content Timeout",
inputInfo: defaultInfo,
inputTimeout: 0 * time.Second,
testingErrFunc: require.Error,
expectedErrorMSG: "should return error",
testingFunc: require.EqualValues,
}
audience := "test"
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"aud": audience})
var hmacSampleSecret []byte
tokenString, _ := token.SignedString(hmacSampleSecret)
testCase6 := test{
name: "Exit On Invalid Audience",
inputInfo: defaultInfo,
inputResBody: fmt.Sprintf("{\"access_token\":\"%s\"}", tokenString),
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputReqCode: 200,
inputAudience: "super test",
testingErrFunc: require.Error,
testingFunc: require.EqualValues,
expectPayload: tokenReqPayload,
}
testCase7 := test{
name: "Received Token Info",
inputInfo: defaultInfo,
inputResBody: fmt.Sprintf("{\"access_token\":\"%s\"}", tokenString),
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputReqCode: 200,
inputAudience: audience,
testingErrFunc: require.NoError,
testingFunc: require.EqualValues,
expectPayload: tokenReqPayload,
expectedOut: TokenInfo{AccessToken: tokenString},
}
testCase8 := test{
name: "Received Token Info after Multiple tries",
inputInfo: defaultInfo,
inputResBody: fmt.Sprintf("{\"access_token\":\"%s\"}", tokenString),
inputTimeout: time.Duration(defaultInfo.ExpiresIn) * time.Second,
inputMaxReqs: 2,
inputCountResBody: "{\"error\":\"authorization_pending\"}",
inputReqCode: 200,
inputAudience: audience,
testingErrFunc: require.NoError,
testingFunc: require.EqualValues,
expectPayload: tokenReqPayload,
expectedOut: TokenInfo{AccessToken: tokenString},
}
for _, testCase := range []test{testCase1, testCase2, testCase3, testCase4, testCase5, testCase6, testCase7, testCase8} {
t.Run(testCase.name, func(t *testing.T) {
httpClient := mockHTTPClient{
resBody: testCase.inputResBody,
code: testCase.inputReqCode,
err: testCase.inputReqError,
MaxReqs: testCase.inputMaxReqs,
countResBody: testCase.inputCountResBody,
}
hosted := Hosted{
Audience: testCase.inputAudience,
ClientID: clientID,
TokenEndpoint: "test.hosted.com/token",
DeviceAuthEndpoint: "test.hosted.com/device/auth",
HTTPClient: &httpClient,
}
ctx, cancel := context.WithTimeout(context.TODO(), testCase.inputTimeout)
defer cancel()
tokenInfo, err := hosted.WaitToken(ctx, testCase.inputInfo)
testCase.testingErrFunc(t, err, testCase.expectedErrorMSG)
require.EqualValues(t, testCase.expectPayload, httpClient.reqBody, "payload should match")
testCase.testingFunc(t, testCase.expectedOut, tokenInfo, testCase.expectedMSG)
require.GreaterOrEqualf(t, testCase.inputMaxReqs, httpClient.count, "should run %d times", testCase.inputMaxReqs)
})
}
}
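The audience handling exercised above exists because a JWT aud claim may be either a single string or an array of strings; isValidAccessToken switches on the decoded claim's kind for exactly that reason. A standalone sketch of the same check applied to raw JSON claims:

package main

import (
	"encoding/json"
	"fmt"
)

// audienceMatches reports whether the JSON claims contain the expected
// audience, accepting both the string and the array form of "aud".
func audienceMatches(rawClaims, audience string) bool {
	var claims struct {
		Audience interface{} `json:"aud"`
	}
	if err := json.Unmarshal([]byte(rawClaims), &claims); err != nil {
		return false
	}
	switch aud := claims.Audience.(type) {
	case string:
		return aud == audience
	case []interface{}:
		for _, a := range aud {
			if a == audience {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(audienceMatches(`{"aud":"api"}`, "api"))           // true
	fmt.Println(audienceMatches(`{"aud":["api","other"]}`, "api")) // true
	fmt.Println(audienceMatches(`{"aud":["other"]}`, "api"))       // false
}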

View File

@@ -1,593 +0,0 @@
package peer
import (
"context"
nbStatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/iface"
"golang.zx2c4.com/wireguard/wgctrl"
"net"
"strings"
"sync"
"time"
"github.com/netbirdio/netbird/client/internal/proxy"
"github.com/pion/ice/v2"
log "github.com/sirupsen/logrus"
)
// ConnConfig is a peer Connection configuration
type ConnConfig struct {
// Key is a public key of a remote peer
Key string
// LocalKey is a public key of a local peer
LocalKey string
// StunTurn is a list of STUN and TURN URLs
StunTurn []*ice.URL
// InterfaceBlackList is a list of machine interfaces that should be filtered out by ICE Candidate gathering
// (e.g. if eth0 is in the list, host candidate of this interface won't be used)
InterfaceBlackList []string
Timeout time.Duration
ProxyConfig proxy.Config
UDPMux ice.UDPMux
UDPMuxSrflx ice.UniversalUDPMux
LocalWgPort int
}
// OfferAnswer represents a session establishment offer or answer
type OfferAnswer struct {
IceCredentials IceCredentials
// WgListenPort is a remote WireGuard listen port.
// This field is used when establishing a direct WireGuard connection without any proxy.
// We can set the remote peer's endpoint with this port.
WgListenPort int
// Version of NetBird Agent
Version string
}
// IceCredentials ICE protocol credentials struct
type IceCredentials struct {
UFrag string
Pwd string
}
type Conn struct {
config ConnConfig
mu sync.Mutex
// signalCandidate is a handler function to signal remote peer about local connection candidate
signalCandidate func(candidate ice.Candidate) error
// signalOffer is a handler function to signal remote peer our connection offer (credentials)
signalOffer func(OfferAnswer) error
signalAnswer func(OfferAnswer) error
// remoteOffersCh is a channel used to wait for remote credentials to proceed with the connection
remoteOffersCh chan OfferAnswer
// remoteAnswerCh is a channel used to wait for remote credentials answer (confirmation of our offer) to proceed with the connection
remoteAnswerCh chan OfferAnswer
closeCh chan struct{}
ctx context.Context
notifyDisconnected context.CancelFunc
agent *ice.Agent
status ConnStatus
statusRecorder *nbStatus.Status
proxy proxy.Proxy
}
// GetConf returns the connection config
func (conn *Conn) GetConf() ConnConfig {
return conn.config
}
// UpdateConf updates the connection config
func (conn *Conn) UpdateConf(conf ConnConfig) {
conn.config = conf
}
// NewConn creates a new, not yet opened Conn to the remote peer.
// To establish a connection run Conn.Open
func NewConn(config ConnConfig, statusRecorder *nbStatus.Status) (*Conn, error) {
return &Conn{
config: config,
mu: sync.Mutex{},
status: StatusDisconnected,
closeCh: make(chan struct{}),
remoteOffersCh: make(chan OfferAnswer),
remoteAnswerCh: make(chan OfferAnswer),
statusRecorder: statusRecorder,
}, nil
}
// interfaceFilter is a function passed to ICE Agent to filter out disallowed interfaces
// to avoid building tunnel over them
func interfaceFilter(blackList []string) func(string) bool {
return func(iFace string) bool {
for _, s := range blackList {
if strings.HasPrefix(iFace, s) {
log.Debugf("ignoring interface %s - it is not allowed", iFace)
return false
}
}
// look for unlisted WireGuard interfaces
wg, err := wgctrl.New()
if err != nil {
log.Debugf("trying to create a wgctrl client failed with: %v", err)
}
defer func() {
err := wg.Close()
if err != nil {
return
}
}()
_, err = wg.Device(iFace)
return err != nil
}
}
func (conn *Conn) reCreateAgent() error {
conn.mu.Lock()
defer conn.mu.Unlock()
failedTimeout := 6 * time.Second
var err error
conn.agent, err = ice.NewAgent(&ice.AgentConfig{
MulticastDNSMode: ice.MulticastDNSModeDisabled,
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4},
Urls: conn.config.StunTurn,
CandidateTypes: []ice.CandidateType{ice.CandidateTypeServerReflexive, ice.CandidateTypeHost, ice.CandidateTypeRelay},
FailedTimeout: &failedTimeout,
InterfaceFilter: interfaceFilter(conn.config.InterfaceBlackList),
UDPMux: conn.config.UDPMux,
UDPMuxSrflx: conn.config.UDPMuxSrflx,
})
if err != nil {
return err
}
err = conn.agent.OnCandidate(conn.onICECandidate)
if err != nil {
return err
}
err = conn.agent.OnConnectionStateChange(conn.onICEConnectionStateChange)
if err != nil {
return err
}
err = conn.agent.OnSelectedCandidatePairChange(conn.onICESelectedCandidatePair)
if err != nil {
return err
}
return nil
}
// Open opens connection to the remote peer starting ICE candidate gathering process.
// Blocks until connection has been closed or connection timeout.
// ConnStatus will be set accordingly
func (conn *Conn) Open() error {
log.Debugf("trying to connect to peer %s", conn.config.Key)
peerState := nbStatus.PeerState{PubKey: conn.config.Key}
peerState.IP = strings.Split(conn.config.ProxyConfig.AllowedIps, "/")[0]
peerState.ConnStatusUpdate = time.Now()
peerState.ConnStatus = conn.status.String()
err := conn.statusRecorder.UpdatePeerState(peerState)
if err != nil {
log.Warnf("erro while updating the state of peer %s,err: %v", conn.config.Key, err)
}
defer func() {
err := conn.cleanup()
if err != nil {
log.Warnf("error while cleaning up peer connection %s: %v", conn.config.Key, err)
return
}
}()
err = conn.reCreateAgent()
if err != nil {
return err
}
err = conn.sendOffer()
if err != nil {
return err
}
log.Debugf("connection offer sent to peer %s, waiting for the confirmation", conn.config.Key)
// Only continue once we got a connection confirmation from the remote peer.
// The connection timeout could have happened before a confirmation received from the remote.
// The connection could have also been closed externally (e.g. when we received an update from the management that peer shouldn't be connected)
var remoteOfferAnswer OfferAnswer
select {
case remoteOfferAnswer = <-conn.remoteOffersCh:
// received confirmation from the remote peer -> ready to proceed
err = conn.sendAnswer()
if err != nil {
return err
}
case remoteOfferAnswer = <-conn.remoteAnswerCh:
case <-time.After(conn.config.Timeout):
return NewConnectionTimeoutError(conn.config.Key, conn.config.Timeout)
case <-conn.closeCh:
// closed externally
return NewConnectionClosedError(conn.config.Key)
}
log.Debugf("received connection confirmation from peer %s running version %s and with remote WireGuard listen port %d",
conn.config.Key, remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort)
// at this point we received offer/answer and we are ready to gather candidates
conn.mu.Lock()
conn.status = StatusConnecting
conn.ctx, conn.notifyDisconnected = context.WithCancel(context.Background())
defer conn.notifyDisconnected()
conn.mu.Unlock()
peerState = nbStatus.PeerState{PubKey: conn.config.Key}
peerState.ConnStatus = conn.status.String()
peerState.ConnStatusUpdate = time.Now()
err = conn.statusRecorder.UpdatePeerState(peerState)
if err != nil {
log.Warnf("erro while updating the state of peer %s,err: %v", conn.config.Key, err)
}
err = conn.agent.GatherCandidates()
if err != nil {
return err
}
// will block until connection succeeded
// but it won't release if ICE Agent went into Disconnected or Failed state,
// so we have to cancel it with the provided context once agent detected a broken connection
isControlling := conn.config.LocalKey > conn.config.Key
var remoteConn *ice.Conn
if isControlling {
remoteConn, err = conn.agent.Dial(conn.ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
} else {
remoteConn, err = conn.agent.Accept(conn.ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
}
if err != nil {
return err
}
// dynamically set the remote WireGuard port if the other side specified a different one from the default
remoteWgPort := iface.DefaultWgPort
if remoteOfferAnswer.WgListenPort != 0 {
remoteWgPort = remoteOfferAnswer.WgListenPort
}
// the ice connection has been established successfully so we are ready to start the proxy
err = conn.startProxy(remoteConn, remoteWgPort)
if err != nil {
return err
}
log.Infof("connected to peer %s [laddr <-> raddr] [%s <-> %s]", conn.config.Key, remoteConn.LocalAddr().String(), remoteConn.RemoteAddr().String())
// wait until connection disconnected or has been closed externally (upper layer, e.g. engine)
select {
case <-conn.closeCh:
// closed externally
return NewConnectionClosedError(conn.config.Key)
case <-conn.ctx.Done():
// disconnected from the remote peer
return NewConnectionDisconnectedError(conn.config.Key)
}
}
// shouldUseProxy determines whether the connection needs to go through the go proxy or a direct one (without it) is possible
// There are 3 cases: one of the peers has a public IP or both peers are in the same private network
// Please note, that this check happens when peers were already able to ping each other using ICE layer.
func shouldUseProxy(pair *ice.CandidatePair) bool {
remoteIP := net.ParseIP(pair.Remote.Address())
myIp := net.ParseIP(pair.Local.Address())
remoteIsPublic := IsPublicIP(remoteIP)
myIsPublic := IsPublicIP(myIp)
if pair.Local.Type() == ice.CandidateTypeRelay || pair.Remote.Type() == ice.CandidateTypeRelay {
return true
}
//one of the hosts has a public IP
if remoteIsPublic && pair.Remote.Type() == ice.CandidateTypeHost {
return false
}
if myIsPublic && pair.Local.Type() == ice.CandidateTypeHost {
return false
}
if pair.Local.Type() == ice.CandidateTypeHost && pair.Remote.Type() == ice.CandidateTypeHost {
if !remoteIsPublic && !myIsPublic {
//both hosts are in the same private network
return false
}
}
return true
}
// IsPublicIP indicates whether IP is public or not.
func IsPublicIP(ip net.IP) bool {
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsPrivate() {
return false
}
return true
}
// startProxy starts proxying traffic from/to local Wireguard and sets connection status to StatusConnected
func (conn *Conn) startProxy(remoteConn net.Conn, remoteWgPort int) error {
conn.mu.Lock()
defer conn.mu.Unlock()
var pair *ice.CandidatePair
pair, err := conn.agent.GetSelectedCandidatePair()
if err != nil {
return err
}
peerState := nbStatus.PeerState{PubKey: conn.config.Key}
var p proxy.Proxy
if pair.Local.Type() == ice.CandidateTypeRelay || pair.Remote.Type() == ice.CandidateTypeRelay {
p = proxy.NewWireguardProxy(conn.config.ProxyConfig)
peerState.Direct = false
} else {
p = proxy.NewNoProxy(conn.config.ProxyConfig, remoteWgPort)
peerState.Direct = true
}
conn.proxy = p
err = p.Start(remoteConn)
if err != nil {
return err
}
conn.status = StatusConnected
peerState.ConnStatus = conn.status.String()
peerState.ConnStatusUpdate = time.Now()
peerState.LocalIceCandidateType = pair.Local.Type().String()
peerState.RemoteIceCandidateType = pair.Remote.Type().String()
if pair.Local.Type() == ice.CandidateTypeRelay || pair.Remote.Type() == ice.CandidateTypeRelay {
peerState.Relayed = true
}
err = conn.statusRecorder.UpdatePeerState(peerState)
if err != nil {
log.Warnf("unable to save peer's state, got error: %v", err)
}
return nil
}
// cleanup closes all open resources and sets status to StatusDisconnected
func (conn *Conn) cleanup() error {
log.Debugf("trying to cleanup %s", conn.config.Key)
conn.mu.Lock()
defer conn.mu.Unlock()
if conn.agent != nil {
err := conn.agent.Close()
if err != nil {
return err
}
conn.agent = nil
}
if conn.proxy != nil {
err := conn.proxy.Close()
if err != nil {
return err
}
conn.proxy = nil
}
if conn.notifyDisconnected != nil {
conn.notifyDisconnected()
conn.notifyDisconnected = nil
}
conn.status = StatusDisconnected
peerState := nbStatus.PeerState{PubKey: conn.config.Key}
peerState.ConnStatus = conn.status.String()
peerState.ConnStatusUpdate = time.Now()
err := conn.statusRecorder.UpdatePeerState(peerState)
if err != nil {
// pretty common error because by that time Engine can already remove the peer and status won't be available.
//todo rethink status updates
log.Debugf("error while updating peer's %s state, err: %v", conn.config.Key, err)
}
log.Debugf("cleaned up connection to peer %s", conn.config.Key)
return nil
}
// SetSignalOffer sets a handler function to be triggered by Conn when a new connection offer has to be signalled to the remote peer
func (conn *Conn) SetSignalOffer(handler func(offer OfferAnswer) error) {
conn.signalOffer = handler
}
// SetSignalAnswer sets a handler function to be triggered by Conn when a new connection answer has to be signalled to the remote peer
func (conn *Conn) SetSignalAnswer(handler func(answer OfferAnswer) error) {
conn.signalAnswer = handler
}
// SetSignalCandidate sets a handler function to be triggered by Conn when a new ICE local connection candidate has to be signalled to the remote peer
func (conn *Conn) SetSignalCandidate(handler func(candidate ice.Candidate) error) {
conn.signalCandidate = handler
}
// onICECandidate is a callback attached to an ICE Agent to receive new local connection candidates
// and then signals them to the remote peer
func (conn *Conn) onICECandidate(candidate ice.Candidate) {
if candidate != nil {
log.Debugf("discovered local candidate %s", candidate.String())
go func() {
err := conn.signalCandidate(candidate)
if err != nil {
log.Errorf("failed signaling candidate to the remote peer %s %s", conn.config.Key, err)
}
}()
}
}
func (conn *Conn) onICESelectedCandidatePair(c1 ice.Candidate, c2 ice.Candidate) {
log.Debugf("selected candidate pair [local <-> remote] -> [%s <-> %s], peer %s", c1.String(), c2.String(),
conn.config.Key)
}
// onICEConnectionStateChange is a callback attached to an ICE Agent to track connection state changes
func (conn *Conn) onICEConnectionStateChange(state ice.ConnectionState) {
log.Debugf("peer %s ICE ConnectionState has changed to %s", conn.config.Key, state.String())
if state == ice.ConnectionStateFailed || state == ice.ConnectionStateDisconnected {
conn.notifyDisconnected()
}
}
func (conn *Conn) sendAnswer() error {
conn.mu.Lock()
defer conn.mu.Unlock()
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
if err != nil {
return err
}
log.Debugf("sending answer to %s", conn.config.Key)
err = conn.signalAnswer(OfferAnswer{
IceCredentials: IceCredentials{localUFrag, localPwd},
WgListenPort: conn.config.LocalWgPort,
Version: system.NetbirdVersion(),
})
if err != nil {
return err
}
return nil
}
// sendOffer prepares local user credentials and signals them to the remote peer
func (conn *Conn) sendOffer() error {
conn.mu.Lock()
defer conn.mu.Unlock()
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
if err != nil {
return err
}
err = conn.signalOffer(OfferAnswer{
IceCredentials: IceCredentials{localUFrag, localPwd},
WgListenPort: conn.config.LocalWgPort,
Version: system.NetbirdVersion(),
})
if err != nil {
return err
}
return nil
}
// Close closes this peer Conn issuing a close event to the Conn closeCh
func (conn *Conn) Close() error {
conn.mu.Lock()
defer conn.mu.Unlock()
select {
case conn.closeCh <- struct{}{}:
return nil
default:
// can happen when a peer was added and then removed right away, before the connection even started
// todo further investigate
// this really happens due to unordered messages coming from management
// more importantly it causes inconsistency -> 2 Conn objects for the same peer
// e.g. this flow:
// update from management has peers: [1,2,3,4]
// engine creates a Conn for peers: [1,2,3,4] and schedules Open in ~1sec
// before conn.Open() another update from management arrives with peers: [1,2,3]
// engine removes peer 4 and calls conn.Close() which does nothing (this default clause)
// before conn.Open() another update from management arrives with peers: [1,2,3,4,5]
// engine adds a new Conn for 4 and 5
// therefore peer 4 has 2 Conn objects
log.Warnf("connection has been already closed or attempted closing not started coonection %s", conn.config.Key)
return NewConnectionAlreadyClosed(conn.config.Key)
}
}
// Status returns current status of the Conn
func (conn *Conn) Status() ConnStatus {
conn.mu.Lock()
defer conn.mu.Unlock()
return conn.status
}
// OnRemoteOffer handles an offer from the remote peer and returns true if the message was accepted, false otherwise
// doesn't block, discards the message if connection wasn't ready
func (conn *Conn) OnRemoteOffer(offer OfferAnswer) bool {
log.Debugf("OnRemoteOffer from peer %s on status %s", conn.config.Key, conn.status.String())
select {
case conn.remoteOffersCh <- offer:
return true
default:
log.Debugf("OnRemoteOffer skipping message from peer %s on status %s because is not ready", conn.config.Key, conn.status.String())
// connection might not be ready yet to receive so we ignore the message
return false
}
}
// OnRemoteAnswer handles an answer from the remote peer and returns true if the message was accepted, false otherwise
// doesn't block, discards the message if the connection isn't ready
func (conn *Conn) OnRemoteAnswer(answer OfferAnswer) bool {
log.Debugf("OnRemoteAnswer from peer %s on status %s", conn.config.Key, conn.status.String())
select {
case conn.remoteAnswerCh <- answer:
return true
default:
// connection might not be ready yet to receive so we ignore the message
log.Debugf("OnRemoteAnswer skipping message from peer %s on status %s because is not ready", conn.config.Key, conn.status.String())
return false
}
}
// OnRemoteCandidate handles an ICE candidate provided by the remote peer.
func (conn *Conn) OnRemoteCandidate(candidate ice.Candidate) {
log.Debugf("OnRemoteCandidate from peer %s -> %s", conn.config.Key, candidate.String())
go func() {
conn.mu.Lock()
defer conn.mu.Unlock()
if conn.agent == nil {
return
}
err := conn.agent.AddRemoteCandidate(candidate)
if err != nil {
log.Errorf("error while handling remote candidate from peer %s", conn.config.Key)
return
}
}()
}
func (conn *Conn) GetKey() string {
return conn.config.Key
}
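// connLifecycleSketch is a minimal, illustrative sketch (not part of the engine) of how a
// Conn is typically driven: create it, run Open in the background, hand over remote
// signaling messages without blocking, and close it when the peer is removed. It assumes
// that NewConn accepts a nil status recorder, as in the tests below, and that Open blocks
// and returns an error once the connection ends.
func connLifecycleSketch(conf ConnConfig, offer OfferAnswer) {
	conn, err := NewConn(conf, nil)
	if err != nil {
		log.Errorf("failed creating connection to peer %s: %v", conf.Key, err)
		return
	}
	go func() {
		if err := conn.Open(); err != nil {
			log.Debugf("connection to peer %s ended: %v", conn.GetKey(), err)
		}
	}()
	// the offer is discarded if the connection isn't ready to receive it yet
	if accepted := conn.OnRemoteOffer(offer); !accepted {
		log.Debugf("peer %s wasn't ready for the offer", conn.GetKey())
	}
	_ = conn.Close()
}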

View File

@@ -1,167 +0,0 @@
package peer
import (
"github.com/magiconair/properties/assert"
"github.com/netbirdio/netbird/client/internal/proxy"
nbstatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/iface"
"github.com/pion/ice/v2"
"sync"
"testing"
"time"
)
var connConf = ConnConfig{
Key: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
LocalKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
StunTurn: []*ice.URL{},
InterfaceBlackList: nil,
Timeout: time.Second,
ProxyConfig: proxy.Config{},
LocalWgPort: 51820,
}
func TestNewConn_interfaceFilter(t *testing.T) {
ignore := []string{iface.WgInterfaceDefault, "tun0", "zt", "ZeroTier", "utun", "wg", "ts",
"Tailscale", "tailscale"}
filter := interfaceFilter(ignore)
for _, s := range ignore {
assert.Equal(t, filter(s), false)
}
}
func TestConn_GetKey(t *testing.T) {
conn, err := NewConn(connConf, nil)
if err != nil {
return
}
got := conn.GetKey()
assert.Equal(t, got, connConf.Key, "they should be equal")
}
func TestConn_OnRemoteOffer(t *testing.T) {
conn, err := NewConn(connConf, nbstatus.NewRecorder())
if err != nil {
return
}
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
<-conn.remoteOffersCh
wg.Done()
}()
go func() {
for {
accepted := conn.OnRemoteOffer(OfferAnswer{
IceCredentials: IceCredentials{
UFrag: "test",
Pwd: "test",
},
WgListenPort: 0,
Version: "",
})
if accepted {
wg.Done()
return
}
}
}()
wg.Wait()
}
func TestConn_OnRemoteAnswer(t *testing.T) {
conn, err := NewConn(connConf, nbstatus.NewRecorder())
if err != nil {
return
}
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
<-conn.remoteAnswerCh
wg.Done()
}()
go func() {
for {
accepted := conn.OnRemoteAnswer(OfferAnswer{
IceCredentials: IceCredentials{
UFrag: "test",
Pwd: "test",
},
WgListenPort: 0,
Version: "",
})
if accepted {
wg.Done()
return
}
}
}()
wg.Wait()
}
func TestConn_Status(t *testing.T) {
conn, err := NewConn(connConf, nbstatus.NewRecorder())
if err != nil {
return
}
tables := []struct {
name string
status ConnStatus
want ConnStatus
}{
{"StatusConnected", StatusConnected, StatusConnected},
{"StatusDisconnected", StatusDisconnected, StatusDisconnected},
{"StatusConnecting", StatusConnecting, StatusConnecting},
}
for _, table := range tables {
t.Run(table.name, func(t *testing.T) {
conn.status = table.status
got := conn.Status()
assert.Equal(t, got, table.want, "they should be equal")
})
}
}
func TestConn_Close(t *testing.T) {
conn, err := NewConn(connConf, nbstatus.NewRecorder())
if err != nil {
return
}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
<-conn.closeCh
wg.Done()
}()
go func() {
for {
err := conn.Close()
if err != nil {
continue
} else {
return
}
}
}()
wg.Wait()
}

View File

@@ -1,72 +0,0 @@
package peer
import (
"fmt"
"time"
)
// ConnectionTimeoutError is an error indicating that a peer Conn has been timed out
type ConnectionTimeoutError struct {
peer string
timeout time.Duration
}
func (e *ConnectionTimeoutError) Error() string {
return fmt.Sprintf("connection to peer %s timed out after %s", e.peer, e.timeout.String())
}
// NewConnectionTimeoutError creates a new ConnectionTimeoutError error
func NewConnectionTimeoutError(peer string, timeout time.Duration) error {
return &ConnectionTimeoutError{
peer: peer,
timeout: timeout,
}
}
// ConnectionClosedError is an error indicating that a peer Conn has been forcefully closed
type ConnectionClosedError struct {
peer string
}
func (e *ConnectionClosedError) Error() string {
return fmt.Sprintf("connection to peer %s has been closed", e.peer)
}
// NewConnectionClosedError creates a new ConnectionClosedError error
func NewConnectionClosedError(peer string) error {
return &ConnectionClosedError{
peer: peer,
}
}
// ConnectionDisconnectedError is an error indicating that a peer Conn has been disconnected by the remote peer
type ConnectionDisconnectedError struct {
peer string
}
func (e *ConnectionDisconnectedError) Error() string {
return fmt.Sprintf("disconnected from peer %s", e.peer)
}
// NewConnectionDisconnectedError creates a new ConnectionDisconnectedError error
func NewConnectionDisconnectedError(peer string) error {
return &ConnectionDisconnectedError{
peer: peer,
}
}
// ConnectionAlreadyClosedError is an error indicating that the Close() method was invoked on a peer Conn that has already been closed
type ConnectionAlreadyClosedError struct {
peer string
}
func (e *ConnectionAlreadyClosedError) Error() string {
return fmt.Sprintf("connection to peer %s has been already closed", e.peer)
}
// NewConnectionAlreadyClosed creates a new ConnectionAlreadyClosedError error
func NewConnectionAlreadyClosed(peer string) error {
return &ConnectionAlreadyClosedError{
peer: peer,
}
}
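// isExpectedClosure is an illustrative helper (a sketch, not used by the agent) showing how
// callers can distinguish the error types above, e.g. to treat a timeout or an
// already-closed connection as a non-fatal condition instead of a hard failure.
func isExpectedClosure(err error) bool {
	switch err.(type) {
	case *ConnectionTimeoutError, *ConnectionClosedError, *ConnectionAlreadyClosedError:
		return true
	default:
		return false
	}
}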

View File

@@ -1,27 +0,0 @@
package peer
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
)
func TestNewConnectionClosedError(t *testing.T) {
err := NewConnectionClosedError("X")
assert.Equal(t, &ConnectionClosedError{peer: "X"}, err)
}
func TestNewConnectionDisconnectedError(t *testing.T) {
err := NewConnectionDisconnectedError("X")
assert.Equal(t, &ConnectionDisconnectedError{peer: "X"}, err)
}
func TestNewConnectionTimeoutErrorC(t *testing.T) {
err := NewConnectionTimeoutError("X", time.Second)
assert.Equal(t, &ConnectionTimeoutError{peer: "X", timeout: time.Second}, err)
}
func TestNewConnectionAlreadyClosed(t *testing.T) {
err := NewConnectionAlreadyClosed("X")
assert.Equal(t, &ConnectionAlreadyClosedError{peer: "X"}, err)
}

View File

@@ -1,25 +0,0 @@
package peer
import log "github.com/sirupsen/logrus"
type ConnStatus int
func (s ConnStatus) String() string {
switch s {
case StatusConnecting:
return "Connecting"
case StatusConnected:
return "Connected"
case StatusDisconnected:
return "Disconnected"
default:
log.Errorf("unknown status: %d", s)
return "INVALID_PEER_CONNECTION_STATUS"
}
}
const (
StatusConnected ConnStatus = iota
StatusConnecting
StatusDisconnected
)
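// Note that the zero value of ConnStatus is StatusConnected, since it is the first constant
// in the iota block above; code that relies on the default value of a ConnStatus field
// should set it explicitly when Disconnected or Connecting is the intended initial state.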

View File

@@ -1,27 +0,0 @@
package peer
import (
"github.com/magiconair/properties/assert"
"testing"
)
func TestConnStatus_String(t *testing.T) {
tables := []struct {
name string
status ConnStatus
want string
}{
{"StatusConnected", StatusConnected, "Connected"},
{"StatusDisconnected", StatusDisconnected, "Disconnected"},
{"StatusConnecting", StatusConnecting, "Connecting"},
}
for _, table := range tables {
t.Run(table.name, func(t *testing.T) {
got := table.status.String()
assert.Equal(t, got, table.want, "they should be equal")
})
}
}

View File

@@ -1,72 +0,0 @@
package proxy
import (
"context"
log "github.com/sirupsen/logrus"
"net"
"time"
)
// DummyProxy just sends pings to the RemoteKey peer and reads responses
type DummyProxy struct {
conn net.Conn
remote string
ctx context.Context
cancel context.CancelFunc
}
func NewDummyProxy(remote string) *DummyProxy {
p := &DummyProxy{remote: remote}
p.ctx, p.cancel = context.WithCancel(context.Background())
return p
}
func (p *DummyProxy) Close() error {
p.cancel()
return nil
}
func (p *DummyProxy) Start(remoteConn net.Conn) error {
p.conn = remoteConn
go func() {
buf := make([]byte, 1500)
for {
select {
case <-p.ctx.Done():
return
default:
_, err := p.conn.Read(buf)
if err != nil {
log.Errorf("error while reading RemoteKey %s proxy %v", p.remote, err)
return
}
//log.Debugf("received %s from %s", string(buf[:n]), p.remote)
}
}
}()
go func() {
for {
select {
case <-p.ctx.Done():
return
default:
_, err := p.conn.Write([]byte("hello"))
//log.Debugf("sent ping to %s", p.remote)
if err != nil {
log.Errorf("error while writing to RemoteKey %s proxy %v", p.remote, err)
return
}
time.Sleep(5 * time.Second)
}
}
}()
return nil
}
func (p *DummyProxy) Type() Type {
return TypeDummy
}
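// runDummyProxyPair is an illustrative sketch (assumed usage, not part of the agent) that
// wires two DummyProxy instances over an in-memory net.Pipe so their ping/read loops
// exercise each other; useful only for local experimentation.
func runDummyProxyPair() error {
	a, b := net.Pipe()
	left, right := NewDummyProxy("left"), NewDummyProxy("right")
	if err := left.Start(a); err != nil {
		return err
	}
	if err := right.Start(b); err != nil {
		return err
	}
	time.Sleep(6 * time.Second) // let at least one ping round-trip
	_ = left.Close()
	_ = right.Close()
	// closing both pipe ends unblocks the readers started by Start
	_ = a.Close()
	_ = b.Close()
	return nil
}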

View File

@@ -1,54 +0,0 @@
package proxy
import (
log "github.com/sirupsen/logrus"
"net"
)
// NoProxy is used when there is no need for a proxy between ICE and Wireguard.
// This is possible in either of these cases:
// - peers are in the same local network
// - one of the peers has a public static IP (host)
// NoProxy will just update the remote peer with the remote host and a fixed WireGuard port (e.g. 51820).
// For NoProxy to work, the WireGuard port has to be fixed for the time being.
type NoProxy struct {
config Config
// RemoteWgListenPort is a WireGuard port of a remote peer.
// It is used instead of the hardcoded 51820 port.
RemoteWgListenPort int
}
// NewNoProxy creates a new NoProxy with a provided config and remote peer's WireGuard listen port
func NewNoProxy(config Config, remoteWgPort int) *NoProxy {
return &NoProxy{config: config, RemoteWgListenPort: remoteWgPort}
}
func (p *NoProxy) Close() error {
err := p.config.WgInterface.RemovePeer(p.config.RemoteKey)
if err != nil {
return err
}
return nil
}
// Start just updates Wireguard peer with the remote IP and default Wireguard port
func (p *NoProxy) Start(remoteConn net.Conn) error {
log.Debugf("using NoProxy while connecting to peer %s", p.config.RemoteKey)
addr, err := net.ResolveUDPAddr("udp", remoteConn.RemoteAddr().String())
if err != nil {
return err
}
err = p.config.WgInterface.UpdatePeer(p.config.RemoteKey, p.config.AllowedIps, DefaultWgKeepAlive,
addr, p.config.PreSharedKey)
if err != nil {
return err
}
return nil
}
func (p *NoProxy) Type() Type {
return TypeNoProxy
}

View File

@@ -1,34 +0,0 @@
package proxy
import (
"github.com/netbirdio/netbird/iface"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"io"
"net"
"time"
)
const DefaultWgKeepAlive = 25 * time.Second
type Type string
const (
TypeNoProxy Type = "NoProxy"
TypeWireguard Type = "Wireguard"
TypeDummy Type = "Dummy"
)
type Config struct {
WgListenAddr string
RemoteKey string
WgInterface *iface.WGIface
AllowedIps string
PreSharedKey *wgtypes.Key
}
type Proxy interface {
io.Closer
// Start starts proxying data to/from the provided remoteConn
Start(remoteConn net.Conn) error
Type() Type
}
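// newProxyForConnection is an illustrative sketch (assumed decision logic, not the engine's
// actual selection rules) of how the Type constants above map to concrete implementations:
// skip proxying when the remote peer's WireGuard port is directly reachable, otherwise
// relay packets through the local WireGuard socket.
func newProxyForConnection(config Config, remoteDirectlyReachable bool, remoteWgPort int) Proxy {
	if remoteDirectlyReachable && remoteWgPort > 0 {
		return NewNoProxy(config, remoteWgPort)
	}
	return NewWireguardProxy(config)
}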

View File

@@ -1,128 +0,0 @@
package proxy
import (
"context"
log "github.com/sirupsen/logrus"
"net"
)
// WireguardProxy proxies traffic between the local WireGuard interface and the remote peer connection
type WireguardProxy struct {
ctx context.Context
cancel context.CancelFunc
config Config
remoteConn net.Conn
localConn net.Conn
}
func NewWireguardProxy(config Config) *WireguardProxy {
p := &WireguardProxy{config: config}
p.ctx, p.cancel = context.WithCancel(context.Background())
return p
}
func (p *WireguardProxy) updateEndpoint() error {
udpAddr, err := net.ResolveUDPAddr(p.localConn.LocalAddr().Network(), p.localConn.LocalAddr().String())
if err != nil {
return err
}
// add local proxy connection as a Wireguard peer
err = p.config.WgInterface.UpdatePeer(p.config.RemoteKey, p.config.AllowedIps, DefaultWgKeepAlive,
udpAddr, p.config.PreSharedKey)
if err != nil {
return err
}
return nil
}
func (p *WireguardProxy) Start(remoteConn net.Conn) error {
p.remoteConn = remoteConn
var err error
p.localConn, err = net.Dial("udp", p.config.WgListenAddr)
if err != nil {
log.Errorf("failed dialing to local Wireguard port %s", err)
return err
}
err = p.updateEndpoint()
if err != nil {
log.Errorf("error while updating Wireguard peer endpoint [%s] %v", p.config.RemoteKey, err)
return err
}
go p.proxyToRemote()
go p.proxyToLocal()
return nil
}
func (p *WireguardProxy) Close() error {
p.cancel()
if c := p.localConn; c != nil {
err := p.localConn.Close()
if err != nil {
return err
}
}
err := p.config.WgInterface.RemovePeer(p.config.RemoteKey)
if err != nil {
return err
}
return nil
}
// proxyToRemote proxies everything from Wireguard to the RemoteKey peer
// blocks
func (p *WireguardProxy) proxyToRemote() {
buf := make([]byte, 1500)
for {
select {
case <-p.ctx.Done():
log.Debugf("stopped proxying to remote peer %s due to closed connection", p.config.RemoteKey)
return
default:
n, err := p.localConn.Read(buf)
if err != nil {
continue
}
_, err = p.remoteConn.Write(buf[:n])
if err != nil {
continue
}
}
}
}
// proxyToLocal proxies everything from the RemoteKey peer to local Wireguard
// blocks
func (p *WireguardProxy) proxyToLocal() {
buf := make([]byte, 1500)
for {
select {
case <-p.ctx.Done():
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.config.RemoteKey)
return
default:
n, err := p.remoteConn.Read(buf)
if err != nil {
continue
}
_, err = p.localConn.Write(buf[:n])
if err != nil {
continue
}
}
}
}
func (p *WireguardProxy) Type() Type {
return TypeWireguard
}

View File

@@ -1,285 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/iface"
"github.com/netbirdio/netbird/route"
log "github.com/sirupsen/logrus"
"net/netip"
)
type routerPeerStatus struct {
connected bool
relayed bool
direct bool
}
type routesUpdate struct {
updateSerial uint64
routes []*route.Route
}
type clientNetwork struct {
ctx context.Context
stop context.CancelFunc
statusRecorder *status.Status
wgInterface *iface.WGIface
routes map[string]*route.Route
routeUpdate chan routesUpdate
peerStateUpdate chan struct{}
routePeersNotifiers map[string]chan struct{}
chosenRoute *route.Route
network netip.Prefix
updateSerial uint64
}
func newClientNetworkWatcher(ctx context.Context, wgInterface *iface.WGIface, statusRecorder *status.Status, network netip.Prefix) *clientNetwork {
ctx, cancel := context.WithCancel(ctx)
client := &clientNetwork{
ctx: ctx,
stop: cancel,
statusRecorder: statusRecorder,
wgInterface: wgInterface,
routes: make(map[string]*route.Route),
routePeersNotifiers: make(map[string]chan struct{}),
routeUpdate: make(chan routesUpdate),
peerStateUpdate: make(chan struct{}),
network: network,
}
return client
}
func getClientNetworkID(input *route.Route) string {
return input.NetID + "-" + input.Network.String()
}
func (c *clientNetwork) getRouterPeerStatuses() map[string]routerPeerStatus {
routePeerStatuses := make(map[string]routerPeerStatus)
for _, r := range c.routes {
peerStatus, err := c.statusRecorder.GetPeer(r.Peer)
if err != nil {
log.Debugf("couldn't fetch peer state: %v", err)
continue
}
routePeerStatuses[r.ID] = routerPeerStatus{
connected: peerStatus.ConnStatus == peer.StatusConnected.String(),
relayed: peerStatus.Relayed,
direct: peerStatus.Direct,
}
}
return routePeerStatuses
}
func (c *clientNetwork) getBestRouteFromStatuses(routePeerStatuses map[string]routerPeerStatus) string {
var chosen string
chosenScore := 0
currID := ""
if c.chosenRoute != nil {
currID = c.chosenRoute.ID
}
for _, r := range c.routes {
tempScore := 0
peerStatus, found := routePeerStatuses[r.ID]
if !found || !peerStatus.connected {
continue
}
if r.Metric < route.MaxMetric {
metricDiff := route.MaxMetric - r.Metric
tempScore = metricDiff * 10
}
if !peerStatus.relayed {
tempScore++
}
if !peerStatus.direct {
tempScore++
}
if tempScore > chosenScore || (tempScore == chosenScore && currID == r.ID) {
chosen = r.ID
chosenScore = tempScore
}
}
if chosen == "" {
var peers []string
for _, r := range c.routes {
peers = append(peers, r.Peer)
}
log.Warnf("no route was chosen for network %s because no peers from list %s were connected", c.network, peers)
} else if chosen != currID {
log.Infof("new chosen route is %s with peer %s with score %d", chosen, c.routes[chosen].Peer, chosenScore)
}
return chosen
}
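// To illustrate the scoring above (assuming route.MaxMetric is the highest allowed metric,
// e.g. 9999): a connected route with metric 9000 scores (MaxMetric-9000)*10 plus at most 2
// bonus points from the relayed/direct flags, so a route with a lower metric always beats
// one with a higher metric, and the peer-state bonuses only break ties between routes with
// equal metrics. When the best score ties with the currently chosen route, the current
// route is kept.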
func (c *clientNetwork) watchPeerStatusChanges(ctx context.Context, peerKey string, peerStateUpdate chan struct{}, closer chan struct{}) {
for {
select {
case <-ctx.Done():
return
case <-closer:
return
case <-c.statusRecorder.GetPeerStateChangeNotifier(peerKey):
state, err := c.statusRecorder.GetPeer(peerKey)
if err != nil || state.ConnStatus == peer.StatusConnecting.String() {
continue
}
peerStateUpdate <- struct{}{}
log.Debugf("triggered route state update for Peer %s, state: %s", peerKey, state.ConnStatus)
}
}
}
func (c *clientNetwork) startPeersStatusChangeWatcher() {
for _, r := range c.routes {
_, found := c.routePeersNotifiers[r.Peer]
if !found {
c.routePeersNotifiers[r.Peer] = make(chan struct{})
go c.watchPeerStatusChanges(c.ctx, r.Peer, c.peerStateUpdate, c.routePeersNotifiers[r.Peer])
}
}
}
func (c *clientNetwork) removeRouteFromWireguardPeer(peerKey string) error {
state, err := c.statusRecorder.GetPeer(peerKey)
if err != nil || state.ConnStatus != peer.StatusConnected.String() {
return nil
}
err = c.wgInterface.RemoveAllowedIP(peerKey, c.network.String())
if err != nil {
return fmt.Errorf("couldn't remove allowed IP %s removed for peer %s, err: %v",
c.network, c.chosenRoute.Peer, err)
}
return nil
}
func (c *clientNetwork) removeRouteFromPeerAndSystem() error {
if c.chosenRoute != nil {
err := c.removeRouteFromWireguardPeer(c.chosenRoute.Peer)
if err != nil {
return err
}
err = removeFromRouteTableIfNonSystem(c.network, c.wgInterface.GetAddress().IP.String())
if err != nil {
return fmt.Errorf("couldn't remove route %s from system, err: %v",
c.network, err)
}
}
return nil
}
func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem() error {
var err error
routerPeerStatuses := c.getRouterPeerStatuses()
chosen := c.getBestRouteFromStatuses(routerPeerStatuses)
if chosen == "" {
err = c.removeRouteFromPeerAndSystem()
if err != nil {
return err
}
c.chosenRoute = nil
return nil
}
if c.chosenRoute != nil && c.chosenRoute.ID == chosen {
if c.chosenRoute.IsEqual(c.routes[chosen]) {
return nil
}
}
if c.chosenRoute != nil {
err = c.removeRouteFromWireguardPeer(c.chosenRoute.Peer)
if err != nil {
return err
}
} else {
err = addToRouteTableIfNoExists(c.network, c.wgInterface.GetAddress().IP.String())
if err != nil {
return fmt.Errorf("route %s couldn't be added for peer %s, err: %v",
c.network.String(), c.wgInterface.GetAddress().IP.String(), err)
}
}
c.chosenRoute = c.routes[chosen]
err = c.wgInterface.AddAllowedIP(c.chosenRoute.Peer, c.network.String())
if err != nil {
log.Errorf("couldn't add allowed IP %s added for peer %s, err: %v",
c.network, c.chosenRoute.Peer, err)
}
return nil
}
func (c *clientNetwork) sendUpdateToClientNetworkWatcher(update routesUpdate) {
go func() {
c.routeUpdate <- update
}()
}
func (c *clientNetwork) handleUpdate(update routesUpdate) {
updateMap := make(map[string]*route.Route)
for _, r := range update.routes {
updateMap[r.ID] = r
}
for id, r := range c.routes {
_, found := updateMap[id]
if !found {
close(c.routePeersNotifiers[r.Peer])
delete(c.routePeersNotifiers, r.Peer)
}
}
c.routes = updateMap
}
// peersStateAndUpdateWatcher is the main point of reacting to client network routing events.
// All the processing related to the client network should be done here. Thread-safe.
func (c *clientNetwork) peersStateAndUpdateWatcher() {
for {
select {
case <-c.ctx.Done():
log.Debugf("stopping watcher for network %s", c.network)
err := c.removeRouteFromPeerAndSystem()
if err != nil {
log.Error(err)
}
return
case <-c.peerStateUpdate:
err := c.recalculateRouteAndUpdatePeerAndSystem()
if err != nil {
log.Error(err)
}
case update := <-c.routeUpdate:
if update.updateSerial < c.updateSerial {
log.Warnf("received a routes update with smaller serial number, ignoring it")
continue
}
log.Debugf("received a new client network route update for %s", c.network)
c.handleUpdate(update)
c.updateSerial = update.updateSerial
err := c.recalculateRouteAndUpdatePeerAndSystem()
if err != nil {
log.Error(err)
}
c.startPeersStatusChangeWatcher()
}
}
}
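// startClientNetworkSketch shows the intended wiring in one place (see
// DefaultManager.updateClientNetworks for the real call sites): create the watcher, run
// peersStateAndUpdateWatcher in a goroutine, and push updates through
// sendUpdateToClientNetworkWatcher. Illustrative only; the name and serial handling here
// are simplified.
func startClientNetworkSketch(ctx context.Context, wgIface *iface.WGIface, recorder *status.Status,
	network netip.Prefix, routes []*route.Route, serial uint64) *clientNetwork {
	watcher := newClientNetworkWatcher(ctx, wgIface, recorder, network)
	go watcher.peersStateAndUpdateWatcher()
	watcher.sendUpdateToClientNetworkWatcher(routesUpdate{updateSerial: serial, routes: routes})
	return watcher
}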

View File

@@ -1,75 +0,0 @@
package routemanager
var insertRuleTestCases = []struct {
name string
inputPair routerPair
ipVersion string
}{
{
name: "Insert Forwarding IPV4 Rule",
inputPair: routerPair{
ID: "zxa",
source: "100.100.100.1/32",
destination: "100.100.200.0/24",
masquerade: false,
},
ipVersion: ipv4,
},
{
name: "Insert Forwarding And Nat IPV4 Rules",
inputPair: routerPair{
ID: "zxa",
source: "100.100.100.1/32",
destination: "100.100.200.0/24",
masquerade: true,
},
ipVersion: ipv4,
},
{
name: "Insert Forwarding IPV6 Rule",
inputPair: routerPair{
ID: "zxa",
source: "fc00::1/128",
destination: "fc12::/64",
masquerade: false,
},
ipVersion: ipv6,
},
{
name: "Insert Forwarding And Nat IPV6 Rules",
inputPair: routerPair{
ID: "zxa",
source: "fc00::1/128",
destination: "fc12::/64",
masquerade: true,
},
ipVersion: ipv6,
},
}
var removeRuleTestCases = []struct {
name string
inputPair routerPair
ipVersion string
}{
{
name: "Remove Forwarding And Nat IPV4 Rules",
inputPair: routerPair{
ID: "zxa",
source: "100.100.100.1/32",
destination: "100.100.200.0/24",
masquerade: true,
},
ipVersion: ipv4,
},
{
name: "Remove Forwarding And Nat IPV6 Rules",
inputPair: routerPair{
ID: "zxa",
source: "fc00::1/128",
destination: "fc12::/64",
masquerade: true,
},
ipVersion: ipv6,
},
}

View File

@@ -1,12 +0,0 @@
package routemanager
type firewallManager interface {
// RestoreOrCreateContainers restores or creates the firewall containers (chains and tables) with their default rules
RestoreOrCreateContainers() error
// InsertRoutingRules inserts a routing firewall rule
InsertRoutingRules(pair routerPair) error
// RemoveRoutingRules removes a routing firewall rule
RemoveRoutingRules(pair routerPair) error
// CleanRoutingRules cleans up the firewall containers
CleanRoutingRules()
}

View File

@@ -1,55 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/coreos/go-iptables/iptables"
log "github.com/sirupsen/logrus"
)
import "github.com/google/nftables"
const (
ipv6Forwarding = "netbird-rt-ipv6-forwarding"
ipv4Forwarding = "netbird-rt-ipv4-forwarding"
ipv6Nat = "netbird-rt-ipv6-nat"
ipv4Nat = "netbird-rt-ipv4-nat"
natFormat = "netbird-nat-%s"
forwardingFormat = "netbird-fwd-%s"
ipv6 = "ipv6"
ipv4 = "ipv4"
)
func genKey(format string, input string) string {
return fmt.Sprintf(format, input)
}
// NewFirewall returns an iptables manager if iptables is supported, otherwise an nftables manager
func NewFirewall(parentCTX context.Context) firewallManager {
ctx, cancel := context.WithCancel(parentCTX)
if isIptablesSupported() {
log.Debugf("iptables is supported")
ipv4Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv4)
ipv6Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv6)
return &iptablesManager{
ctx: ctx,
stop: cancel,
ipv4Client: ipv4Client,
ipv6Client: ipv6Client,
rules: make(map[string]map[string][]string),
}
}
log.Debugf("iptables is not supported, using nftables")
manager := &nftablesManager{
ctx: ctx,
stop: cancel,
conn: &nftables.Conn{},
chains: make(map[string]map[string]*nftables.Chain),
rules: make(map[string]*nftables.Rule),
}
return manager
}

View File

@@ -1,27 +0,0 @@
//go:build !linux
// +build !linux
package routemanager
import "context"
type unimplementedFirewall struct{}
func (unimplementedFirewall) RestoreOrCreateContainers() error {
return nil
}
func (unimplementedFirewall) InsertRoutingRules(pair routerPair) error {
return nil
}
func (unimplementedFirewall) RemoveRoutingRules(pair routerPair) error {
return nil
}
func (unimplementedFirewall) CleanRoutingRules() {
return
}
// NewFirewall returns an unimplemented Firewall manager
func NewFirewall(parentCtx context.Context) firewallManager {
return unimplementedFirewall{}
}

View File

@@ -1,403 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/coreos/go-iptables/iptables"
log "github.com/sirupsen/logrus"
"net/netip"
"os/exec"
"strings"
"sync"
)
func isIptablesSupported() bool {
_, err4 := exec.LookPath("iptables")
_, err6 := exec.LookPath("ip6tables")
return err4 == nil && err6 == nil
}
// constants needed to manage and create iptables rules
const (
iptablesFilterTable = "filter"
iptablesNatTable = "nat"
iptablesForwardChain = "FORWARD"
iptablesPostRoutingChain = "POSTROUTING"
iptablesRoutingNatChain = "NETBIRD-RT-NAT"
iptablesRoutingForwardingChain = "NETBIRD-RT-FWD"
routingFinalForwardJump = "ACCEPT"
routingFinalNatJump = "MASQUERADE"
)
// some presets for building iptables rules
var (
iptablesDefaultForwardingRule = []string{"-j", iptablesRoutingForwardingChain, "-m", "comment", "--comment"}
iptablesDefaultNetbirdForwardingRule = []string{"-j", "RETURN"}
iptablesDefaultNatRule = []string{"-j", iptablesRoutingNatChain, "-m", "comment", "--comment"}
iptablesDefaultNetbirdNatRule = []string{"-j", "RETURN"}
)
type iptablesManager struct {
ctx context.Context
stop context.CancelFunc
ipv4Client *iptables.IPTables
ipv6Client *iptables.IPTables
rules map[string]map[string][]string
mux sync.Mutex
}
// CleanRoutingRules cleans existing iptables resources that were created by the agent
func (i *iptablesManager) CleanRoutingRules() {
i.mux.Lock()
defer i.mux.Unlock()
err := i.cleanJumpRules()
if err != nil {
log.Error(err)
}
log.Debug("flushing tables")
errMSGFormat := "iptables: failed cleaning %s chain %s,error: %v"
err = i.ipv4Client.ClearAndDeleteChain(iptablesFilterTable, iptablesRoutingForwardingChain)
if err != nil {
log.Errorf(errMSGFormat, ipv4, iptablesRoutingForwardingChain, err)
}
err = i.ipv4Client.ClearAndDeleteChain(iptablesNatTable, iptablesRoutingNatChain)
if err != nil {
log.Errorf(errMSGFormat, ipv4, iptablesRoutingNatChain, err)
}
err = i.ipv6Client.ClearAndDeleteChain(iptablesFilterTable, iptablesRoutingForwardingChain)
if err != nil {
log.Errorf(errMSGFormat, ipv6, iptablesRoutingForwardingChain, err)
}
err = i.ipv6Client.ClearAndDeleteChain(iptablesNatTable, iptablesRoutingNatChain)
if err != nil {
log.Errorf(errMSGFormat, ipv6, iptablesRoutingNatChain, err)
}
log.Info("done cleaning up iptables rules")
}
// RestoreOrCreateContainers restores existing iptables containers (chains and rules)
// if they don't exist, we create them
func (i *iptablesManager) RestoreOrCreateContainers() error {
i.mux.Lock()
defer i.mux.Unlock()
if i.rules[ipv4][ipv4Forwarding] != nil && i.rules[ipv6][ipv6Forwarding] != nil {
return nil
}
errMSGFormat := "iptables: failed creating %s chain %s,error: %v"
err := createChain(i.ipv4Client, iptablesFilterTable, iptablesRoutingForwardingChain)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv4, iptablesRoutingForwardingChain, err)
}
err = createChain(i.ipv4Client, iptablesNatTable, iptablesRoutingNatChain)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv4, iptablesRoutingNatChain, err)
}
err = createChain(i.ipv6Client, iptablesFilterTable, iptablesRoutingForwardingChain)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv6, iptablesRoutingForwardingChain, err)
}
err = createChain(i.ipv6Client, iptablesNatTable, iptablesRoutingNatChain)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv6, iptablesRoutingNatChain, err)
}
err = i.restoreRules(i.ipv4Client)
if err != nil {
return fmt.Errorf("iptables: error while restoring ipv4 rules: %v", err)
}
err = i.restoreRules(i.ipv6Client)
if err != nil {
return fmt.Errorf("iptables: error while restoring ipv6 rules: %v", err)
}
err = i.addJumpRules()
if err != nil {
return fmt.Errorf("iptables: error while creating jump rules: %v", err)
}
return nil
}
// addJumpRules creates jump rules to send packets to NetBird chains
func (i *iptablesManager) addJumpRules() error {
err := i.cleanJumpRules()
if err != nil {
return err
}
rule := append(iptablesDefaultForwardingRule, ipv4Forwarding)
err = i.ipv4Client.Insert(iptablesFilterTable, iptablesForwardChain, 1, rule...)
if err != nil {
return err
}
i.rules[ipv4][ipv4Forwarding] = rule
rule = append(iptablesDefaultNatRule, ipv4Nat)
err = i.ipv4Client.Insert(iptablesNatTable, iptablesPostRoutingChain, 1, rule...)
if err != nil {
return err
}
i.rules[ipv4][ipv4Nat] = rule
rule = append(iptablesDefaultForwardingRule, ipv6Forwarding)
err = i.ipv6Client.Insert(iptablesFilterTable, iptablesForwardChain, 1, rule...)
if err != nil {
return err
}
i.rules[ipv6][ipv6Forwarding] = rule
rule = append(iptablesDefaultNatRule, ipv6Nat)
err = i.ipv6Client.Insert(iptablesNatTable, iptablesPostRoutingChain, 1, rule...)
if err != nil {
return err
}
i.rules[ipv6][ipv6Nat] = rule
return nil
}
// cleanJumpRules cleans jump rules that were sending packets to NetBird chains
func (i *iptablesManager) cleanJumpRules() error {
var err error
errMSGFormat := "iptables: failed cleaning rule from %s chain %s,err: %v"
rule, found := i.rules[ipv4][ipv4Forwarding]
if found {
log.Debugf("iptables: removing %s rule: %s ", ipv4, ipv4Forwarding)
err = i.ipv4Client.DeleteIfExists(iptablesFilterTable, iptablesForwardChain, rule...)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv4, iptablesForwardChain, err)
}
}
rule, found = i.rules[ipv4][ipv4Nat]
if found {
log.Debugf("iptables: removing %s rule: %s ", ipv4, ipv4Nat)
err = i.ipv4Client.DeleteIfExists(iptablesNatTable, iptablesPostRoutingChain, rule...)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv4, iptablesPostRoutingChain, err)
}
}
rule, found = i.rules[ipv6][ipv6Forwarding]
if found {
log.Debugf("iptables: removing %s rule: %s ", ipv6, ipv6Forwarding)
err = i.ipv6Client.DeleteIfExists(iptablesFilterTable, iptablesForwardChain, rule...)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv6, iptablesForwardChain, err)
}
}
rule, found = i.rules[ipv6][ipv6Nat]
if found {
log.Debugf("iptables: removing %s rule: %s ", ipv6, ipv6Nat)
err = i.ipv6Client.DeleteIfExists(iptablesNatTable, iptablesPostRoutingChain, rule...)
if err != nil {
return fmt.Errorf(errMSGFormat, ipv6, iptablesPostRoutingChain, err)
}
}
return nil
}
func iptablesProtoToString(proto iptables.Protocol) string {
if proto == iptables.ProtocolIPv6 {
return ipv6
}
return ipv4
}
// restoreRules restores existing NetBird rules
func (i *iptablesManager) restoreRules(iptablesClient *iptables.IPTables) error {
ipVersion := iptablesProtoToString(iptablesClient.Proto())
if i.rules[ipVersion] == nil {
i.rules[ipVersion] = make(map[string][]string)
}
table := iptablesFilterTable
for _, chain := range []string{iptablesForwardChain, iptablesRoutingForwardingChain} {
rules, err := iptablesClient.List(table, chain)
if err != nil {
return err
}
for _, ruleString := range rules {
rule := strings.Fields(ruleString)
id := getRuleRouteID(rule)
if id != "" {
i.rules[ipVersion][id] = rule[2:]
}
}
}
table = iptablesNatTable
for _, chain := range []string{iptablesPostRoutingChain, iptablesRoutingNatChain} {
rules, err := iptablesClient.List(table, chain)
if err != nil {
return err
}
for _, ruleString := range rules {
rule := strings.Fields(ruleString)
id := getRuleRouteID(rule)
if id != "" {
i.rules[ipVersion][id] = rule[2:]
}
}
}
return nil
}
// createChain creates NetBird chains
func createChain(iptables *iptables.IPTables, table, newChain string) error {
chains, err := iptables.ListChains(table)
if err != nil {
return fmt.Errorf("couldn't get %s %s table chains, error: %v", iptablesProtoToString(iptables.Proto()), table, err)
}
shouldCreateChain := true
for _, chain := range chains {
if chain == newChain {
shouldCreateChain = false
}
}
if shouldCreateChain {
err = iptables.NewChain(table, newChain)
if err != nil {
return fmt.Errorf("couldn't create %s chain %s in %s table, error: %v", iptablesProtoToString(iptables.Proto()), newChain, table, err)
}
if table == iptablesNatTable {
err = iptables.Append(table, newChain, iptablesDefaultNetbirdNatRule...)
} else {
err = iptables.Append(table, newChain, iptablesDefaultNetbirdForwardingRule...)
}
if err != nil {
return fmt.Errorf("couldn't create %s chain %s default rule, error: %v", iptablesProtoToString(iptables.Proto()), newChain, err)
}
}
return nil
}
// genRuleSpec generates rule specification with comment identifier
func genRuleSpec(jump, id, source, destination string) []string {
return []string{"-s", source, "-d", destination, "-j", jump, "-m", "comment", "--comment", id}
}
// getRuleRouteID returns the rule ID if it matches our prefix
func getRuleRouteID(rule []string) string {
for i, flag := range rule {
if flag == "--comment" {
id := rule[i+1]
if strings.HasPrefix(id, "netbird-") {
return id
}
}
}
return ""
}
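// For example, genRuleSpec(routingFinalForwardJump, genKey(forwardingFormat, "abc"),
// "100.100.100.1/32", "100.100.200.0/24") produces
// ["-s", "100.100.100.1/32", "-d", "100.100.200.0/24", "-j", "ACCEPT", "-m", "comment", "--comment", "netbird-fwd-abc"],
// and getRuleRouteID applied to the listed rule returns "netbird-fwd-abc", which is how
// restoreRules recognizes rules previously created by the agent.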
// InsertRoutingRules inserts an iptables rule pair into the forwarding chain and, if masquerading is enabled, into the nat chain
func (i *iptablesManager) InsertRoutingRules(pair routerPair) error {
i.mux.Lock()
defer i.mux.Unlock()
var err error
prefix := netip.MustParsePrefix(pair.source)
ipVersion := ipv4
iptablesClient := i.ipv4Client
if prefix.Addr().Unmap().Is6() {
iptablesClient = i.ipv6Client
ipVersion = ipv6
}
forwardRuleKey := genKey(forwardingFormat, pair.ID)
forwardRule := genRuleSpec(routingFinalForwardJump, forwardRuleKey, pair.source, pair.destination)
existingRule, found := i.rules[ipVersion][forwardRuleKey]
if found {
err = iptablesClient.DeleteIfExists(iptablesFilterTable, iptablesRoutingForwardingChain, existingRule...)
if err != nil {
return fmt.Errorf("iptables: error while removing existing forwarding rule for %s: %v", pair.destination, err)
}
delete(i.rules[ipVersion], forwardRuleKey)
}
err = iptablesClient.Insert(iptablesFilterTable, iptablesRoutingForwardingChain, 1, forwardRule...)
if err != nil {
return fmt.Errorf("iptables: error while adding new forwarding rule for %s: %v", pair.destination, err)
}
i.rules[ipVersion][forwardRuleKey] = forwardRule
if !pair.masquerade {
return nil
}
natRuleKey := genKey(natFormat, pair.ID)
natRule := genRuleSpec(routingFinalNatJump, natRuleKey, pair.source, pair.destination)
existingRule, found = i.rules[ipVersion][natRuleKey]
if found {
err = iptablesClient.DeleteIfExists(iptablesNatTable, iptablesRoutingNatChain, existingRule...)
if err != nil {
return fmt.Errorf("iptables: error while removing existing nat rulefor %s: %v", pair.destination, err)
}
delete(i.rules[ipVersion], natRuleKey)
}
err = iptablesClient.Insert(iptablesNatTable, iptablesRoutingNatChain, 1, natRule...)
if err != nil {
return fmt.Errorf("iptables: error while adding new nat rulefor %s: %v", pair.destination, err)
}
i.rules[ipVersion][natRuleKey] = natRule
return nil
}
// RemoveRoutingRules removes an iptables rule pair from forwarding and nat chains
func (i *iptablesManager) RemoveRoutingRules(pair routerPair) error {
i.mux.Lock()
defer i.mux.Unlock()
var err error
prefix := netip.MustParsePrefix(pair.source)
ipVersion := ipv4
iptablesClient := i.ipv4Client
if prefix.Addr().Unmap().Is6() {
iptablesClient = i.ipv6Client
ipVersion = ipv6
}
forwardRuleKey := genKey(forwardingFormat, pair.ID)
existingRule, found := i.rules[ipVersion][forwardRuleKey]
if found {
err = iptablesClient.DeleteIfExists(iptablesFilterTable, iptablesRoutingForwardingChain, existingRule...)
if err != nil {
return fmt.Errorf("iptables: error while removing existing forwarding rule for %s: %v", pair.destination, err)
}
}
delete(i.rules[ipVersion], forwardRuleKey)
if !pair.masquerade {
return nil
}
natRuleKey := genKey(natFormat, pair.ID)
existingRule, found = i.rules[ipVersion][natRuleKey]
if found {
err = iptablesClient.DeleteIfExists(iptablesNatTable, iptablesRoutingNatChain, existingRule...)
if err != nil {
return fmt.Errorf("iptables: error while removing existing nat rule for %s: %v", pair.destination, err)
}
}
delete(i.rules[ipVersion], natRuleKey)
return nil
}

View File

@@ -1,247 +0,0 @@
package routemanager
import (
"context"
"github.com/coreos/go-iptables/iptables"
"github.com/stretchr/testify/require"
"testing"
)
func TestIptablesManager_RestoreOrCreateContainers(t *testing.T) {
if !isIptablesSupported() {
t.SkipNow()
}
ctx, cancel := context.WithCancel(context.TODO())
ipv4Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv4)
ipv6Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv6)
manager := &iptablesManager{
ctx: ctx,
stop: cancel,
ipv4Client: ipv4Client,
ipv6Client: ipv6Client,
rules: make(map[string]map[string][]string),
}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
require.Len(t, manager.rules, 2, "should have created maps for ipv4 and ipv6")
require.Len(t, manager.rules[ipv4], 2, "should have created minimal rules for ipv4")
exists, err := ipv4Client.Exists(iptablesFilterTable, iptablesForwardChain, manager.rules[ipv4][ipv4Forwarding]...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", ipv4, iptablesFilterTable, iptablesForwardChain)
require.True(t, exists, "forwarding rule should exist")
exists, err = ipv4Client.Exists(iptablesNatTable, iptablesPostRoutingChain, manager.rules[ipv4][ipv4Nat]...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", ipv4, iptablesNatTable, iptablesPostRoutingChain)
require.True(t, exists, "postrouting rule should exist")
require.Len(t, manager.rules[ipv6], 2, "should have created minimal rules for ipv6")
exists, err = ipv6Client.Exists(iptablesFilterTable, iptablesForwardChain, manager.rules[ipv6][ipv6Forwarding]...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", ipv6, iptablesFilterTable, iptablesForwardChain)
require.True(t, exists, "forwarding rule should exist")
exists, err = ipv6Client.Exists(iptablesNatTable, iptablesPostRoutingChain, manager.rules[ipv6][ipv6Nat]...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", ipv6, iptablesNatTable, iptablesPostRoutingChain)
require.True(t, exists, "postrouting rule should exist")
pair := routerPair{
ID: "abc",
source: "100.100.100.1/32",
destination: "100.100.100.0/24",
masquerade: true,
}
forward4RuleKey := genKey(forwardingFormat, pair.ID)
forward4Rule := genRuleSpec(routingFinalForwardJump, forward4RuleKey, pair.source, pair.destination)
err = ipv4Client.Insert(iptablesFilterTable, iptablesRoutingForwardingChain, 1, forward4Rule...)
require.NoError(t, err, "inserting rule should not return error")
nat4RuleKey := genKey(natFormat, pair.ID)
nat4Rule := genRuleSpec(routingFinalNatJump, nat4RuleKey, pair.source, pair.destination)
err = ipv4Client.Insert(iptablesNatTable, iptablesRoutingNatChain, 1, nat4Rule...)
require.NoError(t, err, "inserting rule should not return error")
pair = routerPair{
ID: "abc",
source: "fc00::1/128",
destination: "fc11::/64",
masquerade: true,
}
forward6RuleKey := genKey(forwardingFormat, pair.ID)
forward6Rule := genRuleSpec(routingFinalForwardJump, forward6RuleKey, pair.source, pair.destination)
err = ipv6Client.Insert(iptablesFilterTable, iptablesRoutingForwardingChain, 1, forward6Rule...)
require.NoError(t, err, "inserting rule should not return error")
nat6RuleKey := genKey(natFormat, pair.ID)
nat6Rule := genRuleSpec(routingFinalNatJump, nat6RuleKey, pair.source, pair.destination)
err = ipv6Client.Insert(iptablesNatTable, iptablesRoutingNatChain, 1, nat6Rule...)
require.NoError(t, err, "inserting rule should not return error")
delete(manager.rules, ipv4)
delete(manager.rules, ipv6)
err = manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
require.Len(t, manager.rules[ipv4], 4, "should have restored all rules for ipv4")
foundRule, found := manager.rules[ipv4][forward4RuleKey]
require.True(t, found, "forwarding rule should exist in the map")
require.Equal(t, forward4Rule[:4], foundRule[:4], "stored forwarding rule should match")
foundRule, found = manager.rules[ipv4][nat4RuleKey]
require.True(t, found, "nat rule should exist in the map")
require.Equal(t, nat4Rule[:4], foundRule[:4], "stored nat rule should match")
require.Len(t, manager.rules[ipv6], 4, "should have restored all rules for ipv6")
foundRule, found = manager.rules[ipv6][forward6RuleKey]
require.True(t, found, "forwarding rule should exist in the map")
require.Equal(t, forward6Rule[:4], foundRule[:4], "stored forward rule should match")
foundRule, found = manager.rules[ipv6][nat6RuleKey]
require.True(t, found, "nat rule should exist in the map")
require.Equal(t, nat6Rule[:4], foundRule[:4], "stored nat rule should match")
}
func TestIptablesManager_InsertRoutingRules(t *testing.T) {
if !isIptablesSupported() {
t.SkipNow()
}
for _, testCase := range insertRuleTestCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
ipv4Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv4)
ipv6Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv6)
iptablesClient := ipv4Client
if testCase.ipVersion == ipv6 {
iptablesClient = ipv6Client
}
manager := &iptablesManager{
ctx: ctx,
stop: cancel,
ipv4Client: ipv4Client,
ipv6Client: ipv6Client,
rules: make(map[string]map[string][]string),
}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
err = manager.InsertRoutingRules(testCase.inputPair)
require.NoError(t, err, "forwarding pair should be inserted")
forwardRuleKey := genKey(forwardingFormat, testCase.inputPair.ID)
forwardRule := genRuleSpec(routingFinalForwardJump, forwardRuleKey, testCase.inputPair.source, testCase.inputPair.destination)
exists, err := iptablesClient.Exists(iptablesFilterTable, iptablesRoutingForwardingChain, forwardRule...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", testCase.ipVersion, iptablesFilterTable, iptablesRoutingForwardingChain)
require.True(t, exists, "forwarding rule should exist")
foundRule, found := manager.rules[testCase.ipVersion][forwardRuleKey]
require.True(t, found, "forwarding rule should exist in the manager map")
require.Equal(t, forwardRule[:4], foundRule[:4], "stored forwarding rule should match")
natRuleKey := genKey(natFormat, testCase.inputPair.ID)
natRule := genRuleSpec(routingFinalNatJump, natRuleKey, testCase.inputPair.source, testCase.inputPair.destination)
exists, err = iptablesClient.Exists(iptablesNatTable, iptablesRoutingNatChain, natRule...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", testCase.ipVersion, iptablesNatTable, iptablesRoutingNatChain)
if testCase.inputPair.masquerade {
require.True(t, exists, "nat rule should be created")
foundNatRule, foundNat := manager.rules[testCase.ipVersion][natRuleKey]
require.True(t, foundNat, "nat rule should exist in the map")
require.Equal(t, natRule[:4], foundNatRule[:4], "stored nat rule should match")
} else {
require.False(t, exists, "nat rule should not be created")
_, foundNat := manager.rules[testCase.ipVersion][natRuleKey]
require.False(t, foundNat, "nat rule should exist in the map")
}
})
}
}
func TestIptablesManager_RemoveRoutingRules(t *testing.T) {
if !isIptablesSupported() {
t.SkipNow()
}
for _, testCase := range removeRuleTestCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
ipv4Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv4)
ipv6Client, _ := iptables.NewWithProtocol(iptables.ProtocolIPv6)
iptablesClient := ipv4Client
if testCase.ipVersion == ipv6 {
iptablesClient = ipv6Client
}
manager := &iptablesManager{
ctx: ctx,
stop: cancel,
ipv4Client: ipv4Client,
ipv6Client: ipv6Client,
rules: make(map[string]map[string][]string),
}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
forwardRuleKey := genKey(forwardingFormat, testCase.inputPair.ID)
forwardRule := genRuleSpec(routingFinalForwardJump, forwardRuleKey, testCase.inputPair.source, testCase.inputPair.destination)
err = iptablesClient.Insert(iptablesFilterTable, iptablesRoutingForwardingChain, 1, forwardRule...)
require.NoError(t, err, "inserting rule should not return error")
natRuleKey := genKey(natFormat, testCase.inputPair.ID)
natRule := genRuleSpec(routingFinalNatJump, natRuleKey, testCase.inputPair.source, testCase.inputPair.destination)
err = iptablesClient.Insert(iptablesNatTable, iptablesRoutingNatChain, 1, natRule...)
require.NoError(t, err, "inserting rule should not return error")
delete(manager.rules, ipv4)
delete(manager.rules, ipv6)
err = manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
err = manager.RemoveRoutingRules(testCase.inputPair)
require.NoError(t, err, "shouldn't return error")
exists, err := iptablesClient.Exists(iptablesFilterTable, iptablesRoutingForwardingChain, forwardRule...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", testCase.ipVersion, iptablesFilterTable, iptablesRoutingForwardingChain)
require.False(t, exists, "forwarding rule should not exist")
_, found := manager.rules[testCase.ipVersion][forwardRuleKey]
require.False(t, found, "forwarding rule should exist in the manager map")
exists, err = iptablesClient.Exists(iptablesNatTable, iptablesRoutingNatChain, natRule...)
require.NoError(t, err, "should be able to query the iptables %s %s table and %s chain", testCase.ipVersion, iptablesNatTable, iptablesRoutingNatChain)
require.False(t, exists, "nat rule should not exist")
_, found = manager.rules[testCase.ipVersion][natRuleKey]
require.False(t, found, "forwarding rule should exist in the manager map")
})
}
}

View File

@@ -1,181 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/iface"
"github.com/netbirdio/netbird/route"
log "github.com/sirupsen/logrus"
"runtime"
"sync"
)
// Manager is a route manager interface
type Manager interface {
UpdateRoutes(updateSerial uint64, newRoutes []*route.Route) error
Stop()
}
// DefaultManager is the default instance of a route manager
type DefaultManager struct {
ctx context.Context
stop context.CancelFunc
mux sync.Mutex
clientNetworks map[string]*clientNetwork
serverRoutes map[string]*route.Route
serverRouter *serverRouter
statusRecorder *status.Status
wgInterface *iface.WGIface
pubKey string
}
// NewManager returns a new route manager
func NewManager(ctx context.Context, pubKey string, wgInterface *iface.WGIface, statusRecorder *status.Status) *DefaultManager {
mCTX, cancel := context.WithCancel(ctx)
return &DefaultManager{
ctx: mCTX,
stop: cancel,
clientNetworks: make(map[string]*clientNetwork),
serverRoutes: make(map[string]*route.Route),
serverRouter: &serverRouter{
routes: make(map[string]*route.Route),
netForwardHistoryEnabled: isNetForwardHistoryEnabled(),
firewall: NewFirewall(ctx),
},
statusRecorder: statusRecorder,
wgInterface: wgInterface,
pubKey: pubKey,
}
}
// Stop stops the manager watchers and cleans up firewall rules
func (m *DefaultManager) Stop() {
m.stop()
m.serverRouter.firewall.CleanRoutingRules()
}
func (m *DefaultManager) updateClientNetworks(updateSerial uint64, networks map[string][]*route.Route) {
// removing routes that do not exist as per the update from the Management service.
for id, client := range m.clientNetworks {
_, found := networks[id]
if !found {
log.Debugf("stopping client network watcher, %s", id)
client.stop()
delete(m.clientNetworks, id)
}
}
for id, routes := range networks {
clientNetworkWatcher, found := m.clientNetworks[id]
if !found {
clientNetworkWatcher = newClientNetworkWatcher(m.ctx, m.wgInterface, m.statusRecorder, routes[0].Network)
m.clientNetworks[id] = clientNetworkWatcher
go clientNetworkWatcher.peersStateAndUpdateWatcher()
}
update := routesUpdate{
updateSerial: updateSerial,
routes: routes,
}
clientNetworkWatcher.sendUpdateToClientNetworkWatcher(update)
}
}
func (m *DefaultManager) updateServerRoutes(routesMap map[string]*route.Route) error {
serverRoutesToRemove := make([]string, 0)
if len(routesMap) > 0 {
err := m.serverRouter.firewall.RestoreOrCreateContainers()
if err != nil {
return fmt.Errorf("couldn't initialize firewall containers, got err: %v", err)
}
}
for routeID := range m.serverRoutes {
update, found := routesMap[routeID]
if !found || !update.IsEqual(m.serverRoutes[routeID]) {
serverRoutesToRemove = append(serverRoutesToRemove, routeID)
continue
}
}
for _, routeID := range serverRoutesToRemove {
oldRoute := m.serverRoutes[routeID]
err := m.removeFromServerNetwork(oldRoute)
if err != nil {
log.Errorf("unable to remove route id: %s, network %s, from server, got: %v",
oldRoute.ID, oldRoute.Network, err)
}
delete(m.serverRoutes, routeID)
}
for id, newRoute := range routesMap {
_, found := m.serverRoutes[id]
if found {
continue
}
err := m.addToServerNetwork(newRoute)
if err != nil {
log.Errorf("unable to add route %s from server, got: %v", newRoute.ID, err)
continue
}
m.serverRoutes[id] = newRoute
}
if len(m.serverRoutes) > 0 {
err := enableIPForwarding()
if err != nil {
return err
}
}
return nil
}
// UpdateRoutes compares received routes with existing routes and removes, updates or adds them to the client and server maps
func (m *DefaultManager) UpdateRoutes(updateSerial uint64, newRoutes []*route.Route) error {
select {
case <-m.ctx.Done():
log.Infof("not updating routes as context is closed")
return m.ctx.Err()
default:
m.mux.Lock()
defer m.mux.Unlock()
newClientRoutesIDMap := make(map[string][]*route.Route)
newServerRoutesMap := make(map[string]*route.Route)
for _, newRoute := range newRoutes {
// only linux is supported for now
if newRoute.Peer == m.pubKey {
if runtime.GOOS != "linux" {
log.Warnf("received a route to manage, but agent doesn't support router mode on %s OS", runtime.GOOS)
continue
}
newServerRoutesMap[newRoute.ID] = newRoute
} else {
// if the prefix is too small, let's assume it is a possible default route, which is not yet supported
// we skip managing this route
if newRoute.Network.Bits() < 7 {
log.Errorf("this agent version: %s, doesn't support default routes, received %s, skiping this route",
system.NetbirdVersion(), newRoute.Network)
continue
}
clientNetworkID := getClientNetworkID(newRoute)
newClientRoutesIDMap[clientNetworkID] = append(newClientRoutesIDMap[clientNetworkID], newRoute)
}
}
m.updateClientNetworks(updateSerial, newClientRoutesIDMap)
err := m.updateServerRoutes(newServerRoutesMap)
if err != nil {
return err
}
return nil
}
}

View File

@@ -1,370 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/iface"
"github.com/netbirdio/netbird/route"
"github.com/stretchr/testify/require"
"net/netip"
"runtime"
"testing"
)
// send 5 routes: one for the server and 4 for clients, one normal, 2 HA and one small
// on a linux host, we should have one server route in the map
// we should have 2 client network managers
// and 2 ranges in our routing table
const localPeerKey = "local"
const remotePeerKey1 = "remote1"
const remotePeerKey2 = "remote2"
func TestManagerUpdateRoutes(t *testing.T) {
testCases := []struct {
name string
inputInitRoutes []*route.Route
inputRoutes []*route.Route
inputSerial uint64
shouldCheckServerRoutes bool
serverRoutesExpected int
clientNetworkWatchersExpected int
}{
{
name: "Should create 2 client networks",
inputInitRoutes: []*route.Route{},
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.8.8/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
clientNetworkWatchersExpected: 2,
},
{
name: "Should Create 2 Server Routes",
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: localPeerKey,
Network: netip.MustParsePrefix("100.64.252.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: localPeerKey,
Network: netip.MustParsePrefix("8.8.8.9/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
shouldCheckServerRoutes: runtime.GOOS == "linux",
serverRoutesExpected: 2,
clientNetworkWatchersExpected: 0,
},
{
name: "Should Create 1 Route For Client And Server",
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: localPeerKey,
Network: netip.MustParsePrefix("100.64.30.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.9.9/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
shouldCheckServerRoutes: runtime.GOOS == "linux",
serverRoutesExpected: 1,
clientNetworkWatchersExpected: 1,
},
{
name: "Should Create 1 HA Route and 1 Standalone",
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.20.0/24"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeA",
Peer: remotePeerKey2,
Network: netip.MustParsePrefix("8.8.20.0/24"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "c",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.9.9/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
clientNetworkWatchersExpected: 2,
},
{
name: "No Small Client Route Should Be Added",
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("0.0.0.0/0"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
clientNetworkWatchersExpected: 0,
},
{
name: "No Server Routes Should Be Added To Non Linux",
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: localPeerKey,
Network: netip.MustParsePrefix("1.2.3.4/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
shouldCheckServerRoutes: runtime.GOOS != "linux",
serverRoutesExpected: 0,
clientNetworkWatchersExpected: 0,
},
{
name: "Remove 1 Client Route",
inputInitRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.8.8/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
clientNetworkWatchersExpected: 1,
},
{
name: "Update Route to HA",
inputInitRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.8.8/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeA",
Peer: remotePeerKey2,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputSerial: 1,
clientNetworkWatchersExpected: 1,
},
{
name: "Remove Client Routes",
inputInitRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.8.8/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputRoutes: []*route.Route{},
inputSerial: 1,
clientNetworkWatchersExpected: 0,
},
{
name: "Remove All Routes",
inputInitRoutes: []*route.Route{
{
ID: "a",
NetID: "routeA",
Peer: localPeerKey,
Network: netip.MustParsePrefix("100.64.251.250/30"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
{
ID: "b",
NetID: "routeB",
Peer: remotePeerKey1,
Network: netip.MustParsePrefix("8.8.8.8/32"),
NetworkType: route.IPv4Network,
Metric: 9999,
Masquerade: false,
Enabled: true,
},
},
inputRoutes: []*route.Route{},
inputSerial: 1,
shouldCheckServerRoutes: true,
serverRoutesExpected: 0,
clientNetworkWatchersExpected: 0,
},
}
for n, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
wgInterface, err := iface.NewWGIFace(fmt.Sprintf("utun43%d", n), "100.65.65.2/24", iface.DefaultMTU)
require.NoError(t, err, "should create testing WGIface interface")
defer wgInterface.Close()
err = wgInterface.Create()
require.NoError(t, err, "should create testing wireguard interface")
statusRecorder := status.NewRecorder()
ctx := context.TODO()
routeManager := NewManager(ctx, localPeerKey, wgInterface, statusRecorder)
defer routeManager.Stop()
if len(testCase.inputInitRoutes) > 0 {
err = routeManager.UpdateRoutes(testCase.inputSerial, testCase.inputInitRoutes)
require.NoError(t, err, "should update routes with init routes")
}
err = routeManager.UpdateRoutes(testCase.inputSerial+uint64(len(testCase.inputInitRoutes)), testCase.inputRoutes)
require.NoError(t, err, "should update routes")
require.Len(t, routeManager.clientNetworks, testCase.clientNetworkWatchersExpected, "client networks size should match")
if testCase.shouldCheckServerRoutes {
require.Len(t, routeManager.serverRoutes, testCase.serverRoutesExpected, "server networks size should match")
}
})
}
}

View File

@@ -1,27 +0,0 @@
package routemanager
import (
"fmt"
"github.com/netbirdio/netbird/route"
)
// MockManager is the mock instance of a route manager
type MockManager struct {
UpdateRoutesFunc func(updateSerial uint64, newRoutes []*route.Route) error
StopFunc func()
}
// UpdateRoutes mock implementation of UpdateRoutes from Manager interface
func (m *MockManager) UpdateRoutes(updateSerial uint64, newRoutes []*route.Route) error {
if m.UpdateRoutesFunc != nil {
return m.UpdateRoutesFunc(updateSerial, newRoutes)
}
return fmt.Errorf("method UpdateRoutes is not implemented")
}
// Stop mock implementation of Stop from Manager interface
func (m *MockManager) Stop() {
if m.StopFunc != nil {
m.StopFunc()
}
}
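As a point of reference, a minimal sketch of how this mock could be used in a test of code that depends on the Manager interface; the test name and the captured slice are illustrative, not part of the removed file:

package routemanager

import (
    "testing"

    "github.com/netbirdio/netbird/route"
    "github.com/stretchr/testify/require"
)

func TestConsumerWithMockManager(t *testing.T) {
    var captured []*route.Route
    mock := &MockManager{
        UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) error {
            captured = newRoutes // record what the caller passed in
            return nil
        },
        StopFunc: func() {},
    }

    // the code under test would normally receive the mock behind the Manager interface
    err := mock.UpdateRoutes(1, []*route.Route{{ID: "a", NetID: "routeA"}})
    require.NoError(t, err)
    require.Len(t, captured, 1)
    mock.Stop()
}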

View File

@@ -1,384 +0,0 @@
package routemanager
import (
"context"
"fmt"
"github.com/google/nftables/binaryutil"
"github.com/google/nftables/expr"
log "github.com/sirupsen/logrus"
"net"
"net/netip"
"sync"
)
import "github.com/google/nftables"
//
const (
nftablesTable = "netbird-rt"
nftablesRoutingForwardingChain = "netbird-rt-fwd"
nftablesRoutingNatChain = "netbird-rt-nat"
)
// constants needed to create nftables rules
const (
ipv4Len = 4
ipv4SrcOffset = 12
ipv4DestOffset = 16
ipv6Len = 16
ipv6SrcOffset = 8
ipv6DestOffset = 24
exprDirectionSource = "source"
exprDirectionDestination = "destination"
)
// some presets for building nftables rules
var (
zeroXor = binaryutil.NativeEndian.PutUint32(0)
zeroXor6 = append(binaryutil.NativeEndian.PutUint64(0), binaryutil.NativeEndian.PutUint64(0)...)
exprAllowRelatedEstablished = []expr.Any{
&expr.Ct{
Register: 1,
SourceRegister: false,
Key: 0,
},
&expr.Bitwise{
DestRegister: 1,
SourceRegister: 1,
Len: 4,
Mask: []uint8{0x6, 0x0, 0x0, 0x0},
Xor: zeroXor,
},
&expr.Cmp{
Register: 1,
Data: binaryutil.NativeEndian.PutUint32(0),
},
&expr.Counter{},
&expr.Verdict{
Kind: expr.VerdictAccept,
},
}
exprCounterAccept = []expr.Any{
&expr.Counter{},
&expr.Verdict{
Kind: expr.VerdictAccept,
},
}
)
type nftablesManager struct {
ctx context.Context
stop context.CancelFunc
conn *nftables.Conn
tableIPv4 *nftables.Table
tableIPv6 *nftables.Table
chains map[string]map[string]*nftables.Chain
rules map[string]*nftables.Rule
mux sync.Mutex
}
// CleanRoutingRules cleans existing nftables rules from the system
func (n *nftablesManager) CleanRoutingRules() {
n.mux.Lock()
defer n.mux.Unlock()
log.Debug("flushing tables")
n.conn.FlushTable(n.tableIPv6)
n.conn.FlushTable(n.tableIPv4)
log.Debugf("flushing tables result in: %v error", n.conn.Flush())
}
// RestoreOrCreateContainers restores existing nftables containers (tables and chains)
// and creates them if they don't exist
func (n *nftablesManager) RestoreOrCreateContainers() error {
n.mux.Lock()
defer n.mux.Unlock()
if n.tableIPv6 != nil && n.tableIPv4 != nil {
log.Debugf("nftables: containers already restored, skipping")
return nil
}
tables, err := n.conn.ListTables()
if err != nil {
return fmt.Errorf("nftables: unable to list tables: %v", err)
}
for _, table := range tables {
if table.Name == nftablesTable {
if table.Family == nftables.TableFamilyIPv4 {
n.tableIPv4 = table
continue
}
n.tableIPv6 = table
}
}
if n.tableIPv4 == nil {
n.tableIPv4 = n.conn.AddTable(&nftables.Table{
Name: nftablesTable,
Family: nftables.TableFamilyIPv4,
})
}
if n.tableIPv6 == nil {
n.tableIPv6 = n.conn.AddTable(&nftables.Table{
Name: nftablesTable,
Family: nftables.TableFamilyIPv6,
})
}
chains, err := n.conn.ListChains()
if err != nil {
return fmt.Errorf("nftables: unable to list chains: %v", err)
}
n.chains[ipv4] = make(map[string]*nftables.Chain)
n.chains[ipv6] = make(map[string]*nftables.Chain)
for _, chain := range chains {
switch {
case chain.Table.Name == nftablesTable && chain.Table.Family == nftables.TableFamilyIPv4:
n.chains[ipv4][chain.Name] = chain
case chain.Table.Name == nftablesTable && chain.Table.Family == nftables.TableFamilyIPv6:
n.chains[ipv6][chain.Name] = chain
}
}
if _, found := n.chains[ipv4][nftablesRoutingForwardingChain]; !found {
n.chains[ipv4][nftablesRoutingForwardingChain] = n.conn.AddChain(&nftables.Chain{
Name: nftablesRoutingForwardingChain,
Table: n.tableIPv4,
Hooknum: nftables.ChainHookForward,
Priority: nftables.ChainPriorityNATDest + 1,
Type: nftables.ChainTypeFilter,
})
}
if _, found := n.chains[ipv4][nftablesRoutingNatChain]; !found {
n.chains[ipv4][nftablesRoutingNatChain] = n.conn.AddChain(&nftables.Chain{
Name: nftablesRoutingNatChain,
Table: n.tableIPv4,
Hooknum: nftables.ChainHookPostrouting,
Priority: nftables.ChainPriorityNATSource - 1,
Type: nftables.ChainTypeNAT,
})
}
if _, found := n.chains[ipv6][nftablesRoutingForwardingChain]; !found {
n.chains[ipv6][nftablesRoutingForwardingChain] = n.conn.AddChain(&nftables.Chain{
Name: nftablesRoutingForwardingChain,
Table: n.tableIPv6,
Hooknum: nftables.ChainHookForward,
Priority: nftables.ChainPriorityNATDest + 1,
Type: nftables.ChainTypeFilter,
})
}
if _, found := n.chains[ipv6][nftablesRoutingNatChain]; !found {
n.chains[ipv6][nftablesRoutingNatChain] = n.conn.AddChain(&nftables.Chain{
Name: nftablesRoutingNatChain,
Table: n.tableIPv6,
Hooknum: nftables.ChainHookPostrouting,
Priority: nftables.ChainPriorityNATSource - 1,
Type: nftables.ChainTypeNAT,
})
}
err = n.refreshRulesMap()
if err != nil {
return err
}
n.checkOrCreateDefaultForwardingRules()
err = n.conn.Flush()
if err != nil {
return fmt.Errorf("nftables: unable to initialize table: %v", err)
}
return nil
}
// refreshRulesMap refreshes the rule map with the latest rules. This is useful to avoid
// duplicates and to get missing attributes that we don't have when adding new rules
func (n *nftablesManager) refreshRulesMap() error {
for _, registeredChains := range n.chains {
for _, chain := range registeredChains {
rules, err := n.conn.GetRules(chain.Table, chain)
if err != nil {
return fmt.Errorf("nftables: unable to list rules: %v", err)
}
for _, rule := range rules {
if len(rule.UserData) > 0 {
n.rules[string(rule.UserData)] = rule
}
}
}
}
return nil
}
// checkOrCreateDefaultForwardingRules checks if the default forwarding rules exist and creates them if missing
func (n *nftablesManager) checkOrCreateDefaultForwardingRules() {
_, foundIPv4 := n.rules[ipv4Forwarding]
if !foundIPv4 {
n.rules[ipv4Forwarding] = n.conn.AddRule(&nftables.Rule{
Table: n.tableIPv4,
Chain: n.chains[ipv4][nftablesRoutingForwardingChain],
Exprs: exprAllowRelatedEstablished,
UserData: []byte(ipv4Forwarding),
})
}
_, foundIPv6 := n.rules[ipv6Forwarding]
if !foundIPv6 {
n.rules[ipv6Forwarding] = n.conn.AddRule(&nftables.Rule{
Table: n.tableIPv6,
Chain: n.chains[ipv6][nftablesRoutingForwardingChain],
Exprs: exprAllowRelatedEstablished,
UserData: []byte(ipv6Forwarding),
})
}
}
// InsertRoutingRules inserts an nftables rule pair into the forwarding chain and, if masquerade is enabled, into the NAT chain
func (n *nftablesManager) InsertRoutingRules(pair routerPair) error {
n.mux.Lock()
defer n.mux.Unlock()
prefix := netip.MustParsePrefix(pair.source)
sourceExp := generateCIDRMatcherExpressions("source", pair.source)
destExp := generateCIDRMatcherExpressions("destination", pair.destination)
forwardExp := append(sourceExp, append(destExp, exprCounterAccept...)...)
fwdKey := genKey(forwardingFormat, pair.ID)
if prefix.Addr().Unmap().Is4() {
n.rules[fwdKey] = n.conn.InsertRule(&nftables.Rule{
Table: n.tableIPv4,
Chain: n.chains[ipv4][nftablesRoutingForwardingChain],
Exprs: forwardExp,
UserData: []byte(fwdKey),
})
} else {
n.rules[fwdKey] = n.conn.InsertRule(&nftables.Rule{
Table: n.tableIPv6,
Chain: n.chains[ipv6][nftablesRoutingForwardingChain],
Exprs: forwardExp,
UserData: []byte(fwdKey),
})
}
if pair.masquerade {
natExp := append(sourceExp, append(destExp, &expr.Counter{}, &expr.Masq{})...)
natKey := genKey(natFormat, pair.ID)
if prefix.Addr().Unmap().Is4() {
n.rules[natKey] = n.conn.InsertRule(&nftables.Rule{
Table: n.tableIPv4,
Chain: n.chains[ipv4][nftablesRoutingNatChain],
Exprs: natExp,
UserData: []byte(natKey),
})
} else {
n.rules[natKey] = n.conn.InsertRule(&nftables.Rule{
Table: n.tableIPv6,
Chain: n.chains[ipv6][nftablesRoutingNatChain],
Exprs: natExp,
UserData: []byte(natKey),
})
}
}
err := n.conn.Flush()
if err != nil {
return fmt.Errorf("nftables: unable to insert rules for %s: %v", pair.destination, err)
}
return nil
}
// RemoveRoutingRules removes an nftables rule pair from the forwarding and NAT chains
func (n *nftablesManager) RemoveRoutingRules(pair routerPair) error {
n.mux.Lock()
defer n.mux.Unlock()
err := n.refreshRulesMap()
if err != nil {
return err
}
fwdKey := genKey(forwardingFormat, pair.ID)
natKey := genKey(natFormat, pair.ID)
fwdRule, found := n.rules[fwdKey]
if found {
err = n.conn.DelRule(fwdRule)
if err != nil {
return fmt.Errorf("nftables: unable to remove forwarding rule for %s: %v", pair.destination, err)
}
log.Debugf("nftables: removing forwarding rule for %s", pair.destination)
delete(n.rules, fwdKey)
}
natRule, found := n.rules[natKey]
if found {
err = n.conn.DelRule(natRule)
if err != nil {
return fmt.Errorf("nftables: unable to remove nat rule for %s: %v", pair.destination, err)
}
log.Debugf("nftables: removing nat rule for %s", pair.destination)
delete(n.rules, natKey)
}
err = n.conn.Flush()
if err != nil {
return fmt.Errorf("nftables: received error while applying rule removal for %s: %v", pair.destination, err)
}
log.Debugf("nftables: removed rules for %s", pair.destination)
return nil
}
// getPayloadDirectives returns the payload offset, length, and XOR mask based on IP version and direction
func getPayloadDirectives(direction string, isIPv4 bool, isIPv6 bool) (uint32, uint32, []byte) {
switch {
case direction == exprDirectionSource && isIPv4:
return ipv4SrcOffset, ipv4Len, zeroXor
case direction == exprDirectionDestination && isIPv4:
return ipv4DestOffset, ipv4Len, zeroXor
case direction == exprDirectionSource && isIPv6:
return ipv6SrcOffset, ipv6Len, zeroXor6
case direction == exprDirectionDestination && isIPv6:
return ipv6DestOffset, ipv6Len, zeroXor6
default:
panic("no matched payload directive")
}
}
// generateCIDRMatcherExpressions generates nftables expressions that match a CIDR
func generateCIDRMatcherExpressions(direction string, cidr string) []expr.Any {
ip, network, _ := net.ParseCIDR(cidr)
ipToAdd, _ := netip.AddrFromSlice(ip)
add := ipToAdd.Unmap()
offSet, packetLen, zeroXor := getPayloadDirectives(direction, add.Is4(), add.Is6())
return []expr.Any{
// fetch the source or destination address from the network header
&expr.Payload{
DestRegister: 1,
Base: expr.PayloadBaseNetworkHeader,
Offset: offSet,
Len: packetLen,
},
// net mask
&expr.Bitwise{
DestRegister: 1,
SourceRegister: 1,
Len: packetLen,
Mask: network.Mask,
Xor: zeroXor,
},
// net address
&expr.Cmp{
Register: 1,
Data: add.AsSlice(),
},
}
}
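To show how the pieces above fit together, a hedged sketch that creates the containers and installs, then removes, a masqueraded routing pair; it assumes a Linux host with nftables support and root privileges, and the addresses and ID are made up:

package routemanager

import (
    "context"

    "github.com/google/nftables"
    log "github.com/sirupsen/logrus"
)

func exampleInsertAndRemoveRoutingRules() error {
    ctx, cancel := context.WithCancel(context.Background())
    manager := &nftablesManager{
        ctx:    ctx,
        stop:   cancel,
        conn:   &nftables.Conn{},
        chains: make(map[string]map[string]*nftables.Chain),
        rules:  make(map[string]*nftables.Rule),
    }
    defer manager.CleanRoutingRules()

    if err := manager.RestoreOrCreateContainers(); err != nil {
        return err
    }

    pair := routerPair{
        ID:          "example",
        source:      "100.64.0.1/32",
        destination: "10.10.0.0/24",
        masquerade:  true,
    }
    if err := manager.InsertRoutingRules(pair); err != nil {
        return err
    }
    log.Infof("installed forwarding and NAT rules for %s", pair.destination)

    return manager.RemoveRoutingRules(pair)
}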

View File

@@ -1,270 +0,0 @@
package routemanager
import (
"context"
"github.com/google/nftables"
"github.com/google/nftables/expr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
)
func TestNftablesManager_RestoreOrCreateContainers(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
manager := &nftablesManager{
ctx: ctx,
stop: cancel,
conn: &nftables.Conn{},
chains: make(map[string]map[string]*nftables.Chain),
rules: make(map[string]*nftables.Rule),
}
nftablesTestingClient := &nftables.Conn{}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
require.Len(t, manager.chains, 2, "should have created chains for ipv4 and ipv6")
require.Len(t, manager.chains[ipv4], 2, "should have created chains for ipv4")
require.Len(t, manager.chains[ipv4], 2, "should have created chains for ipv6")
require.Len(t, manager.rules, 2, "should have created rules for ipv4 and ipv6")
pair := routerPair{
ID: "abc",
source: "100.100.100.1/32",
destination: "100.100.100.0/24",
masquerade: true,
}
sourceExp := generateCIDRMatcherExpressions("source", pair.source)
destExp := generateCIDRMatcherExpressions("destination", pair.destination)
forward4Exp := append(sourceExp, append(destExp, exprCounterAccept...)...)
forward4RuleKey := genKey(forwardingFormat, pair.ID)
inserted4Forwarding := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: manager.tableIPv4,
Chain: manager.chains[ipv4][nftablesRoutingForwardingChain],
Exprs: forward4Exp,
UserData: []byte(forward4RuleKey),
})
nat4Exp := append(sourceExp, append(destExp, &expr.Counter{}, &expr.Masq{})...)
nat4RuleKey := genKey(natFormat, pair.ID)
inserted4Nat := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: manager.tableIPv4,
Chain: manager.chains[ipv4][nftablesRoutingNatChain],
Exprs: nat4Exp,
UserData: []byte(nat4RuleKey),
})
err = nftablesTestingClient.Flush()
require.NoError(t, err, "shouldn't return error")
pair = routerPair{
ID: "xyz",
source: "fc00::1/128",
destination: "fc11::/64",
masquerade: true,
}
sourceExp = generateCIDRMatcherExpressions("source", pair.source)
destExp = generateCIDRMatcherExpressions("destination", pair.destination)
forward6Exp := append(sourceExp, append(destExp, exprCounterAccept...)...)
forward6RuleKey := genKey(forwardingFormat, pair.ID)
inserted6Forwarding := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: manager.tableIPv6,
Chain: manager.chains[ipv6][nftablesRoutingForwardingChain],
Exprs: forward6Exp,
UserData: []byte(forward6RuleKey),
})
nat6Exp := append(sourceExp, append(destExp, &expr.Counter{}, &expr.Masq{})...)
nat6RuleKey := genKey(natFormat, pair.ID)
inserted6Nat := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: manager.tableIPv6,
Chain: manager.chains[ipv6][nftablesRoutingNatChain],
Exprs: nat6Exp,
UserData: []byte(nat6RuleKey),
})
err = nftablesTestingClient.Flush()
require.NoError(t, err, "shouldn't return error")
manager.tableIPv4 = nil
manager.tableIPv6 = nil
err = manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
require.Len(t, manager.chains, 2, "should have created chains for ipv4 and ipv6")
require.Len(t, manager.chains[ipv4], 2, "should have created chains for ipv4")
require.Len(t, manager.chains[ipv4], 2, "should have created chains for ipv6")
require.Len(t, manager.rules, 6, "should have restored all rules for ipv4 and ipv6")
foundRule, found := manager.rules[forward4RuleKey]
require.True(t, found, "forwarding rule should exist in the map")
assert.Equal(t, inserted4Forwarding.Exprs, foundRule.Exprs, "stored forwarding rule expressions should match")
foundRule, found = manager.rules[nat4RuleKey]
require.True(t, found, "nat rule should exist in the map")
// compare only the returned length, as the nftables client doesn't return the masquerade expression
assert.ElementsMatch(t, inserted4Nat.Exprs[:len(foundRule.Exprs)], foundRule.Exprs, "stored nat rule expressions should match")
foundRule, found = manager.rules[forward6RuleKey]
require.True(t, found, "forwarding rule should exist in the map")
assert.Equal(t, inserted6Forwarding.Exprs, foundRule.Exprs, "stored forward rule should match")
foundRule, found = manager.rules[nat6RuleKey]
require.True(t, found, "nat rule should exist in the map")
// compare only the returned length, as the nftables client doesn't return the masquerade expression
assert.ElementsMatch(t, inserted6Nat.Exprs[:len(foundRule.Exprs)], foundRule.Exprs, "stored nat rule should match")
}
func TestNftablesManager_InsertRoutingRules(t *testing.T) {
for _, testCase := range insertRuleTestCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
manager := &nftablesManager{
ctx: ctx,
stop: cancel,
conn: &nftables.Conn{},
chains: make(map[string]map[string]*nftables.Chain),
rules: make(map[string]*nftables.Rule),
}
nftablesTestingClient := &nftables.Conn{}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
err = manager.InsertRoutingRules(testCase.inputPair)
require.NoError(t, err, "forwarding pair should be inserted")
sourceExp := generateCIDRMatcherExpressions("source", testCase.inputPair.source)
destExp := generateCIDRMatcherExpressions("destination", testCase.inputPair.destination)
testingExpression := append(sourceExp, destExp...)
fwdRuleKey := genKey(forwardingFormat, testCase.inputPair.ID)
found := 0
for _, registeredChains := range manager.chains {
for _, chain := range registeredChains {
rules, err := nftablesTestingClient.GetRules(chain.Table, chain)
require.NoError(t, err, "should list rules for %s table and %s chain", chain.Table.Name, chain.Name)
for _, rule := range rules {
if len(rule.UserData) > 0 && string(rule.UserData) == fwdRuleKey {
require.ElementsMatchf(t, rule.Exprs[:len(testingExpression)], testingExpression, "forwarding rule elements should match")
found = 1
}
}
}
}
require.Equal(t, 1, found, "should find at least 1 rule to test")
if testCase.inputPair.masquerade {
natRuleKey := genKey(natFormat, testCase.inputPair.ID)
found := 0
for _, registeredChains := range manager.chains {
for _, chain := range registeredChains {
rules, err := nftablesTestingClient.GetRules(chain.Table, chain)
require.NoError(t, err, "should list rules for %s table and %s chain", chain.Table.Name, chain.Name)
for _, rule := range rules {
if len(rule.UserData) > 0 && string(rule.UserData) == natRuleKey {
require.ElementsMatchf(t, rule.Exprs[:len(testingExpression)], testingExpression, "nat rule elements should match")
found = 1
}
}
}
}
require.Equal(t, 1, found, "should find at least 1 rule to test")
}
})
}
}
func TestNftablesManager_RemoveRoutingRules(t *testing.T) {
for _, testCase := range removeRuleTestCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
manager := &nftablesManager{
ctx: ctx,
stop: cancel,
conn: &nftables.Conn{},
chains: make(map[string]map[string]*nftables.Chain),
rules: make(map[string]*nftables.Rule),
}
nftablesTestingClient := &nftables.Conn{}
defer manager.CleanRoutingRules()
err := manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
table := manager.tableIPv4
if testCase.ipVersion == ipv6 {
table = manager.tableIPv6
}
sourceExp := generateCIDRMatcherExpressions("source", testCase.inputPair.source)
destExp := generateCIDRMatcherExpressions("destination", testCase.inputPair.destination)
forwardExp := append(sourceExp, append(destExp, exprCounterAccept...)...)
forwardRuleKey := genKey(forwardingFormat, testCase.inputPair.ID)
insertedForwarding := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: table,
Chain: manager.chains[testCase.ipVersion][nftablesRoutingForwardingChain],
Exprs: forwardExp,
UserData: []byte(forwardRuleKey),
})
natExp := append(sourceExp, append(destExp, &expr.Counter{}, &expr.Masq{})...)
natRuleKey := genKey(natFormat, testCase.inputPair.ID)
insertedNat := nftablesTestingClient.InsertRule(&nftables.Rule{
Table: table,
Chain: manager.chains[testCase.ipVersion][nftablesRoutingNatChain],
Exprs: natExp,
UserData: []byte(natRuleKey),
})
err = nftablesTestingClient.Flush()
require.NoError(t, err, "shouldn't return error")
manager.tableIPv4 = nil
manager.tableIPv6 = nil
err = manager.RestoreOrCreateContainers()
require.NoError(t, err, "shouldn't return error")
err = manager.RemoveRoutingRules(testCase.inputPair)
require.NoError(t, err, "shouldn't return error")
for _, registeredChains := range manager.chains {
for _, chain := range registeredChains {
rules, err := nftablesTestingClient.GetRules(chain.Table, chain)
require.NoError(t, err, "should list rules for %s table and %s chain", chain.Table.Name, chain.Name)
for _, rule := range rules {
if len(rule.UserData) > 0 {
require.NotEqual(t, insertedForwarding.UserData, rule.UserData, "forwarding rule should no longer exist")
require.NotEqual(t, insertedNat.UserData, rule.UserData, "nat rule should no longer exist")
}
}
}
}
})
}
}

View File

@@ -1,67 +0,0 @@
package routemanager
import (
"github.com/netbirdio/netbird/route"
log "github.com/sirupsen/logrus"
"net/netip"
"sync"
)
type serverRouter struct {
routes map[string]*route.Route
// best effort to keep net forward configuration as it was
netForwardHistoryEnabled bool
mux sync.Mutex
firewall firewallManager
}
type routerPair struct {
ID string
source string
destination string
masquerade bool
}
func routeToRouterPair(source string, route *route.Route) routerPair {
parsed := netip.MustParsePrefix(source).Masked()
return routerPair{
ID: route.ID,
source: parsed.String(),
destination: route.Network.Masked().String(),
masquerade: route.Masquerade,
}
}
func (m *DefaultManager) removeFromServerNetwork(route *route.Route) error {
select {
case <-m.ctx.Done():
log.Infof("not removing from server network because context is done")
return m.ctx.Err()
default:
m.serverRouter.mux.Lock()
defer m.serverRouter.mux.Unlock()
err := m.serverRouter.firewall.RemoveRoutingRules(routeToRouterPair(m.wgInterface.Address.String(), route))
if err != nil {
return err
}
delete(m.serverRouter.routes, route.ID)
return nil
}
}
func (m *DefaultManager) addToServerNetwork(route *route.Route) error {
select {
case <-m.ctx.Done():
log.Infof("not adding to server network because context is done")
return m.ctx.Err()
default:
m.serverRouter.mux.Lock()
defer m.serverRouter.mux.Unlock()
err := m.serverRouter.firewall.InsertRoutingRules(routeToRouterPair(m.wgInterface.Address.String(), route))
if err != nil {
return err
}
m.serverRouter.routes[route.ID] = route
return nil
}
}
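A small, hypothetical illustration of what routeToRouterPair produces for a typical WireGuard interface address and route; the addresses are made up:

package routemanager

import (
    "fmt"
    "net/netip"

    "github.com/netbirdio/netbird/route"
)

func exampleRouteToRouterPair() {
    r := &route.Route{
        ID:         "a",
        Network:    netip.MustParsePrefix("10.10.0.5/24"),
        Masquerade: true,
    }
    // the local WireGuard interface address becomes the source of the pair,
    // and both source and destination are masked to their network addresses
    pair := routeToRouterPair("100.65.65.2/24", r)
    fmt.Printf("%+v\n", pair) // {ID:a source:100.65.65.0/24 destination:10.10.0.0/24 masquerade:true}
}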

View File

@@ -1,55 +0,0 @@
package routemanager
import (
"fmt"
"github.com/libp2p/go-netroute"
log "github.com/sirupsen/logrus"
"net"
"net/netip"
)
var errRouteNotFound = fmt.Errorf("route not found")
func addToRouteTableIfNoExists(prefix netip.Prefix, addr string) error {
gateway, err := getExistingRIBRouteGateway(netip.MustParsePrefix("0.0.0.0/0"))
if err != nil && err != errRouteNotFound {
return err
}
prefixGateway, err := getExistingRIBRouteGateway(prefix)
if err != nil && err != errRouteNotFound {
return err
}
if prefixGateway != nil && !prefixGateway.Equal(gateway) {
log.Warnf("route for network %s already exist and is pointing to the gateway: %s, won't add another one", prefix, prefixGateway)
return nil
}
return addToRouteTable(prefix, addr)
}
func removeFromRouteTableIfNonSystem(prefix netip.Prefix, addr string) error {
addrIP := net.ParseIP(addr)
prefixGateway, err := getExistingRIBRouteGateway(prefix)
if err != nil {
return err
}
if prefixGateway != nil && !prefixGateway.Equal(addrIP) {
log.Warnf("route for network %s is pointing to a different gateway: %s, should be pointing to: %s, not removing", prefix, prefixGateway, addrIP)
return nil
}
return removeFromRouteTable(prefix)
}
func getExistingRIBRouteGateway(prefix netip.Prefix) (net.IP, error) {
r, err := netroute.New()
if err != nil {
return nil, err
}
_, _, localGatewayAddress, err := r.Route(prefix.Addr().AsSlice())
if err != nil {
log.Errorf("getting routes returned an error: %v", err)
return nil, errRouteNotFound
}
return localGatewayAddress, nil
}
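For context, a hedged sketch of the intended call pattern: add a prefix pointing at the local WireGuard interface address and remove it again later; it assumes sufficient privileges to modify the routing table, and the addresses are placeholders:

package routemanager

import (
    "net/netip"

    log "github.com/sirupsen/logrus"
)

func exampleRouteTableUsage() {
    prefix := netip.MustParsePrefix("100.66.120.0/24")
    gateway := "100.65.75.2" // address of the local WireGuard interface

    if err := addToRouteTableIfNoExists(prefix, gateway); err != nil {
        log.Errorf("adding route failed: %v", err)
        return
    }
    // traffic to 100.66.120.0/24 is now routed through the WireGuard interface
    if err := removeFromRouteTableIfNonSystem(prefix, gateway); err != nil {
        log.Errorf("removing route failed: %v", err)
    }
}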

View File

@@ -1,73 +0,0 @@
package routemanager
import (
"github.com/vishvananda/netlink"
"io/ioutil"
"net"
"net/netip"
)
const ipv4ForwardingPath = "/proc/sys/net/ipv4/ip_forward"
func addToRouteTable(prefix netip.Prefix, addr string) error {
_, ipNet, err := net.ParseCIDR(prefix.String())
if err != nil {
return err
}
addrMask := "/32"
if prefix.Addr().Unmap().Is6() {
addrMask = "/128"
}
ip, _, err := net.ParseCIDR(addr + addrMask)
if err != nil {
return err
}
route := &netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
Dst: ipNet,
Gw: ip,
}
err = netlink.RouteAdd(route)
if err != nil {
return err
}
return nil
}
func removeFromRouteTable(prefix netip.Prefix) error {
_, ipNet, err := net.ParseCIDR(prefix.String())
if err != nil {
return err
}
route := &netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
Dst: ipNet,
}
err = netlink.RouteDel(route)
if err != nil {
return err
}
return nil
}
func enableIPForwarding() error {
err := ioutil.WriteFile(ipv4ForwardingPath, []byte("1"), 0644)
return err
}
func isNetForwardHistoryEnabled() bool {
out, err := ioutil.ReadFile(ipv4ForwardingPath)
if err != nil {
// todo
panic(err)
}
return string(out) == "1"
}
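One caveat worth flagging: reads from /proc/sys/net/ipv4/ip_forward usually include a trailing newline, so the strict equality check against "1" above can report forwarding as disabled even when it is enabled. A hedged sketch of a more tolerant check (a hypothetical helper, not part of the removed file):

package routemanager

import (
    "io/ioutil"
    "strings"
)

// isIPForwardingEnabled is a hypothetical helper that trims whitespace before
// comparing, so a value of "1\n" is still recognised as enabled.
func isIPForwardingEnabled() (bool, error) {
    out, err := ioutil.ReadFile(ipv4ForwardingPath)
    if err != nil {
        return false, err
    }
    return strings.TrimSpace(string(out)) == "1", nil
}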

View File

@@ -1,41 +0,0 @@
//go:build !linux
// +build !linux
package routemanager
import (
log "github.com/sirupsen/logrus"
"net/netip"
"os/exec"
"runtime"
)
func addToRouteTable(prefix netip.Prefix, addr string) error {
cmd := exec.Command("route", "add", prefix.String(), addr)
out, err := cmd.Output()
if err != nil {
return err
}
log.Debugf(string(out))
return nil
}
func removeFromRouteTable(prefix netip.Prefix) error {
cmd := exec.Command("route", "delete", prefix.String())
out, err := cmd.Output()
if err != nil {
return err
}
log.Debugf(string(out))
return nil
}
func enableIPForwarding() error {
log.Infof("enable IP forwarding is not implemented on %s", runtime.GOOS)
return nil
}
func isNetForwardHistoryEnabled() bool {
log.Infof("check netforwad history is not implemented on %s", runtime.GOOS)
return false
}

View File

@@ -1,68 +0,0 @@
package routemanager
import (
"fmt"
"github.com/netbirdio/netbird/iface"
"github.com/stretchr/testify/require"
"net/netip"
"testing"
)
func TestAddRemoveRoutes(t *testing.T) {
testCases := []struct {
name string
prefix netip.Prefix
shouldRouteToWireguard bool
shouldBeRemoved bool
}{
{
name: "Should Add And Remove Route",
prefix: netip.MustParsePrefix("100.66.120.0/24"),
shouldRouteToWireguard: true,
shouldBeRemoved: true,
},
{
name: "Should Not Add Or Remove Route",
prefix: netip.MustParsePrefix("127.0.0.1/32"),
shouldRouteToWireguard: false,
shouldBeRemoved: false,
},
}
for n, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
wgInterface, err := iface.NewWGIFace(fmt.Sprintf("utun53%d", n), "100.65.75.2/24", iface.DefaultMTU)
require.NoError(t, err, "should create testing WGIface interface")
defer wgInterface.Close()
err = wgInterface.Create()
require.NoError(t, err, "should create testing wireguard interface")
err = addToRouteTableIfNoExists(testCase.prefix, wgInterface.GetAddress().IP.String())
require.NoError(t, err, "should not return err")
prefixGateway, err := getExistingRIBRouteGateway(testCase.prefix)
require.NoError(t, err, "should not return err")
if testCase.shouldRouteToWireguard {
require.Equal(t, wgInterface.GetAddress().IP.String(), prefixGateway.String(), "route should point to wireguard interface IP")
} else {
require.NotEqual(t, wgInterface.GetAddress().IP.String(), prefixGateway.String(), "route should point to a different interface")
}
err = removeFromRouteTableIfNonSystem(testCase.prefix, wgInterface.GetAddress().IP.String())
require.NoError(t, err, "should not return err")
prefixGateway, err = getExistingRIBRouteGateway(testCase.prefix)
require.NoError(t, err, "should not return err")
internetGateway, err := getExistingRIBRouteGateway(netip.MustParsePrefix("0.0.0.0/0"))
require.NoError(t, err)
if testCase.shouldBeRemoved {
require.Equal(t, internetGateway, prefixGateway, "route should be pointing to default internet gateway")
} else {
require.NotEqual(t, internetGateway, prefixGateway, "route should be pointing to a different gateway than the internet gateway")
}
})
}
}

View File

@@ -1,69 +0,0 @@
package internal
import (
"context"
"sync"
)
type StatusType string
const (
StatusIdle StatusType = "Idle"
StatusConnecting StatusType = "Connecting"
StatusConnected StatusType = "Connected"
StatusNeedsLogin StatusType = "NeedsLogin"
StatusLoginFailed StatusType = "LoginFailed"
)
// CtxInitState sets up the context state in the context tree.
//
// This function should be used to initialize the context before
// CtxGetState is executed.
func CtxInitState(ctx context.Context) context.Context {
return context.WithValue(ctx, stateCtx, &contextState{
status: StatusIdle,
})
}
// CtxGetState returns the object used to get/update the state and errors of the process.
func CtxGetState(ctx context.Context) *contextState {
return ctx.Value(stateCtx).(*contextState)
}
type contextState struct {
err error
status StatusType
mutex sync.Mutex
}
func (c *contextState) Set(update StatusType) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.status = update
c.err = nil
}
func (c *contextState) Status() (StatusType, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.err != nil {
return "", c.err
}
return c.status, nil
}
func (c *contextState) Wrap(err error) error {
c.mutex.Lock()
defer c.mutex.Unlock()
c.err = err
return err
}
type stateKey int
var stateCtx stateKey
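A brief illustration of the intended call order: initialize the state near process start, then read or update it deeper in the call tree; the surrounding function is made up:

package internal

import (
    "context"
    "fmt"
)

func exampleStateUsage() error {
    ctx := CtxInitState(context.Background())

    // somewhere deeper in the call tree
    state := CtxGetState(ctx)
    state.Set(StatusConnecting)

    status, err := state.Status()
    if err != nil {
        return state.Wrap(err)
    }
    fmt.Println("current status:", status) // Connecting
    return nil
}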

131
client/internal/wgproxy.go Normal file
View File

@@ -0,0 +1,131 @@
package internal
import (
ice "github.com/pion/ice/v2"
log "github.com/sirupsen/logrus"
"github.com/wiretrustee/wiretrustee/iface"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"net"
)
// WgProxy is an instance of the connection WireGuard proxy
type WgProxy struct {
iface string
remoteKey string
allowedIps string
wgAddr string
close chan struct{}
wgConn net.Conn
preSharedKey *wgtypes.Key
}
// NewWgProxy creates a new Connection Wireguard Proxy
func NewWgProxy(iface string, remoteKey string, allowedIps string, wgAddr string, preSharedKey *wgtypes.Key) *WgProxy {
return &WgProxy{
iface: iface,
remoteKey: remoteKey,
allowedIps: allowedIps,
wgAddr: wgAddr,
close: make(chan struct{}),
preSharedKey: preSharedKey,
}
}
// Close closes the proxy
func (p *WgProxy) Close() error {
close(p.close)
if c := p.wgConn; c != nil {
err := p.wgConn.Close()
if err != nil {
return err
}
}
err := iface.RemovePeer(p.iface, p.remoteKey)
if err != nil {
return err
}
return nil
}
// StartLocal configures the interface with a peer using a direct IP:port endpoint to the remote host
func (p *WgProxy) StartLocal(host string) error {
err := iface.UpdatePeer(p.iface, p.remoteKey, p.allowedIps, DefaultWgKeepAlive, host, p.preSharedKey)
if err != nil {
log.Errorf("error while configuring Wireguard peer [%s] %s", p.remoteKey, err.Error())
return err
}
return nil
}
// Start starts a new proxy using the ICE connection
func (p *WgProxy) Start(remoteConn *ice.Conn) error {
wgConn, err := net.Dial("udp", p.wgAddr)
if err != nil {
log.Fatalf("failed dialing to local Wireguard port %s", err)
return err
}
p.wgConn = wgConn
// add local proxy connection as a Wireguard peer
err = iface.UpdatePeer(p.iface, p.remoteKey, p.allowedIps, DefaultWgKeepAlive,
wgConn.LocalAddr().String(), p.preSharedKey)
if err != nil {
log.Errorf("error while configuring Wireguard peer [%s] %s", p.remoteKey, err.Error())
return err
}
go func() { p.proxyToRemotePeer(remoteConn) }()
go func() { p.proxyToLocalWireguard(remoteConn) }()
return err
}
// proxyToRemotePeer proxies everything from Wireguard to the remote peer
// blocks
func (p *WgProxy) proxyToRemotePeer(remoteConn *ice.Conn) {
buf := make([]byte, 1500)
for {
select {
case <-p.close:
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.remoteKey)
return
default:
n, err := p.wgConn.Read(buf)
if err != nil {
continue
}
_, err = remoteConn.Write(buf[:n])
if err != nil {
continue
}
}
}
}
// proxyToLocalWireguard proxies everything from the remote peer to local Wireguard
// blocks
func (p *WgProxy) proxyToLocalWireguard(remoteConn *ice.Conn) {
buf := make([]byte, 1500)
for {
select {
case <-p.close:
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.remoteKey)
return
default:
n, err := remoteConn.Read(buf)
if err != nil {
continue
}
_, err = p.wgConn.Write(buf[:n])
if err != nil {
continue
}
}
}
}
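A hedged sketch of driving the proxy for a direct (non-relayed) connection; the interface name, keys and endpoints are placeholders, and in the relayed case the engine would call Start with an established ICE connection instead:

package internal

import (
    log "github.com/sirupsen/logrus"
)

func exampleDirectProxy() {
    proxy := NewWgProxy(
        "wt0",                    // local WireGuard interface name (placeholder)
        "remote-peer-public-key", // placeholder public key
        "100.64.0.2/32",          // allowed IPs for the peer
        "127.0.0.1:51820",        // local WireGuard listen address
        nil,                      // no pre-shared key
    )
    defer func() {
        if err := proxy.Close(); err != nil {
            log.Errorf("closing proxy: %v", err)
        }
    }()

    // direct case: point the peer straight at the remote host's public endpoint
    if err := proxy.StartLocal("203.0.113.10:51820"); err != nil {
        log.Errorf("starting local proxy: %v", err)
    }
}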

View File

@@ -1,11 +1,15 @@
package main
import (
"github.com/netbirdio/netbird/client/cmd"
"github.com/wiretrustee/wiretrustee/client/cmd"
"os"
)
var version = "development"
func main() {
cmd.Version = version
if err := cmd.Execute(); err != nil {
os.Exit(1)
}

View File

@@ -3,10 +3,10 @@
<assemblyIdentity
version="0.0.0.1"
processorArchitecture="*"
name="netbird.exe"
name="wiretrustee.exe"
type="win32"
/>
<description>Netbird application</description>
<description>Wiretrustee application</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>

File diff suppressed because it is too large Load Diff

View File

@@ -1,134 +0,0 @@
syntax = "proto3";
import "google/protobuf/descriptor.proto";
import "google/protobuf/timestamp.proto";
option go_package = "/proto";
package daemon;
service DaemonService {
// Login uses setup key to prepare configuration for the daemon.
rpc Login(LoginRequest) returns (LoginResponse) {}
// WaitSSOLogin uses the userCode to validate the TokenInfo and
// waits for the user to continue with the login on a browser
rpc WaitSSOLogin(WaitSSOLoginRequest) returns (WaitSSOLoginResponse) {}
// Up starts engine work in the daemon.
rpc Up(UpRequest) returns (UpResponse) {}
// Status of the service.
rpc Status(StatusRequest) returns (StatusResponse) {}
// Down engine work in the daemon.
rpc Down(DownRequest) returns (DownResponse) {}
// GetConfig of the daemon.
rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {}
};
message LoginRequest {
// setupKey wiretrustee setup key.
string setupKey = 1;
// preSharedKey for wireguard setup.
string preSharedKey = 2;
// managementUrl to authenticate.
string managementUrl = 3;
// adminUrl to manage keys.
string adminURL = 4;
}
message LoginResponse {
bool needsSSOLogin = 1;
string userCode = 2;
string verificationURI = 3;
string verificationURIComplete = 4;
}
message WaitSSOLoginRequest {
string userCode = 1;
}
message WaitSSOLoginResponse {}
message UpRequest {}
message UpResponse {}
message StatusRequest{
bool getFullPeerStatus = 1;
}
message StatusResponse{
// status of the server.
string status = 1;
FullStatus fullStatus = 2;
// NetBird daemon version
string daemonVersion = 3;
}
message DownRequest {}
message DownResponse {}
message GetConfigRequest {}
message GetConfigResponse {
// managementUrl settings value.
string managementUrl = 1;
// configFile settings value.
string configFile = 2;
// logFile settings value.
string logFile = 3;
// preSharedKey settings value.
string preSharedKey = 4;
// adminURL settings value.
string adminURL = 5;
}
// PeerState contains the latest state of a peer
message PeerState {
string IP = 1;
string pubKey = 2;
string connStatus = 3;
google.protobuf.Timestamp connStatusUpdate = 4;
bool relayed = 5;
bool direct = 6;
string localIceCandidateType = 7;
string remoteIceCandidateType = 8;
}
// LocalPeerState contains the latest state of the local peer
message LocalPeerState {
string IP = 1;
string pubKey = 2;
bool kernelInterface = 3;
}
// SignalState contains the latest state of a signal connection
message SignalState {
string URL = 1;
bool connected = 2;
}
// ManagementState contains the latest state of a management connection
message ManagementState {
string URL = 1;
bool connected = 2;
}
// FullStatus contains the full state held by the Status instance
message FullStatus {
ManagementState managementState = 1;
SignalState signalState = 2;
LocalPeerState localPeerState = 3;
repeated PeerState peers = 4;
}
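For context, a hedged example of how a CLI could call the daemon's Status RPC through the generated Go client; the daemon socket address is illustrative and error handling is kept minimal:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/netbirdio/netbird/client/proto"
    "google.golang.org/grpc"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    // assumed daemon socket address; adjust to the actual platform default
    conn, err := grpc.DialContext(ctx, "unix:///var/run/netbird.sock", grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        log.Fatalf("dialing daemon: %v", err)
    }
    defer conn.Close()

    client := proto.NewDaemonServiceClient(conn)
    resp, err := client.Status(ctx, &proto.StatusRequest{GetFullPeerStatus: true})
    if err != nil {
        log.Fatalf("status call failed: %v", err)
    }
    fmt.Println("daemon status:", resp.GetStatus())
}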

View File

@@ -1,295 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package proto
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// DaemonServiceClient is the client API for DaemonService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DaemonServiceClient interface {
// Login uses setup key to prepare configuration for the daemon.
Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error)
// WaitSSOLogin uses the userCode to validate the TokenInfo and
// waits for the user to continue with the login on a browser
WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error)
// Up starts engine work in the daemon.
Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error)
// Status of the service.
Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
// Down engine work in the daemon.
Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error)
// GetConfig of the daemon.
GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error)
}
type daemonServiceClient struct {
cc grpc.ClientConnInterface
}
func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient {
return &daemonServiceClient{cc}
}
func (c *daemonServiceClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) {
out := new(LoginResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/Login", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) WaitSSOLogin(ctx context.Context, in *WaitSSOLoginRequest, opts ...grpc.CallOption) (*WaitSSOLoginResponse, error) {
out := new(WaitSSOLoginResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/WaitSSOLogin", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) Up(ctx context.Context, in *UpRequest, opts ...grpc.CallOption) (*UpResponse, error) {
out := new(UpResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/Up", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
out := new(StatusResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/Status", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) Down(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (*DownResponse, error) {
out := new(DownResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/Down", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) {
out := new(GetConfigResponse)
err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DaemonServiceServer is the server API for DaemonService service.
// All implementations must embed UnimplementedDaemonServiceServer
// for forward compatibility
type DaemonServiceServer interface {
// Login uses setup key to prepare configuration for the daemon.
Login(context.Context, *LoginRequest) (*LoginResponse, error)
// WaitSSOLogin uses the userCode to validate the TokenInfo and
// waits for the user to continue with the login on a browser
WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error)
// Up starts engine work in the daemon.
Up(context.Context, *UpRequest) (*UpResponse, error)
// Status of the service.
Status(context.Context, *StatusRequest) (*StatusResponse, error)
// Down engine work in the daemon.
Down(context.Context, *DownRequest) (*DownResponse, error)
// GetConfig of the daemon.
GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error)
mustEmbedUnimplementedDaemonServiceServer()
}
// UnimplementedDaemonServiceServer must be embedded to have forward compatible implementations.
type UnimplementedDaemonServiceServer struct {
}
func (UnimplementedDaemonServiceServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Login not implemented")
}
func (UnimplementedDaemonServiceServer) WaitSSOLogin(context.Context, *WaitSSOLoginRequest) (*WaitSSOLoginResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method WaitSSOLogin not implemented")
}
func (UnimplementedDaemonServiceServer) Up(context.Context, *UpRequest) (*UpResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Up not implemented")
}
func (UnimplementedDaemonServiceServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
}
func (UnimplementedDaemonServiceServer) Down(context.Context, *DownRequest) (*DownResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Down not implemented")
}
func (UnimplementedDaemonServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented")
}
func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {}
// UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to DaemonServiceServer will
// result in compilation errors.
type UnsafeDaemonServiceServer interface {
mustEmbedUnimplementedDaemonServiceServer()
}
func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) {
s.RegisterService(&DaemonService_ServiceDesc, srv)
}
func _DaemonService_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LoginRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).Login(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/Login",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).Login(ctx, req.(*LoginRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_WaitSSOLogin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(WaitSSOLoginRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).WaitSSOLogin(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/WaitSSOLogin",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).WaitSSOLogin(ctx, req.(*WaitSSOLoginRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_Up_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).Up(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/Up",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).Up(ctx, req.(*UpRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).Status(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/Status",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).Status(ctx, req.(*StatusRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_Down_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DownRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).Down(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/Down",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).Down(ctx, req.(*DownRequest))
}
return interceptor(ctx, in, info, handler)
}
func _DaemonService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServiceServer).GetConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/daemon.DaemonService/GetConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServiceServer).GetConfig(ctx, req.(*GetConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
// DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var DaemonService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "daemon.DaemonService",
HandlerType: (*DaemonServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Login",
Handler: _DaemonService_Login_Handler,
},
{
MethodName: "WaitSSOLogin",
Handler: _DaemonService_WaitSSOLogin_Handler,
},
{
MethodName: "Up",
Handler: _DaemonService_Up_Handler,
},
{
MethodName: "Status",
Handler: _DaemonService_Status_Handler,
},
{
MethodName: "Down",
Handler: _DaemonService_Down_Handler,
},
{
MethodName: "GetConfig",
Handler: _DaemonService_GetConfig_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "daemon.proto",
}

View File

@@ -1,4 +0,0 @@
#!/bin/bash
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1
protoc -I proto/ proto/daemon.proto --go_out=. --go-grpc_out=.

View File

@@ -5,5 +5,5 @@
#define STRINGIZE(x) #x
#define EXPAND(x) STRINGIZE(x)
CREATEPROCESS_MANIFEST_RESOURCE_ID RT_MANIFEST manifest.xml
7 ICON ui/netbird.ico
wireguard.dll RCDATA wireguard.dll
wintun.dll RCDATA wintun.dll

Binary file not shown.

View File

@@ -1,493 +0,0 @@
package server
import (
"context"
"fmt"
nbStatus "github.com/netbirdio/netbird/client/status"
"github.com/netbirdio/netbird/client/system"
"google.golang.org/protobuf/types/known/timestamppb"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/client/internal"
"github.com/netbirdio/netbird/client/proto"
)
// Server for service control.
type Server struct {
rootCtx context.Context
actCancel context.CancelFunc
managementURL string
adminURL string
configPath string
logFile string
oauthAuthFlow oauthAuthFlow
mutex sync.Mutex
config *internal.Config
proto.UnimplementedDaemonServiceServer
statusRecorder *nbStatus.Status
}
type oauthAuthFlow struct {
expiresAt time.Time
client internal.OAuthClient
info internal.DeviceAuthInfo
waitCancel context.CancelFunc
}
// New server instance constructor.
func New(ctx context.Context, managementURL, adminURL, configPath, logFile string) *Server {
return &Server{
rootCtx: ctx,
managementURL: managementURL,
adminURL: adminURL,
configPath: configPath,
logFile: logFile,
}
}
func (s *Server) Start() error {
s.mutex.Lock()
defer s.mutex.Unlock()
state := internal.CtxGetState(s.rootCtx)
// if the current state contains any error, return it.
// In all other cases we can continue execution only if the status is idle, i.e. the up command
// is not already in progress and a connection has not already been established.
status, err := state.Status()
if err != nil {
return err
}
if status != internal.StatusIdle {
return nil
}
ctx, cancel := context.WithCancel(s.rootCtx)
s.actCancel = cancel
// if the configuration exists, we just start connections. If it is a new config, we skip and set status NeedsLogin
// on failure we return the error so the caller can retry
config, err := internal.ReadConfig(s.managementURL, s.adminURL, s.configPath, nil)
if errorStatus, ok := gstatus.FromError(err); ok && errorStatus.Code() == codes.NotFound {
config, err = internal.GetConfig(s.managementURL, s.adminURL, s.configPath, "")
if err != nil {
log.Warnf("unable to create configuration file: %v", err)
return err
}
state.Set(internal.StatusNeedsLogin)
return nil
} else if err != nil {
log.Warnf("unable to create configuration file: %v", err)
return err
}
// if configuration exists, we just start connections.
config, _ = internal.UpdateOldManagementPort(ctx, config, s.configPath)
s.config = config
if s.statusRecorder == nil {
s.statusRecorder = nbStatus.NewRecorder()
}
go func() {
if err := internal.RunClient(ctx, config, s.statusRecorder); err != nil {
log.Errorf("init connections: %v", err)
}
}()
return nil
}
// loginAttempt attempts to log in using the provided information. It returns a status in case something fails
func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) {
var status internal.StatusType
err := internal.Login(ctx, s.config, setupKey, jwtToken)
if err != nil {
if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
log.Warnf("failed login: %v", err)
status = internal.StatusNeedsLogin
} else {
log.Errorf("failed login: %v", err)
status = internal.StatusLoginFailed
}
return status, err
}
return "", nil
}
// Login uses setup key to prepare configuration for the daemon.
func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*proto.LoginResponse, error) {
s.mutex.Lock()
if s.actCancel != nil {
s.actCancel()
}
ctx, cancel := context.WithCancel(s.rootCtx)
md, ok := metadata.FromIncomingContext(callerCtx)
if ok {
ctx = metadata.NewOutgoingContext(ctx, md)
}
s.actCancel = cancel
s.mutex.Unlock()
state := internal.CtxGetState(ctx)
defer func() {
status, err := state.Status()
if err != nil || (status != internal.StatusNeedsLogin && status != internal.StatusLoginFailed) {
state.Set(internal.StatusIdle)
}
}()
s.mutex.Lock()
managementURL := s.managementURL
if msg.ManagementUrl != "" {
managementURL = msg.ManagementUrl
s.managementURL = msg.ManagementUrl
}
adminURL := s.adminURL
if msg.AdminURL != "" {
adminURL = msg.AdminURL
s.adminURL = msg.AdminURL
}
s.mutex.Unlock()
config, err := internal.GetConfig(managementURL, adminURL, s.configPath, msg.PreSharedKey)
if err != nil {
return nil, err
}
if msg.ManagementUrl == "" {
config, _ = internal.UpdateOldManagementPort(ctx, config, s.configPath)
s.config = config
s.managementURL = config.ManagementURL.String()
}
s.mutex.Lock()
s.config = config
s.mutex.Unlock()
if _, err := s.loginAttempt(ctx, "", ""); err == nil {
state.Set(internal.StatusIdle)
return &proto.LoginResponse{}, nil
}
state.Set(internal.StatusConnecting)
if msg.SetupKey == "" {
providerConfig, err := internal.GetDeviceAuthorizationFlowInfo(ctx, config)
if err != nil {
state.Set(internal.StatusLoginFailed)
s, ok := gstatus.FromError(err)
if ok && s.Code() == codes.NotFound {
return nil, gstatus.Errorf(codes.NotFound, "no SSO provider returned from management. "+
"If you are using hosting Netbird see documentation at "+
"https://github.com/netbirdio/netbird/tree/main/management for details")
} else if ok && s.Code() == codes.Unimplemented {
return nil, gstatus.Errorf(codes.Unimplemented, "the management server, %s, does not support SSO providers, "+
"please update your server or use Setup Keys to login", config.ManagementURL)
} else {
log.Errorf("getting device authorization flow info failed with error: %v", err)
return nil, err
}
}
hostedClient := internal.NewHostedDeviceFlow(
providerConfig.ProviderConfig.Audience,
providerConfig.ProviderConfig.ClientID,
providerConfig.ProviderConfig.TokenEndpoint,
providerConfig.ProviderConfig.DeviceAuthEndpoint,
)
if s.oauthAuthFlow.client != nil && s.oauthAuthFlow.client.GetClientID(ctx) == hostedClient.GetClientID(context.TODO()) {
if s.oauthAuthFlow.expiresAt.After(time.Now().Add(90 * time.Second)) {
log.Debugf("using previous device flow info")
return &proto.LoginResponse{
NeedsSSOLogin: true,
VerificationURI: s.oauthAuthFlow.info.VerificationURI,
VerificationURIComplete: s.oauthAuthFlow.info.VerificationURIComplete,
UserCode: s.oauthAuthFlow.info.UserCode,
}, nil
} else {
log.Warnf("canceling previous waiting execution")
s.oauthAuthFlow.waitCancel()
}
}
deviceAuthInfo, err := hostedClient.RequestDeviceCode(context.TODO())
if err != nil {
log.Errorf("getting a request device code failed: %v", err)
return nil, err
}
s.mutex.Lock()
s.oauthAuthFlow.client = hostedClient
s.oauthAuthFlow.info = deviceAuthInfo
s.oauthAuthFlow.expiresAt = time.Now().Add(time.Duration(deviceAuthInfo.ExpiresIn) * time.Second)
s.mutex.Unlock()
state.Set(internal.StatusNeedsLogin)
return &proto.LoginResponse{
NeedsSSOLogin: true,
VerificationURI: deviceAuthInfo.VerificationURI,
VerificationURIComplete: deviceAuthInfo.VerificationURIComplete,
UserCode: deviceAuthInfo.UserCode,
}, nil
}
if loginStatus, err := s.loginAttempt(ctx, msg.SetupKey, ""); err != nil {
state.Set(loginStatus)
return nil, err
}
return &proto.LoginResponse{}, nil
}
// WaitSSOLogin uses the userCode to validate the TokenInfo and
// waits for the user to continue with the login on a browser
func (s *Server) WaitSSOLogin(callerCtx context.Context, msg *proto.WaitSSOLoginRequest) (*proto.WaitSSOLoginResponse, error) {
s.mutex.Lock()
if s.actCancel != nil {
s.actCancel()
}
ctx, cancel := context.WithCancel(s.rootCtx)
md, ok := metadata.FromIncomingContext(callerCtx)
if ok {
ctx = metadata.NewOutgoingContext(ctx, md)
}
s.actCancel = cancel
s.mutex.Unlock()
if s.oauthAuthFlow.client == nil {
return nil, gstatus.Errorf(codes.Internal, "oauth client is not initialized")
}
state := internal.CtxGetState(ctx)
defer func() {
s, err := state.Status()
if err != nil || (s != internal.StatusNeedsLogin && s != internal.StatusLoginFailed) {
state.Set(internal.StatusIdle)
}
}()
state.Set(internal.StatusConnecting)
s.mutex.Lock()
deviceAuthInfo := s.oauthAuthFlow.info
s.mutex.Unlock()
if deviceAuthInfo.UserCode != msg.UserCode {
state.Set(internal.StatusLoginFailed)
return nil, gstatus.Errorf(codes.InvalidArgument, "sso user code is invalid")
}
if s.oauthAuthFlow.waitCancel != nil {
s.oauthAuthFlow.waitCancel()
}
waitTimeout := time.Until(s.oauthAuthFlow.expiresAt)
waitCTX, cancel := context.WithTimeout(ctx, waitTimeout)
defer cancel()
s.mutex.Lock()
s.oauthAuthFlow.waitCancel = cancel
s.mutex.Unlock()
tokenInfo, err := s.oauthAuthFlow.client.WaitToken(waitCTX, deviceAuthInfo)
if err != nil {
if err == context.Canceled {
return nil, nil
}
s.mutex.Lock()
s.oauthAuthFlow.expiresAt = time.Now()
s.mutex.Unlock()
state.Set(internal.StatusLoginFailed)
log.Errorf("waiting for browser login failed: %v", err)
return nil, err
}
s.mutex.Lock()
s.oauthAuthFlow.expiresAt = time.Now()
s.mutex.Unlock()
if loginStatus, err := s.loginAttempt(ctx, "", tokenInfo.AccessToken); err != nil {
state.Set(loginStatus)
return nil, err
}
return &proto.WaitSSOLoginResponse{}, nil
}
// Up starts engine work in the daemon.
func (s *Server) Up(callerCtx context.Context, msg *proto.UpRequest) (*proto.UpResponse, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
state := internal.CtxGetState(s.rootCtx)
// if the current state contains any error, return it.
// In all other cases we can continue execution only if the status is idle, i.e. the up command
// is not already in progress and a connection has not already been established.
status, err := state.Status()
if err != nil {
return nil, err
}
if status != internal.StatusIdle {
return nil, fmt.Errorf("up already in progress: current status %s", status)
}
// it should be nil here, but cancel it just in case
if s.actCancel != nil {
s.actCancel()
}
ctx, cancel := context.WithCancel(s.rootCtx)
md, ok := metadata.FromIncomingContext(callerCtx)
if ok {
ctx = metadata.NewOutgoingContext(ctx, md)
}
s.actCancel = cancel
if s.config == nil {
return nil, fmt.Errorf("config is not defined, please call login command first")
}
if s.statusRecorder == nil {
s.statusRecorder = nbStatus.NewRecorder()
}
go func() {
if err := internal.RunClient(ctx, s.config, s.statusRecorder); err != nil {
log.Errorf("run client connection: %v", state.Wrap(err))
return
}
}()
return &proto.UpResponse{}, nil
}
// Down stops engine work in the daemon.
func (s *Server) Down(ctx context.Context, msg *proto.DownRequest) (*proto.DownResponse, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.actCancel == nil {
return nil, fmt.Errorf("service is not up")
}
s.actCancel()
return &proto.DownResponse{}, nil
}
// Status returns the current daemon status.
func (s *Server) Status(
_ context.Context,
msg *proto.StatusRequest,
) (*proto.StatusResponse, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
status, err := internal.CtxGetState(s.rootCtx).Status()
if err != nil {
return nil, err
}
statusResponse := proto.StatusResponse{Status: string(status), DaemonVersion: system.NetbirdVersion()}
if s.statusRecorder == nil {
s.statusRecorder = nbStatus.NewRecorder()
}
if msg.GetFullPeerStatus {
fullStatus := s.statusRecorder.GetFullStatus()
pbFullStatus := toProtoFullStatus(fullStatus)
statusResponse.FullStatus = pbFullStatus
}
return &statusResponse, nil
}
// GetConfig of the daemon.
func (s *Server) GetConfig(ctx context.Context, msg *proto.GetConfigRequest) (*proto.GetConfigResponse, error) {
s.mutex.Lock()
defer s.mutex.Unlock()
managementURL := s.managementURL
adminURL := s.adminURL
preSharedKey := ""
if s.config != nil {
if managementURL == "" && s.config.ManagementURL != nil {
managementURL = s.config.ManagementURL.String()
}
if s.config.AdminURL != nil {
adminURL = s.config.AdminURL.String()
}
preSharedKey = s.config.PreSharedKey
if preSharedKey != "" {
preSharedKey = "**********"
}
}
return &proto.GetConfigResponse{
ManagementUrl: managementURL,
AdminURL: adminURL,
ConfigFile: s.configPath,
LogFile: s.logFile,
PreSharedKey: preSharedKey,
}, nil
}
func toProtoFullStatus(fullStatus nbStatus.FullStatus) *proto.FullStatus {
pbFullStatus := proto.FullStatus{
ManagementState: &proto.ManagementState{},
SignalState: &proto.SignalState{},
LocalPeerState: &proto.LocalPeerState{},
Peers: []*proto.PeerState{},
}
pbFullStatus.ManagementState.URL = fullStatus.ManagementState.URL
pbFullStatus.ManagementState.Connected = fullStatus.ManagementState.Connected
pbFullStatus.SignalState.URL = fullStatus.SignalState.URL
pbFullStatus.SignalState.Connected = fullStatus.SignalState.Connected
pbFullStatus.LocalPeerState.IP = fullStatus.LocalPeerState.IP
pbFullStatus.LocalPeerState.PubKey = fullStatus.LocalPeerState.PubKey
pbFullStatus.LocalPeerState.KernelInterface = fullStatus.LocalPeerState.KernelInterface
for _, peerState := range fullStatus.Peers {
pbPeerState := &proto.PeerState{
IP: peerState.IP,
PubKey: peerState.PubKey,
ConnStatus: peerState.ConnStatus,
ConnStatusUpdate: timestamppb.New(peerState.ConnStatusUpdate),
Relayed: peerState.Relayed,
Direct: peerState.Direct,
LocalIceCandidateType: peerState.LocalIceCandidateType,
RemoteIceCandidateType: peerState.RemoteIceCandidateType,
}
pbFullStatus.Peers = append(pbFullStatus.Peers, pbPeerState)
}
return &pbFullStatus
}
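A minimal sketch of how the recorder snapshot becomes the proto status, mirroring what Status does above when msg.GetFullPeerStatus is set. The helper name, peer key, and URL are hypothetical placeholders, not part of the daemon:

// demoFullStatus is an illustrative helper (assumption, not part of the daemon): it shows how a
// recorder snapshot is converted to the proto type, exactly as (*Server).Status does.
func demoFullStatus() *proto.FullStatus {
	recorder := nbStatus.NewRecorder()
	_ = recorder.AddPeer("examplePeerPubKey")                   // hypothetical WireGuard public key
	recorder.MarkManagementConnected("https://api.example.com") // hypothetical management URL
	return toProtoFullStatus(recorder.GetFullStatus())
}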

View File

@@ -1,116 +0,0 @@
package ssh
import (
"fmt"
"golang.org/x/crypto/ssh"
"golang.org/x/term"
"net"
"os"
"time"
)
// Client wraps crypto/ssh Client to simplify usage
type Client struct {
client *ssh.Client
}
// Close closes the wrapped SSH Client
func (c *Client) Close() error {
return c.client.Close()
}
// OpenTerminal starts an interactive terminal session with the remote SSH server
func (c *Client) OpenTerminal() error {
session, err := c.client.NewSession()
if err != nil {
return fmt.Errorf("failed to open new session: %v", err)
}
defer func() {
err := session.Close()
if err != nil {
return
}
}()
fd := int(os.Stdout.Fd())
state, err := term.MakeRaw(fd)
if err != nil {
return fmt.Errorf("failed to run raw terminal: %s", err)
}
defer func() {
err := term.Restore(fd, state)
if err != nil {
return
}
}()
w, h, err := term.GetSize(fd)
if err != nil {
return fmt.Errorf("terminal get size: %s", err)
}
modes := ssh.TerminalModes{
ssh.ECHO: 1,
ssh.TTY_OP_ISPEED: 14400,
ssh.TTY_OP_OSPEED: 14400,
}
terminal := os.Getenv("TERM")
if terminal == "" {
terminal = "xterm-256color"
}
if err := session.RequestPty(terminal, h, w, modes); err != nil {
return fmt.Errorf("failed requesting pty session with xterm: %s", err)
}
session.Stdout = os.Stdout
session.Stderr = os.Stderr
session.Stdin = os.Stdin
if err := session.Shell(); err != nil {
return fmt.Errorf("failed to start login shell on the remote host: %s", err)
}
if err := session.Wait(); err != nil {
if e, ok := err.(*ssh.ExitError); ok {
switch e.ExitStatus() {
case 130:
return nil
}
}
return fmt.Errorf("failed running SSH session: %s", err)
}
return nil
}
// DialWithKey connects to the remote SSH server with a provided private key file (PEM).
func DialWithKey(addr, user string, privateKey []byte) (*Client, error) {
signer, err := ssh.ParsePrivateKey(privateKey)
if err != nil {
return nil, err
}
config := &ssh.ClientConfig{
User: user,
Timeout: 5 * time.Second,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.HostKeyCallback(func(hostname string, remote net.Addr, key ssh.PublicKey) error { return nil }),
}
return Dial("tcp", addr, config)
}
// Dial connects to the remote SSH server.
func Dial(network, addr string, config *ssh.ClientConfig) (*Client, error) {
client, err := ssh.Dial(network, addr, config)
if err != nil {
return nil, err
}
return &Client{
client: client,
}, nil
}
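A hedged usage sketch of the client above: dial a peer's embedded SSH server with a PEM private key and open an interactive terminal. The import path, address, and key file location are assumptions for illustration only; DefaultSSHPort 44338 comes from the server package further below.

package main

import (
	"log"
	"os"

	nbssh "github.com/netbirdio/netbird/client/ssh" // assumed import path
)

func main() {
	// Hypothetical: a peer's NetBird IP and the embedded server's default port (44338).
	addr := "100.64.0.2:44338"
	pem, err := os.ReadFile("client_key.pem") // assumed key location
	if err != nil {
		log.Fatal(err)
	}
	client, err := nbssh.DialWithKey(addr, "netbird", pem)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := client.OpenTerminal(); err != nil {
		log.Fatal(err)
	}
}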

View File

@@ -1,36 +0,0 @@
package ssh
import (
"fmt"
"github.com/netbirdio/netbird/util"
"net"
"net/netip"
"os/exec"
"runtime"
)
func getLoginCmd(user string, remoteAddr net.Addr) (loginPath string, args []string, err error) {
loginPath, err = exec.LookPath("login")
if err != nil {
return "", nil, err
}
addrPort, err := netip.ParseAddrPort(remoteAddr.String())
if err != nil {
return "", nil, err
}
if runtime.GOOS == "linux" {
if util.FileExists("/etc/arch-release") && !util.FileExists("/etc/pam.d/remote") {
// detect if Arch Linux
return loginPath, []string{"-f", user, "-p"}, nil
}
return loginPath, []string{"-f", user, "-h", addrPort.Addr().String(), "-p"}, nil
} else if runtime.GOOS == "darwin" {
return loginPath, []string{"-fp", "-h", addrPort.Addr().String(), user}, nil
}
return "", nil, fmt.Errorf("unsupported platform")
}

View File

@@ -1,10 +0,0 @@
//go:build !darwin
// +build !darwin
package ssh
import "os/user"
func userNameLookup(username string) (*user.User, error) {
return user.Lookup(username)
}

View File

@@ -1,47 +0,0 @@
//go:build darwin
// +build darwin
package ssh
import (
"bytes"
"fmt"
"os/exec"
"os/user"
"strings"
)
func userNameLookup(username string) (*user.User, error) {
var userObject *user.User
userObject, err := user.Lookup(username)
if err != nil && err.Error() == user.UnknownUserError(username).Error() {
return idUserNameLookup(username)
} else if err != nil {
return nil, err
}
return userObject, nil
}
func idUserNameLookup(username string) (*user.User, error) {
cmd := exec.Command("id", "-P", username)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, fmt.Errorf("error while retrieving user with id -P command, error: %v", err)
}
colon := ":"
if !bytes.Contains(out, []byte(username+colon)) {
return nil, fmt.Errorf("unable to find user in returned string")
}
// netbird:********:501:20::0:0:netbird:/Users/netbird:/bin/zsh
parts := strings.SplitN(string(out), colon, 10)
userObject := &user.User{
Username: parts[0],
Uid: parts[2],
Gid: parts[3],
Name: parts[7],
HomeDir: parts[8],
}
return userObject, nil
}

View File

@@ -1,250 +0,0 @@
package ssh
import (
"fmt"
"github.com/creack/pty"
"github.com/gliderlabs/ssh"
log "github.com/sirupsen/logrus"
"io"
"net"
"os"
"os/exec"
"os/user"
"runtime"
"strings"
"sync"
)
// DefaultSSHPort is the default SSH port of NetBird's embedded SSH server
const DefaultSSHPort = 44338
// DefaultSSHServer is a function that creates DefaultServer
func DefaultSSHServer(hostKeyPEM []byte, addr string) (Server, error) {
return newDefaultServer(hostKeyPEM, addr)
}
// Server is an interface of SSH server
type Server interface {
// Stop stops SSH server.
Stop() error
// Start starts SSH server. Blocking
Start() error
// RemoveAuthorizedKey removes SSH key of a given peer from the authorized keys
RemoveAuthorizedKey(peer string)
// AddAuthorizedKey add a given peer key to server authorized keys
AddAuthorizedKey(peer, newKey string) error
}
// DefaultServer is the embedded NetBird SSH server
type DefaultServer struct {
listener net.Listener
// authorizedKeys maps a peer's WireGuard public key to its SSH public key
authorizedKeys map[string]ssh.PublicKey
mu sync.Mutex
hostKeyPEM []byte
sessions []ssh.Session
}
// newDefaultServer creates new server with provided host key
func newDefaultServer(hostKeyPEM []byte, addr string) (*DefaultServer, error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
allowedKeys := make(map[string]ssh.PublicKey)
return &DefaultServer{listener: ln, mu: sync.Mutex{}, hostKeyPEM: hostKeyPEM, authorizedKeys: allowedKeys, sessions: make([]ssh.Session, 0)}, nil
}
// RemoveAuthorizedKey removes SSH key of a given peer from the authorized keys
func (srv *DefaultServer) RemoveAuthorizedKey(peer string) {
srv.mu.Lock()
defer srv.mu.Unlock()
delete(srv.authorizedKeys, peer)
}
// AddAuthorizedKey adds a given peer key to the server's authorized keys
func (srv *DefaultServer) AddAuthorizedKey(peer, newKey string) error {
srv.mu.Lock()
defer srv.mu.Unlock()
parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(newKey))
if err != nil {
return err
}
srv.authorizedKeys[peer] = parsedKey
return nil
}
// Stop stops SSH server.
func (srv *DefaultServer) Stop() error {
srv.mu.Lock()
defer srv.mu.Unlock()
err := srv.listener.Close()
if err != nil {
return err
}
for _, session := range srv.sessions {
err := session.Close()
if err != nil {
log.Warnf("failed closing SSH session from %v", err)
}
}
return nil
}
func (srv *DefaultServer) publicKeyHandler(ctx ssh.Context, key ssh.PublicKey) bool {
srv.mu.Lock()
defer srv.mu.Unlock()
for _, allowed := range srv.authorizedKeys {
if ssh.KeysEqual(allowed, key) {
return true
}
}
return false
}
func prepareUserEnv(user *user.User, shell string) []string {
return []string{
fmt.Sprintf("SHELL=" + shell),
fmt.Sprintf("USER=" + user.Username),
fmt.Sprintf("HOME=" + user.HomeDir),
}
}
func acceptEnv(s string) bool {
split := strings.Split(s, "=")
if len(split) != 2 {
return false
}
return split[0] == "TERM" || split[0] == "LANG" || strings.HasPrefix(split[0], "LC_")
}
// sessionHandler handles SSH session post auth
func (srv *DefaultServer) sessionHandler(session ssh.Session) {
srv.mu.Lock()
srv.sessions = append(srv.sessions, session)
srv.mu.Unlock()
defer func() {
err := session.Close()
if err != nil {
return
}
}()
localUser, err := userNameLookup(session.User())
if err != nil {
_, err = fmt.Fprintf(session, "remote SSH server couldn't find local user %s\n", session.User()) //nolint
err = session.Exit(1)
if err != nil {
return
}
log.Warnf("failed SSH session from %v, user %s", session.RemoteAddr(), session.User())
return
}
ptyReq, winCh, isPty := session.Pty()
if isPty {
loginCmd, loginArgs, err := getLoginCmd(localUser.Username, session.RemoteAddr())
if err != nil {
log.Warnf("failed logging-in user %s from remote IP %s", localUser.Username, session.RemoteAddr().String())
return
}
cmd := exec.Command(loginCmd, loginArgs...)
go func() {
<-session.Context().Done()
err := cmd.Process.Kill()
if err != nil {
return
}
}()
cmd.Dir = localUser.HomeDir
cmd.Env = append(cmd.Env, fmt.Sprintf("TERM=%s", ptyReq.Term))
cmd.Env = append(cmd.Env, prepareUserEnv(localUser, getUserShell(localUser.Uid))...)
for _, v := range session.Environ() {
if acceptEnv(v) {
cmd.Env = append(cmd.Env, v)
}
}
file, err := pty.Start(cmd)
if err != nil {
log.Errorf("failed starting SSH server %v", err)
}
go func() {
for win := range winCh {
setWinSize(file, win.Width, win.Height)
}
}()
srv.stdInOut(file, session)
err = cmd.Wait()
if err != nil {
return
}
} else {
_, err := io.WriteString(session, "only PTY is supported.\n")
if err != nil {
return
}
err = session.Exit(1)
if err != nil {
return
}
}
}
func (srv *DefaultServer) stdInOut(file *os.File, session ssh.Session) {
go func() {
// stdin
_, err := io.Copy(file, session)
if err != nil {
return
}
}()
go func() {
// stdout
_, err := io.Copy(session, file)
if err != nil {
return
}
}()
}
// Start starts SSH server. Blocking
func (srv *DefaultServer) Start() error {
log.Infof("starting SSH server on addr: %s", srv.listener.Addr().String())
publicKeyOption := ssh.PublicKeyAuth(srv.publicKeyHandler)
hostKeyPEM := ssh.HostKeyPEM(srv.hostKeyPEM)
err := ssh.Serve(srv.listener, srv.sessionHandler, publicKeyOption, hostKeyPEM)
if err != nil {
return err
}
return nil
}
func getUserShell(userID string) string {
if runtime.GOOS == "linux" {
output, _ := exec.Command("getent", "passwd", userID).Output()
line := strings.SplitN(string(output), ":", 10)
if len(line) > 6 {
return strings.TrimSpace(line[6])
}
}
shell := os.Getenv("SHELL")
if shell == "" {
shell = "/bin/sh"
}
return shell
}
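As a rough sketch of wiring the pieces above together inside this package: generate a host key, create the server, authorize one peer, and run it in the background. The helper name, listen address, and keys are placeholders for illustration only.

// startDemoServer is an illustrative helper (assumption, not part of the package).
func startDemoServer() (Server, error) {
	hostKeyPEM, err := GeneratePrivateKey(ED25519)
	if err != nil {
		return nil, err
	}
	srv, err := DefaultSSHServer(hostKeyPEM, ":44338") // DefaultSSHPort
	if err != nil {
		return nil, err
	}
	// Hypothetical peer WireGuard public key and SSH public key.
	if err := srv.AddAuthorizedKey("examplePeerWGKey", "ssh-ed25519 AAAA... peer"); err != nil {
		return nil, err
	}
	go func() {
		if err := srv.Start(); err != nil {
			log.Warnf("embedded SSH server stopped: %v", err)
		}
	}()
	return srv, nil
}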

View File

@@ -1,44 +0,0 @@
package ssh
import "context"
// MockServer mocks ssh.Server
type MockServer struct {
Ctx context.Context
StopFunc func() error
StartFunc func() error
AddAuthorizedKeyFunc func(peer, newKey string) error
RemoveAuthorizedKeyFunc func(peer string)
}
// RemoveAuthorizedKey removes SSH key of a given peer from the authorized keys
func (srv *MockServer) RemoveAuthorizedKey(peer string) {
if srv.RemoveAuthorizedKeyFunc == nil {
return
}
srv.RemoveAuthorizedKeyFunc(peer)
}
// AddAuthorizedKey adds a given peer key to the server's authorized keys
func (srv *MockServer) AddAuthorizedKey(peer, newKey string) error {
if srv.AddAuthorizedKeyFunc == nil {
return nil
}
return srv.AddAuthorizedKeyFunc(peer, newKey)
}
// Stop stops SSH server.
func (srv *MockServer) Stop() error {
if srv.StopFunc == nil {
return nil
}
return srv.StopFunc()
}
// Start starts SSH server. Blocking
func (srv *MockServer) Start() error {
if srv.StartFunc == nil {
return nil
}
return srv.StartFunc()
}
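A small sketch of how the mock might stand in for the real server in a test; the test name and assertions are illustrative only.

// Illustrative test sketch using MockServer in place of DefaultServer.
func TestUsesMockServer(t *testing.T) {
	started := false
	var srv Server = &MockServer{
		StartFunc: func() error {
			started = true
			return nil
		},
	}
	if err := srv.Start(); err != nil {
		t.Fatal(err)
	}
	if !started {
		t.Error("expected StartFunc to be called")
	}
}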

View File

@@ -1,121 +0,0 @@
package ssh
import (
"fmt"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/ssh"
"strings"
"testing"
)
func TestServer_AddAuthorizedKey(t *testing.T) {
key, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
server, err := newDefaultServer(key, "localhost:")
if err != nil {
t.Fatal(err)
}
// add multiple keys
keys := map[string][]byte{}
for i := 0; i < 10; i++ {
peer := fmt.Sprintf("%s-%d", "remotePeer", i)
remotePrivKey, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
remotePubKey, err := GeneratePublicKey(remotePrivKey)
if err != nil {
t.Fatal(err)
}
err = server.AddAuthorizedKey(peer, string(remotePubKey))
if err != nil {
t.Error(err)
}
keys[peer] = remotePubKey
}
// make sure that all keys have been added
for peer, remotePubKey := range keys {
k, ok := server.authorizedKeys[peer]
assert.True(t, ok, "expecting remotePeer key to be found in authorizedKeys")
assert.Equal(t, string(remotePubKey), strings.TrimSpace(string(ssh.MarshalAuthorizedKey(k))))
}
}
func TestServer_RemoveAuthorizedKey(t *testing.T) {
key, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
server, err := newDefaultServer(key, "localhost:")
if err != nil {
t.Fatal(err)
}
remotePrivKey, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
remotePubKey, err := GeneratePublicKey(remotePrivKey)
if err != nil {
t.Fatal(err)
}
err = server.AddAuthorizedKey("remotePeer", string(remotePubKey))
if err != nil {
t.Error(err)
}
server.RemoveAuthorizedKey("remotePeer")
_, ok := server.authorizedKeys["remotePeer"]
assert.False(t, ok, "expecting remotePeer's SSH key to be removed")
}
func TestServer_PubKeyHandler(t *testing.T) {
key, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
server, err := newDefaultServer(key, "localhost:")
if err != nil {
t.Fatal(err)
}
var keys []ssh.PublicKey
for i := 0; i < 10; i++ {
peer := fmt.Sprintf("%s-%d", "remotePeer", i)
remotePrivKey, err := GeneratePrivateKey(ED25519)
if err != nil {
t.Fatal(err)
}
remotePubKey, err := GeneratePublicKey(remotePrivKey)
if err != nil {
t.Fatal(err)
}
remoteParsedPubKey, _, _, _, err := ssh.ParseAuthorizedKey(remotePubKey)
if err != nil {
t.Fatal(err)
}
err = server.AddAuthorizedKey(peer, string(remotePubKey))
if err != nil {
t.Error(err)
}
keys = append(keys, remoteParsedPubKey)
}
for _, key := range keys {
accepted := server.publicKeyHandler(nil, key)
assert.Truef(t, accepted, "expecting SSH connection to be accepted for a given SSH key %s", string(ssh.MarshalAuthorizedKey(key)))
}
}

View File

@@ -1,86 +0,0 @@
package ssh
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"golang.org/x/crypto/ed25519"
gossh "golang.org/x/crypto/ssh"
"strings"
)
// KeyType is a type of SSH key
type KeyType string
// ED25519 is key of type ed25519
const ED25519 KeyType = "ed25519"
// ECDSA is key of type ecdsa
const ECDSA KeyType = "ecdsa"
// RSA is key of type rsa
const RSA KeyType = "rsa"
// RSAKeySize is a size of newly generated RSA key
const RSAKeySize = 2048
// GeneratePrivateKey creates a private key of the given type and returns it PEM-encoded
func GeneratePrivateKey(keyType KeyType) ([]byte, error) {
var key crypto.Signer
var err error
switch keyType {
case ED25519:
_, key, err = ed25519.GenerateKey(rand.Reader)
case ECDSA:
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
case RSA:
key, err = rsa.GenerateKey(rand.Reader, RSAKeySize)
default:
return nil, fmt.Errorf("unsupported ket type %s", keyType)
}
if err != nil {
return nil, err
}
pemBytes, err := EncodePrivateKeyToPEM(key)
if err != nil {
return nil, err
}
return pemBytes, nil
}
// GeneratePublicKey returns the public part of the private key
func GeneratePublicKey(key []byte) ([]byte, error) {
signer, err := gossh.ParsePrivateKey(key)
if err != nil {
return nil, err
}
strKey := strings.TrimSpace(string(gossh.MarshalAuthorizedKey(signer.PublicKey())))
return []byte(strKey), nil
}
// EncodePrivateKeyToPEM encodes a private key into PKCS#8 PEM format
func EncodePrivateKeyToPEM(privateKey crypto.Signer) ([]byte, error) {
mk, err := x509.MarshalPKCS8PrivateKey(privateKey)
if err != nil {
return nil, err
}
// pem.Block
privBlock := pem.Block{
Type: "PRIVATE KEY",
Bytes: mk,
}
// Private key in PEM format
privatePEM := pem.EncodeToMemory(&privBlock)
return privatePEM, nil
}
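A brief usage sketch of the key helpers above: generate an ED25519 private key in PEM form and derive its authorized_keys-style public key. The wrapper function name is a placeholder.

// demoKeyPair is an illustrative helper (assumption) combining the generators above.
func demoKeyPair() (privPEM, pubAuthorized []byte, err error) {
	privPEM, err = GeneratePrivateKey(ED25519)
	if err != nil {
		return nil, nil, err
	}
	pubAuthorized, err = GeneratePublicKey(privPEM)
	if err != nil {
		return nil, nil, err
	}
	return privPEM, pubAuthorized, nil
}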

View File

@@ -1,14 +0,0 @@
//go:build linux || darwin
package ssh
import (
"os"
"syscall"
"unsafe"
)
func setWinSize(file *os.File, width, height int) {
syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TIOCSWINSZ), //nolint
uintptr(unsafe.Pointer(&struct{ h, w, x, y uint16 }{uint16(height), uint16(width), 0, 0})))
}

View File

@@ -1,9 +0,0 @@
package ssh
import (
"os"
)
func setWinSize(file *os.File, width, height int) {
}

View File

@@ -1,223 +0,0 @@
package status
import (
"errors"
"sync"
"time"
)
// PeerState contains the latest state of a peer
type PeerState struct {
IP string
PubKey string
ConnStatus string
ConnStatusUpdate time.Time
Relayed bool
Direct bool
LocalIceCandidateType string
RemoteIceCandidateType string
}
// LocalPeerState contains the latest state of the local peer
type LocalPeerState struct {
IP string
PubKey string
KernelInterface bool
}
// SignalState contains the latest state of a signal connection
type SignalState struct {
URL string
Connected bool
}
// ManagementState contains the latest state of a management connection
type ManagementState struct {
URL string
Connected bool
}
// FullStatus contains the full state held by the Status instance
type FullStatus struct {
Peers []PeerState
ManagementState ManagementState
SignalState SignalState
LocalPeerState LocalPeerState
}
// Status holds a state of peers, signal and management connections
type Status struct {
mux sync.Mutex
peers map[string]PeerState
changeNotify map[string]chan struct{}
signal SignalState
management ManagementState
localPeer LocalPeerState
}
// NewRecorder returns a new Status instance
func NewRecorder() *Status {
return &Status{
peers: make(map[string]PeerState),
changeNotify: make(map[string]chan struct{}),
}
}
// AddPeer adds peer to Daemon status map
func (d *Status) AddPeer(peerPubKey string) error {
d.mux.Lock()
defer d.mux.Unlock()
_, ok := d.peers[peerPubKey]
if ok {
return errors.New("peer already exist")
}
d.peers[peerPubKey] = PeerState{PubKey: peerPubKey}
return nil
}
// GetPeer returns a peer from the Daemon status map
func (d *Status) GetPeer(peerPubKey string) (PeerState, error) {
d.mux.Lock()
defer d.mux.Unlock()
state, ok := d.peers[peerPubKey]
if !ok {
return PeerState{}, errors.New("peer not found")
}
return state, nil
}
// RemovePeer removes peer from Daemon status map
func (d *Status) RemovePeer(peerPubKey string) error {
d.mux.Lock()
defer d.mux.Unlock()
_, ok := d.peers[peerPubKey]
if ok {
delete(d.peers, peerPubKey)
return nil
}
return errors.New("no peer with to remove")
}
// UpdatePeerState updates peer status
func (d *Status) UpdatePeerState(receivedState PeerState) error {
d.mux.Lock()
defer d.mux.Unlock()
peerState, ok := d.peers[receivedState.PubKey]
if !ok {
return errors.New("peer doesn't exist")
}
if receivedState.IP != "" {
peerState.IP = receivedState.IP
}
if receivedState.ConnStatus != peerState.ConnStatus {
peerState.ConnStatus = receivedState.ConnStatus
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
peerState.Direct = receivedState.Direct
peerState.Relayed = receivedState.Relayed
peerState.LocalIceCandidateType = receivedState.LocalIceCandidateType
peerState.RemoteIceCandidateType = receivedState.RemoteIceCandidateType
}
d.peers[receivedState.PubKey] = peerState
ch, found := d.changeNotify[receivedState.PubKey]
if found && ch != nil {
close(ch)
d.changeNotify[receivedState.PubKey] = nil
}
return nil
}
// GetPeerStateChangeNotifier returns a change notifier channel for a peer
func (d *Status) GetPeerStateChangeNotifier(peer string) <-chan struct{} {
d.mux.Lock()
defer d.mux.Unlock()
ch, found := d.changeNotify[peer]
if !found || ch == nil {
ch = make(chan struct{})
d.changeNotify[peer] = ch
}
return ch
}
// UpdateLocalPeerState updates local peer status
func (d *Status) UpdateLocalPeerState(localPeerState LocalPeerState) {
d.mux.Lock()
defer d.mux.Unlock()
d.localPeer = localPeerState
}
// CleanLocalPeerState cleans local peer status
func (d *Status) CleanLocalPeerState() {
d.mux.Lock()
defer d.mux.Unlock()
d.localPeer = LocalPeerState{}
}
// MarkManagementDisconnected sets ManagementState to disconnected
func (d *Status) MarkManagementDisconnected(managementURL string) {
d.mux.Lock()
defer d.mux.Unlock()
d.management = ManagementState{
URL: managementURL,
Connected: false,
}
}
// MarkManagementConnected sets ManagementState to connected
func (d *Status) MarkManagementConnected(managementURL string) {
d.mux.Lock()
defer d.mux.Unlock()
d.management = ManagementState{
URL: managementURL,
Connected: true,
}
}
// MarkSignalDisconnected sets SignalState to disconnected
func (d *Status) MarkSignalDisconnected(signalURL string) {
d.mux.Lock()
defer d.mux.Unlock()
d.signal = SignalState{
signalURL,
false,
}
}
// MarkSignalConnected sets SignalState to connected
func (d *Status) MarkSignalConnected(signalURL string) {
d.mux.Lock()
defer d.mux.Unlock()
d.signal = SignalState{
signalURL,
true,
}
}
// GetFullStatus gets full status
func (d *Status) GetFullStatus() FullStatus {
d.mux.Lock()
defer d.mux.Unlock()
fullStatus := FullStatus{
ManagementState: d.management,
SignalState: d.signal,
LocalPeerState: d.localPeer,
}
for _, status := range d.peers {
fullStatus.Peers = append(fullStatus.Peers, status)
}
return fullStatus
}
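As a sketch of the intended flow: register a peer, grab its change notifier, and update its state; the notifier channel is closed by UpdatePeerState when the connection status changes. The helper name, key, and values are placeholders.

// demoStatusFlow is an illustrative helper (assumption) showing the notification flow above.
func demoStatusFlow() error {
	recorder := NewRecorder()
	if err := recorder.AddPeer("examplePeerKey"); err != nil { // hypothetical peer key
		return err
	}
	notify := recorder.GetPeerStateChangeNotifier("examplePeerKey")
	err := recorder.UpdatePeerState(PeerState{
		PubKey:     "examplePeerKey",
		IP:         "100.64.0.2",
		ConnStatus: "Connected",
	})
	if err != nil {
		return err
	}
	<-notify // closed by UpdatePeerState because ConnStatus changed
	return nil
}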

View File

@@ -1,225 +0,0 @@
package status
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestAddPeer(t *testing.T) {
key := "abc"
status := NewRecorder()
err := status.AddPeer(key)
assert.NoError(t, err, "shouldn't return error")
_, exists := status.peers[key]
assert.True(t, exists, "value was found")
err = status.AddPeer(key)
assert.Error(t, err, "should return error on duplicate")
}
func TestGetPeer(t *testing.T) {
key := "abc"
status := NewRecorder()
err := status.AddPeer(key)
assert.NoError(t, err, "shouldn't return error")
peerStatus, err := status.GetPeer(key)
assert.NoError(t, err, "shouldn't return error on getting peer")
assert.Equal(t, key, peerStatus.PubKey, "retrieved public key should match")
_, err = status.GetPeer("non_existing_key")
assert.Error(t, err, "should return error when peer doesn't exist")
}
func TestUpdatePeerState(t *testing.T) {
key := "abc"
ip := "10.10.10.10"
status := NewRecorder()
peerState := PeerState{
PubKey: key,
}
status.peers[key] = peerState
peerState.IP = ip
err := status.UpdatePeerState(peerState)
assert.NoError(t, err, "shouldn't return error")
state, exists := status.peers[key]
assert.True(t, exists, "state should be found")
assert.Equal(t, ip, state.IP, "ip should be equal")
}
func TestGetPeerStateChangeNotifierLogic(t *testing.T) {
key := "abc"
ip := "10.10.10.10"
status := NewRecorder()
peerState := PeerState{
PubKey: key,
}
status.peers[key] = peerState
ch := status.GetPeerStateChangeNotifier(key)
assert.NotNil(t, ch, "channel shouldn't be nil")
peerState.IP = ip
err := status.UpdatePeerState(peerState)
assert.NoError(t, err, "shouldn't return error")
select {
case <-ch:
default:
t.Errorf("channel wasn't closed after update")
}
}
func TestRemovePeer(t *testing.T) {
key := "abc"
status := NewRecorder()
peerState := PeerState{
PubKey: key,
}
status.peers[key] = peerState
err := status.RemovePeer(key)
assert.NoError(t, err, "shouldn't return error")
_, exists := status.peers[key]
assert.False(t, exists, "state value shouldn't be found")
err = status.RemovePeer("not existing")
assert.Error(t, err, "should return error when peer doesn't exist")
}
func TestUpdateLocalPeerState(t *testing.T) {
localPeerState := LocalPeerState{
IP: "10.10.10.10",
PubKey: "abc",
KernelInterface: false,
}
status := NewRecorder()
status.UpdateLocalPeerState(localPeerState)
assert.Equal(t, localPeerState, status.localPeer, "local peer status should be equal")
}
func TestCleanLocalPeerState(t *testing.T) {
emptyLocalPeerState := LocalPeerState{}
localPeerState := LocalPeerState{
IP: "10.10.10.10",
PubKey: "abc",
KernelInterface: false,
}
status := NewRecorder()
status.localPeer = localPeerState
status.CleanLocalPeerState()
assert.Equal(t, emptyLocalPeerState, status.localPeer, "local peer status should be empty")
}
func TestUpdateSignalState(t *testing.T) {
url := "https://signal"
var tests = []struct {
name string
connected bool
want SignalState
}{
{"should mark as connected", true, SignalState{
URL: url,
Connected: true,
}},
{"should mark as disconnected", false, SignalState{
URL: url,
Connected: false,
}},
}
status := NewRecorder()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.connected {
status.MarkSignalConnected(url)
} else {
status.MarkSignalDisconnected(url)
}
assert.Equal(t, test.want, status.signal, "signal status should be equal")
})
}
}
func TestUpdateManagementState(t *testing.T) {
url := "https://management"
var tests = []struct {
name string
connected bool
want ManagementState
}{
{"should mark as connected", true, ManagementState{
URL: url,
Connected: true,
}},
{"should mark as disconnected", false, ManagementState{
URL: url,
Connected: false,
}},
}
status := NewRecorder()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.connected {
status.MarkManagementConnected(url)
} else {
status.MarkManagementDisconnected(url)
}
assert.Equal(t, test.want, status.management, "signal status should be equal")
})
}
}
func TestGetFullStatus(t *testing.T) {
key1 := "abc"
key2 := "def"
managementState := ManagementState{
URL: "https://signal",
Connected: true,
}
signalState := SignalState{
URL: "https://signal",
Connected: true,
}
peerState1 := PeerState{
PubKey: key1,
}
peerState2 := PeerState{
PubKey: key2,
}
status := NewRecorder()
status.management = managementState
status.signal = signalState
status.peers[key1] = peerState1
status.peers[key2] = peerState2
fullStatus := status.GetFullStatus()
assert.Equal(t, managementState, fullStatus.ManagementState, "management status should be equal")
assert.Equal(t, signalState, fullStatus.SignalState, "signal status should be equal")
assert.ElementsMatch(t, []PeerState{peerState1, peerState2}, fullStatus.Peers, "peers states should match")
}

View File

@@ -1,52 +1,14 @@
package system
import (
"context"
"google.golang.org/grpc/metadata"
"strings"
)
// this is the wiretrustee version
// will be replaced with the release version when using goreleaser
var version = "development"
// Info is an object that contains machine information
// Most of the code is taken from https://github.com/matishsiao/goInfo
type Info struct {
GoOS string
Kernel string
Core string
Platform string
OS string
OSVersion string
Hostname string
CPUs int
WiretrusteeVersion string
UIVersion string
}
// NetbirdVersion returns the Netbird version
func NetbirdVersion() string {
return version
}
// extractUserAgent extracts Netbird's agent (client) name and version from the outgoing context
func extractUserAgent(ctx context.Context) string {
md, hasMeta := metadata.FromOutgoingContext(ctx)
if hasMeta {
agent, ok := md["user-agent"]
if ok {
nbAgent := strings.Split(agent[0], " ")[0]
if strings.HasPrefix(nbAgent, "netbird") {
return nbAgent
}
return ""
}
}
return ""
}
// GetDesktopUIUserAgent returns the Desktop ui user agent
func GetDesktopUIUserAgent() string {
return "netbird-desktop-ui/" + NetbirdVersion()
GoOS string
Kernel string
Core string
Platform string
OS string
OSVersion string
Hostname string
CPUs int
}

View File

@@ -2,27 +2,38 @@ package system
import (
"bytes"
"context"
"fmt"
"golang.org/x/sys/unix"
"os"
"os/exec"
"runtime"
"strings"
"time"
)
// GetInfo retrieves and parses the system information
func GetInfo(ctx context.Context) *Info {
utsname := unix.Utsname{}
err := unix.Uname(&utsname)
func GetInfo() *Info {
out := _getInfo()
for strings.Contains(out, "broken pipe") {
out = _getInfo()
time.Sleep(500 * time.Millisecond)
}
osStr := strings.Replace(out, "\n", "", -1)
osStr = strings.Replace(osStr, "\r\n", "", -1)
osInfo := strings.Split(osStr, " ")
gio := &Info{Kernel: osInfo[0], OSVersion: osInfo[1], Core: osInfo[1], Platform: osInfo[2], OS: osInfo[0], GoOS: runtime.GOOS, CPUs: runtime.NumCPU()}
gio.Hostname, _ = os.Hostname()
return gio
}
func _getInfo() string {
cmd := exec.Command("uname", "-srm")
cmd.Stdin = strings.NewReader("some input")
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
fmt.Println("getInfo:", err)
}
sysName := string(bytes.Split(utsname.Sysname[:], []byte{0})[0])
machine := string(bytes.Split(utsname.Machine[:], []byte{0})[0])
release := string(bytes.Split(utsname.Release[:], []byte{0})[0])
gio := &Info{Kernel: sysName, OSVersion: release, Core: release, Platform: machine, OS: sysName, GoOS: runtime.GOOS, CPUs: runtime.NumCPU()}
gio.Hostname, _ = os.Hostname()
gio.WiretrusteeVersion = NetbirdVersion()
gio.UIVersion = extractUserAgent(ctx)
return gio
return out.String()
}

View File

@@ -2,7 +2,6 @@ package system
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
@@ -11,8 +10,7 @@ import (
"time"
)
// GetInfo retrieves and parses the system information
func GetInfo(ctx context.Context) *Info {
func GetInfo() *Info {
out := _getInfo()
for strings.Contains(out, "broken pipe") {
out = _getInfo()
@@ -23,9 +21,6 @@ func GetInfo(ctx context.Context) *Info {
osInfo := strings.Split(osStr, " ")
gio := &Info{Kernel: osInfo[0], Core: osInfo[1], Platform: runtime.GOARCH, OS: osInfo[2], GoOS: runtime.GOOS, CPUs: runtime.NumCPU()}
gio.Hostname, _ = os.Hostname()
gio.WiretrusteeVersion = NetbirdVersion()
gio.UIVersion = extractUserAgent(ctx)
return gio
}

View File

@@ -2,7 +2,6 @@ package system
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
@@ -11,8 +10,7 @@ import (
"time"
)
// GetInfo retrieves and parses the system information
func GetInfo(ctx context.Context) *Info {
func GetInfo() *Info {
info := _getInfo()
for strings.Contains(info, "broken pipe") {
info = _getInfo()
@@ -46,9 +44,6 @@ func GetInfo(ctx context.Context) *Info {
}
gio := &Info{Kernel: osInfo[0], Core: osInfo[1], Platform: osInfo[2], OS: osName, OSVersion: osVer, GoOS: runtime.GOOS, CPUs: runtime.NumCPU()}
gio.Hostname, _ = os.Hostname()
gio.WiretrusteeVersion = NetbirdVersion()
gio.UIVersion = extractUserAgent(ctx)
return gio
}

View File

@@ -1,26 +0,0 @@
package system
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/metadata"
)
func Test_LocalWTVersion(t *testing.T) {
got := GetInfo(context.TODO())
want := "development"
assert.Equal(t, want, got.WiretrusteeVersion)
}
func Test_UIVersion(t *testing.T) {
ctx := context.Background()
want := "netbird-desktop-ui/development"
ctx = metadata.NewOutgoingContext(ctx, map[string][]string{
"user-agent": {want},
})
got := GetInfo(ctx)
assert.Equal(t, want, got.UIVersion)
}

View File

@@ -2,15 +2,13 @@ package system
import (
"bytes"
"context"
"os"
"os/exec"
"runtime"
"strings"
)
// GetInfo retrieves and parses the system information
func GetInfo(ctx context.Context) *Info {
func GetInfo() *Info {
cmd := exec.Command("cmd", "ver")
cmd.Stdin = strings.NewReader("some")
var out bytes.Buffer
@@ -33,8 +31,5 @@ func GetInfo(ctx context.Context) *Info {
}
gio := &Info{Kernel: "windows", OSVersion: ver, Core: ver, Platform: "unknown", OS: "windows", GoOS: runtime.GOOS, CPUs: runtime.NumCPU()}
gio.Hostname, _ = os.Hostname()
gio.WiretrusteeVersion = NetbirdVersion()
gio.UIVersion = extractUserAgent(ctx)
return gio
}

View File

@@ -1,37 +1,37 @@
{
"Stuns": [
{
"Proto": "udp",
"URI": "stun:stun.wiretrustee.com:3468",
"Username": "",
"Password": null
}
],
"TURNConfig": {
"Turns": [
{
"Proto": "udp",
"URI": "turn:stun.wiretrustee.com:3468",
"Username": "some_user",
"Password": "c29tZV9wYXNzd29yZA=="
}
"Stuns": [
{
"Proto": "udp",
"URI": "stun:stun.wiretrustee.com:3468",
"Username": "",
"Password": null
}
],
"CredentialsTTL": "1h",
"Secret": "c29tZV9wYXNzd29yZA==",
"TimeBasedCredentials": true
},
"Signal": {
"Proto": "http",
"URI": "signal.wiretrustee.com:10000",
"Username": "",
"Password": null
},
"DataDir": "",
"HttpConfig": {
"LetsEncryptDomain": "<PASTE YOUR LET'S ENCRYPT DOMAIN HERE>",
"Address": "0.0.0.0:33071",
"AuthIssuer": "<PASTE YOUR AUTH0 ISSUER HERE>,",
"AuthAudience": "<PASTE YOUR AUTH0 AUDIENCE HERE>",
"AuthKeysLocation": "<PASTE YOUR AUTH0 PUBLIC JWT KEYS LOCATION HERE>"
}
"TURNConfig": {
"Turns": [
{
"Proto": "udp",
"URI": "turn:stun.wiretrustee.com:3468",
"Username": "some_user",
"Password": "c29tZV9wYXNzd29yZA=="
}
],
"CredentialsTTL": "1h",
"Secret": "c29tZV9wYXNzd29yZA==",
"TimeBasedCredentials": true
},
"Signal": {
"Proto": "http",
"URI": "signal.wiretrustee.com:10000",
"Username": "",
"Password": null
},
"DataDir": "",
"HttpConfig": {
"LetsEncryptDomain": "<PASTE YOUR LET'S ENCRYPT DOMAIN HERE>",
"Address": "0.0.0.0:33071",
"AuthIssuer": "<PASTE YOUR AUTH0 ISSUER HERE>,",
"AuthAudience": "<PASTE YOUR AUTH0 AUDIENCE HERE>",
"AuthKeysLocation": "<PASTE YOUR AUTH0 PUBLIC JWT KEYS LOCATION HERE>"
}
}

View File

@@ -11,28 +11,17 @@
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
"Revoked": false,
"UsedTimes": 0
}
},
"Network": {
"Id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
"Net": {
"IP": "100.64.0.0",
"Mask": "//8AAA=="
"Mask": "/8AAAA=="
},
"Dns": null
},
"Peers": {},
"Users": {
"edafee4e-63fb-11ec-90d6-0242ac120003": {
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
"Role": "admin"
},
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
"Role": "user"
}
}
"Peers": {}
}
}
}

Some files were not shown because too many files have changed in this diff