Mirror of https://github.com/netbirdio/netbird.git, synced 2026-04-01 07:04:17 -04:00

Compare commits: 91 commits (test/conne... fix/ssh-pr)
Commit SHA1s:

3692a2aba4
fd9d430334
91f0d5cefd
82762280ee
b550a2face
ab77508950
b9462f5c6b
5ffaa5cdd6
a1858a9cb7
212b34f639
af8eaa23e2
f0eed50678
19d94c6158
628eb56073
a590c38d8b
4e149c9222
59f5b34280
dff06d0898
80a8816b1d
387e374e4b
3e6baea405
fe9b844511
2e1aa497d2
529c0314f8
d86875aeac
f80fe506d5
967c6f3cd3
e50e124e70
c545689448
8f389fef19
d3d6a327e0
b5489d4986
7a23c57cf8
11f891220e
5585adce18
f884299823
15aa6bae1b
11eb725ac8
30c02ab78c
3acd86e346
5c20f13c48
e6587b071d
85451ab4cd
a7f3ba03eb
4f0a3a77ad
44655ca9b5
e601278117
8e7b016be2
9e01ea7aae
cfc7ec8bb9
b3bbc0e5c6
d7c8e37ff4
05b66e73bc
01ceedac89
403babd433
47133031e5
82da606886
bbe5ae2145
0b21498b39
0ca59535f1
59c77d0658
333e045099
c2c4d9d336
9a6a72e88e
afe6d9fca4
ef82905526
d18747e846
f341d69314
327142837c
f8c0321aee
89115ff76a
63c83aa8d2
37f025c966
4a54f0d670
98890a29e3
9d123ec059
5d171f181a
22f878b3b7
44ef1a18dd
2b98dc4e52
2a26cb4567
5ca1b64328
36752a8cbb
f117fc7509
fc6b93ae59
564fa4ab04
a6db88fbd2
4b5294e596
a322dce42a
d1ead2265b
bbca74476e
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
blank_issues_enabled: true
contact_links:
  - name: Community Support
    url: https://forum.netbird.io/
    about: Community support forum
  - name: Cloud Support
    url: https://docs.netbird.io/help/report-bug-issues
    about: Contact us for support
  - name: Client/Connection Troubleshooting
    url: https://docs.netbird.io/help/troubleshooting-client
    about: See our client troubleshooting guide for help addressing common issues
  - name: Self-host Troubleshooting
    url: https://docs.netbird.io/selfhosted/troubleshooting
    about: See our self-host troubleshooting guide for help addressing common issues
.github/workflows/golang-test-linux.yml (vendored, 37 changed lines)

@@ -409,12 +409,19 @@ jobs:
  run: git --no-pager diff --exit-code

- name: Login to Docker hub
  if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref)
  uses: docker/login-action@v1
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  uses: docker/login-action@v3
  with:
    username: ${{ secrets.DOCKER_USER }}
    password: ${{ secrets.DOCKER_TOKEN }}

- name: docker login for root user
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  env:
    DOCKER_USER: ${{ secrets.DOCKER_USER }}
    DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
  run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin

- name: download mysql image
  if: matrix.store == 'mysql'
  run: docker pull mlsmaycon/warmed-mysql:8

@@ -497,15 +504,18 @@ jobs:
  run: git --no-pager diff --exit-code

- name: Login to Docker hub
  if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref)
  uses: docker/login-action@v1
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  uses: docker/login-action@v3
  with:
    username: ${{ secrets.DOCKER_USER }}
    password: ${{ secrets.DOCKER_TOKEN }}

- name: download mysql image
  if: matrix.store == 'mysql'
  run: docker pull mlsmaycon/warmed-mysql:8
- name: docker login for root user
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  env:
    DOCKER_USER: ${{ secrets.DOCKER_USER }}
    DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
  run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin

- name: Test
  run: |

@@ -586,15 +596,18 @@ jobs:
  run: git --no-pager diff --exit-code

- name: Login to Docker hub
  if: matrix.store == 'mysql' && (github.repository == github.head.repo.full_name || !github.head_ref)
  uses: docker/login-action@v1
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  uses: docker/login-action@v3
  with:
    username: ${{ secrets.DOCKER_USER }}
    password: ${{ secrets.DOCKER_TOKEN }}

- name: download mysql image
  if: matrix.store == 'mysql'
  run: docker pull mlsmaycon/warmed-mysql:8
- name: docker login for root user
  if: github.event.pull_request && github.event.pull_request.head.repo && github.event.pull_request.head.repo.full_name == '' || github.repository == github.event.pull_request.head.repo.full_name || !github.head_ref
  env:
    DOCKER_USER: ${{ secrets.DOCKER_USER }}
    DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
  run: echo "$DOCKER_TOKEN" | sudo docker login --username "$DOCKER_USER" --password-stdin

- name: Test
  run: |
.github/workflows/golang-test-windows.yml (vendored, 9 changed lines)

@@ -63,10 +63,15 @@ jobs:
- run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOMODCACHE=${{ env.cache }}
- run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOCACHE=${{ env.modcache }}
- run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe mod tidy
- run: echo "files=$(go list ./... | ForEach-Object { $_ } | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' } | Where-Object { $_ -notmatch '/proxy' } | Where-Object { $_ -notmatch '/combined' })" >> $env:GITHUB_ENV
- name: Generate test script
  run: |
    $packages = go list ./... | Where-Object { $_ -notmatch '/management' } | Where-Object { $_ -notmatch '/relay' } | Where-Object { $_ -notmatch '/signal' } | Where-Object { $_ -notmatch '/proxy' } | Where-Object { $_ -notmatch '/combined' }
    $goExe = "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe"
    $cmd = "$goExe test -tags=devcert -timeout 10m -p 1 $($packages -join ' ') > test-out.txt 2>&1"
    Set-Content -Path "${{ github.workspace }}\run-tests.cmd" -Value $cmd

- name: test
  run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe test -tags=devcert -timeout 10m -p 1 ${{ env.files }} > test-out.txt 2>&1"
  run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "${{ github.workspace }}\run-tests.cmd"
- name: test output
  if: ${{ always() }}
  run: Get-Content test-out.txt
.github/workflows/golangci-lint.yml (vendored, 2 changed lines)

@@ -19,7 +19,7 @@ jobs:
- name: codespell
  uses: codespell-project/actions-codespell@v2
  with:
    ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver
    ignore_words_list: erro,clienta,hastable,iif,groupd,testin,groupe,cros,ans,deriver,te
    skip: go.mod,go.sum,**/proxy/web/**
golangci:
  strategy:
.github/workflows/pr-title-check.yml (vendored, new file, 51 lines)

@@ -0,0 +1,51 @@
name: PR Title Check

on:
  pull_request:
    types: [opened, edited, synchronize, reopened]

jobs:
  check-title:
    runs-on: ubuntu-latest
    steps:
      - name: Validate PR title prefix
        uses: actions/github-script@v7
        with:
          script: |
            const title = context.payload.pull_request.title;
            const allowedTags = [
              'management',
              'client',
              'signal',
              'proxy',
              'relay',
              'misc',
              'infrastructure',
              'self-hosted',
              'doc',
            ];

            const pattern = /^\[([^\]]+)\]\s+.+/;
            const match = title.match(pattern);

            if (!match) {
              core.setFailed(
                `PR title must start with a tag in brackets.\n` +
                `Example: [client] fix something\n` +
                `Allowed tags: ${allowedTags.join(', ')}`
              );
              return;
            }

            const tags = match[1].split(',').map(t => t.trim().toLowerCase());

            const invalid = tags.filter(t => !allowedTags.includes(t));
            if (invalid.length > 0) {
              core.setFailed(
                `Invalid tag(s): ${invalid.join(', ')}\n` +
                `Allowed tags: ${allowedTags.join(', ')}`
              );
              return;
            }

            console.log(`Valid PR title tags: [${tags.join(', ')}]`);
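The same validation can be reproduced locally before opening a PR. The following stand-alone Go sketch is hypothetical (it is not part of this change); it mirrors the workflow's regular expression and allowed-tag list, and the helper name validTitle is illustrative only.

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// titlePattern and allowedTags mirror the values used by the workflow above.
var titlePattern = regexp.MustCompile(`^\[([^\]]+)\]\s+.+`)

var allowedTags = []string{
    "management", "client", "signal", "proxy", "relay",
    "misc", "infrastructure", "self-hosted", "doc",
}

// validTitle reports whether a PR title starts with bracket tags that are all allowed.
func validTitle(title string) bool {
    m := titlePattern.FindStringSubmatch(title)
    if m == nil {
        return false
    }
    for _, raw := range strings.Split(m[1], ",") {
        tag := strings.ToLower(strings.TrimSpace(raw))
        found := false
        for _, a := range allowedTags {
            if tag == a {
                found = true
                break
            }
        }
        if !found {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(validTitle("[client] fix something"))         // true
    fmt.Println(validTitle("[client,management] add checks")) // true
    fmt.Println(validTitle("fix something"))                  // false
}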
.github/workflows/release.yml (vendored, 86 changed lines)

@@ -10,7 +10,7 @@ on:

env:
  SIGN_PIPE_VER: "v0.1.1"
  GORELEASER_VER: "v2.3.2"
  GORELEASER_VER: "v2.14.3"
  PRODUCT_NAME: "NetBird"
  COPYRIGHT: "NetBird GmbH"
@@ -169,6 +169,13 @@ jobs:
- name: Install OS build dependencies
  run: sudo apt update && sudo apt install -y -q gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu

- name: Decode GPG signing key
  env:
    GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }}
  run: |
    echo "$GPG_RPM_PRIVATE_KEY" | base64 -d > /tmp/gpg-rpm-signing-key.asc
    echo "GPG_RPM_KEY_FILE=/tmp/gpg-rpm-signing-key.asc" >> $GITHUB_ENV

- name: Install goversioninfo
  run: go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@233067e
- name: Generate windows syso amd64
@@ -186,18 +193,54 @@
  HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
  UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
  UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
- name: Tag and push PR images (amd64 only)
  if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository
  GPG_RPM_KEY_FILE: ${{ env.GPG_RPM_KEY_FILE }}
  NFPM_NETBIRD_RPM_PASSPHRASE: ${{ secrets.GPG_RPM_PASSPHRASE }}
- name: Verify RPM signatures
  run: |
    PR_TAG="pr-${{ github.event.pull_request.number }}"
    docker run --rm -v $(pwd)/dist:/dist fedora:41 bash -c '
      dnf install -y -q rpm-sign curl >/dev/null 2>&1
      curl -sSL https://pkgs.netbird.io/yum/repodata/repomd.xml.key -o /tmp/rpm-pub.key
      rpm --import /tmp/rpm-pub.key
      echo "=== Verifying RPM signatures ==="
      for rpm_file in /dist/*amd64*.rpm; do
        [ -f "$rpm_file" ] || continue
        echo "--- $(basename $rpm_file) ---"
        rpm -K "$rpm_file"
      done
    '
- name: Clean up GPG key
  if: always()
  run: rm -f /tmp/gpg-rpm-signing-key.asc
- name: Tag and push images (amd64 only)
  if: |
    (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository) ||
    (github.event_name == 'push' && github.ref == 'refs/heads/main')
  run: |
    resolve_tags() {
      if [[ "${{ github.event_name }}" == "pull_request" ]]; then
        echo "pr-${{ github.event.pull_request.number }}"
      else
        echo "main sha-$(git rev-parse --short HEAD)"
      fi
    }

    tag_and_push() {
      local src="$1" img_name tag dst
      img_name="${src%%:*}"
      for tag in $(resolve_tags); do
        dst="${img_name}:${tag}"
        echo "Tagging ${src} -> ${dst}"
        docker tag "$src" "$dst"
        docker push "$dst"
      done
    }

    export -f tag_and_push resolve_tags

    echo '${{ steps.goreleaser.outputs.artifacts }}' | \
      jq -r '.[] | select(.type == "Docker Image") | select(.goarch == "amd64") | .name' | \
      grep '^ghcr.io/' | while read -r SRC; do
        IMG_NAME="${SRC%%:*}"
        DST="${IMG_NAME}:${PR_TAG}"
        echo "Tagging ${SRC} -> ${DST}"
        docker tag "$SRC" "$DST"
        docker push "$DST"
        tag_and_push "$SRC"
      done
- name: upload non tags for debug purposes
  uses: actions/upload-artifact@v4
@@ -265,6 +308,13 @@ jobs:
- name: Install dependencies
  run: sudo apt update && sudo apt install -y -q libappindicator3-dev gir1.2-appindicator3-0.1 libxxf86vm-dev gcc-mingw-w64-x86-64

- name: Decode GPG signing key
  env:
    GPG_RPM_PRIVATE_KEY: ${{ secrets.GPG_RPM_PRIVATE_KEY }}
  run: |
    echo "$GPG_RPM_PRIVATE_KEY" | base64 -d > /tmp/gpg-rpm-signing-key.asc
    echo "GPG_RPM_KEY_FILE=/tmp/gpg-rpm-signing-key.asc" >> $GITHUB_ENV

- name: Install LLVM-MinGW for ARM64 cross-compilation
  run: |
    cd /tmp
@@ -289,6 +339,24 @@
  HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
  UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
  UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
  GPG_RPM_KEY_FILE: ${{ env.GPG_RPM_KEY_FILE }}
  NFPM_NETBIRD_UI_RPM_PASSPHRASE: ${{ secrets.GPG_RPM_PASSPHRASE }}
- name: Verify RPM signatures
  run: |
    docker run --rm -v $(pwd)/dist:/dist fedora:41 bash -c '
      dnf install -y -q rpm-sign curl >/dev/null 2>&1
      curl -sSL https://pkgs.netbird.io/yum/repodata/repomd.xml.key -o /tmp/rpm-pub.key
      rpm --import /tmp/rpm-pub.key
      echo "=== Verifying RPM signatures ==="
      for rpm_file in /dist/*.rpm; do
        [ -f "$rpm_file" ] || continue
        echo "--- $(basename $rpm_file) ---"
        rpm -K "$rpm_file"
      done
    '
- name: Clean up GPG key
  if: always()
  run: rm -f /tmp/gpg-rpm-signing-key.asc
- name: upload non tags for debug purposes
  uses: actions/upload-artifact@v4
  with:
@@ -171,13 +171,12 @@ nfpms:
- maintainer: Netbird <dev@netbird.io>
  description: Netbird client.
  homepage: https://netbird.io/
  id: netbird-deb
  id: netbird_deb
  bindir: /usr/bin
  builds:
    - netbird
  formats:
    - deb

  scripts:
    postinstall: "release_files/post_install.sh"
    preremove: "release_files/pre_remove.sh"
@@ -185,16 +184,18 @@ nfpms:
- maintainer: Netbird <dev@netbird.io>
  description: Netbird client.
  homepage: https://netbird.io/
  id: netbird-rpm
  id: netbird_rpm
  bindir: /usr/bin
  builds:
    - netbird
  formats:
    - rpm

  scripts:
    postinstall: "release_files/post_install.sh"
    preremove: "release_files/pre_remove.sh"
  rpm:
    signature:
      key_file: '{{ if index .Env "GPG_RPM_KEY_FILE" }}{{ .Env.GPG_RPM_KEY_FILE }}{{ end }}'
dockers:
  - image_templates:
      - netbirdio/netbird:{{ .Version }}-amd64
@@ -876,7 +877,7 @@ brews:
uploads:
  - name: debian
    ids:
      - netbird-deb
      - netbird_deb
    mode: archive
    target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package=
    username: dev@wiretrustee.com
@@ -884,7 +885,7 @@ uploads:

  - name: yum
    ids:
      - netbird-rpm
      - netbird_rpm
    mode: archive
    target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }}
    username: dev@wiretrustee.com
@@ -61,7 +61,7 @@ nfpms:
- maintainer: Netbird <dev@netbird.io>
  description: Netbird client UI.
  homepage: https://netbird.io/
  id: netbird-ui-deb
  id: netbird_ui_deb
  package_name: netbird-ui
  builds:
    - netbird-ui
@@ -80,7 +80,7 @@ nfpms:
- maintainer: Netbird <dev@netbird.io>
  description: Netbird client UI.
  homepage: https://netbird.io/
  id: netbird-ui-rpm
  id: netbird_ui_rpm
  package_name: netbird-ui
  builds:
    - netbird-ui
@@ -95,11 +95,14 @@ nfpms:
      dst: /usr/share/pixmaps/netbird.png
  dependencies:
    - netbird
  rpm:
    signature:
      key_file: '{{ if index .Env "GPG_RPM_KEY_FILE" }}{{ .Env.GPG_RPM_KEY_FILE }}{{ end }}'

uploads:
  - name: debian
    ids:
      - netbird-ui-deb
      - netbird_ui_deb
    mode: archive
    target: https://pkgs.wiretrustee.com/debian/pool/{{ .ArtifactName }};deb.distribution=stable;deb.component=main;deb.architecture={{ if .Arm }}armhf{{ else }}{{ .Arch }}{{ end }};deb.package=
    username: dev@wiretrustee.com
@@ -107,7 +110,7 @@ uploads:

  - name: yum
    ids:
      - netbird-ui-rpm
      - netbird_ui_rpm
    mode: archive
    target: https://pkgs.wiretrustee.com/yum/{{ .Arch }}{{ if .Arm }}{{ .Arm }}{{ end }}
    username: dev@wiretrustee.com
@@ -126,6 +126,7 @@ See a complete [architecture overview](https://docs.netbird.io/about-netbird/how
### Community projects
- [NetBird installer script](https://github.com/physk/netbird-installer)
- [NetBird ansible collection by Dominion Solutions](https://galaxy.ansible.com/ui/repo/published/dominion_solutions/netbird/)
- [netbird-tui](https://github.com/n0pashkov/netbird-tui) — terminal UI for managing NetBird peers, routes, and settings

**Note**: The `main` branch may be in an *unstable or even broken state* during development.
For stable versions, see [releases](https://github.com/netbirdio/netbird/releases).
@@ -4,7 +4,7 @@
# sudo podman build -t localhost/netbird:latest -f client/Dockerfile --ignorefile .dockerignore-client .
# sudo podman run --rm -it --cap-add={BPF,NET_ADMIN,NET_RAW} localhost/netbird:latest

FROM alpine:3.23.2
FROM alpine:3.23.3
# iproute2: busybox doesn't display ip rules properly
RUN apk add --no-cache \
    bash \
@@ -17,8 +17,7 @@ ENV \
    NETBIRD_BIN="/usr/local/bin/netbird" \
    NB_LOG_FILE="console,/var/log/netbird/client.log" \
    NB_DAEMON_ADDR="unix:///var/run/netbird.sock" \
    NB_ENTRYPOINT_SERVICE_TIMEOUT="5" \
    NB_ENTRYPOINT_LOGIN_TIMEOUT="5"
    NB_ENTRYPOINT_SERVICE_TIMEOUT="30"

ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ]
@@ -23,8 +23,7 @@ ENV \
    NB_DAEMON_ADDR="unix:///var/lib/netbird/netbird.sock" \
    NB_LOG_FILE="console,/var/lib/netbird/client.log" \
    NB_DISABLE_DNS="true" \
    NB_ENTRYPOINT_SERVICE_TIMEOUT="5" \
    NB_ENTRYPOINT_LOGIN_TIMEOUT="1"
    NB_ENTRYPOINT_SERVICE_TIMEOUT="30"

ENTRYPOINT [ "/usr/local/bin/netbird-entrypoint.sh" ]
@@ -124,7 +124,7 @@ func (c *Client) Run(platformFiles PlatformFiles, urlOpener URLOpener, isAndroid

    // todo do not throw error in case of cancelled context
    ctx = internal.CtxInitState(ctx)
    c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false)
    c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
    return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile)
}

@@ -157,7 +157,7 @@ func (c *Client) RunWithoutLogin(platformFiles PlatformFiles, dns *DNSList, dnsR

    // todo do not throw error in case of cancelled context
    ctx = internal.CtxInitState(ctx)
    c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder, false)
    c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
    return c.connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, slices.Clone(dns.items), dnsReadyListener, stateFile)
}
client/cmd/expose.go (new file, 280 lines)

@@ -0,0 +1,280 @@
package cmd

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "os/signal"
    "regexp"
    "strconv"
    "strings"
    "syscall"

    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/proto"
    "github.com/netbirdio/netbird/util"
)

var pinRegexp = regexp.MustCompile(`^\d{6}$`)

var (
    exposePin          string
    exposePassword     string
    exposeUserGroups   []string
    exposeDomain       string
    exposeNamePrefix   string
    exposeProtocol     string
    exposeExternalPort uint16
)

var exposeCmd = &cobra.Command{
    Use:   "expose <port>",
    Short: "Expose a local port via the NetBird reverse proxy",
    Args:  cobra.ExactArgs(1),
    Example: ` netbird expose --with-password safe-pass 8080
  netbird expose --protocol tcp 5432
  netbird expose --protocol tcp --with-external-port 5433 5432
  netbird expose --protocol tls --with-custom-domain tls.example.com 4443`,
    RunE: exposeFn,
}

func init() {
    exposeCmd.Flags().StringVar(&exposePin, "with-pin", "", "Protect the exposed service with a 6-digit PIN (e.g. --with-pin 123456)")
    exposeCmd.Flags().StringVar(&exposePassword, "with-password", "", "Protect the exposed service with a password (e.g. --with-password my-secret)")
    exposeCmd.Flags().StringSliceVar(&exposeUserGroups, "with-user-groups", nil, "Restrict access to specific user groups with SSO (e.g. --with-user-groups devops,Backend)")
    exposeCmd.Flags().StringVar(&exposeDomain, "with-custom-domain", "", "Custom domain for the exposed service, must be configured to your account (e.g. --with-custom-domain myapp.example.com)")
    exposeCmd.Flags().StringVar(&exposeNamePrefix, "with-name-prefix", "", "Prefix for the generated service name (e.g. --with-name-prefix my-app)")
    exposeCmd.Flags().StringVar(&exposeProtocol, "protocol", "http", "Protocol to use: http, https, tcp, udp, or tls (e.g. --protocol tcp)")
    exposeCmd.Flags().Uint16Var(&exposeExternalPort, "with-external-port", 0, "Public-facing external port on the proxy cluster (defaults to the target port for L4)")
}

// isClusterProtocol returns true for L4/TLS protocols that reject HTTP-style auth flags.
func isClusterProtocol(protocol string) bool {
    switch strings.ToLower(protocol) {
    case "tcp", "udp", "tls":
        return true
    default:
        return false
    }
}

// isPortBasedProtocol returns true for pure port-based protocols (TCP/UDP)
// where domain display doesn't apply. TLS uses SNI so it has a domain.
func isPortBasedProtocol(protocol string) bool {
    switch strings.ToLower(protocol) {
    case "tcp", "udp":
        return true
    default:
        return false
    }
}

// extractPort returns the port portion of a URL like "tcp://host:12345", or
// falls back to the given default formatted as a string.
func extractPort(serviceURL string, fallback uint16) string {
    u := serviceURL
    if idx := strings.Index(u, "://"); idx != -1 {
        u = u[idx+3:]
    }
    if i := strings.LastIndex(u, ":"); i != -1 {
        if p := u[i+1:]; p != "" {
            return p
        }
    }
    return strconv.FormatUint(uint64(fallback), 10)
}

// resolveExternalPort returns the effective external port, defaulting to the target port.
func resolveExternalPort(targetPort uint64) uint16 {
    if exposeExternalPort != 0 {
        return exposeExternalPort
    }
    return uint16(targetPort)
}

func validateExposeFlags(cmd *cobra.Command, portStr string) (uint64, error) {
    port, err := strconv.ParseUint(portStr, 10, 32)
    if err != nil {
        return 0, fmt.Errorf("invalid port number: %s", portStr)
    }
    if port == 0 || port > 65535 {
        return 0, fmt.Errorf("invalid port number: must be between 1 and 65535")
    }

    if !isProtocolValid(exposeProtocol) {
        return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol)
    }

    if isClusterProtocol(exposeProtocol) {
        if exposePin != "" || exposePassword != "" || len(exposeUserGroups) > 0 {
            return 0, fmt.Errorf("auth flags (--with-pin, --with-password, --with-user-groups) are not supported for %s protocol", exposeProtocol)
        }
    } else if cmd.Flags().Changed("with-external-port") {
        return 0, fmt.Errorf("--with-external-port is not supported for %s protocol", exposeProtocol)
    }

    if exposePin != "" && !pinRegexp.MatchString(exposePin) {
        return 0, fmt.Errorf("invalid pin: must be exactly 6 digits")
    }

    if cmd.Flags().Changed("with-password") && exposePassword == "" {
        return 0, fmt.Errorf("password cannot be empty")
    }

    if cmd.Flags().Changed("with-user-groups") && len(exposeUserGroups) == 0 {
        return 0, fmt.Errorf("user groups cannot be empty")
    }

    return port, nil
}

func isProtocolValid(exposeProtocol string) bool {
    switch strings.ToLower(exposeProtocol) {
    case "http", "https", "tcp", "udp", "tls":
        return true
    default:
        return false
    }
}

func exposeFn(cmd *cobra.Command, args []string) error {
    SetFlagsFromEnvVars(rootCmd)

    if err := util.InitLog(logLevel, util.LogConsole); err != nil {
        log.Errorf("failed initializing log %v", err)
        return err
    }

    cmd.Root().SilenceUsage = false

    port, err := validateExposeFlags(cmd, args[0])
    if err != nil {
        return err
    }

    cmd.Root().SilenceUsage = true

    ctx, cancel := context.WithCancel(cmd.Context())
    defer cancel()

    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        <-sigCh
        cancel()
    }()

    conn, err := DialClientGRPCServer(ctx, daemonAddr)
    if err != nil {
        return fmt.Errorf("connect to daemon: %w", err)
    }
    defer func() {
        if err := conn.Close(); err != nil {
            log.Debugf("failed to close daemon connection: %v", err)
        }
    }()

    client := proto.NewDaemonServiceClient(conn)

    protocol, err := toExposeProtocol(exposeProtocol)
    if err != nil {
        return err
    }

    req := &proto.ExposeServiceRequest{
        Port:       uint32(port),
        Protocol:   protocol,
        Pin:        exposePin,
        Password:   exposePassword,
        UserGroups: exposeUserGroups,
        Domain:     exposeDomain,
        NamePrefix: exposeNamePrefix,
    }
    if isClusterProtocol(exposeProtocol) {
        req.ListenPort = uint32(resolveExternalPort(port))
    }

    stream, err := client.ExposeService(ctx, req)
    if err != nil {
        return fmt.Errorf("expose service: %w", err)
    }

    if err := handleExposeReady(cmd, stream, port); err != nil {
        return err
    }

    return waitForExposeEvents(cmd, ctx, stream)
}

func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) {
    switch strings.ToLower(exposeProtocol) {
    case "http":
        return proto.ExposeProtocol_EXPOSE_HTTP, nil
    case "https":
        return proto.ExposeProtocol_EXPOSE_HTTPS, nil
    case "tcp":
        return proto.ExposeProtocol_EXPOSE_TCP, nil
    case "udp":
        return proto.ExposeProtocol_EXPOSE_UDP, nil
    case "tls":
        return proto.ExposeProtocol_EXPOSE_TLS, nil
    default:
        return 0, fmt.Errorf("unsupported protocol %q: must be http, https, tcp, udp, or tls", exposeProtocol)
    }
}

func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServiceClient, port uint64) error {
    event, err := stream.Recv()
    if err != nil {
        return fmt.Errorf("receive expose event: %w", err)
    }

    ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready)
    if !ok {
        return fmt.Errorf("unexpected expose event: %T", event.Event)
    }
    printExposeReady(cmd, ready.Ready, port)
    return nil
}

func printExposeReady(cmd *cobra.Command, r *proto.ExposeServiceReady, port uint64) {
    cmd.Println("Service exposed successfully!")
    cmd.Printf(" Name: %s\n", r.ServiceName)
    if r.ServiceUrl != "" {
        cmd.Printf(" URL: %s\n", r.ServiceUrl)
    }
    if r.Domain != "" && !isPortBasedProtocol(exposeProtocol) {
        cmd.Printf(" Domain: %s\n", r.Domain)
    }
    cmd.Printf(" Protocol: %s\n", exposeProtocol)
    cmd.Printf(" Internal: %d\n", port)
    if isClusterProtocol(exposeProtocol) {
        cmd.Printf(" External: %s\n", extractPort(r.ServiceUrl, resolveExternalPort(port)))
    }
    if r.PortAutoAssigned && exposeExternalPort != 0 {
        cmd.Printf("\n Note: requested port %d was reassigned\n", exposeExternalPort)
    }
    cmd.Println()
    cmd.Println("Press Ctrl+C to stop exposing.")
}

func waitForExposeEvents(cmd *cobra.Command, ctx context.Context, stream proto.DaemonService_ExposeServiceClient) error {
    for {
        _, err := stream.Recv()
        if err != nil {
            if ctx.Err() != nil {
                cmd.Println("\nService stopped.")
                //nolint:nilerr
                return nil
            }
            if errors.Is(err, io.EOF) {
                return fmt.Errorf("connection to daemon closed unexpectedly")
            }
            return fmt.Errorf("stream error: %w", err)
        }
    }
}
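A quick way to sanity-check the port helpers above is a package-local test. The following is a hedged sketch of a hypothetical client/cmd/expose_test.go (not included in this change), assuming the unexported extractPort helper stays as defined above.

package cmd

import "testing"

// TestExtractPort exercises the URL-parsing and fallback paths of extractPort.
func TestExtractPort(t *testing.T) {
    // Port present after the scheme: the trailing digits win.
    if got := extractPort("tcp://proxy.example.com:5433", 5432); got != "5433" {
        t.Fatalf("expected 5433, got %s", got)
    }
    // No port in the URL: the numeric fallback is formatted as a string.
    if got := extractPort("proxy.example.com", 5432); got != "5432" {
        t.Fatalf("expected fallback 5432, got %s", got)
    }
}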
@@ -22,6 +22,7 @@ import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    daddr "github.com/netbirdio/netbird/client/internal/daemonaddr"
    "github.com/netbirdio/netbird/client/internal/profilemanager"
)

@@ -80,6 +81,15 @@ var (
        Short:        "",
        Long:         "",
        SilenceUsage: true,
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            SetFlagsFromEnvVars(cmd.Root())

            // Don't resolve for service commands — they create the socket, not connect to it.
            if !isServiceCmd(cmd) {
                daemonAddr = daddr.ResolveUnixDaemonAddr(daemonAddr)
            }
            return nil
        },
    }
)

@@ -144,6 +154,7 @@ func init() {
    rootCmd.AddCommand(forwardingRulesCmd)
    rootCmd.AddCommand(debugCmd)
    rootCmd.AddCommand(profileCmd)
    rootCmd.AddCommand(exposeCmd)

    networksCMD.AddCommand(routesListCmd)
    networksCMD.AddCommand(routesSelectCmd, routesDeselectCmd)
@@ -385,7 +396,6 @@ func migrateToNetbird(oldPath, newPath string) bool {
}

func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) {
    SetFlagsFromEnvVars(rootCmd)
    cmd.SetOut(cmd.OutOrStdout())

    conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr)
@@ -398,3 +408,13 @@ func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) {

    return conn, nil
}

// isServiceCmd returns true if cmd is the "service" command or a child of it.
func isServiceCmd(cmd *cobra.Command) bool {
    for c := cmd; c != nil; c = c.Parent() {
        if c.Name() == "service" {
            return true
        }
    }
    return false
}
@@ -103,7 +103,7 @@ func (p *program) Stop(srv service.Service) error {

// Common setup for service control commands
func setupServiceControlCommand(cmd *cobra.Command, ctx context.Context, cancel context.CancelFunc) (service.Service, error) {
    SetFlagsFromEnvVars(rootCmd)
    // rootCmd env vars are already applied by PersistentPreRunE.
    SetFlagsFromEnvVars(serviceCmd)

    cmd.SetOut(cmd.OutOrStdout())
@@ -7,7 +7,7 @@ import (

    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
    "github.com/netbirdio/netbird/client/internal/updater/reposign"
)

var (

@@ -6,7 +6,7 @@ import (

    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
    "github.com/netbirdio/netbird/client/internal/updater/reposign"
)

const (

@@ -7,7 +7,7 @@ import (

    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
    "github.com/netbirdio/netbird/client/internal/updater/reposign"
)

const (

@@ -7,7 +7,7 @@ import (

    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/internal/updatemanager/reposign"
    "github.com/netbirdio/netbird/client/internal/updater/reposign"
)

var (
@@ -28,6 +28,7 @@ var (
    ipsFilterMap         map[string]struct{}
    prefixNamesFilterMap map[string]struct{}
    connectionTypeFilter string
    checkFlag            string
)

var statusCmd = &cobra.Command{
@@ -49,6 +50,7 @@ func init() {
    statusCmd.PersistentFlags().StringSliceVar(&prefixNamesFilter, "filter-by-names", []string{}, "filters the detailed output by a list of one or more peer FQDN or hostnames, e.g., --filter-by-names peer-a,peer-b.netbird.cloud")
    statusCmd.PersistentFlags().StringVar(&statusFilter, "filter-by-status", "", "filters the detailed output by connection status(idle|connecting|connected), e.g., --filter-by-status connected")
    statusCmd.PersistentFlags().StringVar(&connectionTypeFilter, "filter-by-connection-type", "", "filters the detailed output by connection type (P2P|Relayed), e.g., --filter-by-connection-type P2P")
    statusCmd.PersistentFlags().StringVar(&checkFlag, "check", "", "run a health check and exit with code 0 on success, 1 on failure (live|ready|startup)")
}

func statusFunc(cmd *cobra.Command, args []string) error {
@@ -56,6 +58,10 @@ func statusFunc(cmd *cobra.Command, args []string) error {

    cmd.SetOut(cmd.OutOrStdout())

    if checkFlag != "" {
        return runHealthCheck(cmd)
    }

    err := parseFilters()
    if err != nil {
        return err
@@ -68,15 +74,17 @@ func statusFunc(cmd *cobra.Command, args []string) error {

    ctx := internal.CtxInitState(cmd.Context())

    resp, err := getStatus(ctx, false)
    resp, err := getStatus(ctx, true, false)
    if err != nil {
        return err
    }

    status := resp.GetStatus()

    if status == string(internal.StatusNeedsLogin) || status == string(internal.StatusLoginFailed) ||
        status == string(internal.StatusSessionExpired) {
    needsAuth := status == string(internal.StatusNeedsLogin) || status == string(internal.StatusLoginFailed) ||
        status == string(internal.StatusSessionExpired)

    if needsAuth && !jsonFlag && !yamlFlag {
        cmd.Printf("Daemon status: %s\n\n"+
            "Run UP command to log in with SSO (interactive login):\n\n"+
            " netbird up \n\n"+
@@ -99,7 +107,17 @@ func statusFunc(cmd *cobra.Command, args []string) error {
        profName = activeProf.Name
    }

    var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), anonymizeFlag, resp.GetDaemonVersion(), statusFilter, prefixNamesFilter, prefixNamesFilterMap, ipsFilterMap, connectionTypeFilter, profName)
    var outputInformationHolder = nbstatus.ConvertToStatusOutputOverview(resp.GetFullStatus(), nbstatus.ConvertOptions{
        Anonymize:            anonymizeFlag,
        DaemonVersion:        resp.GetDaemonVersion(),
        DaemonStatus:         nbstatus.ParseDaemonStatus(status),
        StatusFilter:         statusFilter,
        PrefixNamesFilter:    prefixNamesFilter,
        PrefixNamesFilterMap: prefixNamesFilterMap,
        IPsFilter:            ipsFilterMap,
        ConnectionTypeFilter: connectionTypeFilter,
        ProfileName:          profName,
    })
    var statusOutputString string
    switch {
    case detailFlag:
@@ -121,7 +139,7 @@ func statusFunc(cmd *cobra.Command, args []string) error {
    return nil
}

func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse, error) {
func getStatus(ctx context.Context, fullPeerStatus bool, shouldRunProbes bool) (*proto.StatusResponse, error) {
    conn, err := DialClientGRPCServer(ctx, daemonAddr)
    if err != nil {
        //nolint
@@ -131,7 +149,7 @@ func getStatus(ctx context.Context, shouldRunProbes bool) (*proto.StatusResponse
    }
    defer conn.Close()

    resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{GetFullPeerStatus: true, ShouldRunProbes: shouldRunProbes})
    resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{GetFullPeerStatus: fullPeerStatus, ShouldRunProbes: shouldRunProbes})
    if err != nil {
        return nil, fmt.Errorf("status failed: %v", status.Convert(err).Message())
    }
@@ -185,6 +203,83 @@ func enableDetailFlagWhenFilterFlag() {
    }
}

func runHealthCheck(cmd *cobra.Command) error {
    check := strings.ToLower(checkFlag)
    switch check {
    case "live", "ready", "startup":
    default:
        return fmt.Errorf("unknown check %q, must be one of: live, ready, startup", checkFlag)
    }

    if err := util.InitLog(logLevel, util.LogConsole); err != nil {
        return fmt.Errorf("init log: %w", err)
    }

    ctx := internal.CtxInitState(cmd.Context())

    isStartup := check == "startup"
    resp, err := getStatus(ctx, isStartup, false)
    if err != nil {
        return err
    }

    switch check {
    case "live":
        return nil
    case "ready":
        return checkReadiness(resp)
    case "startup":
        return checkStartup(resp)
    default:
        return nil
    }
}

func checkReadiness(resp *proto.StatusResponse) error {
    daemonStatus := internal.StatusType(resp.GetStatus())
    switch daemonStatus {
    case internal.StatusIdle, internal.StatusConnecting, internal.StatusConnected:
        return nil
    case internal.StatusNeedsLogin, internal.StatusLoginFailed, internal.StatusSessionExpired:
        return fmt.Errorf("readiness check: daemon status is %s", daemonStatus)
    default:
        return fmt.Errorf("readiness check: unexpected daemon status %q", daemonStatus)
    }
}

func checkStartup(resp *proto.StatusResponse) error {
    fullStatus := resp.GetFullStatus()
    if fullStatus == nil {
        return fmt.Errorf("startup check: no full status available")
    }

    if !fullStatus.GetManagementState().GetConnected() {
        return fmt.Errorf("startup check: management not connected")
    }

    if !fullStatus.GetSignalState().GetConnected() {
        return fmt.Errorf("startup check: signal not connected")
    }

    var relayCount, relaysConnected int
    for _, r := range fullStatus.GetRelays() {
        uri := r.GetURI()
        if !strings.HasPrefix(uri, "rel://") && !strings.HasPrefix(uri, "rels://") {
            continue
        }
        relayCount++
        if r.GetAvailable() {
            relaysConnected++
        }
    }

    if relayCount > 0 && relaysConnected == 0 {
        return fmt.Errorf("startup check: no relay servers available (0/%d connected)", relayCount)
    }

    return nil
}

func parseInterfaceIP(interfaceIP string) string {
    ip, _, err := net.ParseCIDR(interfaceIP)
    if err != nil {
@@ -197,7 +197,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command, activeProf *pr
    r := peer.NewRecorder(config.ManagementURL.String())
    r.GetFullStatus()

    connectClient := internal.NewConnectClient(ctx, config, r, false)
    connectClient := internal.NewConnectClient(ctx, config, r)
    SetupDebugHandler(ctx, config, r, connectClient, "")

    return connectClient.Run(nil, util.FindFirstLogPath(logFiles))
@@ -11,7 +11,7 @@ import (
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"

    "github.com/netbirdio/netbird/client/internal/updatemanager/installer"
    "github.com/netbirdio/netbird/client/internal/updater/installer"
    "github.com/netbirdio/netbird/util"
)
@@ -14,6 +14,7 @@ import (
    "github.com/sirupsen/logrus"
    wgnetstack "golang.zx2c4.com/wireguard/tun/netstack"

    "github.com/netbirdio/netbird/client/iface"
    "github.com/netbirdio/netbird/client/iface/netstack"
    "github.com/netbirdio/netbird/client/internal"
    "github.com/netbirdio/netbird/client/internal/auth"
@@ -21,6 +22,7 @@ import (
    "github.com/netbirdio/netbird/client/internal/profilemanager"
    sshcommon "github.com/netbirdio/netbird/client/ssh"
    "github.com/netbirdio/netbird/client/system"
    "github.com/netbirdio/netbird/shared/management/domain"
    mgmProto "github.com/netbirdio/netbird/shared/management/proto"
)

@@ -81,6 +83,14 @@ type Options struct {
    BlockInbound bool
    // WireguardPort is the port for the WireGuard interface. Use 0 for a random port.
    WireguardPort *int
    // MTU is the MTU for the WireGuard interface.
    // Valid values are in the range 576..8192 bytes.
    // If non-nil, this value overrides any value stored in the config file.
    // If nil, the existing config MTU (if non-zero) is preserved; otherwise it defaults to 1280.
    // Set to a higher value (e.g. 1400) if carrying QUIC or other protocols that require larger datagrams.
    MTU *uint16
    // DNSLabels defines additional DNS labels configured in the peer.
    DNSLabels []string
}

// validateCredentials checks that exactly one credential type is provided
@@ -112,6 +122,12 @@ func New(opts Options) (*Client, error) {
        return nil, err
    }

    if opts.MTU != nil {
        if err := iface.ValidateMTU(*opts.MTU); err != nil {
            return nil, fmt.Errorf("invalid MTU: %w", err)
        }
    }

    if opts.LogOutput != nil {
        logrus.SetOutput(opts.LogOutput)
    }
@@ -140,9 +156,14 @@ func New(opts Options) (*Client, error) {
        }
    }

    var err error
    var parsedLabels domain.List
    if parsedLabels, err = domain.FromStringList(opts.DNSLabels); err != nil {
        return nil, fmt.Errorf("invalid dns labels: %w", err)
    }

    t := true
    var config *profilemanager.Config
    var err error
    input := profilemanager.ConfigInput{
        ConfigPath:    opts.ConfigPath,
        ManagementURL: opts.ManagementURL,
@@ -151,6 +172,8 @@ func New(opts Options) (*Client, error) {
        DisableClientRoutes: &opts.DisableClientRoutes,
        BlockInbound:        &opts.BlockInbound,
        WireguardPort:       opts.WireguardPort,
        MTU:                 opts.MTU,
        DNSLabels:           parsedLabels,
    }
    if opts.ConfigPath != "" {
        config, err = profilemanager.UpdateOrCreateConfig(input)
@@ -202,7 +225,7 @@ func (c *Client) Start(startCtx context.Context) error {
    if err, _ := authClient.Login(ctx, c.setupKey, c.jwtToken); err != nil {
        return fmt.Errorf("login: %w", err)
    }
    client := internal.NewConnectClient(ctx, c.config, c.recorder, false)
    client := internal.NewConnectClient(ctx, c.config, c.recorder)
    client.SetSyncResponsePersistence(true)

    // either startup error (permanent backoff err) or nil err (successful engine up)
@@ -23,9 +23,10 @@ type Manager struct {

    wgIface iFaceMapper

    ipv4Client *iptables.IPTables
    aclMgr     *aclManager
    router     *router
    ipv4Client   *iptables.IPTables
    aclMgr       *aclManager
    router       *router
    rawSupported bool
}

// iFaceMapper defines subset methods of interface required for manager
@@ -84,7 +85,7 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error {
    }

    if err := m.initNoTrackChain(); err != nil {
        return fmt.Errorf("init notrack chain: %w", err)
        log.Warnf("raw table not available, notrack rules will be disabled: %v", err)
    }

    // persist early to ensure cleanup of chains
@@ -318,6 +319,10 @@ func (m *Manager) SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error {
    m.mutex.Lock()
    defer m.mutex.Unlock()

    if !m.rawSupported {
        return fmt.Errorf("raw table not available")
    }

    wgPortStr := fmt.Sprintf("%d", wgPort)
    proxyPortStr := fmt.Sprintf("%d", proxyPort)

@@ -375,12 +380,16 @@ func (m *Manager) initNoTrackChain() error {
        return fmt.Errorf("add prerouting jump rule: %w", err)
    }

    m.rawSupported = true
    return nil
}

func (m *Manager) cleanupNoTrackChain() error {
    exists, err := m.ipv4Client.ChainExists(tableRaw, chainNameRaw)
    if err != nil {
    if !m.rawSupported {
        return nil
    }
        return fmt.Errorf("check chain exists: %w", err)
    }
    if !exists {
@@ -401,6 +410,7 @@ func (m *Manager) cleanupNoTrackChain() error {
        return fmt.Errorf("clear and delete chain: %w", err)
    }

    m.rawSupported = false
    return nil
}
@@ -95,7 +95,7 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error {
    }

    if err := m.initNoTrackChains(workTable); err != nil {
        return fmt.Errorf("init notrack chains: %w", err)
        log.Warnf("raw priority chains not available, notrack rules will be disabled: %v", err)
    }

    stateManager.RegisterState(&ShutdownState{})
@@ -28,7 +28,7 @@ func Backoff(ctx context.Context) backoff.BackOff {

// CreateConnection creates a gRPC client connection with the appropriate transport options.
// The component parameter specifies the WebSocket proxy component path (e.g., "/management", "/signal").
func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, component string) (*grpc.ClientConn, error) {
func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, component string, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) {
    transportOption := grpc.WithTransportCredentials(insecure.NewCredentials())
    // for js, the outer websocket layer takes care of tls
    if tlsEnabled && runtime.GOOS != "js" {
@@ -46,9 +46,7 @@ func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, compone
    connCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()

    conn, err := grpc.DialContext(
        connCtx,
        addr,
    opts := []grpc.DialOption{
        transportOption,
        WithCustomDialer(tlsEnabled, component),
        grpc.WithBlock(),
@@ -56,7 +54,10 @@ func CreateConnection(ctx context.Context, addr string, tlsEnabled bool, compone
            Time:    30 * time.Second,
            Timeout: 10 * time.Second,
        }),
    )
    }
    opts = append(opts, extraOpts...)

    conn, err := grpc.DialContext(connCtx, addr, opts...)
    if err != nil {
        return nil, fmt.Errorf("dial context: %w", err)
    }
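With the new variadic extraOpts parameter, callers can append their own dial options to the defaults built inside CreateConnection. Below is a hedged usage sketch assuming the caller lives in the same package; the user-agent option is only an illustrative extra and is not something this change introduces.

// dialManagement is a hypothetical caller that forwards one extra
// grpc.DialOption through the new extraOpts parameter.
func dialManagement(ctx context.Context, addr string) (*grpc.ClientConn, error) {
    return CreateConnection(ctx, addr, true, "/management",
        grpc.WithUserAgent("netbird-example"))
}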
@@ -5,20 +5,18 @@ package configurer
import (
    "net"

    log "github.com/sirupsen/logrus"
    "golang.zx2c4.com/wireguard/ipc"
)

func openUAPI(deviceName string) (net.Listener, error) {
    uapiSock, err := ipc.UAPIOpen(deviceName)
    if err != nil {
        log.Errorf("failed to open uapi socket: %v", err)
        return nil, err
    }

    listener, err := ipc.UAPIListen(deviceName, uapiSock)
    if err != nil {
        log.Errorf("failed to listen on uapi socket: %v", err)
        _ = uapiSock.Close()
        return nil, err
    }
@@ -54,6 +54,14 @@ func NewUSPConfigurer(device *device.Device, deviceName string, activityRecorder
    return wgCfg
}

func NewUSPConfigurerNoUAPI(device *device.Device, deviceName string, activityRecorder *bind.ActivityRecorder) *WGUSPConfigurer {
    return &WGUSPConfigurer{
        device:           device,
        deviceName:       deviceName,
        activityRecorder: activityRecorder,
    }
}

func (c *WGUSPConfigurer) ConfigureInterface(privateKey string, port int) error {
    log.Debugf("adding Wireguard private key")
    key, err := wgtypes.ParseKey(privateKey)
@@ -79,7 +79,7 @@ func (t *TunNetstackDevice) create() (WGConfigurer, error) {
        device.NewLogger(wgLogLevel(), "[netbird] "),
    )

    t.configurer = configurer.NewUSPConfigurer(t.device, t.name, t.bind.ActivityRecorder())
    t.configurer = configurer.NewUSPConfigurerNoUAPI(t.device, t.name, t.bind.ActivityRecorder())
    err = t.configurer.ConfigureInterface(t.key, t.port)
    if err != nil {
        if cErr := tunIface.Close(); cErr != nil {
@@ -23,12 +23,13 @@ import (
|
||||
"github.com/netbirdio/netbird/client/iface/netstack"
|
||||
"github.com/netbirdio/netbird/client/internal/dns"
|
||||
"github.com/netbirdio/netbird/client/internal/listener"
|
||||
"github.com/netbirdio/netbird/client/internal/metrics"
|
||||
"github.com/netbirdio/netbird/client/internal/peer"
|
||||
"github.com/netbirdio/netbird/client/internal/profilemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/statemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||
"github.com/netbirdio/netbird/client/internal/updatemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
|
||||
"github.com/netbirdio/netbird/client/internal/updater"
|
||||
"github.com/netbirdio/netbird/client/internal/updater/installer"
|
||||
nbnet "github.com/netbirdio/netbird/client/net"
|
||||
cProto "github.com/netbirdio/netbird/client/proto"
|
||||
"github.com/netbirdio/netbird/client/ssh"
|
||||
@@ -44,13 +45,14 @@ import (
|
||||
)
|
||||
|
||||
type ConnectClient struct {
|
||||
ctx context.Context
|
||||
config *profilemanager.Config
|
||||
statusRecorder *peer.Status
|
||||
doInitialAutoUpdate bool
|
||||
ctx context.Context
|
||||
config *profilemanager.Config
|
||||
statusRecorder *peer.Status
|
||||
|
||||
engine *Engine
|
||||
engineMutex sync.Mutex
|
||||
engine *Engine
|
||||
engineMutex sync.Mutex
|
||||
clientMetrics *metrics.ClientMetrics
|
||||
updateManager *updater.Manager
|
||||
|
||||
persistSyncResponse bool
|
||||
}
|
||||
@@ -59,17 +61,19 @@ func NewConnectClient(
|
||||
ctx context.Context,
|
||||
config *profilemanager.Config,
|
||||
statusRecorder *peer.Status,
|
||||
doInitalAutoUpdate bool,
|
||||
) *ConnectClient {
|
||||
return &ConnectClient{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
statusRecorder: statusRecorder,
|
||||
doInitialAutoUpdate: doInitalAutoUpdate,
|
||||
engineMutex: sync.Mutex{},
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
statusRecorder: statusRecorder,
|
||||
engineMutex: sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConnectClient) SetUpdateManager(um *updater.Manager) {
|
||||
c.updateManager = um
|
||||
}
|
||||
|
||||
// Run with main logic.
|
||||
func (c *ConnectClient) Run(runningChan chan struct{}, logPath string) error {
|
||||
return c.run(MobileDependency{}, runningChan, logPath)
|
||||
@@ -131,10 +135,34 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
}
|
||||
}()
|
||||
|
||||
// Stop metrics push on exit
|
||||
defer func() {
|
||||
if c.clientMetrics != nil {
|
||||
c.clientMetrics.StopPush()
|
||||
}
|
||||
}()
|
||||
|
||||
log.Infof("starting NetBird client version %s on %s/%s", version.NetbirdVersion(), runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
nbnet.Init()
|
||||
|
||||
// Initialize metrics once at startup (always active for debug bundles)
|
||||
if c.clientMetrics == nil {
|
||||
agentInfo := metrics.AgentInfo{
|
||||
DeploymentType: metrics.DeploymentTypeUnknown,
|
||||
Version: version.NetbirdVersion(),
|
||||
OS: runtime.GOOS,
|
||||
Arch: runtime.GOARCH,
|
||||
}
|
||||
c.clientMetrics = metrics.NewClientMetrics(agentInfo)
|
||||
log.Debugf("initialized client metrics")
|
||||
|
||||
// Start metrics push if enabled (uses daemon context, persists across engine restarts)
|
||||
if metrics.IsMetricsPushEnabled() {
|
||||
c.clientMetrics.StartPush(c.ctx, metrics.PushConfigFromEnv())
|
||||
}
|
||||
}
|
||||
|
||||
backOff := &backoff.ExponentialBackOff{
|
||||
InitialInterval: time.Second,
|
||||
RandomizationFactor: 1,
|
||||
@@ -187,14 +215,13 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
stateManager := statemanager.New(path)
|
||||
stateManager.RegisterState(&sshconfig.ShutdownState{})
|
||||
|
||||
updateManager, err := updatemanager.NewManager(c.statusRecorder, stateManager)
|
||||
if err == nil {
|
||||
updateManager.CheckUpdateSuccess(c.ctx)
|
||||
if c.updateManager != nil {
|
||||
c.updateManager.CheckUpdateSuccess(c.ctx)
|
||||
}
|
||||
|
||||
inst := installer.New()
|
||||
if err := inst.CleanUpInstallerFiles(); err != nil {
|
||||
log.Errorf("failed to clean up temporary installer file: %v", err)
|
||||
}
|
||||
inst := installer.New()
|
||||
if err := inst.CleanUpInstallerFiles(); err != nil {
|
||||
log.Errorf("failed to clean up temporary installer file: %v", err)
|
||||
}
|
||||
|
||||
defer c.statusRecorder.ClientStop()
|
||||
@@ -222,6 +249,16 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
mgmNotifier := statusRecorderToMgmConnStateNotifier(c.statusRecorder)
|
||||
mgmClient.SetConnStateListener(mgmNotifier)
|
||||
|
||||
// Update metrics with actual deployment type after connection
|
||||
deploymentType := metrics.DetermineDeploymentType(mgmClient.GetServerURL())
|
||||
agentInfo := metrics.AgentInfo{
|
||||
DeploymentType: deploymentType,
|
||||
Version: version.NetbirdVersion(),
|
||||
OS: runtime.GOOS,
|
||||
Arch: runtime.GOARCH,
|
||||
}
|
||||
c.clientMetrics.UpdateAgentInfo(agentInfo, myPrivateKey.PublicKey().String())
|
||||
|
||||
log.Debugf("connected to the Management service %s", c.config.ManagementURL.Host)
|
||||
defer func() {
|
||||
if err = mgmClient.Close(); err != nil {
|
||||
@@ -230,8 +267,10 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
}()
|
||||
|
||||
// connect (just a connection, no stream yet) and login to Management Service to get an initial global Netbird config
|
||||
loginStarted := time.Now()
|
||||
loginResp, err := loginToManagement(engineCtx, mgmClient, publicSSHKey, c.config)
|
||||
if err != nil {
|
||||
c.clientMetrics.RecordLoginDuration(engineCtx, time.Since(loginStarted), false)
|
||||
log.Debug(err)
|
||||
if s, ok := gstatus.FromError(err); ok && (s.Code() == codes.PermissionDenied) {
|
||||
state.Set(StatusNeedsLogin)
|
||||
@@ -240,6 +279,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
}
|
||||
return wrapErr(err)
|
||||
}
|
||||
c.clientMetrics.RecordLoginDuration(engineCtx, time.Since(loginStarted), true)
|
||||
c.statusRecorder.MarkManagementConnected()
|
||||
|
||||
localPeerState := peer.LocalPeerState{
|
||||
@@ -308,7 +348,16 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
checks := loginResp.GetChecks()
|
||||
|
||||
c.engineMutex.Lock()
|
||||
engine := NewEngine(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, checks, stateManager)
|
||||
engine := NewEngine(engineCtx, cancel, engineConfig, EngineServices{
|
||||
SignalClient: signalClient,
|
||||
MgmClient: mgmClient,
|
||||
RelayManager: relayManager,
|
||||
StatusRecorder: c.statusRecorder,
|
||||
Checks: checks,
|
||||
StateManager: stateManager,
|
||||
UpdateManager: c.updateManager,
|
||||
ClientMetrics: c.clientMetrics,
|
||||
}, mobileDependency)
|
||||
engine.SetSyncResponsePersistence(c.persistSyncResponse)
|
||||
c.engine = engine
|
||||
c.engineMutex.Unlock()
|
||||
@@ -318,21 +367,15 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan
|
||||
return wrapErr(err)
|
||||
}
|
||||
|
||||
	if loginResp.PeerConfig != nil && loginResp.PeerConfig.AutoUpdate != nil {
		// doInitialAutoUpdate is set when the user clicks the "Connect" menu in the UI
		if c.doInitialAutoUpdate {
			log.Infof("start engine by ui, run auto-update check")
			c.engine.InitialUpdateHandling(loginResp.PeerConfig.AutoUpdate)
			c.doInitialAutoUpdate = false
		}
	}
|
||||
|
||||
log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress())
|
||||
state.Set(StatusConnected)
|
||||
|
||||
if runningChan != nil {
|
||||
close(runningChan)
|
||||
runningChan = nil
|
||||
select {
|
||||
case <-runningChan:
|
||||
default:
|
||||
close(runningChan)
|
||||
}
|
||||
}
|
||||
|
||||
<-engineCtx.Done()
|
||||
|
||||
60
client/internal/daemonaddr/resolve.go
Normal file
@@ -0,0 +1,60 @@
|
||||
//go:build !windows && !ios && !android

package daemonaddr

import (
	"os"
	"path/filepath"
	"strings"

	log "github.com/sirupsen/logrus"
)

var scanDir = "/var/run/netbird"

// setScanDir overrides the scan directory (used by tests).
func setScanDir(dir string) {
	scanDir = dir
}

// ResolveUnixDaemonAddr checks whether the default Unix socket exists and, if not,
// scans /var/run/netbird/ for a single .sock file to use instead. This handles the
// mismatch between the netbird@.service template (which places the socket under
// /var/run/netbird/<instance>.sock) and the CLI default (/var/run/netbird.sock).
func ResolveUnixDaemonAddr(addr string) string {
	if !strings.HasPrefix(addr, "unix://") {
		return addr
	}

	sockPath := strings.TrimPrefix(addr, "unix://")
	if _, err := os.Stat(sockPath); err == nil {
		return addr
	}

	entries, err := os.ReadDir(scanDir)
	if err != nil {
		return addr
	}

	var found []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		if strings.HasSuffix(e.Name(), ".sock") {
			found = append(found, filepath.Join(scanDir, e.Name()))
		}
	}

	switch len(found) {
	case 1:
		resolved := "unix://" + found[0]
		log.Debugf("Default daemon socket not found, using discovered socket: %s", resolved)
		return resolved
	case 0:
		return addr
	default:
		log.Warnf("Default daemon socket not found and multiple sockets discovered in %s; pass --daemon-addr explicitly", scanDir)
		return addr
	}
}
|
||||
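For reference, a minimal sketch of how a caller might use this helper before dialing the daemon; the default address literal and the surrounding wiring are illustrative assumptions, not the actual CLI code.

package main

import (
	"fmt"

	"github.com/netbirdio/netbird/client/internal/daemonaddr"
)

func main() {
	// Hypothetical default; the real CLI default comes from its own flag handling.
	addr := "unix:///var/run/netbird.sock"

	// Falls back to a single discovered /var/run/netbird/*.sock when the
	// default socket does not exist; otherwise returns addr unchanged.
	resolved := daemonaddr.ResolveUnixDaemonAddr(addr)
	fmt.Println("dialing daemon at", resolved)
}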
8
client/internal/daemonaddr/resolve_stub.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build windows || ios || android

package daemonaddr

// ResolveUnixDaemonAddr is a no-op on platforms that don't use Unix sockets.
func ResolveUnixDaemonAddr(addr string) string {
	return addr
}
|
||||
121
client/internal/daemonaddr/resolve_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
//go:build !windows && !ios && !android
|
||||
|
||||
package daemonaddr
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// createSockFile creates a regular file with a .sock extension.
|
||||
// ResolveUnixDaemonAddr uses os.Stat (not net.Dial), so a regular file is
|
||||
// sufficient and avoids Unix socket path-length limits on macOS.
|
||||
func createSockFile(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
if err := os.WriteFile(path, nil, 0o600); err != nil {
|
||||
t.Fatalf("failed to create test sock file at %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_DefaultExists(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
sock := filepath.Join(tmp, "netbird.sock")
|
||||
createSockFile(t, sock)
|
||||
|
||||
addr := "unix://" + sock
|
||||
got := ResolveUnixDaemonAddr(addr)
|
||||
if got != addr {
|
||||
t.Errorf("expected %s, got %s", addr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_SingleDiscovered(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
// Default socket does not exist
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
// Create a scan dir with one socket
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
instanceSock := filepath.Join(sd, "main.sock")
|
||||
createSockFile(t, instanceSock)
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
expected := "unix://" + instanceSock
|
||||
if got != expected {
|
||||
t.Errorf("expected %s, got %s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_MultipleDiscovered(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSockFile(t, filepath.Join(sd, "main.sock"))
|
||||
createSockFile(t, filepath.Join(sd, "other.sock"))
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_NoSocketsFound(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
sd := filepath.Join(tmp, "netbird")
|
||||
if err := os.MkdirAll(sd, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(sd)
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_NonUnixAddr(t *testing.T) {
|
||||
addr := "tcp://127.0.0.1:41731"
|
||||
got := ResolveUnixDaemonAddr(addr)
|
||||
if got != addr {
|
||||
t.Errorf("expected %s, got %s", addr, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveUnixDaemonAddr_ScanDirMissing(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
|
||||
defaultAddr := "unix://" + filepath.Join(tmp, "netbird.sock")
|
||||
|
||||
origScanDir := scanDir
|
||||
setScanDir(filepath.Join(tmp, "nonexistent"))
|
||||
t.Cleanup(func() { setScanDir(origScanDir) })
|
||||
|
||||
got := ResolveUnixDaemonAddr(defaultAddr)
|
||||
if got != defaultAddr {
|
||||
t.Errorf("expected original %s, got %s", defaultAddr, got)
|
||||
}
|
||||
}
|
||||
@@ -27,11 +27,10 @@ import (
|
||||
"github.com/netbirdio/netbird/client/anonymize"
|
||||
"github.com/netbirdio/netbird/client/internal/peer"
|
||||
"github.com/netbirdio/netbird/client/internal/profilemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/updatemanager/installer"
|
||||
"github.com/netbirdio/netbird/client/internal/updater/installer"
|
||||
nbstatus "github.com/netbirdio/netbird/client/status"
|
||||
mgmProto "github.com/netbirdio/netbird/shared/management/proto"
|
||||
"github.com/netbirdio/netbird/util"
|
||||
"github.com/netbirdio/netbird/version"
|
||||
)
|
||||
|
||||
const readmeContent = `Netbird debug bundle
|
||||
@@ -53,6 +52,7 @@ resolved_domains.txt: Anonymized resolved domain IP addresses from the status re
|
||||
config.txt: Anonymized configuration information of the NetBird client.
|
||||
network_map.json: Anonymized sync response containing peer configurations, routes, DNS settings, and firewall rules.
|
||||
state.json: Anonymized client state dump containing netbird states for the active profile.
|
||||
metrics.txt: Buffered client metrics in InfluxDB line protocol format. Only present when metrics collection is enabled. Peer identifiers are anonymized.
|
||||
mutex.prof: Mutex profiling information.
|
||||
goroutine.prof: Goroutine profiling information.
|
||||
block.prof: Block profiling information.
|
||||
@@ -219,6 +219,11 @@ const (
|
||||
darwinStdoutLogPath = "/var/log/netbird.err.log"
|
||||
)
|
||||
|
||||
// MetricsExporter is an interface for exporting metrics
|
||||
type MetricsExporter interface {
|
||||
Export(w io.Writer) error
|
||||
}
|
||||
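As a rough illustration (not part of the change), MetricsExporter can be satisfied by any type that writes its buffered records to the given writer, for example a hypothetical test stub like the following, assuming the file's existing io import:

// staticExporter is a hypothetical MetricsExporter used for illustration:
// it writes pre-rendered InfluxDB line-protocol records to the bundle.
type staticExporter struct {
	lines []string
}

func (s *staticExporter) Export(w io.Writer) error {
	for _, line := range s.lines {
		if _, err := io.WriteString(w, line+"\n"); err != nil {
			return err
		}
	}
	return nil
}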
|
||||
type BundleGenerator struct {
|
||||
anonymizer *anonymize.Anonymizer
|
||||
|
||||
@@ -229,6 +234,7 @@ type BundleGenerator struct {
|
||||
logPath string
|
||||
cpuProfile []byte
|
||||
refreshStatus func() // Optional callback to refresh status before bundle generation
|
||||
clientMetrics MetricsExporter
|
||||
|
||||
anonymize bool
|
||||
includeSystemInfo bool
|
||||
@@ -250,6 +256,7 @@ type GeneratorDependencies struct {
|
||||
LogPath string
|
||||
CPUProfile []byte
|
||||
RefreshStatus func() // Optional callback to refresh status before bundle generation
|
||||
ClientMetrics MetricsExporter
|
||||
}
|
||||
|
||||
func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGenerator {
|
||||
@@ -268,6 +275,7 @@ func NewBundleGenerator(deps GeneratorDependencies, cfg BundleConfig) *BundleGen
|
||||
logPath: deps.LogPath,
|
||||
cpuProfile: deps.CPUProfile,
|
||||
refreshStatus: deps.RefreshStatus,
|
||||
clientMetrics: deps.ClientMetrics,
|
||||
|
||||
anonymize: cfg.Anonymize,
|
||||
includeSystemInfo: cfg.IncludeSystemInfo,
|
||||
@@ -351,6 +359,10 @@ func (g *BundleGenerator) createArchive() error {
|
||||
log.Errorf("failed to add corrupted state files to debug bundle: %v", err)
|
||||
}
|
||||
|
||||
if err := g.addMetrics(); err != nil {
|
||||
log.Errorf("failed to add metrics to debug bundle: %v", err)
|
||||
}
|
||||
|
||||
if err := g.addWgShow(); err != nil {
|
||||
log.Errorf("failed to add wg show output: %v", err)
|
||||
}
|
||||
@@ -418,7 +430,10 @@ func (g *BundleGenerator) addStatus() error {
|
||||
fullStatus := g.statusRecorder.GetFullStatus()
|
||||
protoFullStatus := nbstatus.ToProtoFullStatus(fullStatus)
|
||||
protoFullStatus.Events = g.statusRecorder.GetEventHistory()
|
||||
overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, g.anonymize, version.NetbirdVersion(), "", nil, nil, nil, "", profName)
|
||||
overview := nbstatus.ConvertToStatusOutputOverview(protoFullStatus, nbstatus.ConvertOptions{
|
||||
Anonymize: g.anonymize,
|
||||
ProfileName: profName,
|
||||
})
|
||||
statusOutput := overview.FullDetailSummary()
|
||||
|
||||
statusReader := strings.NewReader(statusOutput)
|
||||
@@ -744,6 +759,30 @@ func (g *BundleGenerator) addCorruptedStateFiles() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *BundleGenerator) addMetrics() error {
|
||||
if g.clientMetrics == nil {
|
||||
log.Debugf("skipping metrics in debug bundle: no metrics collector")
|
||||
return nil
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := g.clientMetrics.Export(&buf); err != nil {
|
||||
return fmt.Errorf("export metrics: %w", err)
|
||||
}
|
||||
|
||||
if buf.Len() == 0 {
|
||||
log.Debugf("skipping metrics.txt in debug bundle: no metrics data")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := g.addFileToZip(&buf, "metrics.txt"); err != nil {
|
||||
return fmt.Errorf("add metrics file to zip: %w", err)
|
||||
}
|
||||
|
||||
log.Debugf("added metrics to debug bundle")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *BundleGenerator) addLogfile() error {
|
||||
if g.logPath == "" {
|
||||
log.Debugf("skipping empty log file in debug bundle")
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
nberrors "github.com/netbirdio/netbird/client/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
@@ -22,6 +24,7 @@ import (
|
||||
|
||||
const (
|
||||
netbirdDNSStateKeyFormat = "State:/Network/Service/NetBird-%s/DNS"
|
||||
netbirdDNSStateKeyIndexedFormat = "State:/Network/Service/NetBird-%s-%d/DNS"
|
||||
globalIPv4State = "State:/Network/Global/IPv4"
|
||||
primaryServiceStateKeyFormat = "State:/Network/Service/%s/DNS"
|
||||
keySupplementalMatchDomains = "SupplementalMatchDomains"
|
||||
@@ -35,6 +38,14 @@ const (
|
||||
searchSuffix = "Search"
|
||||
matchSuffix = "Match"
|
||||
localSuffix = "Local"
|
||||
|
||||
// maxDomainsPerResolverEntry is the max number of domains per scutil resolver key.
|
||||
// scutil's d.add has maxArgs=101 (key + * + 99 values), so 99 is the hard cap.
|
||||
maxDomainsPerResolverEntry = 50
|
||||
|
||||
// maxDomainBytesPerResolverEntry is the max total bytes of domain strings per key.
|
||||
// scutil has an undocumented ~2048 byte value buffer; we stay well under it.
|
||||
maxDomainBytesPerResolverEntry = 1500
|
||||
)
|
||||
|
||||
type systemConfigurator struct {
|
||||
@@ -84,28 +95,23 @@ func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig, stateManager *
|
||||
searchDomains = append(searchDomains, strings.TrimSuffix(""+dConf.Domain, "."))
|
||||
}
|
||||
|
||||
matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)
|
||||
var err error
|
||||
if len(matchDomains) != 0 {
|
||||
err = s.addMatchDomains(matchKey, strings.Join(matchDomains, " "), config.ServerIP, config.ServerPort)
|
||||
} else {
|
||||
log.Infof("removing match domains from the system")
|
||||
err = s.removeKeyFromSystemConfig(matchKey)
|
||||
if err := s.removeKeysContaining(matchSuffix); err != nil {
|
||||
log.Warnf("failed to remove old match keys: %v", err)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("add match domains: %w", err)
|
||||
if len(matchDomains) != 0 {
|
||||
if err := s.addBatchedDomains(matchSuffix, matchDomains, config.ServerIP, config.ServerPort, false); err != nil {
|
||||
return fmt.Errorf("add match domains: %w", err)
|
||||
}
|
||||
}
|
||||
s.updateState(stateManager)
|
||||
|
||||
searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix)
|
||||
if len(searchDomains) != 0 {
|
||||
err = s.addSearchDomains(searchKey, strings.Join(searchDomains, " "), config.ServerIP, config.ServerPort)
|
||||
} else {
|
||||
log.Infof("removing search domains from the system")
|
||||
err = s.removeKeyFromSystemConfig(searchKey)
|
||||
if err := s.removeKeysContaining(searchSuffix); err != nil {
|
||||
log.Warnf("failed to remove old search keys: %v", err)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("add search domains: %w", err)
|
||||
if len(searchDomains) != 0 {
|
||||
if err := s.addBatchedDomains(searchSuffix, searchDomains, config.ServerIP, config.ServerPort, true); err != nil {
|
||||
return fmt.Errorf("add search domains: %w", err)
|
||||
}
|
||||
}
|
||||
s.updateState(stateManager)
|
||||
|
||||
@@ -149,8 +155,7 @@ func (s *systemConfigurator) restoreHostDNS() error {
|
||||
|
||||
func (s *systemConfigurator) getRemovableKeysWithDefaults() []string {
|
||||
if len(s.createdKeys) == 0 {
|
||||
// return defaults for startup calls
|
||||
return []string{getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix), getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)}
|
||||
return s.discoverExistingKeys()
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(s.createdKeys))
|
||||
@@ -160,6 +165,47 @@ func (s *systemConfigurator) getRemovableKeysWithDefaults() []string {
|
||||
return keys
|
||||
}
|
||||
|
||||
// discoverExistingKeys probes scutil for all NetBird DNS keys that may exist.
|
||||
// This handles the case where createdKeys is empty (e.g., state file lost after unclean shutdown).
|
||||
func (s *systemConfigurator) discoverExistingKeys() []string {
|
||||
dnsKeys, err := getSystemDNSKeys()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get system DNS keys: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var keys []string
|
||||
|
||||
for _, suffix := range []string{searchSuffix, matchSuffix, localSuffix} {
|
||||
key := getKeyWithInput(netbirdDNSStateKeyFormat, suffix)
|
||||
if strings.Contains(dnsKeys, key) {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
|
||||
for _, suffix := range []string{searchSuffix, matchSuffix} {
|
||||
for i := 0; ; i++ {
|
||||
key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, suffix, i)
|
||||
if !strings.Contains(dnsKeys, key) {
|
||||
break
|
||||
}
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// getSystemDNSKeys returns the scutil listing of all DNS state keys
|
||||
func getSystemDNSKeys() (string, error) {
|
||||
command := "list .*DNS\nquit\n"
|
||||
out, err := runSystemConfigCommand(command)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(out), nil
|
||||
}
|
||||
|
||||
func (s *systemConfigurator) removeKeyFromSystemConfig(key string) error {
|
||||
line := buildRemoveKeyOperation(key)
|
||||
_, err := runSystemConfigCommand(wrapCommand(line))
|
||||
@@ -184,12 +230,11 @@ func (s *systemConfigurator) addLocalDNS() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.addSearchDomains(
|
||||
localKey,
|
||||
strings.Join(s.systemDNSSettings.Domains, " "), s.systemDNSSettings.ServerIP, s.systemDNSSettings.ServerPort,
|
||||
); err != nil {
|
||||
return fmt.Errorf("add search domains: %w", err)
|
||||
domainsStr := strings.Join(s.systemDNSSettings.Domains, " ")
|
||||
if err := s.addDNSState(localKey, domainsStr, s.systemDNSSettings.ServerIP, s.systemDNSSettings.ServerPort, true); err != nil {
|
||||
return fmt.Errorf("add local dns state: %w", err)
|
||||
}
|
||||
s.createdKeys[localKey] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -280,28 +325,77 @@ func (s *systemConfigurator) getOriginalNameservers() []netip.Addr {
|
||||
return slices.Clone(s.origNameservers)
|
||||
}
|
||||
|
||||
func (s *systemConfigurator) addSearchDomains(key, domains string, ip netip.Addr, port int) error {
|
||||
err := s.addDNSState(key, domains, ip, port, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add dns state: %w", err)
|
||||
// splitDomainsIntoBatches splits domains into batches respecting both element count and byte size limits.
|
||||
func splitDomainsIntoBatches(domains []string) [][]string {
|
||||
if len(domains) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("added %d search domains to the state. Domain list: %s", len(strings.Split(domains, " ")), domains)
|
||||
var batches [][]string
|
||||
var current []string
|
||||
currentBytes := 0
|
||||
|
||||
s.createdKeys[key] = struct{}{}
|
||||
for _, d := range domains {
|
||||
domainLen := len(d)
|
||||
newBytes := currentBytes + domainLen
|
||||
if currentBytes > 0 {
|
||||
newBytes++ // space separator
|
||||
}
|
||||
|
||||
return nil
|
||||
if len(current) > 0 && (len(current) >= maxDomainsPerResolverEntry || newBytes > maxDomainBytesPerResolverEntry) {
|
||||
batches = append(batches, current)
|
||||
current = nil
|
||||
currentBytes = 0
|
||||
}
|
||||
|
||||
current = append(current, d)
|
||||
if currentBytes > 0 {
|
||||
currentBytes += 1 + domainLen
|
||||
} else {
|
||||
currentBytes = domainLen
|
||||
}
|
||||
}
|
||||
|
||||
if len(current) > 0 {
|
||||
batches = append(batches, current)
|
||||
}
|
||||
|
||||
return batches
|
||||
}
|
||||
|
||||
func (s *systemConfigurator) addMatchDomains(key, domains string, dnsServer netip.Addr, port int) error {
|
||||
err := s.addDNSState(key, domains, dnsServer, port, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("add dns state: %w", err)
|
||||
// removeKeysContaining removes all created keys that contain the given substring.
|
||||
func (s *systemConfigurator) removeKeysContaining(suffix string) error {
|
||||
var toRemove []string
|
||||
for key := range s.createdKeys {
|
||||
if strings.Contains(key, suffix) {
|
||||
toRemove = append(toRemove, key)
|
||||
}
|
||||
}
|
||||
var multiErr *multierror.Error
|
||||
for _, key := range toRemove {
|
||||
if err := s.removeKeyFromSystemConfig(key); err != nil {
|
||||
multiErr = multierror.Append(multiErr, fmt.Errorf("couldn't remove key %s: %w", key, err))
|
||||
}
|
||||
}
|
||||
return nberrors.FormatErrorOrNil(multiErr)
|
||||
}
|
||||
|
||||
// addBatchedDomains splits domains into batches and creates indexed scutil keys for each batch.
|
||||
func (s *systemConfigurator) addBatchedDomains(suffix string, domains []string, ip netip.Addr, port int, enableSearch bool) error {
|
||||
batches := splitDomainsIntoBatches(domains)
|
||||
|
||||
for i, batch := range batches {
|
||||
key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, suffix, i)
|
||||
domainsStr := strings.Join(batch, " ")
|
||||
|
||||
if err := s.addDNSState(key, domainsStr, ip, port, enableSearch); err != nil {
|
||||
return fmt.Errorf("add dns state for batch %d: %w", i, err)
|
||||
}
|
||||
|
||||
s.createdKeys[key] = struct{}{}
|
||||
}
|
||||
|
||||
log.Infof("added %d match domains to the state. Domain list: %s", len(strings.Split(domains, " ")), domains)
|
||||
|
||||
s.createdKeys[key] = struct{}{}
|
||||
log.Infof("added %d %s domains across %d resolver entries", len(domains), suffix, len(batches))
|
||||
|
||||
return nil
|
||||
}
|
||||
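To make the batching limits concrete, a small usage sketch (illustrative names and counts, assuming the constants above): short domains hit the 50-element cap first, while long domains hit the ~1500-byte cap and produce more, smaller batches.

// Illustration: 120 short domains split into three batches (50 + 50 + 20),
// each written to an indexed key such as
// State:/Network/Service/NetBird-Match-0/DNS, -1, -2.
domains := make([]string, 120)
for i := range domains {
	domains[i] = fmt.Sprintf("host-%03d.example.com", i)
}

batches := splitDomainsIntoBatches(domains)
for i, batch := range batches {
	log.Debugf("batch %d holds %d domains", i, len(batch))
}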
@@ -364,7 +458,6 @@ func (s *systemConfigurator) flushDNSCache() error {
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("restart mDNSResponder: %w, output: %s", err, out)
|
||||
}
|
||||
|
||||
log.Info("flushed DNS cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,10 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -49,17 +52,22 @@ func TestDarwinDNSUncleanShutdownCleanup(t *testing.T) {
|
||||
|
||||
require.NoError(t, sm.PersistState(context.Background()))
|
||||
|
||||
searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix)
|
||||
matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)
|
||||
localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix)
|
||||
|
||||
// Collect all created keys for cleanup verification
|
||||
createdKeys := make([]string, 0, len(configurator.createdKeys))
|
||||
for key := range configurator.createdKeys {
|
||||
createdKeys = append(createdKeys, key)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
for _, key := range []string{searchKey, matchKey, localKey} {
|
||||
for _, key := range createdKeys {
|
||||
_ = removeTestDNSKey(key)
|
||||
}
|
||||
_ = removeTestDNSKey(localKey)
|
||||
}()
|
||||
|
||||
for _, key := range []string{searchKey, matchKey, localKey} {
|
||||
for _, key := range createdKeys {
|
||||
exists, err := checkDNSKeyExists(key)
|
||||
require.NoError(t, err)
|
||||
if exists {
|
||||
@@ -83,13 +91,223 @@ func TestDarwinDNSUncleanShutdownCleanup(t *testing.T) {
|
||||
err = shutdownState.Cleanup()
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, key := range []string{searchKey, matchKey, localKey} {
|
||||
for _, key := range createdKeys {
|
||||
exists, err := checkDNSKeyExists(key)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, exists, "Key %s should NOT exist after cleanup", key)
|
||||
}
|
||||
}
|
||||
|
||||
// generateShortDomains generates domains like a.com, b.com, ..., aa.com, ab.com, etc.
|
||||
func generateShortDomains(count int) []string {
|
||||
domains := make([]string, 0, count)
|
||||
for i := range count {
|
||||
label := ""
|
||||
n := i
|
||||
for {
|
||||
label = string(rune('a'+n%26)) + label
|
||||
n = n/26 - 1
|
||||
if n < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
domains = append(domains, label+".com")
|
||||
}
|
||||
return domains
|
||||
}
|
||||
|
||||
// generateLongDomains generates domains like subdomain-000.department.organization-name.example.com
|
||||
func generateLongDomains(count int) []string {
|
||||
domains := make([]string, 0, count)
|
||||
for i := range count {
|
||||
domains = append(domains, fmt.Sprintf("subdomain-%03d.department.organization-name.example.com", i))
|
||||
}
|
||||
return domains
|
||||
}
|
||||
|
||||
// readDomainsFromKey reads the SupplementalMatchDomains array back from scutil for a given key.
|
||||
func readDomainsFromKey(t *testing.T, key string) []string {
|
||||
t.Helper()
|
||||
|
||||
cmd := exec.Command(scutilPath)
|
||||
cmd.Stdin = strings.NewReader(fmt.Sprintf("open\nshow %s\nquit\n", key))
|
||||
out, err := cmd.Output()
|
||||
require.NoError(t, err, "scutil show should succeed")
|
||||
|
||||
var domains []string
|
||||
inArray := false
|
||||
scanner := bufio.NewScanner(bytes.NewReader(out))
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(line, "SupplementalMatchDomains") && strings.Contains(line, "<array>") {
|
||||
inArray = true
|
||||
continue
|
||||
}
|
||||
if inArray {
|
||||
if line == "}" {
|
||||
break
|
||||
}
|
||||
// lines look like: "0 : a.com"
|
||||
parts := strings.SplitN(line, " : ", 2)
|
||||
if len(parts) == 2 {
|
||||
domains = append(domains, parts[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
require.NoError(t, scanner.Err())
|
||||
return domains
|
||||
}
|
||||
|
||||
func TestSplitDomainsIntoBatches(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
domains []string
|
||||
expectedCount int
|
||||
checkAllPresent bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
domains: nil,
|
||||
expectedCount: 0,
|
||||
},
|
||||
{
|
||||
name: "under_limit",
|
||||
domains: generateShortDomains(10),
|
||||
expectedCount: 1,
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "at_element_limit",
|
||||
domains: generateShortDomains(50),
|
||||
expectedCount: 1,
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "over_element_limit",
|
||||
domains: generateShortDomains(51),
|
||||
expectedCount: 2,
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "triple_element_limit",
|
||||
domains: generateShortDomains(150),
|
||||
expectedCount: 3,
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "long_domains_hit_byte_limit",
|
||||
domains: generateLongDomains(50),
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "500_short_domains",
|
||||
domains: generateShortDomains(500),
|
||||
expectedCount: 10,
|
||||
checkAllPresent: true,
|
||||
},
|
||||
{
|
||||
name: "500_long_domains",
|
||||
domains: generateLongDomains(500),
|
||||
checkAllPresent: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
batches := splitDomainsIntoBatches(tc.domains)
|
||||
|
||||
if tc.expectedCount > 0 {
|
||||
assert.Len(t, batches, tc.expectedCount, "expected %d batches", tc.expectedCount)
|
||||
}
|
||||
|
||||
// Verify each batch respects limits
|
||||
for i, batch := range batches {
|
||||
assert.LessOrEqual(t, len(batch), maxDomainsPerResolverEntry,
|
||||
"batch %d exceeds element limit", i)
|
||||
|
||||
totalBytes := 0
|
||||
for j, d := range batch {
|
||||
if j > 0 {
|
||||
totalBytes++
|
||||
}
|
||||
totalBytes += len(d)
|
||||
}
|
||||
assert.LessOrEqual(t, totalBytes, maxDomainBytesPerResolverEntry,
|
||||
"batch %d exceeds byte limit (%d bytes)", i, totalBytes)
|
||||
}
|
||||
|
||||
if tc.checkAllPresent {
|
||||
var all []string
|
||||
for _, batch := range batches {
|
||||
all = append(all, batch...)
|
||||
}
|
||||
assert.Equal(t, tc.domains, all, "all domains should be present in order")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestMatchDomainBatching writes increasing numbers of domains via the batching mechanism
|
||||
// and verifies all domains are readable across multiple scutil keys.
|
||||
func TestMatchDomainBatching(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping scutil integration test in short mode")
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
count int
|
||||
generator func(int) []string
|
||||
}{
|
||||
{"short_10", 10, generateShortDomains},
|
||||
{"short_50", 50, generateShortDomains},
|
||||
{"short_100", 100, generateShortDomains},
|
||||
{"short_200", 200, generateShortDomains},
|
||||
{"short_500", 500, generateShortDomains},
|
||||
{"long_10", 10, generateLongDomains},
|
||||
{"long_50", 50, generateLongDomains},
|
||||
{"long_100", 100, generateLongDomains},
|
||||
{"long_200", 200, generateLongDomains},
|
||||
{"long_500", 500, generateLongDomains},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
configurator := &systemConfigurator{
|
||||
createdKeys: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
for key := range configurator.createdKeys {
|
||||
_ = removeTestDNSKey(key)
|
||||
}
|
||||
}()
|
||||
|
||||
domains := tc.generator(tc.count)
|
||||
err := configurator.addBatchedDomains(matchSuffix, domains, netip.MustParseAddr("100.64.0.1"), 53, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
batches := splitDomainsIntoBatches(domains)
|
||||
t.Logf("wrote %d domains across %d batched keys", tc.count, len(batches))
|
||||
|
||||
// Read back all domains from all batched keys
|
||||
var got []string
|
||||
for i := range batches {
|
||||
key := fmt.Sprintf(netbirdDNSStateKeyIndexedFormat, matchSuffix, i)
|
||||
exists, err := checkDNSKeyExists(key)
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists, "key %s should exist", key)
|
||||
|
||||
got = append(got, readDomainsFromKey(t, key)...)
|
||||
}
|
||||
|
||||
t.Logf("read back %d/%d domains from %d keys", len(got), tc.count, len(batches))
|
||||
assert.Equal(t, tc.count, len(got), "all domains should be readable")
|
||||
assert.Equal(t, domains, got, "domains should match in order")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func checkDNSKeyExists(key string) (bool, error) {
|
||||
cmd := exec.Command(scutilPath)
|
||||
cmd.Stdin = strings.NewReader("show " + key + "\nquit\n")
|
||||
@@ -158,15 +376,15 @@ func setupTestConfigurator(t *testing.T) (*systemConfigurator, *statemanager.Man
|
||||
createdKeys: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
searchKey := getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix)
|
||||
matchKey := getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)
|
||||
localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix)
|
||||
|
||||
cleanup := func() {
|
||||
_ = sm.Stop(context.Background())
|
||||
for _, key := range []string{searchKey, matchKey, localKey} {
|
||||
for key := range configurator.createdKeys {
|
||||
_ = removeTestDNSKey(key)
|
||||
}
|
||||
// Also clean up old-format keys and local key in case they exist
|
||||
_ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix))
|
||||
_ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix))
|
||||
_ = removeTestDNSKey(getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix))
|
||||
}
|
||||
|
||||
return configurator, sm, cleanup
|
||||
|
||||
@@ -277,7 +277,7 @@ func (r *registryConfigurator) addDNSMatchPolicy(domains []string, ip netip.Addr
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("added %d NRPT rules for %d domains. Domain list: %v", ruleIndex, len(domains), domains)
|
||||
log.Infof("added %d NRPT rules for %d domains", ruleIndex, len(domains))
|
||||
return ruleIndex, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ func (d *Resolver) ID() types.HandlerID {
|
||||
return "local-resolver"
|
||||
}
|
||||
|
||||
func (d *Resolver) ProbeAvailability() {}
|
||||
func (d *Resolver) ProbeAvailability(context.Context) {}
|
||||
|
||||
// ServeDNS handles a DNS request
|
||||
func (d *Resolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
|
||||
|
||||
@@ -376,9 +376,9 @@ func (m *Resolver) extractDomainsFromServerDomains(serverDomains dnsconfig.Serve
|
||||
}
|
||||
}
|
||||
|
||||
if serverDomains.Flow != "" {
|
||||
domains = append(domains, serverDomains.Flow)
|
||||
}
|
||||
// Flow receiver domain is intentionally excluded from caching.
|
||||
// Cloud providers may rotate the IP behind this domain; a stale cached record
|
||||
// causes TLS certificate verification failures on reconnect.
|
||||
|
||||
for _, stun := range serverDomains.Stuns {
|
||||
if stun != "" {
|
||||
|
||||
@@ -391,7 +391,8 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
}
|
||||
assert.Len(t, resolver.GetCachedDomains(), 3)
|
||||
|
||||
// Update with partial ServerDomains (only flow domain - new type, should preserve all existing)
|
||||
// Update with partial ServerDomains (only flow domain - flow is intentionally excluded from
|
||||
// caching to prevent TLS failures from stale records, so all existing domains are preserved)
|
||||
partialDomains := dnsconfig.ServerDomains{
|
||||
Flow: "github.com",
|
||||
}
|
||||
@@ -400,10 +401,10 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
t.Skipf("Skipping test due to DNS resolution failure: %v", err)
|
||||
}
|
||||
|
||||
assert.Len(t, removedDomains, 0, "Should not remove any domains when adding new type")
|
||||
assert.Len(t, removedDomains, 0, "Should not remove any domains when only flow domain is provided")
|
||||
|
||||
finalDomains := resolver.GetCachedDomains()
|
||||
assert.Len(t, finalDomains, 4, "Should have all original domains plus new flow domain")
|
||||
assert.Len(t, finalDomains, 3, "Flow domain is not cached; all original domains should be preserved")
|
||||
|
||||
domainStrings := make([]string, len(finalDomains))
|
||||
for i, d := range finalDomains {
|
||||
@@ -412,5 +413,5 @@ func TestResolver_PartialUpdateAddsNewTypePreservesExisting(t *testing.T) {
|
||||
assert.Contains(t, domainStrings, "example.org")
|
||||
assert.Contains(t, domainStrings, "google.com")
|
||||
assert.Contains(t, domainStrings, "cloudflare.com")
|
||||
assert.Contains(t, domainStrings, "github.com")
|
||||
assert.NotContains(t, domainStrings, "github.com")
|
||||
}
|
||||
|
||||
@@ -104,12 +104,16 @@ type DefaultServer struct {
|
||||
|
||||
statusRecorder *peer.Status
|
||||
stateManager *statemanager.Manager
|
||||
|
||||
probeMu sync.Mutex
|
||||
probeCancel context.CancelFunc
|
||||
probeWg sync.WaitGroup
|
||||
}
|
||||
|
||||
type handlerWithStop interface {
|
||||
dns.Handler
|
||||
Stop()
|
||||
ProbeAvailability()
|
||||
ProbeAvailability(context.Context)
|
||||
ID() types.HandlerID
|
||||
}
|
||||
|
||||
@@ -362,7 +366,13 @@ func (s *DefaultServer) DnsIP() netip.Addr {
|
||||
|
||||
// Stop stops the server
|
||||
func (s *DefaultServer) Stop() {
|
||||
s.probeMu.Lock()
|
||||
if s.probeCancel != nil {
|
||||
s.probeCancel()
|
||||
}
|
||||
s.ctxCancel()
|
||||
s.probeMu.Unlock()
|
||||
s.probeWg.Wait()
|
||||
s.shutdownWg.Wait()
|
||||
|
||||
s.mux.Lock()
|
||||
@@ -479,7 +489,8 @@ func (s *DefaultServer) SearchDomains() []string {
|
||||
}
|
||||
|
||||
// ProbeAvailability tests each upstream group's servers for availability
|
||||
// and deactivates the group if no server responds.
|
||||
// If a previous probe is still running, it will be cancelled before starting a new one.
|
||||
func (s *DefaultServer) ProbeAvailability() {
|
||||
if val := os.Getenv(envSkipDNSProbe); val != "" {
|
||||
skipProbe, err := strconv.ParseBool(val)
|
||||
@@ -492,15 +503,52 @@ func (s *DefaultServer) ProbeAvailability() {
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, mux := range s.dnsMuxMap {
|
||||
wg.Add(1)
|
||||
go func(mux handlerWithStop) {
|
||||
defer wg.Done()
|
||||
mux.ProbeAvailability()
|
||||
}(mux.handler)
|
||||
s.probeMu.Lock()
|
||||
|
||||
// don't start probes on a stopped server
|
||||
if s.ctx.Err() != nil {
|
||||
s.probeMu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// cancel any running probe
|
||||
if s.probeCancel != nil {
|
||||
s.probeCancel()
|
||||
s.probeCancel = nil
|
||||
}
|
||||
|
||||
// wait for the previous probe goroutines to finish while holding
|
||||
// the mutex so no other caller can start a new probe concurrently
|
||||
s.probeWg.Wait()
|
||||
|
||||
// start a new probe
|
||||
probeCtx, probeCancel := context.WithCancel(s.ctx)
|
||||
s.probeCancel = probeCancel
|
||||
|
||||
s.probeWg.Add(1)
|
||||
defer s.probeWg.Done()
|
||||
|
||||
// Snapshot handlers under s.mux to avoid racing with updateMux/dnsMuxMap writers.
|
||||
s.mux.Lock()
|
||||
handlers := make([]handlerWithStop, 0, len(s.dnsMuxMap))
|
||||
for _, mux := range s.dnsMuxMap {
|
||||
handlers = append(handlers, mux.handler)
|
||||
}
|
||||
s.mux.Unlock()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, handler := range handlers {
|
||||
wg.Add(1)
|
||||
go func(h handlerWithStop) {
|
||||
defer wg.Done()
|
||||
h.ProbeAvailability(probeCtx)
|
||||
}(handler)
|
||||
}
|
||||
|
||||
s.probeMu.Unlock()
|
||||
|
||||
wg.Wait()
|
||||
probeCancel()
|
||||
}
|
||||
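The single-flight behaviour above can be read in isolation as the following pattern (a sketch with illustrative names, assuming the standard context and sync packages): cancel and drain the previous run while holding the lock, then launch the new one.

// restartable runs fn in the background, cancelling and draining any
// previous run first, so at most one invocation is active at a time.
type restartable struct {
	mu     sync.Mutex
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func (r *restartable) Run(parent context.Context, fn func(context.Context)) {
	r.mu.Lock()
	if r.cancel != nil {
		r.cancel() // stop the previous run
	}
	r.wg.Wait() // drain it under the lock so runs never overlap

	ctx, cancel := context.WithCancel(parent)
	r.cancel = cancel
	r.wg.Add(1)
	r.mu.Unlock()

	go func() {
		defer r.wg.Done()
		defer cancel()
		fn(ctx)
	}()
}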
|
||||
func (s *DefaultServer) UpdateServerConfig(domains dnsconfig.ServerDomains) error {
|
||||
|
||||
@@ -1065,7 +1065,7 @@ type mockHandler struct {
|
||||
|
||||
func (m *mockHandler) ServeDNS(dns.ResponseWriter, *dns.Msg) {}
|
||||
func (m *mockHandler) Stop() {}
|
||||
func (m *mockHandler) ProbeAvailability() {}
|
||||
func (m *mockHandler) ProbeAvailability(context.Context) {}
|
||||
func (m *mockHandler) ID() types.HandlerID { return types.HandlerID(m.Id) }
|
||||
|
||||
type mockService struct{}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -69,7 +70,7 @@ func (s *serviceViaListener) Listen() error {
|
||||
return fmt.Errorf("eval listen address: %w", err)
|
||||
}
|
||||
s.listenIP = s.listenIP.Unmap()
|
||||
s.server.Addr = fmt.Sprintf("%s:%d", s.listenIP, s.listenPort)
|
||||
s.server.Addr = net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort)))
|
||||
log.Debugf("starting dns on %s", s.server.Addr)
|
||||
go func() {
|
||||
s.setListenerStatus(true)
|
||||
@@ -186,7 +187,7 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) {
|
||||
}
|
||||
|
||||
func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool {
|
||||
addrString := fmt.Sprintf("%s:%d", ip, port)
|
||||
addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port))
|
||||
udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString))
|
||||
probeListener, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
|
||||
@@ -65,6 +65,7 @@ type upstreamResolverBase struct {
|
||||
mutex sync.Mutex
|
||||
reactivatePeriod time.Duration
|
||||
upstreamTimeout time.Duration
|
||||
wg sync.WaitGroup
|
||||
|
||||
deactivate func(error)
|
||||
reactivate func()
|
||||
@@ -115,6 +116,11 @@ func (u *upstreamResolverBase) MatchSubdomains() bool {
|
||||
func (u *upstreamResolverBase) Stop() {
|
||||
log.Debugf("stopping serving DNS for upstreams %s", u.upstreamServers)
|
||||
u.cancel()
|
||||
|
||||
u.mutex.Lock()
|
||||
u.wg.Wait()
|
||||
u.mutex.Unlock()
|
||||
|
||||
}
|
||||
|
||||
// ServeDNS handles a DNS request
|
||||
@@ -260,16 +266,10 @@ func formatFailures(failures []upstreamFailure) string {
|
||||
|
||||
// ProbeAvailability tests all upstream servers simultaneously and
|
||||
// disables the resolver if none work
|
||||
func (u *upstreamResolverBase) ProbeAvailability() {
|
||||
func (u *upstreamResolverBase) ProbeAvailability(ctx context.Context) {
|
||||
u.mutex.Lock()
|
||||
defer u.mutex.Unlock()
|
||||
|
||||
select {
|
||||
case <-u.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// avoid probe if upstreams could resolve at least one query
|
||||
if u.successCount.Load() > 0 {
|
||||
return
|
||||
@@ -279,31 +279,39 @@ func (u *upstreamResolverBase) ProbeAvailability() {
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var errors *multierror.Error
|
||||
var errs *multierror.Error
|
||||
for _, upstream := range u.upstreamServers {
|
||||
upstream := upstream
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
go func(upstream netip.AddrPort) {
|
||||
defer wg.Done()
|
||||
err := u.testNameserver(upstream, 500*time.Millisecond)
|
||||
err := u.testNameserver(u.ctx, ctx, upstream, 500*time.Millisecond)
|
||||
if err != nil {
|
||||
errors = multierror.Append(errors, err)
|
||||
mu.Lock()
|
||||
errs = multierror.Append(errs, err)
|
||||
mu.Unlock()
|
||||
log.Warnf("probing upstream nameserver %s: %s", upstream, err)
|
||||
return
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
success = true
|
||||
}()
|
||||
mu.Unlock()
|
||||
}(upstream)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-u.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// didn't find a working upstream server, let's disable and try later
|
||||
if !success {
|
||||
u.disable(errors.ErrorOrNil())
|
||||
u.disable(errs.ErrorOrNil())
|
||||
|
||||
if u.statusRecorder == nil {
|
||||
return
|
||||
@@ -339,7 +347,7 @@ func (u *upstreamResolverBase) waitUntilResponse() {
|
||||
}
|
||||
|
||||
for _, upstream := range u.upstreamServers {
|
||||
if err := u.testNameserver(upstream, probeTimeout); err != nil {
|
||||
if err := u.testNameserver(u.ctx, nil, upstream, probeTimeout); err != nil {
|
||||
log.Tracef("upstream check for %s: %s", upstream, err)
|
||||
} else {
|
||||
// at least one upstream server is available, stop probing
|
||||
@@ -351,16 +359,22 @@ func (u *upstreamResolverBase) waitUntilResponse() {
|
||||
return fmt.Errorf("upstream check call error")
|
||||
}
|
||||
|
||||
err := backoff.Retry(operation, exponentialBackOff)
|
||||
err := backoff.Retry(operation, backoff.WithContext(exponentialBackOff, u.ctx))
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
log.Debugf("upstream retry loop exited for upstreams %s", u.upstreamServersString())
|
||||
} else {
|
||||
log.Warnf("upstream retry loop exited for upstreams %s: %v", u.upstreamServersString(), err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServersString())
|
||||
u.successCount.Add(1)
|
||||
u.reactivate()
|
||||
u.mutex.Lock()
|
||||
u.disabled = false
|
||||
u.mutex.Unlock()
|
||||
}
|
||||
|
||||
// isTimeout returns true if the given error is a network timeout error.
|
||||
@@ -383,7 +397,11 @@ func (u *upstreamResolverBase) disable(err error) {
|
||||
u.successCount.Store(0)
|
||||
u.deactivate(err)
|
||||
u.disabled = true
|
||||
go u.waitUntilResponse()
|
||||
u.wg.Add(1)
|
||||
go func() {
|
||||
defer u.wg.Done()
|
||||
u.waitUntilResponse()
|
||||
}()
|
||||
}
|
||||
|
||||
func (u *upstreamResolverBase) upstreamServersString() string {
|
||||
@@ -394,13 +412,18 @@ func (u *upstreamResolverBase) upstreamServersString() string {
|
||||
return strings.Join(servers, ", ")
|
||||
}
|
||||
|
||||
func (u *upstreamResolverBase) testNameserver(server netip.AddrPort, timeout time.Duration) error {
|
||||
ctx, cancel := context.WithTimeout(u.ctx, timeout)
|
||||
func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalCtx context.Context, server netip.AddrPort, timeout time.Duration) error {
|
||||
mergedCtx, cancel := context.WithTimeout(baseCtx, timeout)
|
||||
defer cancel()
|
||||
|
||||
if externalCtx != nil {
|
||||
stop2 := context.AfterFunc(externalCtx, cancel)
|
||||
defer stop2()
|
||||
}
|
||||
|
||||
r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA)
|
||||
|
||||
_, _, err := u.upstreamClient.exchange(ctx, server.String(), r)
|
||||
_, _, err := u.upstreamClient.exchange(mergedCtx, server.String(), r)
|
||||
return err
|
||||
}
|
||||
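The two-context handling can be distilled into a standalone helper (a sketch, assuming Go 1.21+ for context.AfterFunc; names are illustrative): the timeout derives from the resolver's long-lived context, while an optional per-probe context can also cancel the query.

// mergedTimeout returns a context that expires after timeout, is cancelled
// when base is cancelled and, if external is non-nil, when external is
// cancelled as well.
func mergedTimeout(base, external context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithTimeout(base, timeout)
	if external == nil {
		return ctx, cancel
	}
	stop := context.AfterFunc(external, cancel)
	return ctx, func() {
		stop()   // detach from external first
		cancel() // then release the timeout
	}
}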
|
||||
|
||||
@@ -188,7 +188,7 @@ func TestUpstreamResolver_DeactivationReactivation(t *testing.T) {
|
||||
reactivated = true
|
||||
}
|
||||
|
||||
resolver.ProbeAvailability()
|
||||
resolver.ProbeAvailability(context.TODO())
|
||||
|
||||
if !failed {
|
||||
t.Errorf("expected that resolving was deactivated")
|
||||
|
||||
@@ -28,15 +28,17 @@ import (
|
||||
"github.com/netbirdio/netbird/client/firewall"
|
||||
firewallManager "github.com/netbirdio/netbird/client/firewall/manager"
|
||||
"github.com/netbirdio/netbird/client/iface"
|
||||
nbnetstack "github.com/netbirdio/netbird/client/iface/netstack"
|
||||
"github.com/netbirdio/netbird/client/iface/device"
|
||||
nbnetstack "github.com/netbirdio/netbird/client/iface/netstack"
|
||||
"github.com/netbirdio/netbird/client/iface/udpmux"
|
||||
"github.com/netbirdio/netbird/client/internal/acl"
|
||||
"github.com/netbirdio/netbird/client/internal/debug"
|
||||
"github.com/netbirdio/netbird/client/internal/dns"
|
||||
dnsconfig "github.com/netbirdio/netbird/client/internal/dns/config"
|
||||
"github.com/netbirdio/netbird/client/internal/dnsfwd"
|
||||
"github.com/netbirdio/netbird/client/internal/expose"
|
||||
"github.com/netbirdio/netbird/client/internal/ingressgw"
|
||||
"github.com/netbirdio/netbird/client/internal/metrics"
|
||||
"github.com/netbirdio/netbird/client/internal/netflow"
|
||||
nftypes "github.com/netbirdio/netbird/client/internal/netflow/types"
|
||||
"github.com/netbirdio/netbird/client/internal/networkmonitor"
|
||||
@@ -50,16 +52,14 @@ import (
|
||||
"github.com/netbirdio/netbird/client/internal/routemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||
"github.com/netbirdio/netbird/client/internal/statemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/updatemanager"
|
||||
"github.com/netbirdio/netbird/client/internal/updater"
|
||||
"github.com/netbirdio/netbird/client/jobexec"
|
||||
cProto "github.com/netbirdio/netbird/client/proto"
|
||||
"github.com/netbirdio/netbird/shared/management/domain"
|
||||
semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group"
|
||||
|
||||
"github.com/netbirdio/netbird/client/system"
|
||||
nbdns "github.com/netbirdio/netbird/dns"
|
||||
"github.com/netbirdio/netbird/route"
|
||||
mgm "github.com/netbirdio/netbird/shared/management/client"
|
||||
"github.com/netbirdio/netbird/shared/management/domain"
|
||||
mgmProto "github.com/netbirdio/netbird/shared/management/proto"
|
||||
auth "github.com/netbirdio/netbird/shared/relay/auth/hmac"
|
||||
relayClient "github.com/netbirdio/netbird/shared/relay/client"
|
||||
@@ -75,13 +75,11 @@ import (
|
||||
const (
|
||||
PeerConnectionTimeoutMax = 45000 // ms
|
||||
PeerConnectionTimeoutMin = 30000 // ms
|
||||
connInitLimit = 200
|
||||
disableAutoUpdate = "disabled"
|
||||
)
|
||||
|
||||
var ErrResetConnection = fmt.Errorf("reset connection")
|
||||
|
||||
// EngineConfig is a config for the Engine
|
||||
type EngineConfig struct {
|
||||
WgPort int
|
||||
WgIfaceName string
|
||||
@@ -143,6 +141,18 @@ type EngineConfig struct {
|
||||
LogPath string
|
||||
}
|
||||
|
||||
// EngineServices holds the external service dependencies required by the Engine.
|
||||
type EngineServices struct {
|
||||
SignalClient signal.Client
|
||||
MgmClient mgm.Client
|
||||
RelayManager *relayClient.Manager
|
||||
StatusRecorder *peer.Status
|
||||
Checks []*mgmProto.Checks
|
||||
StateManager *statemanager.Manager
|
||||
UpdateManager *updater.Manager
|
||||
ClientMetrics *metrics.ClientMetrics
|
||||
}
|
||||
|
||||
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
|
||||
type Engine struct {
|
||||
// signal is a Signal Service client
|
||||
@@ -208,11 +218,10 @@ type Engine struct {
|
||||
syncRespMux sync.RWMutex
|
||||
persistSyncResponse bool
|
||||
latestSyncResponse *mgmProto.SyncResponse
|
||||
connSemaphore *semaphoregroup.SemaphoreGroup
|
||||
flowManager nftypes.FlowManager
|
||||
|
||||
// auto-update
|
||||
updateManager *updatemanager.Manager
|
||||
updateManager *updater.Manager
|
||||
|
||||
// WireGuard interface monitor
|
||||
wgIfaceMonitor *WGIfaceMonitor
|
||||
@@ -222,8 +231,13 @@ type Engine struct {
|
||||
|
||||
probeStunTurn *relay.StunTurnProbe
|
||||
|
||||
// clientMetrics collects and pushes metrics
|
||||
clientMetrics *metrics.ClientMetrics
|
||||
|
||||
jobExecutor *jobexec.Executor
|
||||
jobExecutorWG sync.WaitGroup
|
||||
|
||||
exposeManager *expose.Manager
|
||||
}
|
||||
|
||||
// Peer is an instance of the Connection Peer
|
||||
@@ -240,22 +254,17 @@ type localIpUpdater interface {
|
||||
func NewEngine(
|
||||
clientCtx context.Context,
|
||||
clientCancel context.CancelFunc,
|
||||
signalClient signal.Client,
|
||||
mgmClient mgm.Client,
|
||||
relayManager *relayClient.Manager,
|
||||
config *EngineConfig,
|
||||
services EngineServices,
|
||||
mobileDep MobileDependency,
|
||||
statusRecorder *peer.Status,
|
||||
checks []*mgmProto.Checks,
|
||||
stateManager *statemanager.Manager,
|
||||
) *Engine {
|
||||
engine := &Engine{
|
||||
clientCtx: clientCtx,
|
||||
clientCancel: clientCancel,
|
||||
signal: signalClient,
|
||||
signaler: peer.NewSignaler(signalClient, config.WgPrivateKey),
|
||||
mgmClient: mgmClient,
|
||||
relayManager: relayManager,
|
||||
signal: services.SignalClient,
|
||||
signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey),
|
||||
mgmClient: services.MgmClient,
|
||||
relayManager: services.RelayManager,
|
||||
peerStore: peerstore.NewConnStore(),
|
||||
syncMsgMux: &sync.Mutex{},
|
||||
config: config,
|
||||
@@ -263,12 +272,13 @@ func NewEngine(
|
||||
STUNs: []*stun.URI{},
|
||||
TURNs: []*stun.URI{},
|
||||
networkSerial: 0,
|
||||
statusRecorder: statusRecorder,
|
||||
stateManager: stateManager,
|
||||
checks: checks,
|
||||
connSemaphore: semaphoregroup.NewSemaphoreGroup(connInitLimit),
|
||||
statusRecorder: services.StatusRecorder,
|
||||
stateManager: services.StateManager,
|
||||
checks: services.Checks,
|
||||
probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL),
|
||||
jobExecutor: jobexec.NewExecutor(),
|
||||
clientMetrics: services.ClientMetrics,
|
||||
updateManager: services.UpdateManager,
|
||||
}
|
||||
|
||||
log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String())
|
||||
@@ -311,7 +321,7 @@ func (e *Engine) Stop() error {
|
||||
}
|
||||
|
||||
if e.updateManager != nil {
|
||||
e.updateManager.Stop()
|
||||
e.updateManager.SetDownloadOnly()
|
||||
}
|
||||
|
||||
log.Info("cleaning up status recorder states")
|
||||
@@ -419,6 +429,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
|
||||
e.cancel()
|
||||
}
|
||||
e.ctx, e.cancel = context.WithCancel(e.clientCtx)
|
||||
e.exposeManager = expose.NewManager(e.ctx, e.mgmClient)
|
||||
|
||||
wgIface, err := e.newWgIface()
|
||||
if err != nil {
|
||||
@@ -560,13 +571,6 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) InitialUpdateHandling(autoUpdateSettings *mgmProto.AutoUpdateSettings) {
|
||||
e.syncMsgMux.Lock()
|
||||
defer e.syncMsgMux.Unlock()
|
||||
|
||||
e.handleAutoUpdateVersion(autoUpdateSettings, true)
|
||||
}
|
||||
|
||||
func (e *Engine) createFirewall() error {
|
||||
if e.config.DisableFirewall {
|
||||
log.Infof("firewall is disabled")
|
||||
@@ -794,45 +798,30 @@ func (e *Engine) PopulateNetbirdConfig(netbirdConfig *mgmProto.NetbirdConfig, mg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings, initialCheck bool) {
|
||||
func (e *Engine) handleAutoUpdateVersion(autoUpdateSettings *mgmProto.AutoUpdateSettings) {
|
||||
if e.updateManager == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if autoUpdateSettings == nil {
|
||||
return
|
||||
}
|
||||
|
||||
disabled := autoUpdateSettings.Version == disableAutoUpdate
|
||||
|
||||
// Stop and cleanup if disabled
|
||||
if e.updateManager != nil && disabled {
|
||||
log.Infof("auto-update is disabled, stopping update manager")
|
||||
e.updateManager.Stop()
|
||||
e.updateManager = nil
|
||||
if autoUpdateSettings.Version == disableAutoUpdate {
|
||||
log.Infof("auto-update is disabled")
|
||||
e.updateManager.SetDownloadOnly()
|
||||
return
|
||||
}
|
||||
|
||||
// Skip check unless AlwaysUpdate is enabled or this is the initial check at startup
|
||||
if !autoUpdateSettings.AlwaysUpdate && !initialCheck {
|
||||
log.Debugf("skipping auto-update check, AlwaysUpdate is false and this is not the initial check")
|
||||
return
|
||||
}
|
||||
|
||||
// Start manager if needed
|
||||
if e.updateManager == nil {
|
||||
log.Infof("starting auto-update manager")
|
||||
updateManager, err := updatemanager.NewManager(e.statusRecorder, e.stateManager)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
e.updateManager = updateManager
|
||||
e.updateManager.Start(e.ctx)
|
||||
}
|
||||
log.Infof("handling auto-update version: %s", autoUpdateSettings.Version)
|
||||
e.updateManager.SetVersion(autoUpdateSettings.Version)
|
||||
e.updateManager.SetVersion(autoUpdateSettings.Version, autoUpdateSettings.AlwaysUpdate)
|
||||
}
|
||||
|
||||
func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
|
||||
started := time.Now()
|
||||
defer func() {
|
||||
log.Infof("sync finished in %s", time.Since(started))
|
||||
duration := time.Since(started)
|
||||
log.Infof("sync finished in %s", duration)
|
||||
e.clientMetrics.RecordSyncDuration(e.ctx, duration)
|
||||
}()
|
||||
e.syncMsgMux.Lock()
|
||||
defer e.syncMsgMux.Unlock()
|
||||
@@ -843,7 +832,7 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
|
||||
}
|
||||
|
||||
if update.NetworkMap != nil && update.NetworkMap.PeerConfig != nil {
|
||||
e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate, false)
|
||||
e.handleAutoUpdateVersion(update.NetworkMap.PeerConfig.AutoUpdate)
|
||||
}
|
||||
|
||||
if update.GetNetbirdConfig() != nil {
|
||||
@@ -1008,10 +997,11 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error {
|
||||
return errors.New("wireguard interface is not initialized")
|
||||
}
|
||||
|
||||
// Cannot update the IP address without restarting the engine because
|
||||
// the firewall, route manager, and other components cache the old address
|
||||
if e.wgInterface.Address().String() != conf.Address {
|
||||
log.Infof("peer IP address has changed from %s to %s", e.wgInterface.Address().String(), conf.Address)
|
||||
log.Infof("peer IP address changed from %s to %s, restarting client", e.wgInterface.Address().String(), conf.Address)
|
||||
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
|
||||
e.clientCancel()
|
||||
return ErrResetConnection
|
||||
}
|
||||
|
||||
if conf.GetSshConfig() != nil {
|
||||
@@ -1079,6 +1069,7 @@ func (e *Engine) handleBundle(params *mgmProto.BundleParameters) (*mgmProto.JobR
|
||||
StatusRecorder: e.statusRecorder,
|
||||
SyncResponse: syncResponse,
|
||||
LogPath: e.config.LogPath,
|
||||
ClientMetrics: e.clientMetrics,
|
||||
RefreshStatus: func() {
|
||||
e.RunHealthProbes(true)
|
||||
},
|
||||
@@ -1316,8 +1307,7 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
|
||||
|
||||
// Test received (upstream) servers for availability right away instead of upon usage.
|
||||
// If no server of a server group responds this will disable the respective handler and retry later.
|
||||
e.dnsServer.ProbeAvailability()
|
||||
|
||||
go e.dnsServer.ProbeAvailability()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1534,12 +1524,12 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV
|
||||
}
|
||||
|
||||
serviceDependencies := peer.ServiceDependencies{
|
||||
StatusRecorder: e.statusRecorder,
|
||||
Signaler: e.signaler,
|
||||
IFaceDiscover: e.mobileDep.IFaceDiscover,
|
||||
RelayManager: e.relayManager,
|
||||
SrWatcher: e.srWatcher,
|
||||
Semaphore: e.connSemaphore,
|
||||
StatusRecorder: e.statusRecorder,
|
||||
Signaler: e.signaler,
|
||||
IFaceDiscover: e.mobileDep.IFaceDiscover,
|
||||
RelayManager: e.relayManager,
|
||||
SrWatcher: e.srWatcher,
|
||||
MetricsRecorder: e.clientMetrics,
|
||||
}
|
||||
peerConn, err := peer.NewConn(config, serviceDependencies)
|
||||
if err != nil {
|
||||
@@ -1562,8 +1552,10 @@ func (e *Engine) receiveSignalEvents() {
|
||||
defer e.shutdownWg.Done()
|
||||
// connect to a stream of messages coming from the signal server
|
||||
err := e.signal.Receive(e.ctx, func(msg *sProto.Message) error {
|
||||
start := time.Now()
|
||||
e.syncMsgMux.Lock()
|
||||
defer e.syncMsgMux.Unlock()
|
||||
gotLock := time.Since(start)
|
||||
|
||||
// Check context INSIDE lock to ensure atomicity with shutdown
|
||||
if e.ctx.Err() != nil {
|
||||
@@ -1587,6 +1579,8 @@ func (e *Engine) receiveSignalEvents() {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("receiveMSG: took %s to get lock for peer %s with session id %s", gotLock, msg.Key, offerAnswer.SessionID)
|
||||
|
||||
if msg.Body.Type == sProto.Body_OFFER {
|
||||
conn.OnRemoteOffer(*offerAnswer)
|
||||
} else {
|
||||
@@ -1820,11 +1814,23 @@ func (e *Engine) GetRouteManager() routemanager.Manager {
|
||||
return e.routeManager
|
||||
}
|
||||
|
||||
// GetFirewallManager returns the firewall manager
|
||||
// GetFirewallManager returns the firewall manager.
|
||||
func (e *Engine) GetFirewallManager() firewallManager.Manager {
|
||||
return e.firewall
|
||||
}
|
||||
|
||||
// GetExposeManager returns the expose session manager.
|
||||
func (e *Engine) GetExposeManager() *expose.Manager {
|
||||
e.syncMsgMux.Lock()
|
||||
defer e.syncMsgMux.Unlock()
|
||||
return e.exposeManager
|
||||
}
|
||||
|
||||
// GetClientMetrics returns the client metrics
|
||||
func (e *Engine) GetClientMetrics() *metrics.ClientMetrics {
|
||||
return e.clientMetrics
|
||||
}
|
||||
|
||||
func findIPFromInterfaceName(ifaceName string) (net.IP, error) {
|
||||
iface, err := net.InterfaceByName(ifaceName)
|
||||
if err != nil {
|
||||
|
||||
@@ -251,9 +251,6 @@ func TestEngine_SSH(t *testing.T) {
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
engine := NewEngine(
|
||||
ctx, cancel,
|
||||
&signal.MockClient{},
|
||||
&mgmt.MockClient{},
|
||||
relayMgr,
|
||||
&EngineConfig{
|
||||
WgIfaceName: "utun101",
|
||||
WgAddr: "100.64.0.1/24",
|
||||
@@ -263,10 +260,13 @@ func TestEngine_SSH(t *testing.T) {
|
||||
MTU: iface.DefaultMTU,
|
||||
SSHKey: sshKey,
|
||||
},
|
||||
EngineServices{
|
||||
SignalClient: &signal.MockClient{},
|
||||
MgmClient: &mgmt.MockClient{},
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
},
|
||||
MobileDependency{},
|
||||
peer.NewRecorder("https://mgm"),
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
engine.dnsServer = &dns.MockServer{
|
||||
@@ -428,13 +428,18 @@ func TestEngine_UpdateNetworkMap(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
|
||||
engine := NewEngine(ctx, cancel, &EngineConfig{
|
||||
WgIfaceName: "utun102",
|
||||
WgAddr: "100.64.0.1/24",
|
||||
WgPrivateKey: key,
|
||||
WgPort: 33100,
|
||||
MTU: iface.DefaultMTU,
|
||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
|
||||
}, EngineServices{
|
||||
SignalClient: &signal.MockClient{},
|
||||
MgmClient: &mgmt.MockClient{},
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
}, MobileDependency{})
|
||||
|
||||
wgIface := &MockWGIface{
|
||||
NameFunc: func() string { return "utun102" },
|
||||
@@ -647,13 +652,18 @@ func TestEngine_Sync(t *testing.T) {
|
||||
return nil
|
||||
}
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, relayMgr, &EngineConfig{
|
||||
engine := NewEngine(ctx, cancel, &EngineConfig{
|
||||
WgIfaceName: "utun103",
|
||||
WgAddr: "100.64.0.1/24",
|
||||
WgPrivateKey: key,
|
||||
WgPort: 33100,
|
||||
MTU: iface.DefaultMTU,
|
||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
|
||||
}, EngineServices{
|
||||
SignalClient: &signal.MockClient{},
|
||||
MgmClient: &mgmt.MockClient{SyncFunc: syncFunc},
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
}, MobileDependency{})
|
||||
engine.ctx = ctx
|
||||
|
||||
engine.dnsServer = &dns.MockServer{
|
||||
@@ -812,13 +822,18 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
|
||||
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
||||
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
|
||||
engine := NewEngine(ctx, cancel, &EngineConfig{
|
||||
WgIfaceName: wgIfaceName,
|
||||
WgAddr: wgAddr,
|
||||
WgPrivateKey: key,
|
||||
WgPort: 33100,
|
||||
MTU: iface.DefaultMTU,
|
||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
|
||||
}, EngineServices{
|
||||
SignalClient: &signal.MockClient{},
|
||||
MgmClient: &mgmt.MockClient{},
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
}, MobileDependency{})
|
||||
engine.ctx = ctx
|
||||
newNet, err := stdnet.NewNet(context.Background(), nil)
|
||||
if err != nil {
|
||||
@@ -1014,13 +1029,18 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) {
|
||||
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
||||
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
|
||||
engine := NewEngine(ctx, cancel, &EngineConfig{
|
||||
WgIfaceName: wgIfaceName,
|
||||
WgAddr: wgAddr,
|
||||
WgPrivateKey: key,
|
||||
WgPort: 33100,
|
||||
MTU: iface.DefaultMTU,
|
||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil)
|
||||
}, EngineServices{
|
||||
SignalClient: &signal.MockClient{},
|
||||
MgmClient: &mgmt.MockClient{},
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
}, MobileDependency{})
|
||||
engine.ctx = ctx
|
||||
|
||||
newNet, err := stdnet.NewNet(context.Background(), nil)
|
||||
@@ -1546,7 +1566,12 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin
|
||||
}
|
||||
|
||||
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
|
||||
e, err := NewEngine(ctx, cancel, signalClient, mgmtClient, relayMgr, conf, MobileDependency{}, peer.NewRecorder("https://mgm"), nil, nil), nil
|
||||
e, err := NewEngine(ctx, cancel, conf, EngineServices{
|
||||
SignalClient: signalClient,
|
||||
MgmClient: mgmtClient,
|
||||
RelayManager: relayMgr,
|
||||
StatusRecorder: peer.NewRecorder("https://mgm"),
|
||||
}, MobileDependency{}), nil
|
||||
e.ctx = ctx
|
||||
return e, err
|
||||
}
|
||||
|
||||
97
client/internal/expose/manager.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package expose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
mgm "github.com/netbirdio/netbird/shared/management/client"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const renewTimeout = 10 * time.Second
|
||||
|
||||
// Response holds the response from exposing a service.
|
||||
type Response struct {
|
||||
ServiceName string
|
||||
ServiceURL string
|
||||
Domain string
|
||||
PortAutoAssigned bool
|
||||
}
|
||||
|
||||
// Request holds the parameters for exposing a service.
type Request struct {
|
||||
NamePrefix string
|
||||
Domain string
|
||||
Port uint16
|
||||
Protocol int
|
||||
Pin string
|
||||
Password string
|
||||
UserGroups []string
|
||||
ListenPort uint16
|
||||
}
|
||||
|
||||
type ManagementClient interface {
|
||||
CreateExpose(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error)
|
||||
RenewExpose(ctx context.Context, domain string) error
|
||||
StopExpose(ctx context.Context, domain string) error
|
||||
}
|
||||
|
||||
// Manager handles expose session lifecycle via the management client.
|
||||
type Manager struct {
|
||||
mgmClient ManagementClient
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// NewManager creates a new expose Manager using the given management client.
|
||||
func NewManager(ctx context.Context, mgmClient ManagementClient) *Manager {
|
||||
return &Manager{mgmClient: mgmClient, ctx: ctx}
|
||||
}
|
||||
|
||||
// Expose creates a new expose session via the management server.
|
||||
func (m *Manager) Expose(ctx context.Context, req Request) (*Response, error) {
|
||||
log.Infof("exposing service on port %d", req.Port)
|
||||
resp, err := m.mgmClient.CreateExpose(ctx, toClientExposeRequest(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("expose session created for %s", resp.Domain)
|
||||
|
||||
return fromClientExposeResponse(resp), nil
|
||||
}
|
||||
|
||||
// KeepAlive periodically renews the expose session until the context is canceled, then stops the session.
func (m *Manager) KeepAlive(ctx context.Context, domain string) error {
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
defer m.stop(domain)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Infof("context canceled, stopping keep alive for %s", domain)
|
||||
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
if err := m.renew(ctx, domain); err != nil {
|
||||
log.Errorf("renewing expose session for %s: %v", domain, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// renew extends the TTL of an active expose session.
|
||||
func (m *Manager) renew(ctx context.Context, domain string) error {
|
||||
renewCtx, cancel := context.WithTimeout(ctx, renewTimeout)
|
||||
defer cancel()
|
||||
return m.mgmClient.RenewExpose(renewCtx, domain)
|
||||
}
|
||||
|
||||
// stop terminates an active expose session.
|
||||
func (m *Manager) stop(domain string) {
|
||||
stopCtx, cancel := context.WithTimeout(m.ctx, renewTimeout)
|
||||
defer cancel()
|
||||
err := m.mgmClient.StopExpose(stopCtx, domain)
|
||||
if err != nil {
|
||||
log.Warnf("Failed stopping expose session for %s: %v", domain, err)
|
||||
}
|
||||
}
|
||||
95
client/internal/expose/manager_test.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package expose
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
daemonProto "github.com/netbirdio/netbird/client/proto"
|
||||
mgm "github.com/netbirdio/netbird/shared/management/client"
|
||||
)
|
||||
|
||||
func TestManager_Expose_Success(t *testing.T) {
|
||||
mock := &mgm.MockClient{
|
||||
CreateExposeFunc: func(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error) {
|
||||
return &mgm.ExposeResponse{
|
||||
ServiceName: "my-service",
|
||||
ServiceURL: "https://my-service.example.com",
|
||||
Domain: "my-service.example.com",
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
|
||||
m := NewManager(context.Background(), mock)
|
||||
result, err := m.Expose(context.Background(), Request{Port: 8080})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "my-service", result.ServiceName, "service name should match")
|
||||
assert.Equal(t, "https://my-service.example.com", result.ServiceURL, "service URL should match")
|
||||
assert.Equal(t, "my-service.example.com", result.Domain, "domain should match")
|
||||
}
|
||||
|
||||
func TestManager_Expose_Error(t *testing.T) {
|
||||
mock := &mgm.MockClient{
|
||||
CreateExposeFunc: func(ctx context.Context, req mgm.ExposeRequest) (*mgm.ExposeResponse, error) {
|
||||
return nil, errors.New("permission denied")
|
||||
},
|
||||
}
|
||||
|
||||
m := NewManager(context.Background(), mock)
|
||||
_, err := m.Expose(context.Background(), Request{Port: 8080})
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "permission denied", "error should propagate")
|
||||
}
|
||||
|
||||
func TestManager_Renew_Success(t *testing.T) {
|
||||
mock := &mgm.MockClient{
|
||||
RenewExposeFunc: func(ctx context.Context, domain string) error {
|
||||
assert.Equal(t, "my-service.example.com", domain, "domain should be passed through")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
m := NewManager(context.Background(), mock)
|
||||
err := m.renew(context.Background(), "my-service.example.com")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestManager_Renew_Timeout(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
|
||||
mock := &mgm.MockClient{
|
||||
RenewExposeFunc: func(ctx context.Context, domain string) error {
|
||||
return ctx.Err()
|
||||
},
|
||||
}
|
||||
|
||||
m := NewManager(ctx, mock)
|
||||
err := m.renew(ctx, "my-service.example.com")
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestNewRequest(t *testing.T) {
|
||||
req := &daemonProto.ExposeServiceRequest{
|
||||
Port: 8080,
|
||||
Protocol: daemonProto.ExposeProtocol_EXPOSE_HTTPS,
|
||||
Pin: "123456",
|
||||
Password: "secret",
|
||||
UserGroups: []string{"group1", "group2"},
|
||||
Domain: "custom.example.com",
|
||||
NamePrefix: "my-prefix",
|
||||
}
|
||||
|
||||
exposeReq := NewRequest(req)
|
||||
|
||||
assert.Equal(t, uint16(8080), exposeReq.Port, "port should match")
|
||||
assert.Equal(t, int(daemonProto.ExposeProtocol_EXPOSE_HTTPS), exposeReq.Protocol, "protocol should match")
|
||||
assert.Equal(t, "123456", exposeReq.Pin, "pin should match")
|
||||
assert.Equal(t, "secret", exposeReq.Password, "password should match")
|
||||
assert.Equal(t, []string{"group1", "group2"}, exposeReq.UserGroups, "user groups should match")
|
||||
assert.Equal(t, "custom.example.com", exposeReq.Domain, "domain should match")
|
||||
assert.Equal(t, "my-prefix", exposeReq.NamePrefix, "name prefix should match")
|
||||
}
|
||||
42
client/internal/expose/request.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package expose
|
||||
|
||||
import (
|
||||
daemonProto "github.com/netbirdio/netbird/client/proto"
|
||||
mgm "github.com/netbirdio/netbird/shared/management/client"
|
||||
)
|
||||
|
||||
// NewRequest converts a daemon ExposeServiceRequest into an expose Request.
|
||||
func NewRequest(req *daemonProto.ExposeServiceRequest) *Request {
|
||||
return &Request{
|
||||
Port: uint16(req.Port),
|
||||
Protocol: int(req.Protocol),
|
||||
Pin: req.Pin,
|
||||
Password: req.Password,
|
||||
UserGroups: req.UserGroups,
|
||||
Domain: req.Domain,
|
||||
NamePrefix: req.NamePrefix,
|
||||
ListenPort: uint16(req.ListenPort),
|
||||
}
|
||||
}
|
||||
|
||||
func toClientExposeRequest(req Request) mgm.ExposeRequest {
|
||||
return mgm.ExposeRequest{
|
||||
NamePrefix: req.NamePrefix,
|
||||
Domain: req.Domain,
|
||||
Port: req.Port,
|
||||
Protocol: req.Protocol,
|
||||
Pin: req.Pin,
|
||||
Password: req.Password,
|
||||
UserGroups: req.UserGroups,
|
||||
ListenPort: req.ListenPort,
|
||||
}
|
||||
}
|
||||
|
||||
func fromClientExposeResponse(response *mgm.ExposeResponse) *Response {
|
||||
return &Response{
|
||||
ServiceName: response.ServiceName,
|
||||
Domain: response.Domain,
|
||||
ServiceURL: response.ServiceURL,
|
||||
PortAutoAssigned: response.PortAutoAssigned,
|
||||
}
|
||||
}
|
||||
17
client/internal/metrics/connection_type.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package metrics
|
||||
|
||||
// ConnectionType represents the type of peer connection
|
||||
type ConnectionType string
|
||||
|
||||
const (
|
||||
// ConnectionTypeICE represents a direct peer-to-peer connection using ICE
|
||||
ConnectionTypeICE ConnectionType = "ice"
|
||||
|
||||
// ConnectionTypeRelay represents a relayed connection
|
||||
ConnectionTypeRelay ConnectionType = "relay"
|
||||
)
|
||||
|
||||
// String returns the string representation of the connection type
|
||||
func (c ConnectionType) String() string {
|
||||
return string(c)
|
||||
}
|
||||
51
client/internal/metrics/deployment_type.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DeploymentType represents the type of NetBird deployment
|
||||
type DeploymentType int
|
||||
|
||||
const (
|
||||
// DeploymentTypeUnknown represents an unknown or uninitialized deployment type
|
||||
DeploymentTypeUnknown DeploymentType = iota
|
||||
|
||||
// DeploymentTypeCloud represents a cloud-hosted NetBird deployment
|
||||
DeploymentTypeCloud
|
||||
|
||||
// DeploymentTypeSelfHosted represents a self-hosted NetBird deployment
|
||||
DeploymentTypeSelfHosted
|
||||
)
|
||||
|
||||
// String returns the string representation of the deployment type
|
||||
func (d DeploymentType) String() string {
|
||||
switch d {
|
||||
case DeploymentTypeCloud:
|
||||
return "cloud"
|
||||
case DeploymentTypeSelfHosted:
|
||||
return "selfhosted"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// DetermineDeploymentType determines if the deployment is cloud or self-hosted
|
||||
// based on the management URL string
|
||||
func DetermineDeploymentType(managementURL string) DeploymentType {
|
||||
if managementURL == "" {
|
||||
return DeploymentTypeUnknown
|
||||
}
|
||||
|
||||
u, err := url.Parse(managementURL)
|
||||
if err != nil {
|
||||
return DeploymentTypeSelfHosted
|
||||
}
|
||||
|
||||
if strings.ToLower(u.Hostname()) == "api.netbird.io" {
|
||||
return DeploymentTypeCloud
|
||||
}
|
||||
|
||||
return DeploymentTypeSelfHosted
|
||||
}
|
||||
93
client/internal/metrics/env.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// EnvMetricsPushEnabled controls whether collected metrics are pushed to the backend.
|
||||
// Metrics collection itself is always active (for debug bundles).
|
||||
// Disabled by default. Set NB_METRICS_PUSH_ENABLED=true to enable push.
|
||||
EnvMetricsPushEnabled = "NB_METRICS_PUSH_ENABLED"
|
||||
|
||||
// EnvMetricsForceSending if set to true, skips remote configuration fetch and forces metric sending
|
||||
EnvMetricsForceSending = "NB_METRICS_FORCE_SENDING"
|
||||
|
||||
// EnvMetricsConfigURL is the environment variable to override the metrics push config ServerAddress
|
||||
EnvMetricsConfigURL = "NB_METRICS_CONFIG_URL"
|
||||
|
||||
// EnvMetricsServerURL is the environment variable to override the metrics server address.
|
||||
// When set, this takes precedence over the server_url from remote push config.
|
||||
EnvMetricsServerURL = "NB_METRICS_SERVER_URL"
|
||||
|
||||
// EnvMetricsInterval overrides the push interval from the remote config.
|
||||
// Only affects how often metrics are pushed; remote config availability
|
||||
// and version range checks are still respected.
|
||||
// Format: duration string like "1h", "30m", "4h"
|
||||
EnvMetricsInterval = "NB_METRICS_INTERVAL"
|
||||
|
||||
defaultMetricsConfigURL = "https://ingest.netbird.io/config"
|
||||
)
|
||||
|
||||
// IsMetricsPushEnabled returns true if metrics push is enabled via NB_METRICS_PUSH_ENABLED env var.
|
||||
// Disabled by default. Metrics collection is always active for debug bundles.
|
||||
func IsMetricsPushEnabled() bool {
|
||||
enabled, _ := strconv.ParseBool(os.Getenv(EnvMetricsPushEnabled))
|
||||
return enabled
|
||||
}
|
||||
|
||||
// getMetricsInterval returns the metrics push interval from NB_METRICS_INTERVAL env var.
|
||||
// Returns 0 if not set or invalid.
|
||||
func getMetricsInterval() time.Duration {
|
||||
intervalStr := os.Getenv(EnvMetricsInterval)
|
||||
if intervalStr == "" {
|
||||
return 0
|
||||
}
|
||||
interval, err := time.ParseDuration(intervalStr)
|
||||
if err != nil {
|
||||
log.Warnf("invalid metrics interval from env %q: %v", intervalStr, err)
|
||||
return 0
|
||||
}
|
||||
if interval <= 0 {
|
||||
log.Warnf("invalid metrics interval from env %q: must be positive", intervalStr)
|
||||
return 0
|
||||
}
|
||||
return interval
|
||||
}
|
||||
|
||||
func isForceSending() bool {
|
||||
force, _ := strconv.ParseBool(os.Getenv(EnvMetricsForceSending))
|
||||
return force
|
||||
}
|
||||
|
||||
// getMetricsConfigURL returns the URL to fetch push configuration from
|
||||
func getMetricsConfigURL() string {
|
||||
if envURL := os.Getenv(EnvMetricsConfigURL); envURL != "" {
|
||||
return envURL
|
||||
}
|
||||
return defaultMetricsConfigURL
|
||||
}
|
||||
|
||||
// getMetricsServerURL returns the metrics server URL from NB_METRICS_SERVER_URL env var.
|
||||
// Returns nil if not set or invalid.
|
||||
func getMetricsServerURL() *url.URL {
|
||||
envURL := os.Getenv(EnvMetricsServerURL)
|
||||
if envURL == "" {
|
||||
return nil
|
||||
}
|
||||
parsed, err := url.ParseRequestURI(envURL)
|
||||
if err != nil || parsed.Host == "" {
|
||||
log.Warnf("invalid metrics server URL %q: must be an absolute HTTP(S) URL", envURL)
|
||||
return nil
|
||||
}
|
||||
if parsed.Scheme != "http" && parsed.Scheme != "https" {
|
||||
log.Warnf("invalid metrics server URL %q: unsupported scheme %q", envURL, parsed.Scheme)
|
||||
return nil
|
||||
}
|
||||
return parsed
|
||||
}
|
||||
219
client/internal/metrics/influxdb.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
maxSampleAge = 5 * 24 * time.Hour // drop samples older than 5 days
|
||||
maxBufferSize = 5 * 1024 * 1024 // drop oldest samples when estimated size exceeds 5 MB
|
||||
// estimatedSampleSize is a rough per-sample memory estimate (measurement + tags + fields + timestamp)
|
||||
estimatedSampleSize = 256
|
||||
)
|
||||
|
||||
// influxSample is a single InfluxDB line protocol entry.
|
||||
type influxSample struct {
|
||||
measurement string
|
||||
tags string
|
||||
fields map[string]float64
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// influxDBMetrics collects metric events as timestamped samples.
|
||||
// Each event is recorded with its exact timestamp, pushed once, then cleared.
|
||||
type influxDBMetrics struct {
|
||||
mu sync.Mutex
|
||||
samples []influxSample
|
||||
}
|
||||
|
||||
func newInfluxDBMetrics() metricsImplementation {
|
||||
return &influxDBMetrics{}
|
||||
}
|
||||
func (m *influxDBMetrics) RecordConnectionStages(
|
||||
_ context.Context,
|
||||
agentInfo AgentInfo,
|
||||
connectionPairID string,
|
||||
connectionType ConnectionType,
|
||||
isReconnection bool,
|
||||
timestamps ConnectionStageTimestamps,
|
||||
) {
|
||||
var signalingReceivedToConnection, connectionToWgHandshake, totalDuration float64
|
||||
|
||||
if !timestamps.SignalingReceived.IsZero() && !timestamps.ConnectionReady.IsZero() {
|
||||
signalingReceivedToConnection = timestamps.ConnectionReady.Sub(timestamps.SignalingReceived).Seconds()
|
||||
}
|
||||
|
||||
if !timestamps.ConnectionReady.IsZero() && !timestamps.WgHandshakeSuccess.IsZero() {
|
||||
connectionToWgHandshake = timestamps.WgHandshakeSuccess.Sub(timestamps.ConnectionReady).Seconds()
|
||||
}
|
||||
|
||||
if !timestamps.SignalingReceived.IsZero() && !timestamps.WgHandshakeSuccess.IsZero() {
|
||||
totalDuration = timestamps.WgHandshakeSuccess.Sub(timestamps.SignalingReceived).Seconds()
|
||||
}
|
||||
|
||||
attemptType := "initial"
|
||||
if isReconnection {
|
||||
attemptType = "reconnection"
|
||||
}
|
||||
|
||||
connTypeStr := connectionType.String()
|
||||
tags := fmt.Sprintf("deployment_type=%s,connection_type=%s,attempt_type=%s,version=%s,os=%s,arch=%s,peer_id=%s,connection_pair_id=%s",
|
||||
agentInfo.DeploymentType.String(),
|
||||
connTypeStr,
|
||||
attemptType,
|
||||
agentInfo.Version,
|
||||
agentInfo.OS,
|
||||
agentInfo.Arch,
|
||||
agentInfo.peerID,
|
||||
connectionPairID,
|
||||
)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.samples = append(m.samples, influxSample{
|
||||
measurement: "netbird_peer_connection",
|
||||
tags: tags,
|
||||
fields: map[string]float64{
|
||||
"signaling_to_connection_seconds": signalingReceivedToConnection,
|
||||
"connection_to_wg_handshake_seconds": connectionToWgHandshake,
|
||||
"total_seconds": totalDuration,
|
||||
},
|
||||
timestamp: now,
|
||||
})
|
||||
m.trimLocked()
|
||||
|
||||
log.Tracef("peer connection metrics [%s, %s, %s]: signalingReceived→connection: %.3fs, connection→wg_handshake: %.3fs, total: %.3fs",
|
||||
agentInfo.DeploymentType.String(), connTypeStr, attemptType, signalingReceivedToConnection, connectionToWgHandshake, totalDuration)
|
||||
}
|
||||
|
||||
func (m *influxDBMetrics) RecordSyncDuration(_ context.Context, agentInfo AgentInfo, duration time.Duration) {
|
||||
tags := fmt.Sprintf("deployment_type=%s,version=%s,os=%s,arch=%s,peer_id=%s",
|
||||
agentInfo.DeploymentType.String(),
|
||||
agentInfo.Version,
|
||||
agentInfo.OS,
|
||||
agentInfo.Arch,
|
||||
agentInfo.peerID,
|
||||
)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.samples = append(m.samples, influxSample{
|
||||
measurement: "netbird_sync",
|
||||
tags: tags,
|
||||
fields: map[string]float64{
|
||||
"duration_seconds": duration.Seconds(),
|
||||
},
|
||||
timestamp: time.Now(),
|
||||
})
|
||||
m.trimLocked()
|
||||
}
|
||||
|
||||
func (m *influxDBMetrics) RecordLoginDuration(_ context.Context, agentInfo AgentInfo, duration time.Duration, success bool) {
|
||||
result := "success"
|
||||
if !success {
|
||||
result = "failure"
|
||||
}
|
||||
|
||||
tags := fmt.Sprintf("deployment_type=%s,result=%s,version=%s,os=%s,arch=%s,peer_id=%s",
|
||||
agentInfo.DeploymentType.String(),
|
||||
result,
|
||||
agentInfo.Version,
|
||||
agentInfo.OS,
|
||||
agentInfo.Arch,
|
||||
agentInfo.peerID,
|
||||
)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.samples = append(m.samples, influxSample{
|
||||
measurement: "netbird_login",
|
||||
tags: tags,
|
||||
fields: map[string]float64{
|
||||
"duration_seconds": duration.Seconds(),
|
||||
},
|
||||
timestamp: time.Now(),
|
||||
})
|
||||
m.trimLocked()
|
||||
|
||||
log.Tracef("login metrics [%s, %s]: duration=%.3fs", agentInfo.DeploymentType.String(), result, duration.Seconds())
|
||||
}
|
||||
|
||||
// Export writes pending samples in InfluxDB line protocol format.
|
||||
// Format: measurement,tag=val,tag=val field=val,field=val timestamp_ns
|
||||
func (m *influxDBMetrics) Export(w io.Writer) error {
|
||||
m.mu.Lock()
|
||||
samples := make([]influxSample, len(m.samples))
|
||||
copy(samples, m.samples)
|
||||
m.mu.Unlock()
|
||||
|
||||
for _, s := range samples {
|
||||
if _, err := fmt.Fprintf(w, "%s,%s ", s.measurement, s.tags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sortedKeys := slices.Sorted(maps.Keys(s.fields))
|
||||
first := true
|
||||
for _, k := range sortedKeys {
|
||||
if !first {
|
||||
if _, err := fmt.Fprint(w, ","); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := fmt.Fprintf(w, "%s=%g", k, s.fields[k]); err != nil {
|
||||
return err
|
||||
}
|
||||
first = false
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(w, " %d\n", s.timestamp.UnixNano()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reset clears pending samples after a successful push
|
||||
func (m *influxDBMetrics) Reset() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.samples = m.samples[:0]
|
||||
}
|
||||
|
||||
// trimLocked removes samples that exceed age or size limits.
|
||||
// Must be called with m.mu held.
|
||||
func (m *influxDBMetrics) trimLocked() {
|
||||
now := time.Now()
|
||||
|
||||
// drop samples older than maxSampleAge
|
||||
cutoff := 0
|
||||
for cutoff < len(m.samples) && now.Sub(m.samples[cutoff].timestamp) > maxSampleAge {
|
||||
cutoff++
|
||||
}
|
||||
if cutoff > 0 {
|
||||
copy(m.samples, m.samples[cutoff:])
|
||||
m.samples = m.samples[:len(m.samples)-cutoff]
|
||||
log.Debugf("influxdb metrics: dropped %d samples older than %s", cutoff, maxSampleAge)
|
||||
}
|
||||
|
||||
// drop oldest samples if estimated size exceeds maxBufferSize
|
||||
maxSamples := maxBufferSize / estimatedSampleSize
|
||||
if len(m.samples) > maxSamples {
|
||||
drop := len(m.samples) - maxSamples
|
||||
copy(m.samples, m.samples[drop:])
|
||||
m.samples = m.samples[:maxSamples]
|
||||
log.Debugf("influxdb metrics: dropped %d oldest samples to stay under %d MB size limit", drop, maxBufferSize/(1024*1024))
|
||||
}
|
||||
}
|
||||
229
client/internal/metrics/influxdb_test.go
Normal file
@@ -0,0 +1,229 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInfluxDBMetrics_RecordAndExport(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeCloud,
|
||||
Version: "1.0.0",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
peerID: "abc123",
|
||||
}
|
||||
|
||||
ts := ConnectionStageTimestamps{
|
||||
SignalingReceived: time.Now().Add(-3 * time.Second),
|
||||
ConnectionReady: time.Now().Add(-2 * time.Second),
|
||||
WgHandshakeSuccess: time.Now().Add(-1 * time.Second),
|
||||
}
|
||||
|
||||
m.RecordConnectionStages(context.Background(), agentInfo, "pair123", ConnectionTypeICE, false, ts)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "netbird_peer_connection,")
|
||||
assert.Contains(t, output, "connection_to_wg_handshake_seconds=")
|
||||
assert.Contains(t, output, "signaling_to_connection_seconds=")
|
||||
assert.Contains(t, output, "total_seconds=")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_ExportDeterministicFieldOrder(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeCloud,
|
||||
Version: "1.0.0",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
peerID: "abc123",
|
||||
}
|
||||
|
||||
ts := ConnectionStageTimestamps{
|
||||
SignalingReceived: time.Now().Add(-3 * time.Second),
|
||||
ConnectionReady: time.Now().Add(-2 * time.Second),
|
||||
WgHandshakeSuccess: time.Now().Add(-1 * time.Second),
|
||||
}
|
||||
|
||||
// Record multiple times and verify consistent field order
|
||||
for i := 0; i < 10; i++ {
|
||||
m.RecordConnectionStages(context.Background(), agentInfo, "pair123", ConnectionTypeICE, false, ts)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(buf.String()), "\n")
|
||||
require.Len(t, lines, 10)
|
||||
|
||||
// Extract field portion from each line and verify they're all identical
|
||||
var fieldSections []string
|
||||
for _, line := range lines {
|
||||
parts := strings.SplitN(line, " ", 3)
|
||||
require.Len(t, parts, 3, "each line should have measurement, fields, timestamp")
|
||||
fieldSections = append(fieldSections, parts[1])
|
||||
}
|
||||
|
||||
for i := 1; i < len(fieldSections); i++ {
|
||||
assert.Equal(t, fieldSections[0], fieldSections[i], "field order should be deterministic across samples")
|
||||
}
|
||||
|
||||
// Fields should be alphabetically sorted
|
||||
assert.True(t, strings.HasPrefix(fieldSections[0], "connection_to_wg_handshake_seconds="),
|
||||
"fields should be sorted: connection_to_wg < signaling_to < total")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_RecordSyncDuration(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeSelfHosted,
|
||||
Version: "2.0.0",
|
||||
OS: "darwin",
|
||||
Arch: "arm64",
|
||||
peerID: "def456",
|
||||
}
|
||||
|
||||
m.RecordSyncDuration(context.Background(), agentInfo, 1500*time.Millisecond)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "netbird_sync,")
|
||||
assert.Contains(t, output, "duration_seconds=1.5")
|
||||
assert.Contains(t, output, "deployment_type=selfhosted")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_Reset(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeCloud,
|
||||
Version: "1.0.0",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
peerID: "abc123",
|
||||
}
|
||||
|
||||
m.RecordSyncDuration(context.Background(), agentInfo, time.Second)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, buf.String())
|
||||
|
||||
m.Reset()
|
||||
|
||||
buf.Reset()
|
||||
err = m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, buf.String(), "should be empty after reset")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_ExportEmpty(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, buf.String())
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_TrimByAge(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
m.mu.Lock()
|
||||
m.samples = append(m.samples, influxSample{
|
||||
measurement: "old",
|
||||
tags: "t=1",
|
||||
fields: map[string]float64{"v": 1},
|
||||
timestamp: time.Now().Add(-maxSampleAge - time.Hour),
|
||||
})
|
||||
m.trimLocked()
|
||||
remaining := len(m.samples)
|
||||
m.mu.Unlock()
|
||||
|
||||
assert.Equal(t, 0, remaining, "old samples should be trimmed")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_RecordLoginDuration(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeCloud,
|
||||
Version: "1.0.0",
|
||||
OS: "linux",
|
||||
Arch: "amd64",
|
||||
peerID: "abc123",
|
||||
}
|
||||
|
||||
m.RecordLoginDuration(context.Background(), agentInfo, 2500*time.Millisecond, true)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "netbird_login,")
|
||||
assert.Contains(t, output, "duration_seconds=2.5")
|
||||
assert.Contains(t, output, "result=success")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_RecordLoginDurationFailure(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
agentInfo := AgentInfo{
|
||||
DeploymentType: DeploymentTypeSelfHosted,
|
||||
Version: "1.0.0",
|
||||
OS: "darwin",
|
||||
Arch: "arm64",
|
||||
peerID: "xyz789",
|
||||
}
|
||||
|
||||
m.RecordLoginDuration(context.Background(), agentInfo, 5*time.Second, false)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := m.Export(&buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
output := buf.String()
|
||||
assert.Contains(t, output, "netbird_login,")
|
||||
assert.Contains(t, output, "result=failure")
|
||||
assert.Contains(t, output, "deployment_type=selfhosted")
|
||||
}
|
||||
|
||||
func TestInfluxDBMetrics_TrimBySize(t *testing.T) {
|
||||
m := newInfluxDBMetrics().(*influxDBMetrics)
|
||||
|
||||
maxSamples := maxBufferSize / estimatedSampleSize
|
||||
m.mu.Lock()
|
||||
for i := 0; i < maxSamples+100; i++ {
|
||||
m.samples = append(m.samples, influxSample{
|
||||
measurement: "test",
|
||||
tags: "t=1",
|
||||
fields: map[string]float64{"v": float64(i)},
|
||||
timestamp: time.Now(),
|
||||
})
|
||||
}
|
||||
m.trimLocked()
|
||||
remaining := len(m.samples)
|
||||
m.mu.Unlock()
|
||||
|
||||
assert.Equal(t, maxSamples, remaining, "should trim to max samples")
|
||||
}
|
||||
16
client/internal/metrics/infra/.env.example
Normal file
@@ -0,0 +1,16 @@
|
||||
# Copy to .env and adjust values before running docker compose
|
||||
|
||||
# InfluxDB admin (server-side only, never exposed to clients)
|
||||
INFLUXDB_ADMIN_PASSWORD=changeme
|
||||
INFLUXDB_ADMIN_TOKEN=changeme
|
||||
|
||||
# Grafana admin credentials
|
||||
GRAFANA_ADMIN_USER=admin
|
||||
GRAFANA_ADMIN_PASSWORD=changeme
|
||||
|
||||
# Remote config served by ingest at /config
|
||||
# Set CONFIG_METRICS_SERVER_URL to the ingest server's public address to enable
|
||||
CONFIG_METRICS_SERVER_URL=
|
||||
CONFIG_VERSION_SINCE=0.0.0
|
||||
CONFIG_VERSION_UNTIL=99.99.99
|
||||
CONFIG_PERIOD_MINUTES=5
|
||||
1
client/internal/metrics/infra/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
.env
|
||||
194
client/internal/metrics/infra/README.md
Normal file
@@ -0,0 +1,194 @@
|
||||
# Client Metrics
|
||||
|
||||
Internal documentation for the NetBird client metrics system.
|
||||
|
||||
## Overview
|
||||
|
||||
Client metrics track connection setup performance, sync durations, and login durations using the InfluxDB line protocol (`influxdb.go`). Each event is recorded with its timestamp, pushed once, then cleared.
|
||||
|
||||
Metrics collection is always active (for debug bundles). Push to backend is:
|
||||
- Disabled by default (opt-in via `NB_METRICS_PUSH_ENABLED=true`)
|
||||
- Managed at daemon layer (survives engine restarts)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Layer Separation
|
||||
|
||||
```text
|
||||
Daemon Layer (connect.go)
|
||||
├─ Creates ClientMetrics instance once
|
||||
├─ Starts/stops push lifecycle
|
||||
└─ Updates AgentInfo on profile switch
|
||||
│
|
||||
▼
|
||||
Engine Layer (engine.go)
|
||||
└─ Records metrics via ClientMetrics methods
|
||||
```
|
||||
|
||||
### Ingest Server
|
||||
|
||||
Clients do not talk to InfluxDB directly. An ingest server sits between clients and InfluxDB:
|
||||
|
||||
```text
|
||||
Client ──POST──▶ Ingest Server (:8087) ──▶ InfluxDB (internal)
|
||||
│
|
||||
├─ Validates line protocol
|
||||
├─ Allowlists measurements, fields, and tags
|
||||
├─ Rejects out-of-bound values
|
||||
└─ Serves remote config at /config
|
||||
```
|
||||
|
||||
- **No secret/token-based client auth** — the ingest server holds the InfluxDB token server-side. Clients must send a hashed peer ID via `X-Peer-ID` header.
|
||||
- **InfluxDB is not exposed** — only accessible within the docker network
|
||||
- Source: `ingest/main.go`
|
||||
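A hedged sketch of what a client-side push to the ingest server could look like, based only on the flow above (a POST of line-protocol text plus the `X-Peer-ID` header); the function name, payload, and error handling are illustrative, not the actual client code:

```go
package metricsclient

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"time"
)

// pushToIngest POSTs InfluxDB line-protocol samples to the ingest server.
// The server validates the payload and holds the InfluxDB token itself,
// so the client only identifies itself with a hashed peer ID.
func pushToIngest(ctx context.Context, serverURL, hashedPeerID string, lineProtocol []byte) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, serverURL, bytes.NewReader(lineProtocol))
	if err != nil {
		return err
	}
	req.Header.Set("X-Peer-ID", hashedPeerID)

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= http.StatusMultipleChoices {
		return fmt.Errorf("ingest server rejected metrics: %s", resp.Status)
	}
	return nil
}
```

The payload itself is plain line protocol as produced by `Export`, e.g. `netbird_sync,deployment_type=cloud,os=linux duration_seconds=1.5 1700000000000000000` (illustrative values).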
|
||||
## Metrics Collected
|
||||
|
||||
### Connection Stage Timing
|
||||
|
||||
Measurement: `netbird_peer_connection`
|
||||
|
||||
| Field | Timestamp span | Description |
|
||||
|-------|-----------|-------------|
|
||||
| `signaling_to_connection_seconds` | `SignalingReceived → ConnectionReady` | ICE/relay negotiation time after the first signal is received from the remote peer |
|
||||
| `connection_to_wg_handshake_seconds` | `ConnectionReady → WgHandshakeSuccess` | WireGuard cryptographic handshake latency once the transport layer is ready |
|
||||
| `total_seconds` | `SignalingReceived → WgHandshakeSuccess` | End-to-end connection time anchored at the first received signal |
|
||||
|
||||
Tags:
|
||||
- `deployment_type`: "cloud" | "selfhosted" | "unknown"
|
||||
- `connection_type`: "ice" | "relay"
|
||||
- `attempt_type`: "initial" | "reconnection"
|
||||
- `version`: NetBird version string
|
||||
- `os`: Operating system (linux, darwin, windows, android, ios, etc.)
|
||||
- `arch`: CPU architecture (amd64, arm64, etc.)
|
||||
|
||||
**Note:** `SignalingReceived` is set when the first offer or answer arrives from the remote peer (in both initial and reconnection paths). It excludes the potentially unbounded wait for the remote peer to come online.
|
||||
|
||||
### Sync Duration
|
||||
|
||||
Measurement: `netbird_sync`
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| `duration_seconds` | Time to process a sync message from management server |
|
||||
|
||||
Tags:
|
||||
- `deployment_type`: "cloud" | "selfhosted" | "unknown"
|
||||
- `version`: NetBird version string
|
||||
- `os`: Operating system (linux, darwin, windows, android, ios, etc.)
|
||||
- `arch`: CPU architecture (amd64, arm64, etc.)
|
||||
|
||||
### Login Duration
|
||||
|
||||
Measurement: `netbird_login`
|
||||
|
||||
| Field | Description |
|
||||
|-------|-------------|
|
||||
| `duration_seconds` | Time to complete the login/auth exchange with management server |
|
||||
|
||||
Tags:
|
||||
- `deployment_type`: "cloud" | "selfhosted" | "unknown"
|
||||
- `result`: "success" | "failure"
|
||||
- `version`: NetBird version string
|
||||
- `os`: Operating system (linux, darwin, windows, android, ios, etc.)
|
||||
- `arch`: CPU architecture (amd64, arm64, etc.)
|
||||
|
||||
## Buffer Limits
|
||||
|
||||
The InfluxDB backend limits in-memory sample storage to prevent unbounded growth when pushes fail:
|
||||
- **Max age:** Samples older than 5 days are dropped
|
||||
- **Max size:** Estimated buffer size capped at 5 MB (~20k samples)
|
||||
|
||||
## Configuration
|
||||
|
||||
### Client Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `NB_METRICS_PUSH_ENABLED` | `false` | Enable metrics push to backend |
|
||||
| `NB_METRICS_SERVER_URL` | *(from remote config)* | Ingest server URL (e.g., `https://ingest.netbird.io`) |
|
||||
| `NB_METRICS_INTERVAL` | *(from remote config)* | Push interval (e.g., "1m", "30m", "4h") |
|
||||
| `NB_METRICS_FORCE_SENDING` | `false` | Skip remote config, push unconditionally |
|
||||
| `NB_METRICS_CONFIG_URL` | `https://ingest.netbird.io/config` | Remote push config URL |
|
||||
|
||||
`NB_METRICS_SERVER_URL` and `NB_METRICS_INTERVAL` override their respective values but do not bypass remote config eligibility checks (version range). Use `NB_METRICS_FORCE_SENDING=true` to skip all remote config gating.
|
||||
|
||||
### Ingest Server Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `INGEST_LISTEN_ADDR` | `:8087` | Listen address |
|
||||
| `INFLUXDB_URL` | `http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns` | InfluxDB write endpoint |
|
||||
| `INFLUXDB_TOKEN` | *(required)* | InfluxDB auth token (server-side only) |
|
||||
| `CONFIG_METRICS_SERVER_URL` | *(empty — disables /config)* | `server_url` in the remote config JSON (the URL clients push metrics to) |
|
||||
| `CONFIG_VERSION_SINCE` | `0.0.0` | Minimum client version to push metrics |
|
||||
| `CONFIG_VERSION_UNTIL` | `99.99.99` | Maximum client version to push metrics |
|
||||
| `CONFIG_PERIOD_MINUTES` | `5` | Push interval in minutes |
|
||||
|
||||
The ingest server serves a remote config JSON at `GET /config` when `CONFIG_METRICS_SERVER_URL` is set. Clients can use `NB_METRICS_CONFIG_URL=http://<ingest>/config` to fetch it.
|
||||
|
||||
### Configuration Precedence
|
||||
|
||||
For the metrics server URL and push interval, the precedence (sketched below the list) is:
|
||||
1. **Environment variable** - `NB_METRICS_SERVER_URL` / `NB_METRICS_INTERVAL`
|
||||
2. **Remote config** - fetched from `NB_METRICS_CONFIG_URL`
|
||||
3. **Default** - 5 minute interval, URL from remote config
|
||||
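A small sketch of that precedence for the interval, reusing `getMetricsInterval` from `env.go`; the `resolvePushInterval` helper and `remoteInterval` parameter are illustrative only:

```go
package metrics

import "time"

// resolvePushInterval shows the precedence: environment variable,
// then remote config, then the 5 minute default.
func resolvePushInterval(remoteInterval time.Duration) time.Duration {
	if fromEnv := getMetricsInterval(); fromEnv > 0 { // 1. NB_METRICS_INTERVAL
		return fromEnv
	}
	if remoteInterval > 0 { // 2. interval from the remote push config
		return remoteInterval
	}
	return 5 * time.Minute // 3. default
}
```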
|
||||
## Push Behavior
|
||||
|
||||
1. `StartPush()` spawns background goroutine with timer
|
||||
2. First push happens immediately on startup
|
||||
3. Periodically: `push()` → `Export()` → HTTP POST to ingest server
|
||||
4. On failure: log error, continue (non-blocking)
|
||||
5. On success: `Reset()` clears pushed samples
|
||||
6. `StopPush()` cancels context and waits for goroutine
|
||||
|
||||
Samples are collected with exact timestamps, pushed once, then cleared. No data is resent.
|
||||
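A hedged sketch of that loop; the `push` and `reset` callbacks stand in for the real `push()`/`Reset()` plumbing, and the logging mirrors the repo's logrus usage:

```go
package metrics

import (
	"context"
	"time"

	log "github.com/sirupsen/logrus"
)

// runPushLoop is illustrative only: push immediately, then on every tick;
// buffered samples are cleared only after a successful push.
func runPushLoop(ctx context.Context, interval time.Duration, push func(context.Context) error, reset func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	pushOnce := func() {
		if err := push(ctx); err != nil {
			log.Errorf("pushing metrics: %v", err) // keep samples, retry next tick
			return
		}
		reset() // drop the samples that were just sent
	}

	pushOnce() // first push happens immediately on startup

	for {
		select {
		case <-ctx.Done(): // StopPush cancels the context
			return
		case <-ticker.C:
			pushOnce()
		}
	}
}
```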
|
||||
## Local Development Setup
|
||||
|
||||
### 1. Configure and Start Services
|
||||
|
||||
```bash
|
||||
# From this directory (client/internal/metrics/infra)
|
||||
cp .env.example .env
|
||||
# Edit .env to set INFLUXDB_ADMIN_PASSWORD, INFLUXDB_ADMIN_TOKEN, and GRAFANA_ADMIN_PASSWORD
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This starts:
|
||||
- **Ingest server** on http://localhost:8087 — accepts client metrics (requires `X-Peer-ID` header, no secret/token auth)
|
||||
- **InfluxDB** — internal only, not exposed to host
|
||||
- **Grafana** on http://localhost:3001
|
||||
|
||||
### 2. Configure Client
|
||||
|
||||
```bash
|
||||
export NB_METRICS_PUSH_ENABLED=true
|
||||
export NB_METRICS_FORCE_SENDING=true
|
||||
export NB_METRICS_SERVER_URL=http://localhost:8087
|
||||
export NB_METRICS_INTERVAL=1m
|
||||
```
|
||||
|
||||
### 3. Run Client
|
||||
|
||||
```bash
|
||||
cd ../../../..
|
||||
go run ./client/ up
|
||||
```
|
||||
|
||||
### 4. View in Grafana
|
||||
|
||||
- **InfluxDB dashboard:** http://localhost:3001/d/netbird-influxdb-metrics
|
||||
|
||||
### 5. Verify Data
|
||||
|
||||
```bash
|
||||
# Query via InfluxDB (using admin token from .env)
|
||||
docker compose exec influxdb influx query \
|
||||
'from(bucket: "metrics") |> range(start: -1h)' \
|
||||
--org netbird
|
||||
|
||||
# Check ingest server health
|
||||
curl http://localhost:8087/health
|
||||
```
|
||||
69
client/internal/metrics/infra/docker-compose.yml
Normal file
@@ -0,0 +1,69 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
ingest:
|
||||
container_name: ingest
|
||||
build:
|
||||
context: ./ingest
|
||||
ports:
|
||||
- "8087:8087"
|
||||
environment:
|
||||
- INGEST_LISTEN_ADDR=:8087
|
||||
- INFLUXDB_URL=http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns
|
||||
- INFLUXDB_TOKEN=${INFLUXDB_ADMIN_TOKEN:?required}
|
||||
- CONFIG_METRICS_SERVER_URL=${CONFIG_METRICS_SERVER_URL:-}
|
||||
- CONFIG_VERSION_SINCE=${CONFIG_VERSION_SINCE:-0.0.0}
|
||||
- CONFIG_VERSION_UNTIL=${CONFIG_VERSION_UNTIL:-99.99.99}
|
||||
- CONFIG_PERIOD_MINUTES=${CONFIG_PERIOD_MINUTES:-5}
|
||||
depends_on:
|
||||
- influxdb
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- metrics
|
||||
|
||||
influxdb:
|
||||
container_name: influxdb
|
||||
image: influxdb:2
|
||||
# No ports exposed — only accessible within the metrics network
|
||||
volumes:
|
||||
- influxdb-data:/var/lib/influxdb2
|
||||
- ./influxdb/scripts:/docker-entrypoint-initdb.d
|
||||
environment:
|
||||
- DOCKER_INFLUXDB_INIT_MODE=setup
|
||||
- DOCKER_INFLUXDB_INIT_USERNAME=admin
|
||||
- DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_ADMIN_PASSWORD:?required}
|
||||
- DOCKER_INFLUXDB_INIT_ORG=netbird
|
||||
- DOCKER_INFLUXDB_INIT_BUCKET=metrics
|
||||
- DOCKER_INFLUXDB_INIT_RETENTION=365d
|
||||
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_ADMIN_TOKEN:-}
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- metrics
|
||||
|
||||
grafana:
|
||||
container_name: grafana
|
||||
image: grafana/grafana:11.6.0
|
||||
ports:
|
||||
- "3001:3000"
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_USER=${GRAFANA_ADMIN_USER:-admin}
|
||||
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:?required}
|
||||
- GF_USERS_ALLOW_SIGN_UP=false
|
||||
- GF_INSTALL_PLUGINS=
|
||||
- INFLUXDB_ADMIN_TOKEN=${INFLUXDB_ADMIN_TOKEN:-}
|
||||
volumes:
|
||||
- grafana-data:/var/lib/grafana
|
||||
- ./grafana/provisioning:/etc/grafana/provisioning
|
||||
depends_on:
|
||||
- influxdb
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- metrics
|
||||
|
||||
volumes:
|
||||
influxdb-data:
|
||||
grafana-data:
|
||||
|
||||
networks:
|
||||
metrics:
|
||||
driver: bridge
|
||||
@@ -0,0 +1,12 @@
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'NetBird Dashboards'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
disableDeletion: false
|
||||
updateIntervalSeconds: 10
|
||||
allowUiUpdates: true
|
||||
options:
|
||||
path: /etc/grafana/provisioning/dashboards/json
|
||||
@@ -0,0 +1,280 @@
|
||||
{
|
||||
"uid": "netbird-influxdb-metrics",
|
||||
"title": "NetBird Client Metrics (InfluxDB)",
|
||||
"tags": ["netbird", "connections", "influxdb"],
|
||||
"timezone": "browser",
|
||||
"panels": [
|
||||
{
|
||||
"id": 5,
|
||||
"title": "Sync Duration Extremes",
|
||||
"type": "stat",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> min()\n |> set(key: \"_field\", value: \"Min\")",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> max()\n |> set(key: \"_field\", value: \"Max\")",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0
|
||||
}
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"]
|
||||
},
|
||||
"colorMode": "value",
|
||||
"graphMode": "none",
|
||||
"textMode": "auto"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"title": "Total Connection Time Extremes",
|
||||
"type": "stat",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> min()\n |> set(key: \"_field\", value: \"Min\")",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> max()\n |> set(key: \"_field\", value: \"Max\")",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0
|
||||
}
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"]
|
||||
},
|
||||
"colorMode": "value",
|
||||
"graphMode": "none",
|
||||
"textMode": "auto"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Sync Duration",
|
||||
"type": "timeseries",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_sync\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> set(key: \"_field\", value: \"Sync Duration\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0,
|
||||
"custom": {
|
||||
"drawStyle": "points",
|
||||
"pointSize": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"title": "ICE vs Relay",
|
||||
"type": "piechart",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> drop(columns: [\"deployment_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> group(columns: [\"connection_pair_id\"])\n |> last()\n |> group(columns: [\"connection_type\"])\n |> count()",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"]
|
||||
},
|
||||
"pieType": "donut",
|
||||
"tooltip": {
|
||||
"mode": "multi"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"title": "Connection Stage Durations (avg)",
|
||||
"type": "bargauge",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"signaling_to_connection_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> mean()\n |> drop(columns: [\"_start\", \"_stop\", \"_measurement\", \"_time\", \"_field\"])\n |> rename(columns: {_value: \"Avg Signaling to Connection\"})",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"connection_to_wg_handshake_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> mean()\n |> drop(columns: [\"_start\", \"_stop\", \"_measurement\", \"_time\", \"_field\"])\n |> rename(columns: {_value: \"Avg Connection to WG Handshake\"})",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0
|
||||
}
|
||||
},
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"]
|
||||
},
|
||||
"orientation": "horizontal",
|
||||
"displayMode": "gradient"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"title": "Total Connection Time",
|
||||
"type": "timeseries",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_peer_connection\" and r._field == \"total_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"connection_type\", \"attempt_type\", \"version\", \"os\", \"arch\", \"peer_id\", \"connection_pair_id\"])\n |> set(key: \"_field\", value: \"Total Connection Time\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0,
|
||||
"custom": {
|
||||
"drawStyle": "points",
|
||||
"pointSize": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"title": "Login Duration",
|
||||
"type": "timeseries",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 24
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_login\" and r._field == \"duration_seconds\")\n |> map(fn: (r) => ({r with _value: r._value * 1000.0}))\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> set(key: \"_field\", value: \"Login Duration\")",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"unit": "ms",
|
||||
"min": 0,
|
||||
"custom": {
|
||||
"drawStyle": "points",
|
||||
"pointSize": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"title": "Login Success vs Failure",
|
||||
"type": "piechart",
|
||||
"datasource": {
|
||||
"type": "influxdb",
|
||||
"uid": "influxdb"
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 24
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"query": "from(bucket: \"metrics\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"netbird_login\" and r._field == \"duration_seconds\")\n |> drop(columns: [\"deployment_type\", \"version\", \"os\", \"arch\", \"peer_id\"])\n |> group(columns: [\"result\"])\n |> count()",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"options": {
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"]
|
||||
},
|
||||
"pieType": "donut",
|
||||
"tooltip": {
|
||||
"mode": "multi"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"schemaVersion": 27,
|
||||
"version": 2,
|
||||
"refresh": "30s"
|
||||
}
|
||||
@@ -0,0 +1,15 @@
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: InfluxDB
|
||||
uid: influxdb
|
||||
type: influxdb
|
||||
access: proxy
|
||||
url: http://influxdb:8086
|
||||
editable: true
|
||||
jsonData:
|
||||
version: Flux
|
||||
organization: netbird
|
||||
defaultBucket: metrics
|
||||
secureJsonData:
|
||||
token: ${INFLUXDB_ADMIN_TOKEN}
|
||||
25
client/internal/metrics/infra/influxdb/scripts/create-tokens.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
# Creates a scoped InfluxDB read-only token for Grafana.
|
||||
# Clients do not need a token — they push via the ingest server.
|
||||
|
||||
BUCKET_ID=$(influx bucket list --org netbird --name metrics --json | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1)
|
||||
ORG_ID=$(influx org list --name netbird --json | grep -oP '"id"\s*:\s*"\K[^"]+' | head -1)
|
||||
|
||||
if [[ -z "$BUCKET_ID" ]] || [[ -z "$ORG_ID" ]]; then
|
||||
echo "ERROR: Could not determine bucket or org ID" >&2
|
||||
echo "BUCKET_ID=$BUCKET_ID ORG_ID=$ORG_ID" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create read-only token for Grafana
|
||||
READ_TOKEN=$(influx auth create \
|
||||
--org netbird \
|
||||
--read-bucket "$BUCKET_ID" \
|
||||
--description "Grafana read-only token" \
|
||||
--json | grep -oP '"token"\s*:\s*"\K[^"]+' | head -1)
|
||||
|
||||
echo ""
|
||||
echo "============================================"
|
||||
echo "GRAFANA READ-ONLY TOKEN:"
|
||||
echo "$READ_TOKEN"
|
||||
echo "============================================"
|
||||
10
client/internal/metrics/infra/ingest/Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
FROM golang:1.25-alpine AS build
|
||||
WORKDIR /app
|
||||
COPY go.mod main.go ./
|
||||
RUN CGO_ENABLED=0 go build -o ingest .
|
||||
|
||||
FROM alpine:3.20
|
||||
RUN adduser -D -H ingest
|
||||
COPY --from=build /app/ingest /usr/local/bin/ingest
|
||||
USER ingest
|
||||
ENTRYPOINT ["ingest"]
|
||||
11
client/internal/metrics/infra/ingest/go.mod
Normal file
@@ -0,0 +1,11 @@
|
||||
module github.com/netbirdio/netbird/client/internal/metrics/infra/ingest
|
||||
|
||||
go 1.25
|
||||
|
||||
require github.com/stretchr/testify v1.11.1
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
10
client/internal/metrics/infra/ingest/go.sum
Normal file
@@ -0,0 +1,10 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
355
client/internal/metrics/infra/ingest/main.go
Normal file
@@ -0,0 +1,355 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultListenAddr = ":8087"
|
||||
defaultInfluxDBURL = "http://influxdb:8086/api/v2/write?org=netbird&bucket=metrics&precision=ns"
|
||||
maxBodySize = 50 * 1024 * 1024 // 50 MB max request body
|
||||
maxDurationSeconds = 300.0 // reject any duration field > 5 minutes
|
||||
peerIDLength = 16 // truncated SHA-256: 8 bytes = 16 hex chars
|
||||
maxTagValueLength = 64 // reject tag values longer than this
|
||||
)
|
||||
|
||||
type measurementSpec struct {
|
||||
allowedFields map[string]bool
|
||||
allowedTags map[string]bool
|
||||
}
|
||||
|
||||
var allowedMeasurements = map[string]measurementSpec{
|
||||
"netbird_peer_connection": {
|
||||
allowedFields: map[string]bool{
|
||||
"signaling_to_connection_seconds": true,
|
||||
"connection_to_wg_handshake_seconds": true,
|
||||
"total_seconds": true,
|
||||
},
|
||||
allowedTags: map[string]bool{
|
||||
"deployment_type": true,
|
||||
"connection_type": true,
|
||||
"attempt_type": true,
|
||||
"version": true,
|
||||
"os": true,
|
||||
"arch": true,
|
||||
"peer_id": true,
|
||||
"connection_pair_id": true,
|
||||
},
|
||||
},
|
||||
"netbird_sync": {
|
||||
allowedFields: map[string]bool{
|
||||
"duration_seconds": true,
|
||||
},
|
||||
allowedTags: map[string]bool{
|
||||
"deployment_type": true,
|
||||
"version": true,
|
||||
"os": true,
|
||||
"arch": true,
|
||||
"peer_id": true,
|
||||
},
|
||||
},
|
||||
"netbird_login": {
|
||||
allowedFields: map[string]bool{
|
||||
"duration_seconds": true,
|
||||
},
|
||||
allowedTags: map[string]bool{
|
||||
"deployment_type": true,
|
||||
"result": true,
|
||||
"version": true,
|
||||
"os": true,
|
||||
"arch": true,
|
||||
"peer_id": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func main() {
|
||||
listenAddr := envOr("INGEST_LISTEN_ADDR", defaultListenAddr)
|
||||
influxURL := envOr("INFLUXDB_URL", defaultInfluxDBURL)
|
||||
influxToken := os.Getenv("INFLUXDB_TOKEN")
|
||||
|
||||
if influxToken == "" {
|
||||
log.Fatal("INFLUXDB_TOKEN is required")
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
|
||||
http.HandleFunc("/", handleIngest(client, influxURL, influxToken))
|
||||
|
||||
// Build config JSON once at startup from env vars
|
||||
configJSON := buildConfigJSON()
|
||||
if configJSON != nil {
|
||||
log.Printf("serving remote config at /config")
|
||||
}
|
||||
|
||||
http.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
if configJSON == nil {
|
||||
http.Error(w, "config not configured", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write(configJSON) //nolint:errcheck
|
||||
})
|
||||
|
||||
http.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, "ok") //nolint:errcheck
|
||||
})
|
||||
|
||||
log.Printf("ingest server listening on %s, forwarding to %s", listenAddr, influxURL)
|
||||
if err := http.ListenAndServe(listenAddr, nil); err != nil { //nolint:gosec
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func handleIngest(client *http.Client, influxURL, influxToken string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
if err := validateAuth(r); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := readBody(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(body) > maxBodySize {
|
||||
http.Error(w, "body too large", http.StatusRequestEntityTooLarge)
|
||||
return
|
||||
}
|
||||
|
||||
validated, err := validateLineProtocol(body)
|
||||
if err != nil {
|
||||
log.Printf("WARN validation failed from %s: %v", r.RemoteAddr, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
forwardToInflux(w, r, client, influxURL, influxToken, validated)
|
||||
}
|
||||
}
|
||||
|
||||
func forwardToInflux(w http.ResponseWriter, r *http.Request, client *http.Client, influxURL, influxToken string, body []byte) {
|
||||
req, err := http.NewRequestWithContext(r.Context(), http.MethodPost, influxURL, bytes.NewReader(body))
|
||||
if err != nil {
|
||||
log.Printf("ERROR create request: %v", err)
|
||||
http.Error(w, "internal error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
req.Header.Set("Content-Type", "text/plain; charset=utf-8")
|
||||
req.Header.Set("Authorization", "Token "+influxToken)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Printf("ERROR forward to influxdb: %v", err)
|
||||
http.Error(w, "upstream error", http.StatusBadGateway)
|
||||
return
|
||||
}
|
||||
defer func(Body io.ReadCloser) {
|
||||
_ = Body.Close()
|
||||
}(resp.Body)
|
||||
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
io.Copy(w, resp.Body) //nolint:errcheck
|
||||
}
|
||||
|
||||
// validateAuth checks that the X-Peer-ID header contains a valid hashed peer ID.
|
||||
func validateAuth(r *http.Request) error {
|
||||
peerID := r.Header.Get("X-Peer-ID")
|
||||
if peerID == "" {
|
||||
return fmt.Errorf("missing X-Peer-ID header")
|
||||
}
|
||||
if len(peerID) != peerIDLength {
|
||||
return fmt.Errorf("invalid X-Peer-ID header length")
|
||||
}
|
||||
if _, err := hex.DecodeString(peerID); err != nil {
|
||||
return fmt.Errorf("invalid X-Peer-ID header format")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readBody reads the request body, decompressing gzip if Content-Encoding indicates it.
|
||||
func readBody(r *http.Request) ([]byte, error) {
|
||||
reader := io.LimitReader(r.Body, maxBodySize+1)
|
||||
|
||||
if r.Header.Get("Content-Encoding") == "gzip" {
|
||||
gz, err := gzip.NewReader(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid gzip: %w", err)
|
||||
}
|
||||
defer gz.Close()
|
||||
reader = io.LimitReader(gz, maxBodySize+1)
|
||||
}
|
||||
|
||||
return io.ReadAll(reader)
|
||||
}
|
||||
|
||||
// validateLineProtocol parses InfluxDB line protocol lines,
|
||||
// whitelists measurements and fields, and checks value bounds.
|
||||
func validateLineProtocol(body []byte) ([]byte, error) {
|
||||
lines := strings.Split(strings.TrimSpace(string(body)), "\n")
|
||||
var valid []string
|
||||
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := validateLine(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
valid = append(valid, line)
|
||||
}
|
||||
|
||||
if len(valid) == 0 {
|
||||
return nil, fmt.Errorf("no valid lines")
|
||||
}
|
||||
|
||||
return []byte(strings.Join(valid, "\n") + "\n"), nil
|
||||
}
|
||||
|
||||
func validateLine(line string) error {
|
||||
// line protocol: measurement,tag=val,tag=val field=val,field=val timestamp
|
||||
parts := strings.SplitN(line, " ", 3)
|
||||
if len(parts) < 2 {
|
||||
return fmt.Errorf("invalid line protocol: %q", truncate(line, 100))
|
||||
}
|
||||
|
||||
// parts[0] is "measurement,tag=val,tag=val"
|
||||
measurementAndTags := strings.Split(parts[0], ",")
|
||||
measurement := measurementAndTags[0]
|
||||
|
||||
spec, ok := allowedMeasurements[measurement]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown measurement: %q", measurement)
|
||||
}
|
||||
|
||||
// Validate tags (everything after measurement name in parts[0])
|
||||
for _, tagPair := range measurementAndTags[1:] {
|
||||
if err := validateTag(tagPair, measurement, spec.allowedTags); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Validate fields
|
||||
for _, pair := range strings.Split(parts[1], ",") {
|
||||
if err := validateField(pair, measurement, spec.allowedFields); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateTag(pair, measurement string, allowedTags map[string]bool) error {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("invalid tag: %q", pair)
|
||||
}
|
||||
|
||||
tagName := kv[0]
|
||||
if !allowedTags[tagName] {
|
||||
return fmt.Errorf("unknown tag %q in measurement %q", tagName, measurement)
|
||||
}
|
||||
|
||||
if len(kv[1]) > maxTagValueLength {
|
||||
return fmt.Errorf("tag value too long for %q: %d > %d", tagName, len(kv[1]), maxTagValueLength)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateField(pair, measurement string, allowedFields map[string]bool) error {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("invalid field: %q", pair)
|
||||
}
|
||||
|
||||
fieldName := kv[0]
|
||||
if !allowedFields[fieldName] {
|
||||
return fmt.Errorf("unknown field %q in measurement %q", fieldName, measurement)
|
||||
}
|
||||
|
||||
val, err := strconv.ParseFloat(kv[1], 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid field value %q for %q", kv[1], fieldName)
|
||||
}
|
||||
if val < 0 {
|
||||
return fmt.Errorf("negative value for %q: %g", fieldName, val)
|
||||
}
|
||||
if strings.HasSuffix(fieldName, "_seconds") && val > maxDurationSeconds {
|
||||
return fmt.Errorf("%q too large: %g > %g", fieldName, val, maxDurationSeconds)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildConfigJSON builds the remote config JSON from env vars.
|
||||
// Returns nil if required vars are not set.
|
||||
func buildConfigJSON() []byte {
|
||||
serverURL := os.Getenv("CONFIG_METRICS_SERVER_URL")
|
||||
versionSince := envOr("CONFIG_VERSION_SINCE", "0.0.0")
|
||||
versionUntil := envOr("CONFIG_VERSION_UNTIL", "99.99.99")
|
||||
periodMinutes := envOr("CONFIG_PERIOD_MINUTES", "5")
|
||||
|
||||
if serverURL == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
period, err := strconv.Atoi(periodMinutes)
|
||||
if err != nil || period <= 0 {
|
||||
log.Printf("WARN invalid CONFIG_PERIOD_MINUTES: %q, using 5", periodMinutes)
|
||||
period = 5
|
||||
}
|
||||
|
||||
cfg := map[string]any{
|
||||
"server_url": serverURL,
|
||||
"version-since": versionSince,
|
||||
"version-until": versionUntil,
|
||||
"period_minutes": period,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
log.Printf("ERROR failed to marshal config: %v", err)
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func envOr(key, defaultVal string) string {
|
||||
if v := os.Getenv(key); v != "" {
|
||||
return v
|
||||
}
|
||||
return defaultVal
|
||||
}
|
||||
|
||||
func truncate(s string, n int) string {
|
||||
if len(s) <= n {
|
||||
return s
|
||||
}
|
||||
return s[:n] + "..."
|
||||
}
|
||||
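For illustration, a minimal sketch of what a single push to this ingest endpoint could look like, assuming the server is reachable at the default listen address :8087 on localhost. The sample line, peer ID, and version are placeholder values chosen to satisfy the whitelist above (measurement netbird_sync, allowed tags, a duration field under 300 seconds), and the headers mirror what push.go sends.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// One sample in InfluxDB line protocol that passes validateLine for netbird_sync.
	line := "netbird_sync,deployment_type=selfhosted,version=1.0.0,os=linux,arch=amd64,peer_id=abcdef0123456789 duration_seconds=1.5 1234567890\n"

	// Gzip the body, as the client-side push does.
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	if _, err := gz.Write([]byte(line)); err != nil {
		panic(err)
	}
	if err := gz.Close(); err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8087/", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "text/plain; charset=utf-8")
	req.Header.Set("Content-Encoding", "gzip")
	// X-Peer-ID must be exactly 16 hex characters (truncated SHA-256 of the WireGuard public key).
	req.Header.Set("X-Peer-ID", "abcdef0123456789")

	resp, err := (&http.Client{Timeout: 10 * time.Second}).Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("ingest responded with", resp.Status)
}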
124
client/internal/metrics/infra/ingest/main_test.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestValidateLine_ValidPeerConnection(t *testing.T) {
|
||||
line := `netbird_peer_connection,deployment_type=cloud,connection_type=ice,attempt_type=initial,version=1.0.0,os=linux,arch=amd64,peer_id=abcdef0123456789,connection_pair_id=pair1234 signaling_to_connection_seconds=1.5,connection_to_wg_handshake_seconds=0.5,total_seconds=2 1234567890`
|
||||
assert.NoError(t, validateLine(line))
|
||||
}
|
||||
|
||||
func TestValidateLine_ValidSync(t *testing.T) {
|
||||
line := `netbird_sync,deployment_type=selfhosted,version=2.0.0,os=darwin,arch=arm64,peer_id=abcdef0123456789 duration_seconds=1.5 1234567890`
|
||||
assert.NoError(t, validateLine(line))
|
||||
}
|
||||
|
||||
func TestValidateLine_ValidLogin(t *testing.T) {
|
||||
line := `netbird_login,deployment_type=cloud,result=success,version=1.0.0,os=linux,arch=amd64,peer_id=abcdef0123456789 duration_seconds=3.2 1234567890`
|
||||
assert.NoError(t, validateLine(line))
|
||||
}
|
||||
|
||||
func TestValidateLine_UnknownMeasurement(t *testing.T) {
|
||||
line := `unknown_metric,foo=bar value=1 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown measurement")
|
||||
}
|
||||
|
||||
func TestValidateLine_UnknownTag(t *testing.T) {
|
||||
line := `netbird_sync,deployment_type=cloud,evil_tag=injected,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown tag")
|
||||
}
|
||||
|
||||
func TestValidateLine_UnknownField(t *testing.T) {
|
||||
line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc injected_field=1 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "unknown field")
|
||||
}
|
||||
|
||||
func TestValidateLine_NegativeValue(t *testing.T) {
|
||||
line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=-1.5 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "negative")
|
||||
}
|
||||
|
||||
func TestValidateLine_DurationTooLarge(t *testing.T) {
|
||||
line := `netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=999 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "too large")
|
||||
}
|
||||
|
||||
func TestValidateLine_TotalSecondsTooLarge(t *testing.T) {
|
||||
line := `netbird_peer_connection,deployment_type=cloud,connection_type=ice,attempt_type=initial,version=1.0.0,os=linux,arch=amd64,peer_id=abc,connection_pair_id=pair total_seconds=500 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "too large")
|
||||
}
|
||||
|
||||
func TestValidateLine_TagValueTooLong(t *testing.T) {
|
||||
longTag := strings.Repeat("a", maxTagValueLength+1)
|
||||
line := `netbird_sync,deployment_type=` + longTag + `,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890`
|
||||
err := validateLine(line)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "tag value too long")
|
||||
}
|
||||
|
||||
func TestValidateLineProtocol_MultipleLines(t *testing.T) {
|
||||
body := []byte(
|
||||
"netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890\n" +
|
||||
"netbird_login,deployment_type=cloud,result=success,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=2.0 1234567890\n",
|
||||
)
|
||||
validated, err := validateLineProtocol(body)
|
||||
require.NoError(t, err)
|
||||
assert.Contains(t, string(validated), "netbird_sync")
|
||||
assert.Contains(t, string(validated), "netbird_login")
|
||||
}
|
||||
|
||||
func TestValidateLineProtocol_RejectsOnBadLine(t *testing.T) {
|
||||
body := []byte(
|
||||
"netbird_sync,deployment_type=cloud,version=1.0.0,os=linux,arch=amd64,peer_id=abc duration_seconds=1.5 1234567890\n" +
|
||||
"evil_metric,foo=bar value=1 1234567890\n",
|
||||
)
|
||||
_, err := validateLineProtocol(body)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestValidateAuth(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
peerID string
|
||||
wantErr bool
|
||||
}{
|
||||
{"valid hex", "abcdef0123456789", false},
|
||||
{"empty", "", true},
|
||||
{"too short", "abcdef01234567", true},
|
||||
{"too long", "abcdef01234567890", true},
|
||||
{"invalid hex", "ghijklmnopqrstuv", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r, _ := http.NewRequest(http.MethodPost, "/", nil)
|
||||
if tt.peerID != "" {
|
||||
r.Header.Set("X-Peer-ID", tt.peerID)
|
||||
}
|
||||
err := validateAuth(r)
|
||||
if tt.wantErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
224
client/internal/metrics/metrics.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/metrics/remoteconfig"
|
||||
)
|
||||
|
||||
// AgentInfo holds static information about the agent
|
||||
type AgentInfo struct {
|
||||
DeploymentType DeploymentType
|
||||
Version string
|
||||
OS string // runtime.GOOS (linux, darwin, windows, etc.)
|
||||
Arch string // runtime.GOARCH (amd64, arm64, etc.)
|
||||
peerID string // anonymised peer identifier (SHA-256 of WireGuard public key)
|
||||
}
|
||||
|
||||
// peerIDFromPublicKey returns a truncated SHA-256 hash (8 bytes / 16 hex chars) of the given WireGuard public key.
|
||||
func peerIDFromPublicKey(pubKey string) string {
|
||||
hash := sha256.Sum256([]byte(pubKey))
|
||||
return hex.EncodeToString(hash[:8])
|
||||
}
|
||||
|
||||
// connectionPairID returns a deterministic identifier for a connection between two peers.
|
||||
// It sorts the two peer IDs before hashing so the same pair always produces the same ID
|
||||
// regardless of which side computes it.
|
||||
func connectionPairID(peerID1, peerID2 string) string {
|
||||
a, b := peerID1, peerID2
|
||||
if a > b {
|
||||
a, b = b, a
|
||||
}
|
||||
hash := sha256.Sum256([]byte(a + b))
|
||||
return hex.EncodeToString(hash[:8])
|
||||
}
|
||||
|
||||
// metricsImplementation defines the internal interface for metrics implementations
|
||||
type metricsImplementation interface {
|
||||
// RecordConnectionStages records connection stage metrics from timestamps
|
||||
RecordConnectionStages(
|
||||
ctx context.Context,
|
||||
agentInfo AgentInfo,
|
||||
connectionPairID string,
|
||||
connectionType ConnectionType,
|
||||
isReconnection bool,
|
||||
timestamps ConnectionStageTimestamps,
|
||||
)
|
||||
|
||||
// RecordSyncDuration records how long it took to process a sync message
|
||||
RecordSyncDuration(ctx context.Context, agentInfo AgentInfo, duration time.Duration)
|
||||
|
||||
// RecordLoginDuration records how long the login to management took
|
||||
RecordLoginDuration(ctx context.Context, agentInfo AgentInfo, duration time.Duration, success bool)
|
||||
|
||||
// Export exports metrics in InfluxDB line protocol format
|
||||
Export(w io.Writer) error
|
||||
|
||||
// Reset clears all collected metrics
|
||||
Reset()
|
||||
}
|
||||
|
||||
type ClientMetrics struct {
|
||||
impl metricsImplementation
|
||||
|
||||
agentInfo AgentInfo
|
||||
mu sync.RWMutex
|
||||
|
||||
push *Push
|
||||
pushMu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
pushCancel context.CancelFunc
|
||||
}
|
||||
|
||||
// ConnectionStageTimestamps holds timestamps for each connection stage
|
||||
type ConnectionStageTimestamps struct {
|
||||
SignalingReceived time.Time // First signal received from remote peer (both initial and reconnection)
|
||||
ConnectionReady time.Time
|
||||
WgHandshakeSuccess time.Time
|
||||
}
|
||||
|
||||
// String returns a human-readable representation of the connection stage timestamps
|
||||
func (c ConnectionStageTimestamps) String() string {
|
||||
return fmt.Sprintf("ConnectionStageTimestamps{SignalingReceived=%v, ConnectionReady=%v, WgHandshakeSuccess=%v}",
|
||||
c.SignalingReceived.Format(time.RFC3339Nano),
|
||||
c.ConnectionReady.Format(time.RFC3339Nano),
|
||||
c.WgHandshakeSuccess.Format(time.RFC3339Nano),
|
||||
)
|
||||
}
|
||||
|
||||
// RecordConnectionStages calculates stage durations from timestamps and records them.
|
||||
// remotePubKey is the remote peer's WireGuard public key; it will be hashed for anonymisation.
|
||||
func (c *ClientMetrics) RecordConnectionStages(
|
||||
ctx context.Context,
|
||||
remotePubKey string,
|
||||
connectionType ConnectionType,
|
||||
isReconnection bool,
|
||||
timestamps ConnectionStageTimestamps,
|
||||
) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.mu.RLock()
|
||||
agentInfo := c.agentInfo
|
||||
c.mu.RUnlock()
|
||||
|
||||
remotePeerID := peerIDFromPublicKey(remotePubKey)
|
||||
pairID := connectionPairID(agentInfo.peerID, remotePeerID)
|
||||
c.impl.RecordConnectionStages(ctx, agentInfo, pairID, connectionType, isReconnection, timestamps)
|
||||
}
|
||||
|
||||
// RecordSyncDuration records the duration of sync message processing
|
||||
func (c *ClientMetrics) RecordSyncDuration(ctx context.Context, duration time.Duration) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.mu.RLock()
|
||||
agentInfo := c.agentInfo
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.impl.RecordSyncDuration(ctx, agentInfo, duration)
|
||||
}
|
||||
|
||||
// RecordLoginDuration records how long the login to management server took
|
||||
func (c *ClientMetrics) RecordLoginDuration(ctx context.Context, duration time.Duration, success bool) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.mu.RLock()
|
||||
agentInfo := c.agentInfo
|
||||
c.mu.RUnlock()
|
||||
|
||||
c.impl.RecordLoginDuration(ctx, agentInfo, duration, success)
|
||||
}
|
||||
|
||||
// UpdateAgentInfo updates the agent information (e.g., when switching profiles).
|
||||
// publicKey is the WireGuard public key; it will be hashed for anonymisation.
|
||||
func (c *ClientMetrics) UpdateAgentInfo(agentInfo AgentInfo, publicKey string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
agentInfo.peerID = peerIDFromPublicKey(publicKey)
|
||||
|
||||
c.mu.Lock()
|
||||
c.agentInfo = agentInfo
|
||||
c.mu.Unlock()
|
||||
|
||||
c.pushMu.Lock()
|
||||
push := c.push
|
||||
c.pushMu.Unlock()
|
||||
if push != nil {
|
||||
push.SetPeerID(agentInfo.peerID)
|
||||
}
|
||||
}
|
||||
|
||||
// Export exports metrics to the writer
|
||||
func (c *ClientMetrics) Export(w io.Writer) error {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.impl.Export(w)
|
||||
}
|
||||
|
||||
// StartPush starts periodic pushing of metrics with the given configuration
|
||||
// Precedence: PushConfig.ServerAddress > remote config server_url
|
||||
func (c *ClientMetrics) StartPush(ctx context.Context, config PushConfig) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.pushMu.Lock()
|
||||
defer c.pushMu.Unlock()
|
||||
|
||||
if c.push != nil {
|
||||
log.Warnf("metrics push already running")
|
||||
return
|
||||
}
|
||||
|
||||
c.mu.RLock()
|
||||
agentVersion := c.agentInfo.Version
|
||||
peerID := c.agentInfo.peerID
|
||||
c.mu.RUnlock()
|
||||
|
||||
configManager := remoteconfig.NewManager(getMetricsConfigURL(), remoteconfig.DefaultMinRefreshInterval)
|
||||
push, err := NewPush(c.impl, configManager, config, agentVersion)
|
||||
if err != nil {
|
||||
log.Errorf("failed to create metrics push: %v", err)
|
||||
return
|
||||
}
|
||||
push.SetPeerID(peerID)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
c.pushCancel = cancel
|
||||
|
||||
c.wg.Add(1)
|
||||
go func() {
|
||||
defer c.wg.Done()
|
||||
push.Start(ctx)
|
||||
}()
|
||||
c.push = push
|
||||
}
|
||||
|
||||
func (c *ClientMetrics) StopPush() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.pushMu.Lock()
|
||||
defer c.pushMu.Unlock()
|
||||
if c.push == nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.pushCancel()
|
||||
c.wg.Wait()
|
||||
c.push = nil
|
||||
}
|
||||
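As a rough in-repo wiring sketch (not part of this change; the version string, public key, and durations are placeholders, and the package is internal so it is only importable from within the repository), the ClientMetrics API introduced above could be exercised roughly like this:

package main

import (
	"context"
	"runtime"
	"time"

	"github.com/netbirdio/netbird/client/internal/metrics"
)

func main() {
	ctx := context.Background()

	info := metrics.AgentInfo{
		Version: "0.30.0", // placeholder agent version
		OS:      runtime.GOOS,
		Arch:    runtime.GOARCH,
	}

	// Returns nil on js/wasm builds; all methods are nil-safe.
	cm := metrics.NewClientMetrics(info)

	// The WireGuard public key is hashed (truncated SHA-256) before use as peer_id.
	cm.UpdateAgentInfo(info, "examplePublicKeyPlaceholder=")

	cm.RecordLoginDuration(ctx, 1200*time.Millisecond, true)
	cm.RecordSyncDuration(ctx, 250*time.Millisecond)

	cm.StartPush(ctx, metrics.PushConfigFromEnv())
	defer cm.StopPush()
}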
11
client/internal/metrics/metrics_default.go
Normal file
@@ -0,0 +1,11 @@
|
||||
//go:build !js
|
||||
|
||||
package metrics
|
||||
|
||||
// NewClientMetrics creates a new ClientMetrics instance
|
||||
func NewClientMetrics(agentInfo AgentInfo) *ClientMetrics {
|
||||
return &ClientMetrics{
|
||||
impl: newInfluxDBMetrics(),
|
||||
agentInfo: agentInfo,
|
||||
}
|
||||
}
|
||||
8
client/internal/metrics/metrics_js.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build js
|
||||
|
||||
package metrics
|
||||
|
||||
// NewClientMetrics returns nil on WASM builds — all ClientMetrics methods are nil-safe.
|
||||
func NewClientMetrics(AgentInfo) *ClientMetrics {
|
||||
return nil
|
||||
}
|
||||
289
client/internal/metrics/push.go
Normal file
@@ -0,0 +1,289 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/metrics/remoteconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultPushInterval is the default interval for pushing metrics
|
||||
defaultPushInterval = 5 * time.Minute
|
||||
)
|
||||
|
||||
// defaultMetricsServerURL is used as fallback when NB_METRICS_FORCE_SENDING is true
|
||||
var defaultMetricsServerURL *url.URL
|
||||
|
||||
func init() {
|
||||
defaultMetricsServerURL, _ = url.Parse("https://ingest.netbird.io")
|
||||
}
|
||||
|
||||
// PushConfig holds configuration for metrics push
|
||||
type PushConfig struct {
|
||||
// ServerAddress is the metrics server URL. If nil, uses remote config server_url.
|
||||
ServerAddress *url.URL
|
||||
// Interval is how often to push metrics. If 0, uses remote config interval or defaultPushInterval.
|
||||
Interval time.Duration
|
||||
// ForceSending skips remote configuration fetch and version checks, pushing unconditionally.
|
||||
ForceSending bool
|
||||
}
|
||||
|
||||
// PushConfigFromEnv builds a PushConfig from environment variables.
|
||||
func PushConfigFromEnv() PushConfig {
|
||||
config := PushConfig{}
|
||||
|
||||
config.ForceSending = isForceSending()
|
||||
config.ServerAddress = getMetricsServerURL()
|
||||
config.Interval = getMetricsInterval()
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
// remoteConfigProvider abstracts remote push config fetching for testability
|
||||
type remoteConfigProvider interface {
|
||||
RefreshIfNeeded(ctx context.Context) *remoteconfig.Config
|
||||
}
|
||||
|
||||
// Push handles periodic pushing of metrics
|
||||
type Push struct {
|
||||
metrics metricsImplementation
|
||||
configManager remoteConfigProvider
|
||||
agentVersion *goversion.Version
|
||||
|
||||
peerID string
|
||||
peerMu sync.RWMutex
|
||||
|
||||
client *http.Client
|
||||
cfgForceSending bool
|
||||
cfgInterval time.Duration
|
||||
cfgAddress *url.URL
|
||||
}
|
||||
|
||||
// NewPush creates a new Push instance with configuration resolution
|
||||
func NewPush(metrics metricsImplementation, configManager remoteConfigProvider, config PushConfig, agentVersion string) (*Push, error) {
|
||||
var cfgInterval time.Duration
|
||||
var cfgAddress *url.URL
|
||||
|
||||
if config.ForceSending {
|
||||
cfgInterval = config.Interval
|
||||
if config.Interval <= 0 {
|
||||
cfgInterval = defaultPushInterval
|
||||
}
|
||||
|
||||
cfgAddress = config.ServerAddress
|
||||
if cfgAddress == nil {
|
||||
cfgAddress = defaultMetricsServerURL
|
||||
}
|
||||
} else {
|
||||
cfgAddress = config.ServerAddress
|
||||
|
||||
if config.Interval < 0 {
|
||||
log.Warnf("negative metrics push interval %s", config.Interval)
|
||||
} else {
|
||||
cfgInterval = config.Interval
|
||||
}
|
||||
}
|
||||
|
||||
parsedVersion, err := goversion.NewVersion(agentVersion)
|
||||
if err != nil {
|
||||
if !config.ForceSending {
|
||||
return nil, fmt.Errorf("parse agent version %q: %w", agentVersion, err)
|
||||
}
|
||||
}
|
||||
|
||||
return &Push{
|
||||
metrics: metrics,
|
||||
configManager: configManager,
|
||||
agentVersion: parsedVersion,
|
||||
cfgForceSending: config.ForceSending,
|
||||
cfgInterval: cfgInterval,
|
||||
cfgAddress: cfgAddress,
|
||||
client: &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetPeerID updates the hashed peer ID used for the Authorization header.
|
||||
func (p *Push) SetPeerID(peerID string) {
|
||||
p.peerMu.Lock()
|
||||
p.peerID = peerID
|
||||
p.peerMu.Unlock()
|
||||
}
|
||||
|
||||
// Start starts the periodic push loop.
|
||||
// The env interval override controls tick frequency but does not bypass remote config
|
||||
// version gating. Use ForceSending to skip remote config entirely.
|
||||
func (p *Push) Start(ctx context.Context) {
|
||||
// Log initial state
|
||||
switch {
|
||||
case p.cfgForceSending:
|
||||
log.Infof("started metrics push with force sending to %s, interval %s", p.cfgAddress, p.cfgInterval)
|
||||
case p.cfgAddress != nil:
|
||||
log.Infof("started metrics push with server URL override: %s", p.cfgAddress.String())
|
||||
default:
|
||||
log.Infof("started metrics push, server URL will be resolved from remote config")
|
||||
}
|
||||
|
||||
timer := time.NewTimer(0) // fire immediately on first iteration
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Debug("stopping metrics push")
|
||||
return
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
pushURL, interval := p.resolve(ctx)
|
||||
if pushURL != "" {
|
||||
if err := p.push(ctx, pushURL); err != nil {
|
||||
log.Errorf("failed to push metrics: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if interval <= 0 {
|
||||
interval = defaultPushInterval
|
||||
}
|
||||
timer.Reset(interval)
|
||||
}
|
||||
}
|
||||
|
||||
// resolve returns the push URL and interval for the next cycle.
|
||||
// Returns empty pushURL to skip this cycle.
|
||||
func (p *Push) resolve(ctx context.Context) (pushURL string, interval time.Duration) {
|
||||
if p.cfgForceSending {
|
||||
return p.resolveServerURL(nil), p.cfgInterval
|
||||
}
|
||||
|
||||
config := p.configManager.RefreshIfNeeded(ctx)
|
||||
if config == nil {
|
||||
log.Debug("no metrics push config available, waiting to retry")
|
||||
return "", defaultPushInterval
|
||||
}
|
||||
|
||||
// prefer the env variable interval override over the remote config interval
|
||||
if p.cfgInterval > 0 {
|
||||
interval = p.cfgInterval
|
||||
} else {
|
||||
interval = config.Interval
|
||||
}
|
||||
|
||||
if !isVersionInRange(p.agentVersion, config.VersionSince, config.VersionUntil) {
|
||||
log.Debugf("agent version %s not in range [%s, %s), skipping metrics push",
|
||||
p.agentVersion, config.VersionSince, config.VersionUntil)
|
||||
return "", interval
|
||||
}
|
||||
|
||||
pushURL = p.resolveServerURL(&config.ServerURL)
|
||||
if pushURL == "" {
|
||||
log.Warn("no metrics server URL available, skipping push")
|
||||
}
|
||||
return pushURL, interval
|
||||
}
|
||||
|
||||
// push exports metrics and sends them to the metrics server
|
||||
func (p *Push) push(ctx context.Context, pushURL string) error {
|
||||
// Export metrics without clearing
|
||||
var buf bytes.Buffer
|
||||
if err := p.metrics.Export(&buf); err != nil {
|
||||
return fmt.Errorf("export metrics: %w", err)
|
||||
}
|
||||
|
||||
// Don't push if there are no metrics
|
||||
if buf.Len() == 0 {
|
||||
log.Tracef("no metrics to push")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gzip compress the body
|
||||
compressed, err := gzipCompress(buf.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("gzip compress: %w", err)
|
||||
}
|
||||
|
||||
// Create HTTP request
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", pushURL, compressed)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "text/plain; charset=utf-8")
|
||||
req.Header.Set("Content-Encoding", "gzip")
|
||||
|
||||
p.peerMu.RLock()
|
||||
peerID := p.peerID
|
||||
p.peerMu.RUnlock()
|
||||
if peerID != "" {
|
||||
req.Header.Set("X-Peer-ID", peerID)
|
||||
}
|
||||
|
||||
// Send request
|
||||
resp, err := p.client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("send request: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if resp.Body == nil {
|
||||
return
|
||||
}
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
log.Warnf("failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Check response status
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return fmt.Errorf("push failed with status %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
log.Debugf("successfully pushed metrics to %s", pushURL)
|
||||
p.metrics.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveServerURL determines the push URL.
|
||||
// Precedence: configured ServerAddress override (env var) > remote config server_url
|
||||
func (p *Push) resolveServerURL(remoteServerURL *url.URL) string {
|
||||
var baseURL *url.URL
|
||||
if p.cfgAddress != nil {
|
||||
baseURL = p.cfgAddress
|
||||
} else {
|
||||
baseURL = remoteServerURL
|
||||
}
|
||||
|
||||
if baseURL == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return baseURL.String()
|
||||
}
|
||||
|
||||
// gzipCompress compresses data using gzip and returns the compressed buffer.
|
||||
func gzipCompress(data []byte) (*bytes.Buffer, error) {
|
||||
var buf bytes.Buffer
|
||||
gz := gzip.NewWriter(&buf)
|
||||
if _, err := gz.Write(data); err != nil {
|
||||
_ = gz.Close()
|
||||
return nil, err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &buf, nil
|
||||
}
|
||||
|
||||
// isVersionInRange checks if current falls within [since, until)
|
||||
func isVersionInRange(current, since, until *goversion.Version) bool {
|
||||
return !current.LessThan(since) && current.LessThan(until)
|
||||
}
|
||||
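The half-open [since, until) semantics of isVersionInRange can be shown with go-version directly; this standalone snippet repeats the same comparison expression with placeholder versions and matches the expectations in the table-driven test below.

package main

import (
	"fmt"

	goversion "github.com/hashicorp/go-version"
)

func main() {
	since, _ := goversion.NewVersion("1.2.2")
	until, _ := goversion.NewVersion("1.2.3")

	for _, s := range []string{"1.2.1", "1.2.2", "1.2.3"} {
		cur, _ := goversion.NewVersion(s)
		// Same expression as isVersionInRange: lower bound inclusive, upper bound exclusive.
		inRange := !cur.LessThan(since) && cur.LessThan(until)
		fmt.Printf("%s in [%s, %s): %v\n", s, since, until, inRange)
	}
	// Prints: 1.2.1 false, 1.2.2 true, 1.2.3 false
}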
343
client/internal/metrics/push_test.go
Normal file
@@ -0,0 +1,343 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/metrics/remoteconfig"
|
||||
)
|
||||
|
||||
func mustVersion(s string) *goversion.Version {
|
||||
v, err := goversion.NewVersion(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func mustURL(s string) url.URL {
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return *u
|
||||
}
|
||||
|
||||
func parseURL(s string) *url.URL {
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func testConfig(serverURL, since, until string, period time.Duration) *remoteconfig.Config {
|
||||
return &remoteconfig.Config{
|
||||
ServerURL: mustURL(serverURL),
|
||||
VersionSince: mustVersion(since),
|
||||
VersionUntil: mustVersion(until),
|
||||
Interval: period,
|
||||
}
|
||||
}
|
||||
|
||||
// mockConfigProvider implements remoteConfigProvider for testing
|
||||
type mockConfigProvider struct {
|
||||
config *remoteconfig.Config
|
||||
}
|
||||
|
||||
func (m *mockConfigProvider) RefreshIfNeeded(_ context.Context) *remoteconfig.Config {
|
||||
return m.config
|
||||
}
|
||||
|
||||
// mockMetrics implements metricsImplementation for testing
|
||||
type mockMetrics struct {
|
||||
exportData string
|
||||
}
|
||||
|
||||
func (m *mockMetrics) RecordConnectionStages(_ context.Context, _ AgentInfo, _ string, _ ConnectionType, _ bool, _ ConnectionStageTimestamps) {
|
||||
}
|
||||
|
||||
func (m *mockMetrics) RecordSyncDuration(_ context.Context, _ AgentInfo, _ time.Duration) {
|
||||
}
|
||||
|
||||
func (m *mockMetrics) RecordLoginDuration(_ context.Context, _ AgentInfo, _ time.Duration, _ bool) {
|
||||
}
|
||||
|
||||
func (m *mockMetrics) Export(w io.Writer) error {
|
||||
if m.exportData != "" {
|
||||
_, err := w.Write([]byte(m.exportData))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockMetrics) Reset() {
|
||||
}
|
||||
|
||||
func TestPush_OverrideIntervalPushes(t *testing.T) {
|
||||
var pushCount atomic.Int32
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
pushCount.Add(1)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
Interval: 50 * time.Millisecond,
|
||||
ServerAddress: parseURL(server.URL),
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
push.Start(ctx)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return pushCount.Load() >= 3
|
||||
}, 2*time.Second, 10*time.Millisecond)
|
||||
|
||||
cancel()
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestPush_RemoteConfigVersionInRange(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 1*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{}, "1.5.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.NotEmpty(t, pushURL)
|
||||
assert.Equal(t, 1*time.Minute, interval)
|
||||
}
|
||||
|
||||
func TestPush_RemoteConfigVersionOutOfRange(t *testing.T) {
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig("http://localhost", "1.0.0", "1.5.0", 1*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{}, "2.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Empty(t, pushURL)
|
||||
assert.Equal(t, 1*time.Minute, interval)
|
||||
}
|
||||
|
||||
func TestPush_NoConfigReturnsDefault(t *testing.T) {
|
||||
metrics := &mockMetrics{}
|
||||
configProvider := &mockConfigProvider{config: nil}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Empty(t, pushURL)
|
||||
assert.Equal(t, defaultPushInterval, interval)
|
||||
}
|
||||
|
||||
func TestPush_OverrideIntervalRespectsVersionCheck(t *testing.T) {
|
||||
metrics := &mockMetrics{}
|
||||
configProvider := &mockConfigProvider{config: testConfig("http://localhost", "3.0.0", "4.0.0", 60*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
Interval: 30 * time.Second,
|
||||
ServerAddress: parseURL("http://localhost"),
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Empty(t, pushURL) // version out of range
|
||||
assert.Equal(t, 30*time.Second, interval) // but uses override interval
|
||||
}
|
||||
|
||||
func TestPush_OverrideIntervalUsedWhenVersionInRange(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{}
|
||||
configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
Interval: 30 * time.Second,
|
||||
}, "1.5.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.NotEmpty(t, pushURL)
|
||||
assert.Equal(t, 30*time.Second, interval)
|
||||
}
|
||||
|
||||
func TestPush_NoMetricsSkipsPush(t *testing.T) {
|
||||
var pushCount atomic.Int32
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
pushCount.Add(1)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: ""} // no metrics to export
|
||||
configProvider := &mockConfigProvider{config: nil}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = push.push(context.Background(), server.URL)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int32(0), pushCount.Load())
|
||||
}
|
||||
|
||||
func TestPush_ServerURLFromRemoteConfig(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 1*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{}, "1.5.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Contains(t, pushURL, server.URL)
|
||||
assert.Equal(t, 1*time.Minute, interval)
|
||||
}
|
||||
|
||||
func TestPush_ServerAddressOverridesTakePrecedenceOverRemoteConfig(t *testing.T) {
|
||||
overrideServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer overrideServer.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig("http://remote-config-server", "1.0.0", "2.0.0", 1*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
ServerAddress: parseURL(overrideServer.URL),
|
||||
}, "1.5.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, _ := push.resolve(context.Background())
|
||||
assert.Contains(t, pushURL, overrideServer.URL)
|
||||
assert.NotContains(t, pushURL, "remote-config-server")
|
||||
}
|
||||
|
||||
func TestPush_OverrideIntervalWithoutOverrideURL_UsesRemoteConfigURL(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: testConfig(server.URL, "1.0.0", "2.0.0", 60*time.Minute)}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
Interval: 30 * time.Second,
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Contains(t, pushURL, server.URL)
|
||||
assert.Equal(t, 30*time.Second, interval)
|
||||
}
|
||||
|
||||
func TestPush_NoConfigSkipsPush(t *testing.T) {
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: nil}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
Interval: 30 * time.Second,
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.Empty(t, pushURL)
|
||||
assert.Equal(t, defaultPushInterval, interval) // no config available, use default retry interval
|
||||
}
|
||||
|
||||
func TestPush_ForceSendingSkipsRemoteConfig(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: nil}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
ForceSending: true,
|
||||
Interval: 1 * time.Minute,
|
||||
ServerAddress: parseURL(server.URL),
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.NotEmpty(t, pushURL)
|
||||
assert.Equal(t, 1*time.Minute, interval)
|
||||
}
|
||||
|
||||
func TestPush_ForceSendingUsesDefaultInterval(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
metrics := &mockMetrics{exportData: "test_metric 1\n"}
|
||||
configProvider := &mockConfigProvider{config: nil}
|
||||
|
||||
push, err := NewPush(metrics, configProvider, PushConfig{
|
||||
ForceSending: true,
|
||||
ServerAddress: parseURL(server.URL),
|
||||
}, "1.0.0")
|
||||
require.NoError(t, err)
|
||||
|
||||
pushURL, interval := push.resolve(context.Background())
|
||||
assert.NotEmpty(t, pushURL)
|
||||
assert.Equal(t, defaultPushInterval, interval)
|
||||
}
|
||||
|
||||
func TestIsVersionInRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
current string
|
||||
since string
|
||||
until string
|
||||
expected bool
|
||||
}{
|
||||
{"at lower bound inclusive", "1.2.2", "1.2.2", "1.2.3", true},
|
||||
{"in range", "1.2.2", "1.2.0", "1.3.0", true},
|
||||
{"at upper bound exclusive", "1.2.3", "1.2.2", "1.2.3", false},
|
||||
{"below range", "1.2.1", "1.2.2", "1.2.3", false},
|
||||
{"above range", "1.3.0", "1.2.2", "1.2.3", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.expected, isVersionInRange(mustVersion(tt.current), mustVersion(tt.since), mustVersion(tt.until)))
|
||||
})
|
||||
}
|
||||
}
|
||||
149
client/internal/metrics/remoteconfig/manager.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package remoteconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
	"time"

	goversion "github.com/hashicorp/go-version"
	log "github.com/sirupsen/logrus"
)

const (
	DefaultMinRefreshInterval = 30 * time.Minute
)

// Config holds the parsed remote push configuration
type Config struct {
	ServerURL    url.URL
	VersionSince *goversion.Version
	VersionUntil *goversion.Version
	Interval     time.Duration
}

// rawConfig is the JSON wire format fetched from the remote server
type rawConfig struct {
	ServerURL     string `json:"server_url"`
	VersionSince  string `json:"version-since"`
	VersionUntil  string `json:"version-until"`
	PeriodMinutes int    `json:"period_minutes"`
}

// Manager handles fetching and caching remote push configuration
type Manager struct {
	configURL          string
	minRefreshInterval time.Duration
	client             *http.Client

	mu          sync.Mutex
	lastConfig  *Config
	lastFetched time.Time
}

func NewManager(configURL string, minRefreshInterval time.Duration) *Manager {
	return &Manager{
		configURL:          configURL,
		minRefreshInterval: minRefreshInterval,
		client: &http.Client{
			Timeout: 10 * time.Second,
		},
	}
}

// RefreshIfNeeded fetches new config if the cached one is stale.
// Returns the current config (possibly just fetched) or nil if unavailable.
func (m *Manager) RefreshIfNeeded(ctx context.Context) *Config {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.isConfigFresh() {
		return m.lastConfig
	}

	fetchedConfig, err := m.fetch(ctx)
	m.lastFetched = time.Now()
	if err != nil {
		log.Warnf("failed to fetch metrics remote config: %v", err)
		return m.lastConfig // return cached (may be nil)
	}

	m.lastConfig = fetchedConfig

	log.Tracef("fetched metrics remote config: version-since=%s version-until=%s period=%s",
		fetchedConfig.VersionSince, fetchedConfig.VersionUntil, fetchedConfig.Interval)

	return fetchedConfig
}

func (m *Manager) isConfigFresh() bool {
	if m.lastConfig == nil {
		return false
	}
	return time.Since(m.lastFetched) < m.minRefreshInterval
}

func (m *Manager) fetch(ctx context.Context) (*Config, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, m.configURL, nil)
	if err != nil {
		return nil, fmt.Errorf("create request: %w", err)
	}

	resp, err := m.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("send request: %w", err)
	}
	defer func() {
		if resp.Body != nil {
			_ = resp.Body.Close()
		}
	}()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	body, err := io.ReadAll(io.LimitReader(resp.Body, 4096))
	if err != nil {
		return nil, fmt.Errorf("read body: %w", err)
	}

	var raw rawConfig
	if err := json.Unmarshal(body, &raw); err != nil {
		return nil, fmt.Errorf("parse config: %w", err)
	}

	if raw.PeriodMinutes <= 0 {
		return nil, fmt.Errorf("invalid period_minutes: %d", raw.PeriodMinutes)
	}

	if raw.ServerURL == "" {
		return nil, fmt.Errorf("server_url is required")
	}

	serverURL, err := url.Parse(raw.ServerURL)
	if err != nil {
		return nil, fmt.Errorf("parse server_url %q: %w", raw.ServerURL, err)
	}

	since, err := goversion.NewVersion(raw.VersionSince)
	if err != nil {
		return nil, fmt.Errorf("parse version-since %q: %w", raw.VersionSince, err)
	}

	until, err := goversion.NewVersion(raw.VersionUntil)
	if err != nil {
		return nil, fmt.Errorf("parse version-until %q: %w", raw.VersionUntil, err)
	}

	return &Config{
		ServerURL:    *serverURL,
		VersionSince: since,
		VersionUntil: until,
		Interval:     time.Duration(raw.PeriodMinutes) * time.Minute,
	}, nil
}
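A minimal sketch of how a caller might consume this manager, assuming a periodic loop and a client version check; the ticker, function name, and log line below are illustrative assumptions and are not part of this change.

// Example (illustrative only): periodically refresh the remote push configuration
// and decide whether metrics should be pushed for the running client version.
func runPushLoop(ctx context.Context, mgr *Manager, clientVersion *goversion.Version) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			cfg := mgr.RefreshIfNeeded(ctx)
			if cfg == nil {
				continue // no config available yet; try again later
			}
			// Push only when the running version falls inside the configured range.
			if clientVersion.LessThan(cfg.VersionSince) || clientVersion.GreaterThan(cfg.VersionUntil) {
				continue
			}
			log.Tracef("would push metrics to %s every %s", cfg.ServerURL.String(), cfg.Interval)
		}
	}
}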
197
client/internal/metrics/remoteconfig/manager_test.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package remoteconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const testMinRefresh = 100 * time.Millisecond
|
||||
|
||||
func TestManager_FetchSuccess(t *testing.T) {
|
||||
server := newConfigServer(t, rawConfig{
|
||||
ServerURL: "https://ingest.example.com",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: 60,
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
config := mgr.RefreshIfNeeded(context.Background())
|
||||
|
||||
require.NotNil(t, config)
|
||||
assert.Equal(t, "https://ingest.example.com", config.ServerURL.String())
|
||||
assert.Equal(t, "1.0.0", config.VersionSince.String())
|
||||
assert.Equal(t, "2.0.0", config.VersionUntil.String())
|
||||
assert.Equal(t, 60*time.Minute, config.Interval)
|
||||
}
|
||||
|
||||
func TestManager_CachesConfig(t *testing.T) {
|
||||
var fetchCount atomic.Int32
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fetchCount.Add(1)
|
||||
err := json.NewEncoder(w).Encode(rawConfig{
|
||||
ServerURL: "https://ingest.example.com",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: 60,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
|
||||
// First call fetches
|
||||
config1 := mgr.RefreshIfNeeded(context.Background())
|
||||
require.NotNil(t, config1)
|
||||
assert.Equal(t, int32(1), fetchCount.Load())
|
||||
|
||||
// Second call uses cache (within minRefreshInterval)
|
||||
config2 := mgr.RefreshIfNeeded(context.Background())
|
||||
require.NotNil(t, config2)
|
||||
assert.Equal(t, int32(1), fetchCount.Load())
|
||||
assert.Equal(t, config1, config2)
|
||||
}
|
||||
|
||||
func TestManager_RefetchesWhenStale(t *testing.T) {
|
||||
var fetchCount atomic.Int32
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fetchCount.Add(1)
|
||||
err := json.NewEncoder(w).Encode(rawConfig{
|
||||
ServerURL: "https://ingest.example.com",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: 60,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
|
||||
// First fetch
|
||||
mgr.RefreshIfNeeded(context.Background())
|
||||
assert.Equal(t, int32(1), fetchCount.Load())
|
||||
|
||||
// Wait for config to become stale
|
||||
time.Sleep(testMinRefresh + 10*time.Millisecond)
|
||||
|
||||
// Should refetch
|
||||
mgr.RefreshIfNeeded(context.Background())
|
||||
assert.Equal(t, int32(2), fetchCount.Load())
|
||||
}
|
||||
|
||||
func TestManager_FetchFailureReturnsNil(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
config := mgr.RefreshIfNeeded(context.Background())
|
||||
|
||||
assert.Nil(t, config)
|
||||
}
|
||||
|
||||
func TestManager_FetchFailureReturnsCached(t *testing.T) {
|
||||
var fetchCount atomic.Int32
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fetchCount.Add(1)
|
||||
if fetchCount.Load() > 1 {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err := json.NewEncoder(w).Encode(rawConfig{
|
||||
ServerURL: "https://ingest.example.com",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: 60,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
|
||||
// First call succeeds
|
||||
config1 := mgr.RefreshIfNeeded(context.Background())
|
||||
require.NotNil(t, config1)
|
||||
|
||||
// Wait for config to become stale
|
||||
time.Sleep(testMinRefresh + 10*time.Millisecond)
|
||||
|
||||
// Second call fails but returns cached
|
||||
config2 := mgr.RefreshIfNeeded(context.Background())
|
||||
require.NotNil(t, config2)
|
||||
assert.Equal(t, config1, config2)
|
||||
}
|
||||
|
||||
func TestManager_RejectsInvalidPeriod(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
period int
|
||||
}{
|
||||
{"zero", 0},
|
||||
{"negative", -5},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
server := newConfigServer(t, rawConfig{
|
||||
ServerURL: "https://ingest.example.com",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: tt.period,
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
config := mgr.RefreshIfNeeded(context.Background())
|
||||
assert.Nil(t, config)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestManager_RejectsEmptyServerURL(t *testing.T) {
|
||||
server := newConfigServer(t, rawConfig{
|
||||
ServerURL: "",
|
||||
VersionSince: "1.0.0",
|
||||
VersionUntil: "2.0.0",
|
||||
PeriodMinutes: 60,
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
config := mgr.RefreshIfNeeded(context.Background())
|
||||
assert.Nil(t, config)
|
||||
}
|
||||
|
||||
func TestManager_RejectsInvalidJSON(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, err := w.Write([]byte("not json"))
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
mgr := NewManager(server.URL, testMinRefresh)
|
||||
config := mgr.RefreshIfNeeded(context.Background())
|
||||
assert.Nil(t, config)
|
||||
}
|
||||
|
||||
func newConfigServer(t *testing.T, config rawConfig) *httptest.Server {
|
||||
t.Helper()
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
err := json.NewEncoder(w).Encode(config)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
}
|
||||
@@ -22,51 +22,56 @@ func prepareFd() (int, error) {
|
||||
|
||||
func routeCheck(ctx context.Context, fd int, nexthopv4, nexthopv6 systemops.Nexthop) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
buf := make([]byte, 2048)
|
||||
n, err := unix.Read(fd, buf)
|
||||
// Wait until fd is readable or context is cancelled, to avoid a busy-loop
|
||||
// when the routing socket returns EAGAIN (e.g. immediately after wakeup).
|
||||
if err := waitReadable(ctx, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := make([]byte, 2048)
|
||||
n, err := unix.Read(fd, buf)
|
||||
if err != nil {
|
||||
if errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
if errors.Is(err, unix.EBADF) || errors.Is(err, unix.EINVAL) {
|
||||
return fmt.Errorf("routing socket closed: %w", err)
|
||||
}
|
||||
return fmt.Errorf("read routing socket: %w", err)
|
||||
}
|
||||
|
||||
if n < unix.SizeofRtMsghdr {
|
||||
log.Debugf("Network monitor: read from routing socket returned less than expected: %d bytes", n)
|
||||
continue
|
||||
}
|
||||
|
||||
msg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0]))
|
||||
|
||||
switch msg.Type {
|
||||
// handle route changes
|
||||
case unix.RTM_ADD, syscall.RTM_DELETE:
|
||||
route, err := parseRouteMessage(buf[:n])
|
||||
if err != nil {
|
||||
if !errors.Is(err, unix.EBADF) && !errors.Is(err, unix.EINVAL) {
|
||||
log.Warnf("Network monitor: failed to read from routing socket: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if n < unix.SizeofRtMsghdr {
|
||||
log.Debugf("Network monitor: read from routing socket returned less than expected: %d bytes", n)
|
||||
log.Debugf("Network monitor: error parsing routing message: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
msg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0]))
|
||||
if route.Dst.Bits() != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
intf := "<nil>"
|
||||
if route.Interface != nil {
|
||||
intf = route.Interface.Name
|
||||
}
|
||||
switch msg.Type {
|
||||
// handle route changes
|
||||
case unix.RTM_ADD, syscall.RTM_DELETE:
|
||||
route, err := parseRouteMessage(buf[:n])
|
||||
if err != nil {
|
||||
log.Debugf("Network monitor: error parsing routing message: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if route.Dst.Bits() != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
intf := "<nil>"
|
||||
if route.Interface != nil {
|
||||
intf = route.Interface.Name
|
||||
}
|
||||
switch msg.Type {
|
||||
case unix.RTM_ADD:
|
||||
log.Infof("Network monitor: default route changed: via %s, interface %s", route.Gw, intf)
|
||||
case unix.RTM_ADD:
|
||||
log.Infof("Network monitor: default route changed: via %s, interface %s", route.Gw, intf)
|
||||
return nil
|
||||
case unix.RTM_DELETE:
|
||||
if nexthopv4.Intf != nil && route.Gw.Compare(nexthopv4.IP) == 0 || nexthopv6.Intf != nil && route.Gw.Compare(nexthopv6.IP) == 0 {
|
||||
log.Infof("Network monitor: default route removed: via %s, interface %s", route.Gw, intf)
|
||||
return nil
|
||||
case unix.RTM_DELETE:
|
||||
if nexthopv4.Intf != nil && route.Gw.Compare(nexthopv4.IP) == 0 || nexthopv6.Intf != nil && route.Gw.Compare(nexthopv6.IP) == 0 {
|
||||
log.Infof("Network monitor: default route removed: via %s, interface %s", route.Gw, intf)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -90,3 +95,33 @@ func parseRouteMessage(buf []byte) (*systemops.Route, error) {
|
||||
|
||||
return systemops.MsgToRoute(msg)
|
||||
}
|
||||
|
||||
// waitReadable blocks until fd has data to read, or ctx is cancelled.
|
||||
func waitReadable(ctx context.Context, fd int) error {
|
||||
var fdset unix.FdSet
|
||||
if fd < 0 || fd/unix.NFDBITS >= len(fdset.Bits) {
|
||||
return fmt.Errorf("fd %d out of range for FdSet", fd)
|
||||
}
|
||||
|
||||
for {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fdset = unix.FdSet{}
|
||||
fdset.Set(fd)
|
||||
// Use a 1-second timeout so we can re-check ctx periodically.
|
||||
tv := unix.Timeval{Sec: 1}
|
||||
n, err := unix.Select(fd+1, &fdset, nil, nil, &tv)
|
||||
if err != nil {
|
||||
if errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("select on routing socket: %w", err)
|
||||
}
|
||||
if n > 0 {
|
||||
return nil
|
||||
}
|
||||
// timeout — loop back and re-check ctx
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package peer
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
@@ -16,6 +15,7 @@ import (
|
||||
|
||||
"github.com/netbirdio/netbird/client/iface/configurer"
|
||||
"github.com/netbirdio/netbird/client/iface/wgproxy"
|
||||
"github.com/netbirdio/netbird/client/internal/metrics"
|
||||
"github.com/netbirdio/netbird/client/internal/peer/conntype"
|
||||
"github.com/netbirdio/netbird/client/internal/peer/dispatcher"
|
||||
"github.com/netbirdio/netbird/client/internal/peer/guard"
|
||||
@@ -25,17 +25,27 @@ import (
|
||||
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||
"github.com/netbirdio/netbird/route"
|
||||
relayClient "github.com/netbirdio/netbird/shared/relay/client"
|
||||
semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group"
|
||||
)
|
||||
|
||||
// MetricsRecorder is an interface for recording peer connection metrics
|
||||
type MetricsRecorder interface {
|
||||
RecordConnectionStages(
|
||||
ctx context.Context,
|
||||
remotePubKey string,
|
||||
connectionType metrics.ConnectionType,
|
||||
isReconnection bool,
|
||||
timestamps metrics.ConnectionStageTimestamps,
|
||||
)
|
||||
}
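For reference, a minimal sketch of a type satisfying this interface; it only derives durations from the stage timestamps (field names as used in metrics_saver.go later in this change) and logs them. The type, its behavior, and the package-level logrus logger are assumptions for illustration, not the recorder shipped here.

// loggingRecorder is a hypothetical MetricsRecorder that computes stage
// durations from the recorded timestamps and logs them instead of exporting them.
type loggingRecorder struct{}

func (loggingRecorder) RecordConnectionStages(
	_ context.Context,
	remotePubKey string,
	connectionType metrics.ConnectionType,
	isReconnection bool,
	timestamps metrics.ConnectionStageTimestamps,
) {
	if timestamps.SignalingReceived.IsZero() || timestamps.ConnectionReady.IsZero() {
		return // incomplete attempt, nothing to report
	}
	setup := timestamps.ConnectionReady.Sub(timestamps.SignalingReceived)
	handshake := time.Duration(0)
	if !timestamps.WgHandshakeSuccess.IsZero() {
		handshake = timestamps.WgHandshakeSuccess.Sub(timestamps.ConnectionReady)
	}
	log.Tracef("peer %s: type=%v reconnection=%t setup=%s wg-handshake=%s",
		remotePubKey, connectionType, isReconnection, setup, handshake)
}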
|
||||
|
||||
type ServiceDependencies struct {
|
||||
StatusRecorder *Status
|
||||
Signaler *Signaler
|
||||
IFaceDiscover stdnet.ExternalIFaceDiscover
|
||||
RelayManager *relayClient.Manager
|
||||
SrWatcher *guard.SRWatcher
|
||||
Semaphore *semaphoregroup.SemaphoreGroup
|
||||
PeerConnDispatcher *dispatcher.ConnectionDispatcher
|
||||
MetricsRecorder MetricsRecorder
|
||||
}
|
||||
|
||||
type WgConfig struct {
|
||||
@@ -111,14 +121,17 @@ type Conn struct {
|
||||
wgProxyRelay wgproxy.Proxy
|
||||
handshaker *Handshaker
|
||||
|
||||
guard *guard.Guard
|
||||
semaphore *semaphoregroup.SemaphoreGroup
|
||||
wg sync.WaitGroup
|
||||
guard *guard.Guard
|
||||
wg sync.WaitGroup
|
||||
|
||||
// debug purpose
|
||||
dumpState *stateDump
|
||||
|
||||
endpointUpdater *EndpointUpdater
|
||||
|
||||
// Connection stage timestamps for metrics
|
||||
metricsRecorder MetricsRecorder
|
||||
metricsStages *MetricsStages
|
||||
}
|
||||
|
||||
// NewConn creates a new not opened Conn to the remote peer.
|
||||
@@ -139,12 +152,12 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) {
|
||||
iFaceDiscover: services.IFaceDiscover,
|
||||
relayManager: services.RelayManager,
|
||||
srWatcher: services.SrWatcher,
|
||||
semaphore: services.Semaphore,
|
||||
statusRelay: worker.NewAtomicStatus(),
|
||||
statusICE: worker.NewAtomicStatus(),
|
||||
dumpState: dumpState,
|
||||
endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)),
|
||||
wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState),
|
||||
metricsRecorder: services.MetricsRecorder,
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
@@ -154,18 +167,16 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) {
|
||||
// It will try to establish a connection using ICE and in parallel with relay. The higher priority connection type will
|
||||
// be used.
|
||||
func (conn *Conn) Open(engineCtx context.Context) error {
|
||||
if err := conn.semaphore.Add(engineCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn.mu.Lock()
|
||||
defer conn.mu.Unlock()
|
||||
|
||||
if conn.opened {
|
||||
conn.semaphore.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Allocate new metrics stages so old goroutines don't corrupt new state
|
||||
conn.metricsStages = &MetricsStages{}
|
||||
|
||||
conn.ctx, conn.ctxCancel = context.WithCancel(engineCtx)
|
||||
|
||||
conn.workerRelay = NewWorkerRelay(conn.ctx, conn.Log, isController(conn.config), conn.config, conn, conn.relayManager)
|
||||
@@ -173,12 +184,11 @@ func (conn *Conn) Open(engineCtx context.Context) error {
|
||||
relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally()
|
||||
workerICE, err := NewWorkerICE(conn.ctx, conn.Log, conn.config, conn, conn.signaler, conn.iFaceDiscover, conn.statusRecorder, relayIsSupportedLocally)
|
||||
if err != nil {
|
||||
conn.semaphore.Done()
|
||||
return err
|
||||
}
|
||||
conn.workerICE = workerICE
|
||||
|
||||
conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay)
|
||||
conn.handshaker = NewHandshaker(conn.Log, conn.config, conn.signaler, conn.workerICE, conn.workerRelay, conn.metricsStages)
|
||||
|
||||
conn.handshaker.AddRelayListener(conn.workerRelay.OnNewOffer)
|
||||
if !isForceRelayed() {
|
||||
@@ -207,10 +217,6 @@ func (conn *Conn) Open(engineCtx context.Context) error {
|
||||
conn.wg.Add(1)
|
||||
go func() {
|
||||
defer conn.wg.Done()
|
||||
|
||||
conn.waitInitialRandomSleepTime(conn.ctx)
|
||||
conn.semaphore.Done()
|
||||
|
||||
conn.guard.Start(conn.ctx, conn.onGuardEvent)
|
||||
}()
|
||||
conn.opened = true
|
||||
@@ -350,7 +356,7 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn
|
||||
if conn.currentConnPriority > priority {
|
||||
conn.Log.Infof("current connection priority (%s) is higher than the new one (%s), do not upgrade connection", conn.currentConnPriority, priority)
|
||||
conn.statusICE.SetConnected()
|
||||
conn.updateIceState(iceConnInfo)
|
||||
conn.updateIceState(iceConnInfo, time.Now())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -390,7 +396,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn
|
||||
}
|
||||
|
||||
conn.Log.Infof("configure WireGuard endpoint to: %s", ep.String())
|
||||
conn.enableWgWatcherIfNeeded()
|
||||
updateTime := time.Now()
|
||||
conn.enableWgWatcherIfNeeded(updateTime)
|
||||
|
||||
presharedKey := conn.presharedKey(iceConnInfo.RosenpassPubKey)
|
||||
if err = conn.endpointUpdater.ConfigureWGEndpoint(ep, presharedKey); err != nil {
|
||||
@@ -406,8 +413,8 @@ func (conn *Conn) onICEConnectionIsReady(priority conntype.ConnPriority, iceConn
|
||||
|
||||
conn.currentConnPriority = priority
|
||||
conn.statusICE.SetConnected()
|
||||
conn.updateIceState(iceConnInfo)
|
||||
conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr)
|
||||
conn.updateIceState(iceConnInfo, updateTime)
|
||||
conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr, updateTime)
|
||||
}
|
||||
|
||||
func (conn *Conn) onICEStateDisconnected(sessionChanged bool) {
|
||||
@@ -459,6 +466,10 @@ func (conn *Conn) onICEStateDisconnected(sessionChanged bool) {
|
||||
|
||||
conn.disableWgWatcherIfNeeded()
|
||||
|
||||
if conn.currentConnPriority == conntype.None {
|
||||
conn.metricsStages.Disconnected()
|
||||
}
|
||||
|
||||
peerState := State{
|
||||
PubKey: conn.config.Key,
|
||||
ConnStatus: conn.evalStatus(),
|
||||
@@ -499,7 +510,7 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) {
|
||||
conn.Log.Debugf("do not switch to relay because current priority is: %s", conn.currentConnPriority.String())
|
||||
conn.setRelayedProxy(wgProxy)
|
||||
conn.statusRelay.SetConnected()
|
||||
conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey)
|
||||
conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey, time.Now())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -508,7 +519,8 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) {
|
||||
if controller {
|
||||
wgProxy.Work()
|
||||
}
|
||||
conn.enableWgWatcherIfNeeded()
|
||||
updateTime := time.Now()
|
||||
conn.enableWgWatcherIfNeeded(updateTime)
|
||||
if err := conn.endpointUpdater.ConfigureWGEndpoint(wgProxy.EndpointAddr(), conn.presharedKey(rci.rosenpassPubKey)); err != nil {
|
||||
if err := wgProxy.CloseConn(); err != nil {
|
||||
conn.Log.Warnf("Failed to close relay connection: %v", err)
|
||||
@@ -519,13 +531,16 @@ func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) {
|
||||
if !controller {
|
||||
wgProxy.Work()
|
||||
}
|
||||
|
||||
wgConfigWorkaround()
|
||||
|
||||
conn.rosenpassRemoteKey = rci.rosenpassPubKey
|
||||
conn.currentConnPriority = conntype.Relay
|
||||
conn.statusRelay.SetConnected()
|
||||
conn.setRelayedProxy(wgProxy)
|
||||
conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey)
|
||||
conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey, updateTime)
|
||||
conn.Log.Infof("start to communicate with peer via relay")
|
||||
conn.doOnConnected(rci.rosenpassPubKey, rci.rosenpassAddr)
|
||||
conn.doOnConnected(rci.rosenpassPubKey, rci.rosenpassAddr, updateTime)
|
||||
}
|
||||
|
||||
func (conn *Conn) onRelayDisconnected() {
|
||||
@@ -563,6 +578,10 @@ func (conn *Conn) handleRelayDisconnectedLocked() {
|
||||
|
||||
conn.disableWgWatcherIfNeeded()
|
||||
|
||||
if conn.currentConnPriority == conntype.None {
|
||||
conn.metricsStages.Disconnected()
|
||||
}
|
||||
|
||||
peerState := State{
|
||||
PubKey: conn.config.Key,
|
||||
ConnStatus: conn.evalStatus(),
|
||||
@@ -603,10 +622,10 @@ func (conn *Conn) onWGDisconnected() {
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte) {
|
||||
func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []byte, updateTime time.Time) {
|
||||
peerState := State{
|
||||
PubKey: conn.config.Key,
|
||||
ConnStatusUpdate: time.Now(),
|
||||
ConnStatusUpdate: updateTime,
|
||||
ConnStatus: conn.evalStatus(),
|
||||
Relayed: conn.isRelayed(),
|
||||
RelayServerAddress: relayServerAddr,
|
||||
@@ -619,10 +638,10 @@ func (conn *Conn) updateRelayStatus(relayServerAddr string, rosenpassPubKey []by
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) updateIceState(iceConnInfo ICEConnInfo) {
|
||||
func (conn *Conn) updateIceState(iceConnInfo ICEConnInfo, updateTime time.Time) {
|
||||
peerState := State{
|
||||
PubKey: conn.config.Key,
|
||||
ConnStatusUpdate: time.Now(),
|
||||
ConnStatusUpdate: updateTime,
|
||||
ConnStatus: conn.evalStatus(),
|
||||
Relayed: iceConnInfo.Relayed,
|
||||
LocalIceCandidateType: iceConnInfo.LocalIceCandidateType,
|
||||
@@ -660,29 +679,18 @@ func (conn *Conn) setStatusToDisconnected() {
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) doOnConnected(remoteRosenpassPubKey []byte, remoteRosenpassAddr string) {
|
||||
func (conn *Conn) doOnConnected(remoteRosenpassPubKey []byte, remoteRosenpassAddr string, updateTime time.Time) {
|
||||
if runtime.GOOS == "ios" {
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
conn.metricsStages.RecordConnectionReady(updateTime)
|
||||
|
||||
if conn.onConnected != nil {
|
||||
conn.onConnected(conn.config.Key, remoteRosenpassPubKey, conn.config.WgConfig.AllowedIps[0].Addr().String(), remoteRosenpassAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) waitInitialRandomSleepTime(ctx context.Context) {
|
||||
maxWait := 300
|
||||
duration := time.Duration(rand.Intn(maxWait)) * time.Millisecond
|
||||
|
||||
timeout := time.NewTimer(duration)
|
||||
defer timeout.Stop()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-timeout.C:
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *Conn) isRelayed() bool {
|
||||
switch conn.currentConnPriority {
|
||||
case conntype.Relay, conntype.ICETurn:
|
||||
@@ -729,14 +737,14 @@ func (conn *Conn) isConnectedOnAllWay() (connected bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
func (conn *Conn) enableWgWatcherIfNeeded() {
|
||||
func (conn *Conn) enableWgWatcherIfNeeded(enabledTime time.Time) {
|
||||
if !conn.wgWatcher.IsEnabled() {
|
||||
wgWatcherCtx, wgWatcherCancel := context.WithCancel(conn.ctx)
|
||||
conn.wgWatcherCancel = wgWatcherCancel
|
||||
conn.wgWatcherWg.Add(1)
|
||||
go func() {
|
||||
defer conn.wgWatcherWg.Done()
|
||||
conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, conn.onWGDisconnected)
|
||||
conn.wgWatcher.EnableWgWatcher(wgWatcherCtx, enabledTime, conn.onWGDisconnected, conn.onWGHandshakeSuccess)
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -811,6 +819,41 @@ func (conn *Conn) setRelayedProxy(proxy wgproxy.Proxy) {
|
||||
conn.wgProxyRelay = proxy
|
||||
}
|
||||
|
||||
// onWGHandshakeSuccess is called when the first WireGuard handshake is detected
|
||||
func (conn *Conn) onWGHandshakeSuccess(when time.Time) {
|
||||
conn.metricsStages.RecordWGHandshakeSuccess(when)
|
||||
conn.recordConnectionMetrics()
|
||||
}
|
||||
|
||||
// recordConnectionMetrics records connection stage timestamps as metrics
|
||||
func (conn *Conn) recordConnectionMetrics() {
|
||||
if conn.metricsRecorder == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Determine connection type based on current priority
|
||||
conn.mu.Lock()
|
||||
priority := conn.currentConnPriority
|
||||
conn.mu.Unlock()
|
||||
|
||||
var connType metrics.ConnectionType
|
||||
switch priority {
|
||||
case conntype.Relay:
|
||||
connType = metrics.ConnectionTypeRelay
|
||||
default:
|
||||
connType = metrics.ConnectionTypeICE
|
||||
}
|
||||
|
||||
// Record metrics with timestamps - duration calculation happens in metrics package
|
||||
conn.metricsRecorder.RecordConnectionStages(
|
||||
context.Background(),
|
||||
conn.config.Key,
|
||||
connType,
|
||||
conn.metricsStages.IsReconnection(),
|
||||
conn.metricsStages.GetTimestamps(),
|
||||
)
|
||||
}
|
||||
|
||||
// AllowedIP returns the allowed IP of the remote peer
|
||||
func (conn *Conn) AllowedIP() netip.Addr {
|
||||
return conn.config.WgConfig.AllowedIps[0].Addr()
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/netbirdio/netbird/client/internal/peer/ice"
|
||||
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||
"github.com/netbirdio/netbird/util"
|
||||
semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group"
|
||||
)
|
||||
|
||||
var testDispatcher = dispatcher.NewConnectionDispatcher()
|
||||
@@ -53,7 +52,6 @@ func TestConn_GetKey(t *testing.T) {
|
||||
|
||||
sd := ServiceDependencies{
|
||||
SrWatcher: swWatcher,
|
||||
Semaphore: semaphoregroup.NewSemaphoreGroup(1),
|
||||
PeerConnDispatcher: testDispatcher,
|
||||
}
|
||||
conn, err := NewConn(connConf, sd)
|
||||
@@ -71,7 +69,6 @@ func TestConn_OnRemoteOffer(t *testing.T) {
|
||||
sd := ServiceDependencies{
|
||||
StatusRecorder: NewRecorder("https://mgm"),
|
||||
SrWatcher: swWatcher,
|
||||
Semaphore: semaphoregroup.NewSemaphoreGroup(1),
|
||||
PeerConnDispatcher: testDispatcher,
|
||||
}
|
||||
conn, err := NewConn(connConf, sd)
|
||||
@@ -110,7 +107,6 @@ func TestConn_OnRemoteAnswer(t *testing.T) {
|
||||
sd := ServiceDependencies{
|
||||
StatusRecorder: NewRecorder("https://mgm"),
|
||||
SrWatcher: swWatcher,
|
||||
Semaphore: semaphoregroup.NewSemaphoreGroup(1),
|
||||
PeerConnDispatcher: testDispatcher,
|
||||
}
|
||||
conn, err := NewConn(connConf, sd)
|
||||
|
||||
@@ -44,12 +44,13 @@ type OfferAnswer struct {
|
||||
}
|
||||
|
||||
type Handshaker struct {
|
||||
mu sync.Mutex
|
||||
log *log.Entry
|
||||
config ConnConfig
|
||||
signaler *Signaler
|
||||
ice *WorkerICE
|
||||
relay *WorkerRelay
|
||||
mu sync.Mutex
|
||||
log *log.Entry
|
||||
config ConnConfig
|
||||
signaler *Signaler
|
||||
ice *WorkerICE
|
||||
relay *WorkerRelay
|
||||
metricsStages *MetricsStages
|
||||
// relayListener is not blocking because the listener is using a goroutine to process the messages
|
||||
// and it will only keep the latest message if multiple offers are received in a short time
|
||||
// this is to avoid blocking the handshaker if the listener is doing some heavy processing
|
||||
@@ -64,13 +65,14 @@ type Handshaker struct {
|
||||
remoteAnswerCh chan OfferAnswer
|
||||
}
|
||||
|
||||
func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay) *Handshaker {
|
||||
func NewHandshaker(log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay, metricsStages *MetricsStages) *Handshaker {
|
||||
return &Handshaker{
|
||||
log: log,
|
||||
config: config,
|
||||
signaler: signaler,
|
||||
ice: ice,
|
||||
relay: relay,
|
||||
metricsStages: metricsStages,
|
||||
remoteOffersCh: make(chan OfferAnswer),
|
||||
remoteAnswerCh: make(chan OfferAnswer),
|
||||
}
|
||||
@@ -89,6 +91,12 @@ func (h *Handshaker) Listen(ctx context.Context) {
|
||||
select {
|
||||
case remoteOfferAnswer := <-h.remoteOffersCh:
|
||||
h.log.Infof("received offer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString())
|
||||
|
||||
// Record signaling received for reconnection attempts
|
||||
if h.metricsStages != nil {
|
||||
h.metricsStages.RecordSignalingReceived()
|
||||
}
|
||||
|
||||
if h.relayListener != nil {
|
||||
h.relayListener.Notify(&remoteOfferAnswer)
|
||||
}
|
||||
@@ -103,6 +111,12 @@ func (h *Handshaker) Listen(ctx context.Context) {
|
||||
}
|
||||
case remoteOfferAnswer := <-h.remoteAnswerCh:
|
||||
h.log.Infof("received answer, running version %s, remote WireGuard listen port %d, session id: %s", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort, remoteOfferAnswer.SessionIDString())
|
||||
|
||||
// Record signaling received for reconnection attempts
|
||||
if h.metricsStages != nil {
|
||||
h.metricsStages.RecordSignalingReceived()
|
||||
}
|
||||
|
||||
if h.relayListener != nil {
|
||||
h.relayListener.Notify(&remoteOfferAnswer)
|
||||
}
|
||||
|
||||
73
client/internal/peer/metrics_saver.go
Normal file
73
client/internal/peer/metrics_saver.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package peer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/metrics"
|
||||
)
|
||||
|
||||
type MetricsStages struct {
|
||||
isReconnectionAttempt bool // Track if current attempt is a reconnection
|
||||
stageTimestamps metrics.ConnectionStageTimestamps
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// RecordSignalingReceived records when the first signal is received from the remote peer.
|
||||
// Used as the base for all subsequent stage durations to avoid inflating metrics when
|
||||
// the remote peer was offline.
|
||||
func (s *MetricsStages) RecordSignalingReceived() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if s.stageTimestamps.SignalingReceived.IsZero() {
|
||||
s.stageTimestamps.SignalingReceived = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *MetricsStages) RecordConnectionReady(when time.Time) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.stageTimestamps.ConnectionReady.IsZero() {
|
||||
s.stageTimestamps.ConnectionReady = when
|
||||
}
|
||||
}
|
||||
|
||||
func (s *MetricsStages) RecordWGHandshakeSuccess(handshakeTime time.Time) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
if !s.stageTimestamps.ConnectionReady.IsZero() && s.stageTimestamps.WgHandshakeSuccess.IsZero() {
|
||||
// WireGuard only reports handshake times with second precision, but ConnectionReady
|
||||
// is captured with microsecond precision. If handshake appears before ConnectionReady
|
||||
// due to truncation (e.g., handshake at 6.042s truncated to 6.000s), normalize to
|
||||
// ConnectionReady to avoid negative duration metrics.
|
||||
if handshakeTime.Before(s.stageTimestamps.ConnectionReady) {
|
||||
s.stageTimestamps.WgHandshakeSuccess = s.stageTimestamps.ConnectionReady
|
||||
} else {
|
||||
s.stageTimestamps.WgHandshakeSuccess = handshakeTime
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Disconnected sets the mode to reconnection. It is called only when both ICE and Relay have been disconnected at the same time.
|
||||
func (s *MetricsStages) Disconnected() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Reset all timestamps for reconnection
|
||||
s.stageTimestamps = metrics.ConnectionStageTimestamps{}
|
||||
s.isReconnectionAttempt = true
|
||||
}
|
||||
|
||||
func (s *MetricsStages) IsReconnection() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.isReconnectionAttempt
|
||||
}
|
||||
|
||||
func (s *MetricsStages) GetTimestamps() metrics.ConnectionStageTimestamps {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.stageTimestamps
|
||||
}
|
||||
125
client/internal/peer/metrics_saver_test.go
Normal file
125
client/internal/peer/metrics_saver_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package peer
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal/metrics"
|
||||
)
|
||||
|
||||
func TestMetricsStages_RecordSignalingReceived(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
s.RecordSignalingReceived()
|
||||
ts := s.GetTimestamps()
|
||||
require.False(t, ts.SignalingReceived.IsZero())
|
||||
|
||||
// Second call should not overwrite
|
||||
first := ts.SignalingReceived
|
||||
time.Sleep(time.Millisecond)
|
||||
s.RecordSignalingReceived()
|
||||
ts = s.GetTimestamps()
|
||||
assert.Equal(t, first, ts.SignalingReceived, "should keep the first signaling timestamp")
|
||||
}
|
||||
|
||||
func TestMetricsStages_RecordConnectionReady(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
now := time.Now()
|
||||
s.RecordConnectionReady(now)
|
||||
ts := s.GetTimestamps()
|
||||
assert.Equal(t, now, ts.ConnectionReady)
|
||||
|
||||
// Second call should not overwrite
|
||||
later := now.Add(time.Second)
|
||||
s.RecordConnectionReady(later)
|
||||
ts = s.GetTimestamps()
|
||||
assert.Equal(t, now, ts.ConnectionReady, "should keep the first connection ready timestamp")
|
||||
}
|
||||
|
||||
func TestMetricsStages_RecordWGHandshakeSuccess(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
connReady := time.Now()
|
||||
s.RecordConnectionReady(connReady)
|
||||
|
||||
handshake := connReady.Add(500 * time.Millisecond)
|
||||
s.RecordWGHandshakeSuccess(handshake)
|
||||
|
||||
ts := s.GetTimestamps()
|
||||
assert.Equal(t, handshake, ts.WgHandshakeSuccess)
|
||||
}
|
||||
|
||||
func TestMetricsStages_HandshakeBeforeConnectionReady_Normalizes(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
connReady := time.Now()
|
||||
s.RecordConnectionReady(connReady)
|
||||
|
||||
// WG handshake appears before ConnectionReady due to second-precision truncation
|
||||
handshake := connReady.Add(-100 * time.Millisecond)
|
||||
s.RecordWGHandshakeSuccess(handshake)
|
||||
|
||||
ts := s.GetTimestamps()
|
||||
assert.Equal(t, connReady, ts.WgHandshakeSuccess, "should normalize to ConnectionReady when handshake appears earlier")
|
||||
}
|
||||
|
||||
func TestMetricsStages_HandshakeIgnoredWithoutConnectionReady(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
s.RecordWGHandshakeSuccess(time.Now())
|
||||
ts := s.GetTimestamps()
|
||||
assert.True(t, ts.WgHandshakeSuccess.IsZero(), "should not record handshake without connection ready")
|
||||
}
|
||||
|
||||
func TestMetricsStages_HandshakeRecordedOnce(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
connReady := time.Now()
|
||||
s.RecordConnectionReady(connReady)
|
||||
|
||||
first := connReady.Add(time.Second)
|
||||
s.RecordWGHandshakeSuccess(first)
|
||||
|
||||
// Second call (rekey) should be ignored
|
||||
second := connReady.Add(2 * time.Second)
|
||||
s.RecordWGHandshakeSuccess(second)
|
||||
|
||||
ts := s.GetTimestamps()
|
||||
assert.Equal(t, first, ts.WgHandshakeSuccess, "should preserve first handshake, ignore rekeys")
|
||||
}
|
||||
|
||||
func TestMetricsStages_Disconnected(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
s.RecordSignalingReceived()
|
||||
s.RecordConnectionReady(time.Now())
|
||||
assert.False(t, s.IsReconnection())
|
||||
|
||||
s.Disconnected()
|
||||
|
||||
assert.True(t, s.IsReconnection())
|
||||
ts := s.GetTimestamps()
|
||||
assert.True(t, ts.SignalingReceived.IsZero(), "timestamps should be reset after disconnect")
|
||||
assert.True(t, ts.ConnectionReady.IsZero(), "timestamps should be reset after disconnect")
|
||||
assert.True(t, ts.WgHandshakeSuccess.IsZero(), "timestamps should be reset after disconnect")
|
||||
}
|
||||
|
||||
func TestMetricsStages_GetTimestamps(t *testing.T) {
|
||||
s := &MetricsStages{}
|
||||
|
||||
ts := s.GetTimestamps()
|
||||
assert.Equal(t, metrics.ConnectionStageTimestamps{}, ts)
|
||||
|
||||
now := time.Now()
|
||||
s.RecordSignalingReceived()
|
||||
s.RecordConnectionReady(now)
|
||||
|
||||
ts = s.GetTimestamps()
|
||||
assert.False(t, ts.SignalingReceived.IsZero())
|
||||
assert.Equal(t, now, ts.ConnectionReady)
|
||||
assert.True(t, ts.WgHandshakeSuccess.IsZero())
|
||||
}
|
||||
@@ -48,7 +48,7 @@ func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey strin
|
||||
|
||||
// EnableWgWatcher starts the WireGuard watcher. If it is already enabled, it will return immediately and do nothing.
|
||||
// The watcher runs until ctx is cancelled. Caller is responsible for context lifecycle management.
|
||||
func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()) {
|
||||
func (w *WGWatcher) EnableWgWatcher(ctx context.Context, enabledTime time.Time, onDisconnectedFn func(), onHandshakeSuccessFn func(when time.Time)) {
|
||||
w.muEnabled.Lock()
|
||||
if w.enabled {
|
||||
w.muEnabled.Unlock()
|
||||
@@ -56,7 +56,6 @@ func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()
|
||||
}
|
||||
|
||||
w.log.Debugf("enable WireGuard watcher")
|
||||
enabledTime := time.Now()
|
||||
w.enabled = true
|
||||
w.muEnabled.Unlock()
|
||||
|
||||
@@ -65,7 +64,7 @@ func (w *WGWatcher) EnableWgWatcher(ctx context.Context, onDisconnectedFn func()
|
||||
w.log.Warnf("failed to read initial wg stats: %v", err)
|
||||
}
|
||||
|
||||
w.periodicHandshakeCheck(ctx, onDisconnectedFn, enabledTime, initialHandshake)
|
||||
w.periodicHandshakeCheck(ctx, onDisconnectedFn, onHandshakeSuccessFn, enabledTime, initialHandshake)
|
||||
|
||||
w.muEnabled.Lock()
|
||||
w.enabled = false
|
||||
@@ -89,7 +88,7 @@ func (w *WGWatcher) Reset() {
|
||||
}
|
||||
|
||||
// wgStateCheck help to check the state of the WireGuard handshake and relay connection
|
||||
func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), enabledTime time.Time, initialHandshake time.Time) {
|
||||
func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn func(), onHandshakeSuccessFn func(when time.Time), enabledTime time.Time, initialHandshake time.Time) {
|
||||
w.log.Infof("WireGuard watcher started")
|
||||
|
||||
timer := time.NewTimer(wgHandshakeOvertime)
|
||||
@@ -108,6 +107,9 @@ func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, onDisconnectedFn
|
||||
if lastHandshake.IsZero() {
|
||||
elapsed := calcElapsed(enabledTime, *handshake)
|
||||
w.log.Infof("first wg handshake detected within: %.2fsec, (%s)", elapsed, handshake)
|
||||
if onHandshakeSuccessFn != nil {
|
||||
onHandshakeSuccessFn(*handshake)
|
||||
}
|
||||
}
|
||||
|
||||
lastHandshake = *handshake
|
||||
|
||||
@@ -35,9 +35,11 @@ func TestWGWatcher_EnableWgWatcher(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
onDisconnected := make(chan struct{}, 1)
|
||||
go watcher.EnableWgWatcher(ctx, func() {
|
||||
go watcher.EnableWgWatcher(ctx, time.Now(), func() {
|
||||
mlog.Infof("onDisconnectedFn")
|
||||
onDisconnected <- struct{}{}
|
||||
}, func(when time.Time) {
|
||||
mlog.Infof("onHandshakeSuccess: %v", when)
|
||||
})
|
||||
|
||||
// wait for initial reading
|
||||
@@ -64,7 +66,7 @@ func TestWGWatcher_ReEnable(t *testing.T) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
watcher.EnableWgWatcher(ctx, func() {})
|
||||
watcher.EnableWgWatcher(ctx, time.Now(), func() {}, func(when time.Time) {})
|
||||
}()
|
||||
cancel()
|
||||
|
||||
@@ -75,9 +77,9 @@ func TestWGWatcher_ReEnable(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
onDisconnected := make(chan struct{}, 1)
|
||||
go watcher.EnableWgWatcher(ctx, func() {
|
||||
go watcher.EnableWgWatcher(ctx, time.Now(), func() {
|
||||
onDisconnected <- struct{}{}
|
||||
})
|
||||
}, func(when time.Time) {})
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
mocWgIface.disconnect()
|
||||
|
||||
@@ -198,7 +198,7 @@ func getConfigDirForUser(username string) (string, error) {
|
||||
|
||||
configDir := filepath.Join(DefaultConfigPathDir, username)
|
||||
if _, err := os.Stat(configDir); os.IsNotExist(err) {
|
||||
if err := os.MkdirAll(configDir, 0600); err != nil {
|
||||
if err := os.MkdirAll(configDir, 0700); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
@@ -206,9 +206,15 @@ func getConfigDirForUser(username string) (string, error) {
|
||||
return configDir, nil
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
func fileExists(path string) (bool, error) {
|
||||
_, err := os.Stat(path)
|
||||
return !os.IsNotExist(err)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// createNewConfig creates a new config generating a new Wireguard key and saving to file
|
||||
@@ -635,7 +641,11 @@ func isPreSharedKeyHidden(preSharedKey *string) bool {
|
||||
|
||||
// UpdateConfig update existing configuration according to input configuration and return with the configuration
|
||||
func UpdateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
return nil, fmt.Errorf("config file %s does not exist", input.ConfigPath)
|
||||
}
|
||||
|
||||
@@ -644,7 +654,11 @@ func UpdateConfig(input ConfigInput) (*Config, error) {
|
||||
|
||||
// UpdateOrCreateConfig reads existing config or generates a new one
|
||||
func UpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
log.Infof("generating new config %s", input.ConfigPath)
|
||||
cfg, err := createNewConfig(input)
|
||||
if err != nil {
|
||||
@@ -657,7 +671,7 @@ func UpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if isPreSharedKeyHidden(input.PreSharedKey) {
|
||||
input.PreSharedKey = nil
|
||||
}
|
||||
err := util.EnforcePermission(input.ConfigPath)
|
||||
err = util.EnforcePermission(input.ConfigPath)
|
||||
if err != nil {
|
||||
log.Errorf("failed to enforce permission on config dir: %v", err)
|
||||
}
|
||||
@@ -784,7 +798,12 @@ func ReadConfig(configPath string) (*Config, error) {
|
||||
|
||||
// ReadConfig read config file and return with Config. If it is not exists create a new with default values
|
||||
func readConfig(configPath string, createIfMissing bool) (*Config, error) {
|
||||
if fileExists(configPath) {
|
||||
configExists, err := fileExists(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
|
||||
if configExists {
|
||||
err := util.EnforcePermission(configPath)
|
||||
if err != nil {
|
||||
log.Errorf("failed to enforce permission on config dir: %v", err)
|
||||
@@ -831,7 +850,11 @@ func DirectWriteOutConfig(path string, config *Config) error {
|
||||
// DirectUpdateOrCreateConfig is like UpdateOrCreateConfig but uses direct (non-atomic) writes.
|
||||
// Use this on platforms where atomic writes are blocked (e.g., tvOS sandbox).
|
||||
func DirectUpdateOrCreateConfig(input ConfigInput) (*Config, error) {
|
||||
if !fileExists(input.ConfigPath) {
|
||||
configExists, err := fileExists(input.ConfigPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if config file exists: %w", err)
|
||||
}
|
||||
if !configExists {
|
||||
log.Infof("generating new config %s", input.ConfigPath)
|
||||
cfg, err := createNewConfig(input)
|
||||
if err != nil {
|
||||
|
||||
@@ -256,7 +256,11 @@ func (s *ServiceManager) AddProfile(profileName, username string) error {
|
||||
}
|
||||
|
||||
profPath := filepath.Join(configDir, profileName+".json")
|
||||
if fileExists(profPath) {
|
||||
profileExists, err := fileExists(profPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if profile exists: %w", err)
|
||||
}
|
||||
if profileExists {
|
||||
return ErrProfileAlreadyExists
|
||||
}
|
||||
|
||||
@@ -285,7 +289,11 @@ func (s *ServiceManager) RemoveProfile(profileName, username string) error {
|
||||
return fmt.Errorf("cannot remove profile with reserved name: %s", defaultProfileName)
|
||||
}
|
||||
profPath := filepath.Join(configDir, profileName+".json")
|
||||
if !fileExists(profPath) {
|
||||
profileExists, err := fileExists(profPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if profile exists: %w", err)
|
||||
}
|
||||
if !profileExists {
|
||||
return ErrProfileNotFound
|
||||
}
|
||||
|
||||
|
||||
@@ -20,7 +20,11 @@ func (pm *ProfileManager) GetProfileState(profileName string) (*ProfileState, er
|
||||
}
|
||||
|
||||
stateFile := filepath.Join(configDir, profileName+".state.json")
|
||||
if !fileExists(stateFile) {
|
||||
stateFileExists, err := fileExists(stateFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if profile state file exists: %w", err)
|
||||
}
|
||||
if !stateFileExists {
|
||||
return nil, errors.New("profile state file does not exist")
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,9 @@ package client
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -263,8 +265,14 @@ func (w *Watcher) watchPeerStatusChanges(ctx context.Context, peerKey string, pe
|
||||
case <-closer:
|
||||
return
|
||||
case routerStates := <-subscription.Events():
|
||||
peerStateUpdate <- routerStates
|
||||
log.Debugf("triggered route state update for Peer: %s", peerKey)
|
||||
select {
|
||||
case peerStateUpdate <- routerStates:
|
||||
log.Debugf("triggered route state update for Peer: %s", peerKey)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-closer:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -558,7 +566,7 @@ func HandlerFromRoute(params common.HandlerParams) RouteHandler {
|
||||
return dnsinterceptor.New(params)
|
||||
case handlerTypeDynamic:
|
||||
dns := nbdns.NewServiceViaMemory(params.WgInterface)
|
||||
dnsAddr := fmt.Sprintf("%s:%d", dns.RuntimeIP(), dns.RuntimePort())
|
||||
dnsAddr := net.JoinHostPort(dns.RuntimeIP().String(), strconv.Itoa(dns.RuntimePort()))
|
||||
return dynamic.NewRoute(params, dnsAddr)
|
||||
default:
|
||||
return static.NewRoute(params)
|
||||
|
||||
@@ -4,8 +4,10 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -249,7 +251,7 @@ func (d *DnsInterceptor) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
|
||||
r.MsgHdr.AuthenticatedData = true
|
||||
}
|
||||
|
||||
upstream := fmt.Sprintf("%s:%d", upstreamIP.String(), uint16(d.forwarderPort.Load()))
|
||||
upstream := net.JoinHostPort(upstreamIP.String(), strconv.FormatUint(uint64(d.forwarderPort.Load()), 10))
|
||||
ctx, cancel := context.WithTimeout(context.Background(), dnsTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -351,6 +353,11 @@ func (d *DnsInterceptor) writeMsg(w dns.ResponseWriter, r *dns.Msg, logger *log.
|
||||
logger.Errorf("failed to update domain prefixes: %v", err)
|
||||
}
|
||||
|
||||
// Allow time for route changes to be applied before sending
|
||||
// the DNS response (relevant on iOS where setTunnelNetworkSettings
|
||||
// is asynchronous).
|
||||
waitForRouteSettlement(logger)
|
||||
|
||||
d.replaceIPsInDNSResponse(r, newPrefixes, logger)
|
||||
}
|
||||
}
|
||||
|
||||
20
client/internal/routemanager/dnsinterceptor/handler_ios.go
Normal file
@@ -0,0 +1,20 @@
//go:build ios

package dnsinterceptor

import (
	"time"

	log "github.com/sirupsen/logrus"
)

const routeSettleDelay = 500 * time.Millisecond

// waitForRouteSettlement introduces a short delay on iOS to allow
// setTunnelNetworkSettings to apply route changes before the DNS
// response reaches the application. Without this, the first request
// to a newly resolved domain may bypass the tunnel.
func waitForRouteSettlement(logger *log.Entry) {
	logger.Tracef("waiting %v for iOS route settlement", routeSettleDelay)
	time.Sleep(routeSettleDelay)
}
@@ -0,0 +1,12 @@
//go:build !ios

package dnsinterceptor

import log "github.com/sirupsen/logrus"

func waitForRouteSettlement(_ *log.Entry) {
	// No-op on non-iOS platforms: route changes are applied synchronously by
	// the kernel, so no settlement delay is needed before the DNS response
	// reaches the application. The delay is only required on iOS where
	// setTunnelNetworkSettings applies routes asynchronously.
}
80
client/internal/sleep/handler/handler.go
Normal file
@@ -0,0 +1,80 @@
package handler

import (
	"context"
	"sync"

	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/internal"
)

type Agent interface {
	Up(ctx context.Context) error
	Down(ctx context.Context) error
	Status() (internal.StatusType, error)
}

type SleepHandler struct {
	agent Agent

	mu sync.Mutex
	// sleepTriggeredDown indicates whether the sleep handler triggered the last client down, to avoid an unnecessary up on wake
	sleepTriggeredDown bool
}

func New(agent Agent) *SleepHandler {
	return &SleepHandler{
		agent: agent,
	}
}

func (s *SleepHandler) HandleWakeUp(ctx context.Context) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if !s.sleepTriggeredDown {
		log.Info("skipping up because the last down was not triggered by sleep")
		return nil
	}

	// avoid further up runs if the sleep event did not actually suspend the computer
	s.sleepTriggeredDown = false

	log.Info("running up after wake up")
	err := s.agent.Up(ctx)
	if err != nil {
		log.Errorf("running up failed: %v", err)
		return err
	}

	log.Info("running up command executed successfully")
	return nil
}

func (s *SleepHandler) HandleSleep(ctx context.Context) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	status, err := s.agent.Status()
	if err != nil {
		return err
	}

	if status != internal.StatusConnecting && status != internal.StatusConnected {
		log.Infof("skipping setting the agent down because status is %s", status)
		return nil
	}

	log.Info("running down after system started sleeping")

	if err = s.agent.Down(ctx); err != nil {
		log.Errorf("running down failed: %v", err)
		return err
	}

	s.sleepTriggeredDown = true

	log.Info("running down executed successfully")
	return nil
}
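A minimal sketch of how a SleepHandler could be driven by platform power events; the event channel, its string values, and the function name are assumptions for illustration, since the actual platform wiring is not part of this file.

// Example (illustrative only): feed hypothetical OS power events into the handler.
// The real client hooks HandleSleep/HandleWakeUp up to platform-specific notifications.
func watchPowerEvents(ctx context.Context, h *SleepHandler, powerEvents <-chan string) {
	for {
		select {
		case <-ctx.Done():
			return
		case ev := <-powerEvents:
			switch ev {
			case "sleep":
				if err := h.HandleSleep(ctx); err != nil {
					log.Errorf("sleep handling failed: %v", err)
				}
			case "wake":
				if err := h.HandleWakeUp(ctx); err != nil {
					log.Errorf("wake handling failed: %v", err)
				}
			}
		}
	}
}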
153
client/internal/sleep/handler/handler_test.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/client/internal"
|
||||
)
|
||||
|
||||
type mockAgent struct {
|
||||
upErr error
|
||||
downErr error
|
||||
statusErr error
|
||||
status internal.StatusType
|
||||
upCalls int
|
||||
}
|
||||
|
||||
func (m *mockAgent) Up(_ context.Context) error {
|
||||
m.upCalls++
|
||||
return m.upErr
|
||||
}
|
||||
|
||||
func (m *mockAgent) Down(_ context.Context) error {
|
||||
return m.downErr
|
||||
}
|
||||
|
||||
func (m *mockAgent) Status() (internal.StatusType, error) {
|
||||
return m.status, m.statusErr
|
||||
}
|
||||
|
||||
func newHandler(status internal.StatusType) (*SleepHandler, *mockAgent) {
|
||||
agent := &mockAgent{status: status}
|
||||
return New(agent), agent
|
||||
}
|
||||
|
||||
func TestHandleWakeUp_SkipsWhenFlagFalse(t *testing.T) {
|
||||
h, agent := newHandler(internal.StatusIdle)
|
||||
|
||||
err := h.HandleWakeUp(context.Background())
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, agent.upCalls, "Up should not be called when flag is false")
|
||||
}
|
||||
|
||||
func TestHandleWakeUp_ResetsFlagBeforeUp(t *testing.T) {
|
||||
h, _ := newHandler(internal.StatusIdle)
|
||||
h.sleepTriggeredDown = true
|
||||
|
||||
// Even if Up fails, flag should be reset
|
||||
_ = h.HandleWakeUp(context.Background())
|
||||
|
||||
assert.False(t, h.sleepTriggeredDown, "flag must be reset before calling Up")
|
||||
}
|
||||
|
||||
func TestHandleWakeUp_CallsUpWhenFlagSet(t *testing.T) {
|
||||
h, agent := newHandler(internal.StatusIdle)
|
||||
h.sleepTriggeredDown = true
|
||||
|
||||
err := h.HandleWakeUp(context.Background())
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, agent.upCalls)
|
||||
assert.False(t, h.sleepTriggeredDown)
|
||||
}
|
||||
|
||||
func TestHandleWakeUp_ReturnsErrorFromUp(t *testing.T) {
|
||||
h, agent := newHandler(internal.StatusIdle)
|
||||
h.sleepTriggeredDown = true
|
||||
agent.upErr = errors.New("up failed")
|
||||
|
||||
err := h.HandleWakeUp(context.Background())
|
||||
|
||||
assert.ErrorIs(t, err, agent.upErr)
|
||||
assert.False(t, h.sleepTriggeredDown, "flag should still be reset even when Up fails")
|
||||
}
|
||||
|
||||
func TestHandleWakeUp_SecondCallIsNoOp(t *testing.T) {
|
||||
h, agent := newHandler(internal.StatusIdle)
|
||||
h.sleepTriggeredDown = true
|
||||
|
||||
_ = h.HandleWakeUp(context.Background())
|
||||
err := h.HandleWakeUp(context.Background())
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, agent.upCalls, "second wakeup should be no-op")
|
||||
}
|
||||
|
||||
func TestHandleSleep_SkipsForNonActiveStates(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
status internal.StatusType
|
||||
}{
|
||||
{"Idle", internal.StatusIdle},
|
||||
{"NeedsLogin", internal.StatusNeedsLogin},
|
||||
{"LoginFailed", internal.StatusLoginFailed},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _ := newHandler(tt.status)
|
||||
|
||||
err := h.HandleSleep(context.Background())
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.False(t, h.sleepTriggeredDown)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSleep_ProceedsForActiveStates(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
status internal.StatusType
|
||||
}{
|
||||
{"Connecting", internal.StatusConnecting},
|
||||
{"Connected", internal.StatusConnected},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _ := newHandler(tt.status)
|
||||
|
||||
err := h.HandleSleep(context.Background())
|
||||
|
||||
require.NoError(t, err)
|
||||
assert.True(t, h.sleepTriggeredDown)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSleep_ReturnsErrorFromStatus(t *testing.T) {
|
||||
agent := &mockAgent{statusErr: errors.New("status error")}
|
||||
h := New(agent)
|
||||
|
||||
err := h.HandleSleep(context.Background())
|
||||
|
||||
assert.ErrorIs(t, err, agent.statusErr)
|
||||
assert.False(t, h.sleepTriggeredDown)
|
||||
}
|
||||
|
||||
func TestHandleSleep_ReturnsErrorFromDown(t *testing.T) {
|
||||
agent := &mockAgent{status: internal.StatusConnected, downErr: errors.New("down failed")}
|
||||
h := New(agent)
|
||||
|
||||
err := h.HandleSleep(context.Background())
|
||||
|
||||
assert.ErrorIs(t, err, agent.downErr)
|
||||
assert.False(t, h.sleepTriggeredDown, "flag should not be set when Down fails")
|
||||
}
|
||||
@@ -1,214 +0,0 @@
//go:build windows || darwin

package updatemanager

import (
    "context"
    "fmt"
    "path"
    "testing"
    "time"

    v "github.com/hashicorp/go-version"

    "github.com/netbirdio/netbird/client/internal/peer"
    "github.com/netbirdio/netbird/client/internal/statemanager"
)

type versionUpdateMock struct {
    latestVersion *v.Version
    onUpdate      func()
}

func (v versionUpdateMock) StopWatch() {}

func (v versionUpdateMock) SetDaemonVersion(newVersion string) bool {
    return false
}

func (v *versionUpdateMock) SetOnUpdateListener(updateFn func()) {
    v.onUpdate = updateFn
}

func (v versionUpdateMock) LatestVersion() *v.Version {
    return v.latestVersion
}

func (v versionUpdateMock) StartFetcher() {}

func Test_LatestVersion(t *testing.T) {
    testMatrix := []struct {
        name                 string
        daemonVersion        string
        initialLatestVersion *v.Version
        latestVersion        *v.Version
        shouldUpdateInit     bool
        shouldUpdateLater    bool
    }{
        {
            name:                 "Should only trigger update once due to time between triggers being < 5 Minutes",
            daemonVersion:        "1.0.0",
            initialLatestVersion: v.Must(v.NewSemver("1.0.1")),
            latestVersion:        v.Must(v.NewSemver("1.0.2")),
            shouldUpdateInit:     true,
            shouldUpdateLater:    false,
        },
        {
            name:                 "Shouldn't update initially, but should update as soon as latest version is fetched",
            daemonVersion:        "1.0.0",
            initialLatestVersion: nil,
            latestVersion:        v.Must(v.NewSemver("1.0.1")),
            shouldUpdateInit:     false,
            shouldUpdateLater:    true,
        },
    }

    for idx, c := range testMatrix {
        mockUpdate := &versionUpdateMock{latestVersion: c.initialLatestVersion}
        tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
        m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile))
        m.update = mockUpdate

        targetVersionChan := make(chan string, 1)

        m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error {
            targetVersionChan <- targetVersion
            return nil
        }
        m.currentVersion = c.daemonVersion
        m.Start(context.Background())
        m.SetVersion("latest")
        var triggeredInit bool
        select {
        case targetVersion := <-targetVersionChan:
            if targetVersion != c.initialLatestVersion.String() {
                t.Errorf("%s: Initial update version mismatch, expected %v, got %v", c.name, c.initialLatestVersion.String(), targetVersion)
            }
            triggeredInit = true
        case <-time.After(10 * time.Millisecond):
            triggeredInit = false
        }
        if triggeredInit != c.shouldUpdateInit {
            t.Errorf("%s: Initial update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateInit, triggeredInit)
        }

        mockUpdate.latestVersion = c.latestVersion
        mockUpdate.onUpdate()

        var triggeredLater bool
        select {
        case targetVersion := <-targetVersionChan:
            if targetVersion != c.latestVersion.String() {
                t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion)
            }
            triggeredLater = true
        case <-time.After(10 * time.Millisecond):
            triggeredLater = false
        }
        if triggeredLater != c.shouldUpdateLater {
            t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdateLater, triggeredLater)
        }

        m.Stop()
    }
}

func Test_HandleUpdate(t *testing.T) {
    testMatrix := []struct {
        name            string
        daemonVersion   string
        latestVersion   *v.Version
        expectedVersion string
        shouldUpdate    bool
    }{
        {
            name:            "Update to a specific version should update regardless of if latestVersion is available yet",
            daemonVersion:   "0.55.0",
            latestVersion:   nil,
            expectedVersion: "0.56.0",
            shouldUpdate:    true,
        },
        {
            name:            "Update to specific version should not update if version matches",
            daemonVersion:   "0.55.0",
            latestVersion:   nil,
            expectedVersion: "0.55.0",
            shouldUpdate:    false,
        },
        {
            name:            "Update to specific version should not update if current version is newer",
            daemonVersion:   "0.55.0",
            latestVersion:   nil,
            expectedVersion: "0.54.0",
            shouldUpdate:    false,
        },
        {
            name:            "Update to latest version should update if latest is newer",
            daemonVersion:   "0.55.0",
            latestVersion:   v.Must(v.NewSemver("0.56.0")),
            expectedVersion: "latest",
            shouldUpdate:    true,
        },
        {
            name:            "Update to latest version should not update if latest == current",
            daemonVersion:   "0.56.0",
            latestVersion:   v.Must(v.NewSemver("0.56.0")),
            expectedVersion: "latest",
            shouldUpdate:    false,
        },
        {
            name:            "Should not update if daemon version is invalid",
            daemonVersion:   "development",
            latestVersion:   v.Must(v.NewSemver("1.0.0")),
            expectedVersion: "latest",
            shouldUpdate:    false,
        },
        {
            name:            "Should not update if expecting latest and latest version is unavailable",
            daemonVersion:   "0.55.0",
            latestVersion:   nil,
            expectedVersion: "latest",
            shouldUpdate:    false,
        },
        {
            name:            "Should not update if expected version is invalid",
            daemonVersion:   "0.55.0",
            latestVersion:   nil,
            expectedVersion: "development",
            shouldUpdate:    false,
        },
    }
    for idx, c := range testMatrix {
        tmpFile := path.Join(t.TempDir(), fmt.Sprintf("update-test-%d.json", idx))
        m, _ := newManager(peer.NewRecorder(""), statemanager.New(tmpFile))
        m.update = &versionUpdateMock{latestVersion: c.latestVersion}
        targetVersionChan := make(chan string, 1)

        m.triggerUpdateFn = func(ctx context.Context, targetVersion string) error {
            targetVersionChan <- targetVersion
            return nil
        }

        m.currentVersion = c.daemonVersion
        m.Start(context.Background())
        m.SetVersion(c.expectedVersion)

        var updateTriggered bool
        select {
        case targetVersion := <-targetVersionChan:
            if c.expectedVersion == "latest" && targetVersion != c.latestVersion.String() {
                t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.latestVersion.String(), targetVersion)
            } else if c.expectedVersion != "latest" && targetVersion != c.expectedVersion {
                t.Errorf("%s: Update version mismatch, expected %v, got %v", c.name, c.expectedVersion, targetVersion)
            }
            updateTriggered = true
        case <-time.After(10 * time.Millisecond):
            updateTriggered = false
        }

        if updateTriggered != c.shouldUpdate {
            t.Errorf("%s: Update trigger mismatch, expected %v, got %v", c.name, c.shouldUpdate, updateTriggered)
        }
        m.Stop()
    }
}
@@ -1,39 +0,0 @@
//go:build !windows && !darwin

package updatemanager

import (
    "context"
    "fmt"

    "github.com/netbirdio/netbird/client/internal/peer"
    "github.com/netbirdio/netbird/client/internal/statemanager"
)

// Manager is a no-op stub for unsupported platforms
type Manager struct{}

// NewManager returns a no-op manager for unsupported platforms
func NewManager(statusRecorder *peer.Status, stateManager *statemanager.Manager) (*Manager, error) {
    return nil, fmt.Errorf("update manager is not supported on this platform")
}

// CheckUpdateSuccess is a no-op on unsupported platforms
func (m *Manager) CheckUpdateSuccess(ctx context.Context) {
    // no-op
}

// Start is a no-op on unsupported platforms
func (m *Manager) Start(ctx context.Context) {
    // no-op
}

// SetVersion is a no-op on unsupported platforms
func (m *Manager) SetVersion(expectedVersion string) {
    // no-op
}

// Stop is a no-op on unsupported platforms
func (m *Manager) Stop() {
    // no-op
}
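A hypothetical caller-side sketch, not taken from the repository, of how a stub like the one above is meant to be consumed: on platforms where NewManager returns the error shown, the updater is simply skipped. Only NewManager, Start, and Stop come from the stub; the wrapper function, the logging, and the import path of the updater package are assumptions.

package example

import (
    "context"
    "log"

    "github.com/netbirdio/netbird/client/internal/peer"
    "github.com/netbirdio/netbird/client/internal/statemanager"
    "github.com/netbirdio/netbird/client/internal/updatemanager" // assumed import path for the package above
)

// startUpdaterIfSupported treats the update manager as optional: when the
// stub's NewManager fails on an unsupported platform, the client keeps
// running without automatic updates and the returned stop func is a no-op.
func startUpdaterIfSupported(ctx context.Context, rec *peer.Status, sm *statemanager.Manager) (stop func()) {
    m, err := updatemanager.NewManager(rec, sm)
    if err != nil {
        log.Printf("automatic updates disabled: %v", err)
        return func() {}
    }
    m.Start(ctx)
    return m.Stop
}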
@@ -1,4 +1,4 @@
-// Package updatemanager provides automatic update management for the NetBird client.
+// Package updater provides automatic update management for the NetBird client.
 // It monitors for new versions, handles update triggers from management server directives,
 // and orchestrates the download and installation of client updates.
 //
@@ -32,4 +32,4 @@
 //
 // This enables verification of successful updates and appropriate user notification
 // after the client restarts with the new version.
-package updatemanager
+package updater
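To make the package comment concrete, here is a hedged lifecycle sketch built only from the calls visible in the tests above (Start, SetVersion, Stop) and the behavior the comment describes. The channel, the wrapper function, and the import path are placeholders rather than the client's real wiring, and the exported Manager type is assumed to carry over unchanged after the rename to updater.

package example

import (
    "context"

    "github.com/netbirdio/netbird/client/internal/updater" // assumed path after the rename from updatemanager
)

// runUpdater drives the manager: Start begins watching for new releases,
// SetVersion applies a management-server directive ("latest" or a concrete
// version, as exercised by Test_HandleUpdate), and Stop shuts the watcher down.
func runUpdater(ctx context.Context, m *updater.Manager, directives <-chan string) {
    m.Start(ctx)
    defer m.Stop()

    for {
        select {
        case version := <-directives:
            // The manager itself decides whether the target is newer than the
            // running daemon before triggering a download and install.
            m.SetVersion(version)
        case <-ctx.Done():
            return
        }
    }
}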
Some files were not shown because too many files have changed in this diff.