git subrepo clone https://github.com/greatest-ape/aquatic ./apps/aquatic
subrepo:
  subdir:   "apps/aquatic"
  merged:   "34b45e92"
upstream:
  origin:   "https://github.com/greatest-ape/aquatic"
  branch:   "master"
  commit:   "34b45e92"
git-subrepo:
  version:  "0.4.9"
  origin:   "???"
  commit:   "???"
commit 694dc89f03 (parent fe0291091c)
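git-subrepo records the upstream state in `apps/aquatic/.gitrepo` (added below) and squashes the subproject history into this single commit. Later synchronization uses the matching subcommands:

```sh
# Pull new upstream commits into the subdirectory (squashed by default)
git subrepo pull apps/aquatic

# Push local changes made under the subdirectory back upstream
git subrepo push apps/aquatic
```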
apps/aquatic/.dockerignore (new file, 5 lines)
@@ -0,0 +1,5 @@
target
docker
.git
tmp
documents
apps/aquatic/.github/actions/test-file-transfers/action.yml (new file, vendored, 16 lines)
@@ -0,0 +1,16 @@
name: 'test-file-transfers'
description: 'test aquatic file transfers'
outputs:
  # http_ipv4:
  #   description: 'HTTP IPv4 status'
  http_tls_ipv4:
    description: 'HTTP IPv4 over TLS status'
  udp_ipv4:
    description: 'UDP IPv4 status'
  wss_ipv4:
    description: 'WSS IPv4 status'
runs:
  using: 'composite'
  steps:
    - run: $GITHUB_ACTION_PATH/entrypoint.sh
      shell: bash
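Note: in a composite action, declared outputs only surface to callers when each output is mapped to a step output via `value:`. A sketch of that wiring (the step id `run_tests` is hypothetical, not part of this commit):

```yaml
# Sketch only: explicit output mapping for a composite action
outputs:
  udp_ipv4:
    description: 'UDP IPv4 status'
    value: ${{ steps.run_tests.outputs.udp_ipv4 }}
runs:
  using: 'composite'
  steps:
    - id: run_tests
      run: $GITHUB_ACTION_PATH/entrypoint.sh
      shell: bash
```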
apps/aquatic/.github/actions/test-file-transfers/entrypoint.sh (new executable file, vendored, 309 lines)
@@ -0,0 +1,309 @@
#!/bin/bash
#
# Test that file transfers work over all protocols.
#
# IPv6 is unfortunately disabled by default in Docker
# (see sysctl net.ipv6.conf.lo.disable_ipv6)

set -e

# Install programs and build dependencies

if command -v sudo; then
    SUDO="sudo "
else
    SUDO=""
fi

ulimit -a

$SUDO apt-get update
$SUDO apt-get install -y cmake libssl-dev screen rtorrent mktorrent ssl-cert ca-certificates curl golang libhwloc-dev

git clone https://github.com/anacrolix/torrent.git gotorrent
cd gotorrent
# Use commit known to work
git checkout 16176b762e4a840fc5dfe3b1dfd2d6fa853b68d7
go build -o $HOME/gotorrent ./cmd/torrent
cd ..
file $HOME/gotorrent

# Go to repository directory

if [[ -z "${GITHUB_WORKSPACE}" ]]; then
    exit 1
else
    cd "$GITHUB_WORKSPACE"
fi

# Setup bogus TLS certificate

$SUDO echo "127.0.0.1 example.com" >> /etc/hosts

openssl genrsa -out ca.key 2048
openssl req -new -x509 -days 365 -key ca.key -subj "/C=CN/ST=GD/L=SZ/O=Acme, Inc./CN=Acme Root CA" -out ca.crt
openssl req -newkey rsa:2048 -nodes -keyout server.key -subj "/C=CN/ST=GD/L=SZ/O=Acme, Inc./CN=*.example.com" -out server.csr
openssl x509 -req -extfile <(printf "subjectAltName=DNS:example.com,DNS:www.example.com") -days 365 -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
openssl pkcs8 -in server.key -topk8 -nocrypt -out key.pk8

$SUDO cp ca.crt /usr/local/share/ca-certificates/snakeoil-ca.crt
$SUDO cp server.crt /usr/local/share/ca-certificates/snakeoil-server.crt
$SUDO update-ca-certificates

# Build and start tracker

cargo build --bin aquatic

# UDP
echo "
log_level = 'debug'

[network]
address_ipv4 = '127.0.0.1:3000'" > udp.toml
./target/debug/aquatic udp -c udp.toml > "$HOME/udp.log" 2>&1 &

# HTTP
echo "log_level = 'debug'

[network]
address_ipv4 = '127.0.0.1:3004'" > http.toml
./target/debug/aquatic http -c http.toml > "$HOME/http.log" 2>&1 &

# HTTP with TLS
echo "log_level = 'debug'

[network]
address_ipv4 = '127.0.0.1:3001'
enable_tls = true
tls_certificate_path = './server.crt'
tls_private_key_path = './key.pk8'
" > tls.toml
./target/debug/aquatic http -c tls.toml > "$HOME/tls.log" 2>&1 &

# WebTorrent
echo "log_level = 'debug'

[network]
address = '127.0.0.1:3003'
enable_http_health_checks = true
" > ws.toml
./target/debug/aquatic ws -c ws.toml > "$HOME/ws.log" 2>&1 &

# WebTorrent with TLS
echo "log_level = 'debug'

[network]
address = '127.0.0.1:3002'
enable_tls = true
tls_certificate_path = './server.crt'
tls_private_key_path = './key.pk8'
" > ws-tls.toml
./target/debug/aquatic ws -c ws-tls.toml > "$HOME/ws-tls.log" 2>&1 &

# Setup directories

cd "$HOME"

mkdir seed
mkdir leech
mkdir torrents

# Create torrents

echo "udp-test-ipv4" > seed/udp-test-ipv4
echo "http-test-ipv4" > seed/http-test-ipv4
echo "tls-test-ipv4" > seed/tls-test-ipv4
echo "ws-test-ipv4" > seed/ws-test-ipv4
echo "ws-tls-test-ipv4" > seed/ws-tls-test-ipv4

mktorrent -p -o "torrents/udp-ipv4.torrent" -a "udp://127.0.0.1:3000" "seed/udp-test-ipv4"
mktorrent -p -o "torrents/http-ipv4.torrent" -a "http://127.0.0.1:3004/announce" "seed/http-test-ipv4"
mktorrent -p -o "torrents/tls-ipv4.torrent" -a "https://example.com:3001/announce" "seed/tls-test-ipv4"
mktorrent -p -o "torrents/ws-ipv4.torrent" -a "ws://example.com:3003" "seed/ws-test-ipv4"
mktorrent -p -o "torrents/ws-tls-ipv4.torrent" -a "wss://example.com:3002" "seed/ws-tls-test-ipv4"

cp -r torrents torrents-seed
cp -r torrents torrents-leech

# Setup ws-tls seeding client

echo "Starting seeding ws-tls (wss) client"
cd seed
GOPPROF=http $HOME/gotorrent download --dht=false --tcppeers=false --utppeers=false --pex=false --stats --seed ../torrents/ws-tls-ipv4.torrent > "$HOME/ws-tls-seed.log" 2>&1 &
cd ..

# Setup ws seeding client

echo "Starting seeding ws client"
cd seed
GOPPROF=http $HOME/gotorrent download --dht=false --tcppeers=false --utppeers=false --pex=false --stats --seed ../torrents/ws-ipv4.torrent > "$HOME/ws-seed.log" 2>&1 &
cd ..

# Start seeding rtorrent client

echo "directory.default.set = $HOME/seed
schedule2 = watch_directory,5,5,load.start=$HOME/torrents-seed/*.torrent" > ~/.rtorrent.rc

echo "Starting seeding rtorrent client"
screen -dmS rtorrent-seed rtorrent

# Give seeding clients time to load config files etc

echo "Waiting for a while"
sleep 30

# Start leeching clients

echo "directory.default.set = $HOME/leech
schedule2 = watch_directory,5,5,load.start=$HOME/torrents-leech/*.torrent" > ~/.rtorrent.rc

echo "Starting leeching client.."
screen -dmS rtorrent-leech rtorrent

echo "Starting leeching ws-tls (wss) client"
cd leech
GOPPROF=http $HOME/gotorrent download --dht=false --tcppeers=false --utppeers=false --pex=false --stats --addr ":43000" ../torrents/ws-tls-ipv4.torrent > "$HOME/ws-tls-leech.log" 2>&1 &
cd ..

echo "Starting leeching ws client"
cd leech
GOPPROF=http $HOME/gotorrent download --dht=false --tcppeers=false --utppeers=false --pex=false --stats --addr ":43001" ../torrents/ws-ipv4.torrent > "$HOME/ws-leech.log" 2>&1 &
cd ..

# Check for completion

HTTP_IPv4="Failed"
TLS_IPv4="Failed"
UDP_IPv4="Failed"
WS_TLS_IPv4="Failed"
WS_IPv4="Failed"

i="0"

echo "Watching for finished files.."

while [ $i -lt 60 ]
do
    if test -f "leech/http-test-ipv4"; then
        if grep -q "http-test-ipv4" "leech/http-test-ipv4"; then
            if [ "$HTTP_IPv4" != "Ok" ]; then
                HTTP_IPv4="Ok"
                echo "HTTP_IPv4 is Ok"
            fi
        fi
    fi
    if test -f "leech/tls-test-ipv4"; then
        if grep -q "tls-test-ipv4" "leech/tls-test-ipv4"; then
            if [ "$TLS_IPv4" != "Ok" ]; then
                TLS_IPv4="Ok"
                echo "TLS_IPv4 is Ok"
            fi
        fi
    fi
    if test -f "leech/udp-test-ipv4"; then
        if grep -q "udp-test-ipv4" "leech/udp-test-ipv4"; then
            if [ "$UDP_IPv4" != "Ok" ]; then
                UDP_IPv4="Ok"
                echo "UDP_IPv4 is Ok"
            fi
        fi
    fi
    if test -f "leech/ws-tls-test-ipv4"; then
        if grep -q "ws-tls-test-ipv4" "leech/ws-tls-test-ipv4"; then
            if [ "$WS_TLS_IPv4" != "Ok" ]; then
                WS_TLS_IPv4="Ok"
                echo "WS_TLS_IPv4 is Ok"
            fi
        fi
    fi
    if test -f "leech/ws-test-ipv4"; then
        if grep -q "ws-test-ipv4" "leech/ws-test-ipv4"; then
            if [ "$WS_IPv4" != "Ok" ]; then
                WS_IPv4="Ok"
                echo "WS_IPv4 is Ok"
            fi
        fi
    fi

    if [ "$HTTP_IPv4" = "Ok" ] && [ "$TLS_IPv4" = "Ok" ] && [ "$UDP_IPv4" = "Ok" ] && [ "$WS_TLS_IPv4" = "Ok" ] && [ "$WS_IPv4" = "Ok" ]; then
        break
    fi

    sleep 1

    i=$[$i+1]
done

echo "Waited for $i seconds"

echo "::set-output name=http_ipv4::$HTTP_IPv4"
echo "::set-output name=http_tls_ipv4::$TLS_IPv4"
echo "::set-output name=udp_ipv4::$UDP_IPv4"
echo "::set-output name=ws_tls_ipv4::$WS_TLS_IPv4"
echo "::set-output name=ws_ipv4::$WS_IPv4"

echo ""
echo "# --- HTTP log --- #"
cat "http.log"

sleep 1

echo ""
echo "# --- HTTP over TLS log --- #"
cat "tls.log"

sleep 1

echo ""
echo "# --- UDP log --- #"
cat "udp.log"

sleep 1

echo ""
echo "# --- WS over TLS tracker log --- #"
cat "ws-tls.log"

sleep 1

echo ""
echo "# --- WS tracker log --- #"
cat "ws.log"

sleep 1

echo ""
echo "# --- WS over TLS seed log --- #"
cat "ws-tls-seed.log"

sleep 1

echo ""
echo "# --- WS over TLS leech log --- #"
cat "ws-tls-leech.log"

sleep 1

echo ""
echo "# --- WS seed log --- #"
cat "ws-seed.log"

sleep 1

echo ""
echo "# --- WS leech log --- #"
cat "ws-leech.log"

sleep 1

echo ""
echo "# --- Test results --- #"
echo "HTTP: $HTTP_IPv4"
echo "HTTP (TLS): $TLS_IPv4"
echo "UDP: $UDP_IPv4"
echo "WebTorrent (TLS): $WS_TLS_IPv4"
echo "WebTorrent: $WS_IPv4"

if [ "$HTTP_IPv4" != "Ok" ] || [ "$TLS_IPv4" != "Ok" ] || [ "$UDP_IPv4" != "Ok" ] || [ "$WS_TLS_IPv4" != "Ok" ] || [ "$WS_IPv4" != "Ok" ]; then
    exit 1
fi
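Note: the `::set-output` workflow command used at the end of the script was deprecated by GitHub in late 2022. On current runners the equivalent is appending `key=value` lines to the file named by `$GITHUB_OUTPUT`, e.g.:

```sh
# Modern replacement for: echo "::set-output name=udp_ipv4::$UDP_IPv4"
echo "udp_ipv4=$UDP_IPv4" >> "$GITHUB_OUTPUT"
```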
apps/aquatic/.github/workflows/ci.yml (new file, vendored, 66 lines)
@@ -0,0 +1,66 @@
name: CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

env:
  CARGO_TERM_COLOR: always

jobs:
  build-linux:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v3
      - name: Install latest stable Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Setup Rust dependency caching
        uses: Swatinem/rust-cache@v2
      - name: Build
        run: |
          cargo build --verbose -p aquatic_udp
          cargo build --verbose -p aquatic_http
          cargo build --verbose -p aquatic_ws

  build-macos:
    runs-on: macos-latest
    timeout-minutes: 10
    steps:
      - uses: actions/checkout@v3
      - name: Install latest stable Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Setup Rust dependency caching
        uses: Swatinem/rust-cache@v2
      - name: Build
        run: cargo build --verbose -p aquatic_udp

  test:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v3
      - name: Install latest stable Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Setup Rust dependency caching
        uses: Swatinem/rust-cache@v2
      - name: Run tests
        run: cargo test --verbose --profile "test-fast" --workspace
      - name: Run tests (aquatic_udp with io_uring)
        run: cargo test --verbose --profile "test-fast" -p aquatic_udp --features "io-uring"

  test-file-transfers:
    runs-on: ubuntu-latest
    name: "Test BitTorrent file transfers (UDP, HTTP, WebTorrent)"
    timeout-minutes: 20
    container:
      image: rust:1-bookworm
      options: --ulimit memlock=524288:524288 --privileged --security-opt="seccomp=unconfined"
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Test file transfers
        uses: ./.github/actions/test-file-transfers
        id: test_file_transfers
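Note: the file-transfer job still pins `actions/checkout@v2` while the other jobs use `@v3`; assuming no job-specific constraint, aligning it is a one-line change:

```yaml
      - name: Checkout
        uses: actions/checkout@v3
```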
apps/aquatic/.gitignore (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
/target
/tmp

**/criterion/*/change
**/criterion/*/new

.DS_Store
.env
apps/aquatic/.gitrepo (new file, 12 lines)
@@ -0,0 +1,12 @@
; DO NOT EDIT (unless you know what you are doing)
;
; This subdirectory is a git "subrepo", and this file is maintained by the
; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme
;
[subrepo]
	remote = https://github.com/greatest-ape/aquatic
	branch = master
	commit = 34b45e923f84421181fc43cf5e20709e69ce0dfd
	parent = fe0291091c67c0a832eeaa432fed52d61f1a1bb1
	method = merge
	cmdver = 0.4.9
apps/aquatic/CHANGELOG.md (new file, 206 lines)
@@ -0,0 +1,206 @@
# Changelog

## Unreleased

### aquatic_udp

#### Changed

* (Breaking) Open one socket each for IPv4 and IPv6. The config file now has
  one setting for each.

### aquatic_http

#### Changed

* (Breaking) Open one socket each for IPv4 and IPv6. The config file now has
  one setting for each.

## 0.9.0 - 2024-04-03

### General

#### Added

* Add `aquatic_peer_id` crate with peer client information logic
* Add `aquatic_bencher` crate for automated benchmarking of aquatic and other
  BitTorrent trackers

### aquatic_udp

#### Added

* Add support for reporting peer client information

#### Changed

* Switch from socket worker/swarm worker division to a single type of worker,
  for performance reasons. Several config file keys were removed since they
  are no longer needed.
* Index peers by packet source IP and provided port, instead of by peer_id.
  This prevents users from impersonating others and is likely also slightly
  faster for IPv4 peers.
* Avoid a heap allocation for torrents with two or fewer peers. This can save
  a lot of memory if many torrents are tracked
* Improve announce performance by avoiding having to filter response peers
* In announce response statistics, don't include announcing peer
* Harden ConnectionValidator to make IP spoofing even more costly
* Remove config key `network.poll_event_capacity` (always use 1)
* Speed up parsing and serialization of requests and responses by using
  [zerocopy](https://crates.io/crates/zerocopy)
* Report socket worker related prometheus stats per worker
* Remove CPU pinning support

#### Fixed

* Quit whole application if any worker thread quits
* Disallow announce requests with port value of 0
* Fix io_uring UB issues

### aquatic_http

#### Added

* Reload TLS certificate (and key) on SIGUSR1
* Support running without TLS
* Support running behind reverse proxy

#### Changed

* Index peers by packet source IP and provided port instead of by source IP
  and peer id. This is likely slightly faster.
* Avoid a heap allocation for torrents with four or fewer peers. This can save
  a lot of memory if many torrents are tracked
* Improve announce performance by avoiding having to filter response peers
* In announce response statistics, don't include announcing peer
* Remove CPU pinning support

#### Fixed

* Fix bug where clean up after closing connections wasn't always done
* Quit whole application if any worker thread quits
* Fix panic when sending failure response when running with metrics behind
  reverse proxy
* Don't always close connections after sending failure response

### aquatic_ws

#### Added

* Add support for reporting peer client information
* Reload TLS certificate (and key) on SIGUSR1
* Keep track of which offers peers have sent and only allow matching answers

#### Changed

* A response is no longer generated when peers announce with AnnounceEvent::Stopped
* Compiling with SIMD extensions enabled is no longer required, due to the
  addition of runtime detection to simd-json
* Only consider announce and scrape responses as signs of connection still
  being alive. Previously, all messages sent to peer were considered.
* Decrease default max_peer_age and max_connection_idle config values
* Remove CPU pinning support

#### Fixed

* Fix memory leak
* Fix bug where clean up after closing connections wasn't always done
* Fix double counting of error responses
* Actually close connections that are too slow to send responses to
* If peers announce with AnnounceEvent::Stopped, allow them to later announce on
  same torrent with different peer_id
* Quit whole application if any worker thread quits

## 0.8.0 - 2023-03-17

### General

#### Added

* Support exposing a Prometheus endpoint for metrics
* Add cli flag for printing parsed config
* Add `aquatic_http_private`, an experiment for integrating with private trackers

#### Changed

* Rename request workers to swarm workers
* Switch to thin LTO for faster compile times
* Use proper workspace path declarations instead of workspace patch section
* Use [Rust 1.64 workspace inheritance](https://blog.rust-lang.org/2022/09/22/Rust-1.64.0.html)
* Reduce space taken by ValidUntil struct from 128 to 32 bits, reducing memory
  consumption for each stored peer by same amount
* Use regular indexmap instead of amortized-indexmap. This goes for torrent,
  peer and pending scrape response maps
* Improve privilege dropping
* Quit whole program if any thread panics
* Update dependencies

#### Fixed

* Forbid unrecognized keys when parsing config files
* Stop including invalid avx512 key in `./scripts/env-native-cpu-without-avx-512`

### aquatic_udp

#### Added

* Add experimental io_uring backend with higher throughput
* Add optional response resend buffer for use on operating systems that
  don't buffer outgoing UDP traffic
* Add optional extended statistics (peers per torrent histogram)
* Add Dockerfile to make it easier to get started

#### Changed

* Replace ConnectionMap with BLAKE3-based connection validator, greatly
  decreasing memory consumption
* Don't return any response peers if announce event is stopped
* Ignore requests with source port value of zero

#### Fixed

* When calculating bandwidth statistics, include size of protocol headers

### aquatic_http

#### Changed

* Don't return any response peers if announce event is stopped

### aquatic_http_protocol

#### Fixed

* Explicitly check for /scrape path
* Return NeedMoreData until headers are fully parsed
* Fix issues with ScrapeRequest::write and AnnounceRequest::write
* Expose write and parse methods for subtypes

### aquatic_http_load_test

#### Changed

* Exclusively use TLS 1.3

### aquatic_ws

#### Added

* Add HTTP health check route when running without TLS

#### Changed

* Make TLS optional
* Support reverse proxies
* Reduce size of various structs

#### Fixed

* Remove peer from swarms immediately when connection is closed
* Allow peers to use multiple peer IDs, as long as they only use one per info hash

### aquatic_ws_load_test

#### Changed

* Exclusively use TLS 1.3
apps/aquatic/Cargo.lock (generated file, 3455 lines)
Diff not shown (file too large).
apps/aquatic/Cargo.toml (new file, 60 lines)
@@ -0,0 +1,60 @@
[workspace]
members = [
    "crates/bencher",
    "crates/combined_binary",
    "crates/common",
    "crates/http",
    "crates/http_load_test",
    "crates/http_protocol",
    "crates/peer_id",
    "crates/toml_config",
    "crates/toml_config_derive",
    "crates/udp",
    "crates/udp_load_test",
    "crates/udp_protocol",
    "crates/ws",
    "crates/ws_load_test",
    "crates/ws_protocol",
]
resolver = "2"

[workspace.package]
version = "0.9.0"
authors = ["Joakim Frostegård <joakim.frostegard@gmail.com>"]
edition = "2021"
license = "Apache-2.0"
repository = "https://github.com/greatest-ape/aquatic"
readme = "./README.md"
rust-version = "1.64"

[workspace.dependencies]
aquatic_common = { version = "0.9.0", path = "./crates/common" }
aquatic_http_protocol = { version = "0.9.0", path = "./crates/http_protocol" }
aquatic_http = { version = "0.9.0", path = "./crates/http" }
aquatic_peer_id = { version = "0.9.0", path = "./crates/peer_id" }
aquatic_toml_config = { version = "0.9.0", path = "./crates/toml_config" }
aquatic_toml_config_derive = { version = "0.9.0", path = "./crates/toml_config_derive" }
aquatic_udp_protocol = { version = "0.9.0", path = "./crates/udp_protocol" }
aquatic_udp = { version = "0.9.0", path = "./crates/udp" }
aquatic_udp_load_test = { version = "0.9.0", path = "./crates/udp_load_test" }
aquatic_ws_protocol = { version = "0.9.0", path = "./crates/ws_protocol" }
aquatic_ws = { version = "0.9.0", path = "./crates/ws" }

[profile.release]
debug = false
lto = "thin"
opt-level = 3

[profile.test]
inherits = "release-debug"

[profile.bench]
inherits = "release-debug"

[profile.release-debug]
inherits = "release"
debug = true

[profile.test-fast]
inherits = "release"
lto = false
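The custom profiles above are selected with cargo's `--profile` flag, for example:

```sh
# Optimized build with debug symbols, used for profiling and benchmarking
cargo build --profile release-debug -p aquatic_udp

# Optimized test runs without LTO, as used in the CI workflow above
cargo test --profile test-fast --workspace
```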
apps/aquatic/LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
apps/aquatic/README.md (new file, 79 lines)
@@ -0,0 +1,79 @@
# aquatic: high-performance open BitTorrent tracker

[CI](https://github.com/greatest-ape/aquatic/actions/workflows/ci.yml)

High-performance open BitTorrent tracker, consisting
of sub-implementations for different protocols:

[aquatic_udp]: ./crates/udp
[aquatic_http]: ./crates/http
[aquatic_ws]: ./crates/ws

| Name           | Protocol                                  | OS requirements    |
|----------------|-------------------------------------------|--------------------|
| [aquatic_udp]  | BitTorrent over UDP                       | Unix-like          |
| [aquatic_http] | BitTorrent over HTTP, optionally over TLS | Linux 5.8 or later |
| [aquatic_ws]   | WebTorrent, optionally over TLS           | Linux 5.8 or later |

Features at a glance:

- Multithreaded design for handling large amounts of traffic
- All data is stored in-memory (no database needed)
- IPv4 and IPv6 support
- Supports forbidding/allowing info hashes
- Prometheus metrics
- Automated CI testing of full file transfers

Known users:

- [explodie.org public tracker](https://explodie.org/opentracker.html) (`udp://explodie.org:6969`), typically [serving ~100,000 requests per second](https://explodie.org/tracker-stats.html)
- [tracker.webtorrent.dev](https://tracker.webtorrent.dev) (`wss://tracker.webtorrent.dev`)

## Performance of the UDP implementation

(figure: UDP implementation load test results)

More benchmark details are available [here](./documents/aquatic-udp-load-test-2024-02-10.md).

## Usage

Please refer to the README pages for the respective implementations listed in
the table above.

## Auxiliary software

There are also some auxiliary applications and libraries.

### Tracker load testing

Load test applications for aquatic and other trackers, useful for profiling:

- [aquatic_udp_load_test](./crates/udp_load_test/) - BitTorrent over UDP
- [aquatic_http_load_test](./crates/http_load_test/) - BitTorrent over HTTP
- [aquatic_ws_load_test](./crates/ws_load_test/) - WebTorrent

Automated benchmarking of aquatic and other trackers: [aquatic_bencher](./crates/bencher/)

### Client ⇄ tracker communication

Libraries for communication between clients and trackers:

- [aquatic_udp_protocol](./crates/udp_protocol/) - BitTorrent over UDP
- [aquatic_http_protocol](./crates/http_protocol/) - BitTorrent over HTTP
- [aquatic_ws_protocol](./crates/ws_protocol/) - WebTorrent

### Other

- [aquatic_peer_id](./crates/peer_id/) - extract BitTorrent client information
  from peer identifiers

## Copyright and license

Copyright (c) Joakim Frostegård

Distributed under the terms of the Apache License, Version 2.0. Please refer to
the `LICENSE` file in the repository root directory for details.

## Trivia

The tracker is called aquatic because it thrives under a torrent of bits ;-)
apps/aquatic/TODO.md (new file, 58 lines)
@@ -0,0 +1,58 @@
# TODO

## High priority

* Change network address handling to accept separate IPv4 and IPv6
  addresses. Open a socket for each one, setting ipv6_only flag on
  the IPv6 one (unless user opts out).
* update zerocopy version (will likely require minor rewrite)

* udp (uring)
  * run tests under valgrind
    * hangs for integration tests, possibly related to https://bugs.kde.org/show_bug.cgi?id=463859
  * run tests with AddressSanitizer
    * `RUSTFLAGS=-Zsanitizer=address cargo +nightly test -Zbuild-std --target x86_64-unknown-linux-gnu --verbose --profile "test-fast" -p aquatic_udp --features "io-uring"`
    * build fails with `undefined reference to __asan_init`, currently unclear why

## Medium priority

* stagger cleaning tasks?
* Run cargo-fuzz on protocol crates

* udp
  * support link to arbitrary homepage as well as embedded tracker URL in statistics page
* Non-trivial dependency updates
  * toml v0.7
  * syn v2.0

* Run cargo-deny in CI

* aquatic_ws
  * Add cleaning task for ConnectionHandle.announced_info_hashes?

## Low priority

* aquatic_udp
  * udp uring
    * miri
    * thiserror?
    * CI
      * uring load test?

* Performance hyperoptimization (receive interrupts on correct core)
  * If there is no network card RSS support, do eBPF XDP CpuMap redirect based on packet info, to
    cpus where socket workers run. Support is work in progress in the larger Rust eBPF
    implementations, but exists in rebpf
  * Pin socket workers
    * Set SO_INCOMING_CPU (which should be fixed in very recent Linux?) to currently pinned thread
    * How does this relate to (currently unused) so_attach_reuseport_cbpf code?

## Not important

* aquatic_http:
  * consider better error type for request parsing, so that better error
    messages can be sent back (e.g., "full scrapes are not supported")
  * test torrent transfer with real clients
    * scrape: does it work (serialization etc), and with multiple hashes?
    * 'left' optional in magnet requests? Probably not. Transmission sends huge
      positive number.
apps/aquatic/crates/bencher/Cargo.toml (new file, 39 lines)
@@ -0,0 +1,39 @@
[package]
name = "aquatic_bencher"
description = "Automated benchmarking of aquatic and other BitTorrent trackers (Linux only)"
keywords = ["peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "./README.md"

[[bin]]
name = "aquatic_bencher"

[features]
default = ["udp"]
udp = ["aquatic_udp", "aquatic_udp_load_test"]

[dependencies]
aquatic_udp = { optional = true, workspace = true, features = ["io-uring"] }
aquatic_udp_load_test = { optional = true, workspace = true }

anyhow = "1"
clap = { version = "4", features = ["derive"] }
humanize-bytes = "1"
indexmap = "2"
indoc = "2"
itertools = "0.14"
num-format = "0.4"
nonblock = "0.2"
once_cell = "1"
regex = "1"
serde = "1"
tempfile = "3"
toml = "0.8"

[dev-dependencies]
apps/aquatic/crates/bencher/README.md (new file, 112 lines)
@@ -0,0 +1,112 @@
# aquatic_bencher

Automated benchmarking of aquatic and other BitTorrent trackers.

Requires Linux 6.0 or later.

Currently, only UDP BitTorrent tracker support is implemented.

## UDP

| Name              | Commit                |
|-------------------|-----------------------|
| [aquatic_udp]     | (use same as bencher) |
| [opentracker]     | 110868e               |
| [chihaya]         | 2f79440               |
| [torrust-tracker] | eaa86a7               |

The commits listed are ones known to work. It might be a good idea to first
test with the latest commits for each project, and if they don't seem to work,
revert to the listed commits.

Chihaya is known to crash under high load.

[aquatic_udp]: https://github.com/greatest-ape/aquatic/
[opentracker]: http://erdgeist.org/arts/software/opentracker/
[chihaya]: https://github.com/chihaya/chihaya
[torrust-tracker]: https://github.com/torrust/torrust-tracker

### Usage

Install dependencies. This is done differently for different Linux
distributions. On Debian 12, run:

```sh
sudo apt-get update
sudo apt-get install -y curl cmake build-essential pkg-config git screen cvs zlib1g zlib1g-dev golang
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
source "$HOME/.cargo/env"
```

Optionally install the latest Linux kernel. On Debian 12, you can do so from backports:

```sh
sudo echo "deb http://deb.debian.org/debian bookworm-backports main contrib" >> /etc/apt/sources.list
sudo apt-get update && sudo apt-get install -y linux-image-amd64/bookworm-backports
# You will have to restart to boot into the new kernel
```

Compile aquatic_udp, aquatic_udp_load_test and aquatic_bencher:

```sh
git clone https://github.com/greatest-ape/aquatic.git && cd aquatic
# Optionally enable certain native platform optimizations
. ./scripts/env-native-cpu-without-avx-512
cargo build --profile "release-debug" -p aquatic_udp --features "io-uring"
cargo build --profile "release-debug" -p aquatic_udp_load_test
cargo build --profile "release-debug" -p aquatic_bencher --features udp
cd ..
```

Compile and install opentracker:

```sh
cvs -d :pserver:cvs@cvs.fefe.de:/cvs -z9 co libowfat
cd libowfat
make
cd ..
git clone git://erdgeist.org/opentracker
cd opentracker
# Optionally enable native platform optimizations
sed -i "s/^OPTS_production=-O3/OPTS_production=-O3 -march=native -mtune=native/g" Makefile
make
sudo cp ./opentracker /usr/local/bin/
cd ..
```

Compile and install chihaya:

```sh
git clone https://github.com/chihaya/chihaya.git
cd chihaya
go build ./cmd/chihaya
sudo cp ./chihaya /usr/local/bin/
```

Compile and install torrust-tracker:

```sh
git clone git@github.com:torrust/torrust-tracker.git
cd torrust-tracker
cargo build --release
cp ./target/release/torrust-tracker /usr/local/bin/
```

You might need to raise locked memory limits:

```sh
ulimit -l 65536
```

Run the bencher:

```sh
cd aquatic
./target/release-debug/aquatic_bencher udp
# or print info on command line arguments
./target/release-debug/aquatic_bencher udp --help
```

If you're running the load test on a virtual machine / virtual server, consider
passing `--min-priority medium --cpu-mode subsequent-one-per-pair` for fairer
results.
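Note: `ulimit -l` only affects the current shell session, so set it in the same shell that launches the bencher (or raise the memlock limit system-wide, e.g. via `/etc/security/limits.conf`):

```sh
# Raise the locked-memory limit, then run the bencher in the same shell
ulimit -l 65536
cd aquatic
./target/release-debug/aquatic_bencher udp
```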
336
apps/aquatic/crates/bencher/src/common.rs
Normal file
336
apps/aquatic/crates/bencher/src/common.rs
Normal file
@ -0,0 +1,336 @@
|
||||
use std::{fmt::Display, ops::Range, thread::available_parallelism};
|
||||
|
||||
use itertools::Itertools;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, clap::ValueEnum)]
|
||||
pub enum Priority {
|
||||
Low,
|
||||
Medium,
|
||||
High,
|
||||
}
|
||||
|
||||
impl Display for Priority {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Low => f.write_str("low"),
|
||||
Self::Medium => f.write_str("medium"),
|
||||
Self::High => f.write_str("high"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TaskSetCpuList(pub Vec<TaskSetCpuIndicator>);
|
||||
|
||||
impl TaskSetCpuList {
|
||||
pub fn as_cpu_list(&self) -> String {
|
||||
let indicator = self.0.iter().map(|indicator| match indicator {
|
||||
TaskSetCpuIndicator::Single(i) => i.to_string(),
|
||||
TaskSetCpuIndicator::Range(range) => {
|
||||
format!("{}-{}", range.start, range.clone().last().unwrap())
|
||||
}
|
||||
});
|
||||
|
||||
Itertools::intersperse_with(indicator, || ",".to_string()).collect()
|
||||
}
|
||||
|
||||
pub fn new(
|
||||
mode: CpuMode,
|
||||
direction: CpuDirection,
|
||||
requested_cpus: usize,
|
||||
) -> anyhow::Result<Self> {
|
||||
let available_parallelism: usize = available_parallelism()?.into();
|
||||
|
||||
Ok(Self::new_with_available_parallelism(
|
||||
available_parallelism,
|
||||
mode,
|
||||
direction,
|
||||
requested_cpus,
|
||||
))
|
||||
}
|
||||
|
||||
fn new_with_available_parallelism(
|
||||
available_parallelism: usize,
|
||||
mode: CpuMode,
|
||||
direction: CpuDirection,
|
||||
requested_cpus: usize,
|
||||
) -> Self {
|
||||
match direction {
|
||||
CpuDirection::Asc => match mode {
|
||||
CpuMode::Subsequent => {
|
||||
let range = 0..(available_parallelism.min(requested_cpus));
|
||||
|
||||
Self(vec![range.try_into().unwrap()])
|
||||
}
|
||||
CpuMode::SplitPairs => {
|
||||
let middle = available_parallelism / 2;
|
||||
|
||||
let range_a = 0..(middle.min(requested_cpus));
|
||||
let range_b = middle..(available_parallelism.min(middle + requested_cpus));
|
||||
|
||||
Self(vec![
|
||||
range_a.try_into().unwrap(),
|
||||
range_b.try_into().unwrap(),
|
||||
])
|
||||
}
|
||||
CpuMode::SubsequentPairs => {
|
||||
let range = 0..(available_parallelism.min(requested_cpus * 2));
|
||||
|
||||
Self(vec![range.try_into().unwrap()])
|
||||
}
|
||||
CpuMode::SubsequentOnePerPair => {
|
||||
let range = 0..(available_parallelism.min(requested_cpus * 2));
|
||||
|
||||
Self(
|
||||
range
|
||||
.chunks(2)
|
||||
.into_iter()
|
||||
.map(|mut chunk| TaskSetCpuIndicator::Single(chunk.next().unwrap()))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
},
|
||||
CpuDirection::Desc => match mode {
|
||||
CpuMode::Subsequent => {
|
||||
let range =
|
||||
available_parallelism.saturating_sub(requested_cpus)..available_parallelism;
|
||||
|
||||
Self(vec![range.try_into().unwrap()])
|
||||
}
|
||||
CpuMode::SplitPairs => {
|
||||
let middle = available_parallelism / 2;
|
||||
|
||||
let range_a = middle.saturating_sub(requested_cpus)..middle;
|
||||
let range_b = available_parallelism
|
||||
.saturating_sub(requested_cpus)
|
||||
.max(middle)..available_parallelism;
|
||||
|
||||
Self(vec![
|
||||
range_a.try_into().unwrap(),
|
||||
range_b.try_into().unwrap(),
|
||||
])
|
||||
}
|
||||
CpuMode::SubsequentPairs => {
|
||||
let range = available_parallelism.saturating_sub(requested_cpus * 2)
|
||||
..available_parallelism;
|
||||
|
||||
Self(vec![range.try_into().unwrap()])
|
||||
}
|
||||
CpuMode::SubsequentOnePerPair => {
|
||||
let range = available_parallelism.saturating_sub(requested_cpus * 2)
|
||||
..available_parallelism;
|
||||
|
||||
Self(
|
||||
range
|
||||
.chunks(2)
|
||||
.into_iter()
|
||||
.map(|mut chunk| TaskSetCpuIndicator::Single(chunk.next().unwrap()))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Vec<Range<usize>>> for TaskSetCpuList {
|
||||
type Error = String;
|
||||
|
||||
fn try_from(value: Vec<Range<usize>>) -> Result<Self, Self::Error> {
|
||||
let mut output = Vec::new();
|
||||
|
||||
for range in value {
|
||||
output.push(range.try_into()?);
|
||||
}
|
||||
|
||||
Ok(Self(output))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum TaskSetCpuIndicator {
|
||||
Single(usize),
|
||||
Range(Range<usize>),
|
||||
}
|
||||
|
||||
impl TryFrom<Range<usize>> for TaskSetCpuIndicator {
|
||||
type Error = String;
|
||||
|
||||
fn try_from(value: Range<usize>) -> Result<Self, Self::Error> {
|
||||
match value.len() {
|
||||
0 => Err("Empty ranges not supported".into()),
|
||||
1 => Ok(TaskSetCpuIndicator::Single(value.start)),
|
||||
_ => Ok(TaskSetCpuIndicator::Range(value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, clap::ValueEnum)]
|
||||
pub enum CpuMode {
|
||||
/// Suitable for bare-metal machines without hyperthreads/SMT.
|
||||
///
|
||||
/// For 8 vCPU processor, uses vCPU groups 0, 1, 2, 3, 4, 5, 6 and 7
|
||||
Subsequent,
|
||||
/// Suitable for bare-metal machines with hyperthreads/SMT.
|
||||
///
|
||||
/// For 8 vCPU processor, uses vCPU groups 0 & 4, 1 & 5, 2 & 6 and 3 & 7
|
||||
SplitPairs,
|
||||
/// For 8 vCPU processor, uses vCPU groups 0 & 1, 2 & 3, 4 & 5 and 6 & 7
|
||||
SubsequentPairs,
|
||||
/// Suitable for somewhat fairly comparing trackers on Hetzner virtual
|
||||
/// machines. Since in-VM hyperthreads aren't really hyperthreads,
|
||||
/// enabling them causes unpredictable performance.
|
||||
///
|
||||
/// For 8 vCPU processor, uses vCPU groups 0, 2, 4 and 6
|
||||
SubsequentOnePerPair,
|
||||
}
|
||||
|
||||
impl Display for CpuMode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Subsequent => f.write_str("subsequent"),
|
||||
Self::SplitPairs => f.write_str("split-pairs"),
|
||||
Self::SubsequentPairs => f.write_str("subsequent-pairs"),
|
||||
Self::SubsequentOnePerPair => f.write_str("subsequent-one-per-pair"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum CpuDirection {
|
||||
Asc,
|
||||
Desc,
|
||||
}
|
||||
|
||||
pub fn simple_load_test_runs(
|
||||
cpu_mode: CpuMode,
|
||||
workers: &[(usize, Priority)],
|
||||
) -> Vec<(usize, Priority, TaskSetCpuList)> {
|
||||
workers
|
||||
.iter()
|
||||
.copied()
|
||||
.map(|(workers, priority)| {
|
||||
(
|
||||
workers,
|
||||
priority,
|
||||
TaskSetCpuList::new(cpu_mode, CpuDirection::Desc, workers).unwrap(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_task_set_cpu_list_split_pairs_asc() {
|
||||
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::SplitPairs;
        let direction = CpuDirection::Asc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "0,4");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "0-1,4-5");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0-3,4-7");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-3,4-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-3,4-7");
    }

    #[test]
    fn test_task_set_cpu_list_split_pairs_desc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::SplitPairs;
        let direction = CpuDirection::Desc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "3,7");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "2-3,6-7");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0-3,4-7");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-3,4-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-3,4-7");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_asc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::Subsequent;
        let direction = CpuDirection::Asc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "0");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "0-1");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0-3");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-7");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_desc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::Subsequent;
        let direction = CpuDirection::Desc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "7");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "6-7");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "4-7");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-7");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_pairs_asc() {
        let f = TaskSetCpuList::new_with_available_parallelism;
        let mode = CpuMode::SubsequentPairs;
        let direction = CpuDirection::Asc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "0-1");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "0-3");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-7");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_pairs_desc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::SubsequentPairs;
        let direction = CpuDirection::Desc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "6-7");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "4-7");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0-7");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0-7");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_one_per_pair_asc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::SubsequentOnePerPair;
        let direction = CpuDirection::Asc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "0");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "0,2");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0,2,4,6");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0,2,4,6");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0,2,4,6");
    }

    #[test]
    fn test_task_set_cpu_list_subsequent_one_per_pair_desc() {
        let f = TaskSetCpuList::new_with_available_parallelism;

        let mode = CpuMode::SubsequentOnePerPair;
        let direction = CpuDirection::Desc;

        assert_eq!(f(8, mode, direction, 1).as_cpu_list(), "6");
        assert_eq!(f(8, mode, direction, 2).as_cpu_list(), "4,6");
        assert_eq!(f(8, mode, direction, 4).as_cpu_list(), "0,2,4,6");
        assert_eq!(f(8, mode, direction, 8).as_cpu_list(), "0,2,4,6");
        assert_eq!(f(8, mode, direction, 9).as_cpu_list(), "0,2,4,6");
    }
}
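Note on the assertions above: they encode the layout where, on an 8-vCPU host, vCPUs 0-3 and 4-7 are assumed to be the hyperthread siblings of the same four physical cores, which is why CpuMode::SplitPairs hands out one vCPU from each half ("0,4" for a single core). A minimal sketch of how such a list is consumed (hypothetical helper, not part of this commit; the runner code later in this diff does the same thing inline via taskset(1)):

use std::io;
use std::process::{Child, Command};

// Spawn `program` pinned to the given vCPUs, e.g. cpu_list = "0,4"
fn spawn_pinned(cpu_list: &str, program: &str) -> io::Result<Child> {
    Command::new("taskset")
        .arg("--cpu-list")
        .arg(cpu_list)
        .arg(program)
        .spawn()
}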
230
apps/aquatic/crates/bencher/src/html.rs
Normal file
@ -0,0 +1,230 @@
use humanize_bytes::humanize_bytes_binary;
use indexmap::{IndexMap, IndexSet};
use indoc::formatdoc;
use itertools::Itertools;
use num_format::{Locale, ToFormattedString};

use crate::{
    run::ProcessStats,
    set::{LoadTestRunResults, TrackerCoreCountResults},
};

pub fn html_best_results(results: &[TrackerCoreCountResults]) -> String {
    let mut all_implementation_names = IndexSet::new();

    for core_count_results in results {
        all_implementation_names.extend(
            core_count_results
                .implementations
                .iter()
                .map(|r| r.name.clone()),
        );
    }

    let mut data_rows = Vec::new();

    for core_count_results in results {
        let best_results = core_count_results
            .implementations
            .iter()
            .map(|implementation| (implementation.name.clone(), implementation.best_result()))
            .collect::<IndexMap<_, _>>();

        let best_results_for_all_implementations = all_implementation_names
            .iter()
            .map(|name| best_results.get(name).cloned().flatten())
            .collect::<Vec<_>>();

        let data_row = format!(
            "
            <tr>
                <th>{}</th>
                {}
            </tr>
            ",
            core_count_results.core_count,
            best_results_for_all_implementations
                .into_iter()
                .map(|result| {
                    if let Some(r) = result {
                        format!(
                            r#"<td><span title="{}, avg cpu utilization: {}%">{}</span></td>"#,
                            r.tracker_info,
                            r.tracker_process_stats.avg_cpu_utilization,
                            r.average_responses.to_formatted_string(&Locale::en),
                        )
                    } else {
                        "<td>-</td>".to_string()
                    }
                })
                .join("\n"),
        );

        data_rows.push(data_row);
    }

    format!(
        "
        <h2>Best results</h2>
        <table>
            <thead>
                <tr>
                    <th>CPU cores</th>
                    {}
                </tr>
            </thead>
            <tbody>
                {}
            </tbody>
        </table>
        ",
        all_implementation_names
            .iter()
            .map(|name| format!("<th>{name}</th>"))
            .join("\n"),
        data_rows.join("\n")
    )
}

pub fn html_all_runs(all_results: &[TrackerCoreCountResults]) -> String {
    let mut all_implementation_names = IndexSet::new();

    for core_count_results in all_results {
        all_implementation_names.extend(
            core_count_results
                .implementations
                .iter()
                .map(|r| r.name.clone()),
        );
    }

    struct R {
        core_count: usize,
        avg_responses: Option<u64>,
        tracker_keys: IndexMap<String, String>,
        tracker_vcpus: String,
        tracker_stats: Option<ProcessStats>,
        load_test_keys: IndexMap<String, String>,
        load_test_vcpus: String,
    }

    let mut output = String::new();

    let mut results_by_implementation: IndexMap<String, Vec<R>> = Default::default();

    for implementation_name in all_implementation_names {
        let results = results_by_implementation
            .entry(implementation_name.clone())
            .or_default();

        let mut tracker_key_names: IndexSet<String> = Default::default();
        let mut load_test_key_names: IndexSet<String> = Default::default();

        for r in all_results {
            for i in r
                .implementations
                .iter()
                .filter(|i| i.name == implementation_name)
            {
                for c in i.configurations.iter() {
                    for l in c.load_tests.iter() {
                        match l {
                            LoadTestRunResults::Success(l) => {
                                tracker_key_names.extend(l.tracker_keys.keys().cloned());
                                load_test_key_names.extend(l.load_test_keys.keys().cloned());

                                results.push(R {
                                    core_count: r.core_count,
                                    avg_responses: Some(l.average_responses),
                                    tracker_keys: l.tracker_keys.clone(),
                                    tracker_vcpus: l.tracker_vcpus.as_cpu_list(),
                                    tracker_stats: Some(l.tracker_process_stats),
                                    load_test_keys: l.load_test_keys.clone(),
                                    load_test_vcpus: l.load_test_vcpus.as_cpu_list(),
                                })
                            }
                            LoadTestRunResults::Failure(l) => {
                                tracker_key_names.extend(l.tracker_keys.keys().cloned());
                                load_test_key_names.extend(l.load_test_keys.keys().cloned());

                                results.push(R {
                                    core_count: r.core_count,
                                    avg_responses: None,
                                    tracker_keys: l.tracker_keys.clone(),
                                    tracker_vcpus: l.tracker_vcpus.as_cpu_list(),
                                    tracker_stats: None,
                                    load_test_keys: l.load_test_keys.clone(),
                                    load_test_vcpus: l.load_test_vcpus.as_cpu_list(),
                                })
                            }
                        }
                    }
                }
            }
        }

        output.push_str(&formatdoc! {
            "
            <h2>Results for {implementation}</h2>
            <table>
                <thead>
                    <tr>
                        <th>Cores</th>
                        <th>Responses</th>
                        {tracker_key_names}
                        <th>Tracker avg CPU</th>
                        <th>Tracker peak RSS</th>
                        <th>Tracker vCPUs</th>
                        {load_test_key_names}
                        <th>Load test vCPUs</th>
                    </tr>
                </thead>
                <tbody>
                    {body}
                </tbody>
            </table>
            ",
            implementation = implementation_name,
            tracker_key_names = tracker_key_names.iter()
                .map(|name| format!("<th>{}</th>", name))
                .join("\n"),
            load_test_key_names = load_test_key_names.iter()
                .map(|name| format!("<th>Load test {}</th>", name))
                .join("\n"),
            body = results.iter_mut().map(|r| {
                formatdoc! {
                    "
                    <tr>
                        <td>{cores}</td>
                        <td>{avg_responses}</td>
                        {tracker_key_values}
                        <td>{cpu}%</td>
                        <td>{mem}</td>
                        <td>{tracker_vcpus}</td>
                        {load_test_key_values}
                        <td>{load_test_vcpus}</td>
                    </tr>
                    ",
                    cores = r.core_count,
                    avg_responses = r.avg_responses.map(|v| v.to_formatted_string(&Locale::en))
                        .unwrap_or_else(|| "-".to_string()),
                    tracker_key_values = tracker_key_names.iter().map(|name| {
                        format!("<td>{}</td>", r.tracker_keys.get(name).cloned().unwrap_or_else(|| "-".to_string()))
                    }).join("\n"),
                    cpu = r.tracker_stats.map(|stats| stats.avg_cpu_utilization.to_string())
                        .unwrap_or_else(|| "-".to_string()),
                    mem = r.tracker_stats
                        .map(|stats| humanize_bytes_binary!(stats.peak_rss_bytes).to_string())
                        .unwrap_or_else(|| "-".to_string()),
                    tracker_vcpus = r.tracker_vcpus,
                    load_test_key_values = load_test_key_names.iter().map(|name| {
                        format!("<td>{}</td>", r.load_test_keys.get(name).cloned().unwrap_or_else(|| "-".to_string()))
                    }).join("\n"),
                    load_test_vcpus = r.load_test_vcpus,
                }
            }).join("\n")
        });
    }

    output
}
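For a successful run, the cell markup produced by html_best_results above carries the run details in a hover tooltip; with the key/value pairs used elsewhere in this diff it would render roughly as (illustrative values, not taken from a real run):

<td><span title="socket workers: 2, avg cpu utilization: 187.3%">1,234,567</span></td>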
71
apps/aquatic/crates/bencher/src/main.rs
Normal file
@ -0,0 +1,71 @@
pub mod common;
pub mod html;
pub mod protocols;
pub mod run;
pub mod set;

use clap::{Parser, Subcommand};
use common::{CpuMode, Priority};
use set::run_sets;

#[derive(Parser)]
#[command(author, version, about)]
struct Args {
    /// How to choose which virtual CPUs to allow trackers and load test
    /// executables on
    #[arg(long, default_value_t = CpuMode::SplitPairs)]
    cpu_mode: CpuMode,
    /// Minimum number of tracker cpu cores to run benchmarks for
    #[arg(long)]
    min_cores: Option<usize>,
    /// Maximum number of tracker cpu cores to run benchmarks for
    #[arg(long)]
    max_cores: Option<usize>,
    /// Minimum benchmark priority
    #[arg(long, default_value_t = Priority::Medium)]
    min_priority: Priority,
    /// How long to run each load test for
    #[arg(long, default_value_t = 30)]
    duration: usize,
    /// Only include data for last N seconds of load test runs.
    ///
    /// Useful if the tracker/load tester combination is slow at reaching
    /// maximum throughput
    ///
    /// 0 = use data for whole run
    #[arg(long, default_value_t = 0)]
    summarize_last: usize,
    #[command(subcommand)]
    command: Command,
}

#[derive(Subcommand)]
enum Command {
    /// Benchmark UDP BitTorrent trackers aquatic_udp, opentracker, chihaya and torrust-tracker
    #[cfg(feature = "udp")]
    Udp(protocols::udp::UdpCommand),
}

fn main() {
    let args = Args::parse();

    match args.command {
        #[cfg(feature = "udp")]
        Command::Udp(command) => {
            let sets = command.sets(args.cpu_mode);
            let load_test_gen = protocols::udp::UdpCommand::load_test_gen;

            run_sets(
                &command,
                args.cpu_mode,
                args.min_cores,
                args.max_cores,
                args.min_priority,
                args.duration,
                args.summarize_last,
                sets,
                load_test_gen,
            );
        }
    }
}
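Given the derives above, these fields map one-to-one onto clap long options, so a bencher invocation looks roughly like the following (binary name and kebab-case value spellings are assumptions based on clap's defaults, not confirmed by this diff):

bencher --cpu-mode split-pairs --min-priority medium --duration 30 udp

with per-protocol options such as --aquatic and --opentracker accepted after the udp subcommand.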
2
apps/aquatic/crates/bencher/src/protocols/mod.rs
Normal file
@ -0,0 +1,2 @@
#[cfg(feature = "udp")]
pub mod udp;
558
apps/aquatic/crates/bencher/src/protocols/udp.rs
Normal file
@ -0,0 +1,558 @@
use std::{
    io::Write,
    net::{Ipv4Addr, SocketAddr, SocketAddrV4},
    path::PathBuf,
    process::{Child, Command, Stdio},
    rc::Rc,
};

use clap::Parser;
use indexmap::{indexmap, IndexMap};
use indoc::writedoc;
use tempfile::NamedTempFile;

use crate::{
    common::{simple_load_test_runs, CpuMode, Priority, TaskSetCpuList},
    run::ProcessRunner,
    set::{LoadTestRunnerParameters, SetConfig, Tracker},
};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum UdpTracker {
    Aquatic,
    AquaticIoUring,
    OpenTracker,
    Chihaya,
    TorrustTracker,
}

impl Tracker for UdpTracker {
    fn name(&self) -> String {
        match self {
            Self::Aquatic => "aquatic_udp".into(),
            Self::AquaticIoUring => "aquatic_udp (io_uring)".into(),
            Self::OpenTracker => "opentracker".into(),
            Self::Chihaya => "chihaya".into(),
            Self::TorrustTracker => "torrust-tracker".into(),
        }
    }
}

#[derive(Parser, Debug)]
pub struct UdpCommand {
    /// Path to aquatic_udp_load_test binary
    #[arg(long, default_value = "./target/release-debug/aquatic_udp_load_test")]
    load_test: PathBuf,
    /// Path to aquatic_udp binary
    #[arg(long, default_value = "./target/release-debug/aquatic_udp")]
    aquatic: PathBuf,
    /// Path to opentracker binary
    #[arg(long, default_value = "opentracker")]
    opentracker: PathBuf,
    /// Path to chihaya binary
    #[arg(long, default_value = "chihaya")]
    chihaya: PathBuf,
    /// Path to torrust-tracker binary
    #[arg(long, default_value = "torrust-tracker")]
    torrust_tracker: PathBuf,
}

impl UdpCommand {
    pub fn sets(&self, cpu_mode: CpuMode) -> IndexMap<usize, SetConfig<UdpCommand, UdpTracker>> {
        // Priorities are based on what has previously produced the best results
        indexmap::indexmap! {
            1 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(1, Priority::High),
                        // Allow running two workers per core for aquatic and
                        // opentracker. Skip this priority if testing on a
                        // virtual machine
                        AquaticUdpRunner::with_mio(2, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(1, Priority::High),
                        AquaticUdpRunner::with_io_uring(2, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(0, Priority::Medium), // Handle requests within event loop
                        OpenTrackerUdpRunner::new(1, Priority::High),
                        OpenTrackerUdpRunner::new(2, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High)
                ]),
            },
            2 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(2, Priority::High),
                        AquaticUdpRunner::with_mio(4, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(2, Priority::High),
                        AquaticUdpRunner::with_io_uring(4, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(2, Priority::High),
                        OpenTrackerUdpRunner::new(4, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High),
                ]),
            },
            4 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(4, Priority::High),
                        AquaticUdpRunner::with_mio(8, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(4, Priority::High),
                        AquaticUdpRunner::with_io_uring(8, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(4, Priority::High),
                        OpenTrackerUdpRunner::new(8, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High),
                ]),
            },
            6 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(6, Priority::High),
                        AquaticUdpRunner::with_mio(12, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(6, Priority::High),
                        AquaticUdpRunner::with_io_uring(12, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(6, Priority::High),
                        OpenTrackerUdpRunner::new(12, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High),
                ]),
            },
            8 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(8, Priority::High),
                        AquaticUdpRunner::with_mio(16, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(8, Priority::High),
                        AquaticUdpRunner::with_io_uring(16, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(8, Priority::High),
                        OpenTrackerUdpRunner::new(16, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High),
                ]),
            },
            12 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(12, Priority::High),
                        AquaticUdpRunner::with_mio(24, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(12, Priority::High),
                        AquaticUdpRunner::with_io_uring(24, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(12, Priority::High),
                        OpenTrackerUdpRunner::new(24, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::Medium),
                    (12, Priority::High),
                ]),
            },
            16 => SetConfig {
                implementations: indexmap! {
                    UdpTracker::Aquatic => vec![
                        AquaticUdpRunner::with_mio(16, Priority::High),
                        AquaticUdpRunner::with_mio(32, Priority::Low),
                    ],
                    UdpTracker::AquaticIoUring => vec![
                        AquaticUdpRunner::with_io_uring(16, Priority::High),
                        AquaticUdpRunner::with_io_uring(32, Priority::Low),
                    ],
                    UdpTracker::OpenTracker => vec![
                        OpenTrackerUdpRunner::new(16, Priority::High),
                        OpenTrackerUdpRunner::new(32, Priority::Low),
                    ],
                    UdpTracker::Chihaya => vec![
                        ChihayaUdpRunner::new(),
                    ],
                    UdpTracker::TorrustTracker => vec![
                        TorrustTrackerUdpRunner::new(),
                    ],
                },
                load_test_runs: simple_load_test_runs(cpu_mode, &[
                    (8, Priority::High),
                    (12, Priority::High),
                ]),
            },
        }
    }

    pub fn load_test_gen(
        parameters: LoadTestRunnerParameters,
    ) -> Box<dyn ProcessRunner<Command = UdpCommand>> {
        Box::new(AquaticUdpLoadTestRunner { parameters })
    }
}

#[derive(Debug, Clone)]
struct AquaticUdpRunner {
    socket_workers: usize,
    use_io_uring: bool,
    priority: Priority,
}

impl AquaticUdpRunner {
    fn with_mio(
        socket_workers: usize,
        priority: Priority,
    ) -> Rc<dyn ProcessRunner<Command = UdpCommand>> {
        Rc::new(Self {
            socket_workers,
            use_io_uring: false,
            priority,
        })
    }
    fn with_io_uring(
        socket_workers: usize,
        priority: Priority,
    ) -> Rc<dyn ProcessRunner<Command = UdpCommand>> {
        Rc::new(Self {
            socket_workers,
            use_io_uring: true,
            priority,
        })
    }
}

impl ProcessRunner for AquaticUdpRunner {
    type Command = UdpCommand;

    #[allow(clippy::field_reassign_with_default)]
    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child> {
        let mut c = aquatic_udp::config::Config::default();

        c.socket_workers = self.socket_workers;
        c.network.address_ipv4 = SocketAddrV4::new(Ipv4Addr::LOCALHOST, 3000);
        c.network.use_ipv6 = false;
        c.network.use_io_uring = self.use_io_uring;
        c.protocol.max_response_peers = 30;

        let c = toml::to_string_pretty(&c)?;

        tmp_file.write_all(c.as_bytes())?;

        Ok(Command::new("taskset")
            .arg("--cpu-list")
            .arg(vcpus.as_cpu_list())
            .arg(&command.aquatic)
            .arg("-c")
            .arg(tmp_file.path())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?)
    }

    fn priority(&self) -> crate::common::Priority {
        self.priority
    }

    fn keys(&self) -> IndexMap<String, String> {
        indexmap! {
            "socket workers".to_string() => self.socket_workers.to_string(),
        }
    }
}

#[derive(Debug, Clone)]
struct OpenTrackerUdpRunner {
    workers: usize,
    priority: Priority,
}

impl OpenTrackerUdpRunner {
    #[allow(clippy::new_ret_no_self)]
    fn new(workers: usize, priority: Priority) -> Rc<dyn ProcessRunner<Command = UdpCommand>> {
        Rc::new(Self { workers, priority })
    }
}

impl ProcessRunner for OpenTrackerUdpRunner {
    type Command = UdpCommand;

    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child> {
        writeln!(
            tmp_file,
            "listen.udp.workers {}\nlisten.udp 127.0.0.1:3000",
            self.workers
        )?;

        Ok(Command::new("taskset")
            .arg("--cpu-list")
            .arg(vcpus.as_cpu_list())
            .arg(&command.opentracker)
            .arg("-f")
            .arg(tmp_file.path())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?)
    }

    fn priority(&self) -> crate::common::Priority {
        self.priority
    }

    fn keys(&self) -> IndexMap<String, String> {
        indexmap! {
            "workers".to_string() => self.workers.to_string(),
        }
    }
}

#[derive(Debug, Clone)]
struct ChihayaUdpRunner;

impl ChihayaUdpRunner {
    #[allow(clippy::new_ret_no_self)]
    fn new() -> Rc<dyn ProcessRunner<Command = UdpCommand>> {
        Rc::new(Self {})
    }
}

impl ProcessRunner for ChihayaUdpRunner {
    type Command = UdpCommand;

    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child> {
        writedoc!(
            tmp_file,
            r#"
            ---
            chihaya:
              metrics_addr: "127.0.0.1:0"
              udp:
                addr: "127.0.0.1:3000"
                private_key: "abcdefghijklmnopqrst"
                max_numwant: 30
                default_numwant: 30
              storage:
                name: "memory"
            "#,
        )?;

        Ok(Command::new("taskset")
            .arg("--cpu-list")
            .arg(vcpus.as_cpu_list())
            .arg(&command.chihaya)
            .arg("--config")
            .arg(tmp_file.path())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?)
    }

    fn priority(&self) -> crate::common::Priority {
        Priority::High
    }

    fn keys(&self) -> IndexMap<String, String> {
        Default::default()
    }
}

#[derive(Debug, Clone)]
struct TorrustTrackerUdpRunner;

impl TorrustTrackerUdpRunner {
    #[allow(clippy::new_ret_no_self)]
    fn new() -> Rc<dyn ProcessRunner<Command = UdpCommand>> {
        Rc::new(Self {})
    }
}

impl ProcessRunner for TorrustTrackerUdpRunner {
    type Command = UdpCommand;

    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child> {
        writedoc!(
            tmp_file,
            r#"
            [metadata]
            schema_version = "2.0.0"

            [logging]
            threshold = "error"

            [core]
            listed = false
            private = false
            tracker_usage_statistics = false

            [core.database]
            driver = "sqlite3"
            path = "./sqlite3.db"

            [core.tracker_policy]
            persistent_torrent_completed_stat = false
            remove_peerless_torrents = false

            [[udp_trackers]]
            bind_address = "0.0.0.0:3000"
            "#,
        )?;

        Ok(Command::new("taskset")
            .arg("--cpu-list")
            .arg(vcpus.as_cpu_list())
            .env("TORRUST_TRACKER_CONFIG_TOML_PATH", tmp_file.path())
            .arg(&command.torrust_tracker)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?)
    }

    fn priority(&self) -> crate::common::Priority {
        Priority::High
    }

    fn keys(&self) -> IndexMap<String, String> {
        Default::default()
    }
}

#[derive(Debug, Clone)]
struct AquaticUdpLoadTestRunner {
    parameters: LoadTestRunnerParameters,
}

impl ProcessRunner for AquaticUdpLoadTestRunner {
    type Command = UdpCommand;

    #[allow(clippy::field_reassign_with_default)]
    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child> {
        let mut c = aquatic_udp_load_test::config::Config::default();

        c.workers = self.parameters.workers as u8;
        c.duration = self.parameters.duration;
        c.summarize_last = self.parameters.summarize_last;

        c.extra_statistics = false;

        c.requests.announce_peers_wanted = 30;
        c.requests.weight_connect = 0;
        c.requests.weight_announce = 100;
        c.requests.weight_scrape = 1;

        let c = toml::to_string_pretty(&c)?;

        tmp_file.write_all(c.as_bytes())?;

        Ok(Command::new("taskset")
            .arg("--cpu-list")
            .arg(vcpus.as_cpu_list())
            .arg(&command.load_test)
            .arg("-c")
            .arg(tmp_file.path())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?)
    }

    fn priority(&self) -> crate::common::Priority {
        eprintln!("load test runner priority method called");

        Priority::High
    }

    fn keys(&self) -> IndexMap<String, String> {
        indexmap! {
            "workers".to_string() => self.parameters.workers.to_string(),
        }
    }
}
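For reference, the tracker config file that AquaticUdpRunner writes is the TOML serialization of aquatic_udp's default config with the overrides set above; abridged, it would come out roughly as follows (illustrative, with socket_workers = 2; all remaining fields keep their defaults):

socket_workers = 2

[network]
address_ipv4 = "127.0.0.1:3000"
use_ipv6 = false
use_io_uring = false

[protocol]
max_response_peers = 30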
374
apps/aquatic/crates/bencher/src/run.rs
Normal file
@ -0,0 +1,374 @@
use std::{
    process::{Child, Command},
    rc::Rc,
    str::FromStr,
    time::Duration,
};

use indexmap::IndexMap;
use itertools::Itertools;
use nonblock::NonBlockingReader;
use once_cell::sync::Lazy;
use regex::Regex;
use tempfile::NamedTempFile;

use crate::common::{Priority, TaskSetCpuList};

pub trait ProcessRunner: ::std::fmt::Debug {
    type Command;

    fn run(
        &self,
        command: &Self::Command,
        vcpus: &TaskSetCpuList,
        tmp_file: &mut NamedTempFile,
    ) -> anyhow::Result<Child>;

    fn keys(&self) -> IndexMap<String, String>;

    fn priority(&self) -> Priority;

    fn info(&self) -> String {
        self.keys()
            .into_iter()
            .map(|(k, v)| format!("{}: {}", k, v))
            .join(", ")
    }
}

#[derive(Debug)]
pub struct RunConfig<C> {
    pub tracker_runner: Rc<dyn ProcessRunner<Command = C>>,
    pub tracker_vcpus: TaskSetCpuList,
    pub load_test_runner: Box<dyn ProcessRunner<Command = C>>,
    pub load_test_vcpus: TaskSetCpuList,
}

impl<C> RunConfig<C> {
    pub fn run(
        self,
        command: &C,
        duration: usize,
    ) -> Result<RunSuccessResults, RunErrorResults<C>> {
        let mut tracker_config_file = NamedTempFile::new().unwrap();
        let mut load_test_config_file = NamedTempFile::new().unwrap();

        let mut tracker =
            match self
                .tracker_runner
                .run(command, &self.tracker_vcpus, &mut tracker_config_file)
            {
                Ok(handle) => ChildWrapper(handle),
                Err(err) => return Err(RunErrorResults::new(self).set_error(err, "run tracker")),
            };

        ::std::thread::sleep(Duration::from_secs(1));

        let mut load_tester = match self.load_test_runner.run(
            command,
            &self.load_test_vcpus,
            &mut load_test_config_file,
        ) {
            Ok(handle) => ChildWrapper(handle),
            Err(err) => {
                return Err(RunErrorResults::new(self)
                    .set_error(err, "run load test")
                    .set_tracker_outputs(tracker))
            }
        };

        for _ in 0..(duration - 1) {
            if let Ok(Some(status)) = tracker.0.try_wait() {
                return Err(RunErrorResults::new(self)
                    .set_tracker_outputs(tracker)
                    .set_load_test_outputs(load_tester)
                    .set_error_context(&format!("tracker exited with {}", status)));
            }

            ::std::thread::sleep(Duration::from_secs(1));
        }

        // Note: a more advanced version tracking threads too would add argument
        // "-L" and add "comm" to output format list
        let tracker_process_stats_res = Command::new("ps")
            .arg("-p")
            .arg(tracker.0.id().to_string())
            .arg("-o")
            .arg("%cpu,rss")
            .arg("--noheader")
            .output();

        let tracker_process_stats = match tracker_process_stats_res {
            Ok(output) if output.status.success() => {
                ProcessStats::from_str(&String::from_utf8_lossy(&output.stdout)).unwrap()
            }
            Ok(_) => {
                return Err(RunErrorResults::new(self)
                    .set_error_context("run ps")
                    .set_tracker_outputs(tracker)
                    .set_load_test_outputs(load_tester));
            }
            Err(err) => {
                return Err(RunErrorResults::new(self)
                    .set_error(err.into(), "run ps")
                    .set_tracker_outputs(tracker)
                    .set_load_test_outputs(load_tester));
            }
        };

        ::std::thread::sleep(Duration::from_secs(5));

        let (load_test_stdout, load_test_stderr) = match load_tester.0.try_wait() {
            Ok(Some(status)) if status.success() => read_child_outputs(load_tester),
            Ok(Some(_)) => {
                return Err(RunErrorResults::new(self)
                    .set_error_context("wait for load tester")
                    .set_tracker_outputs(tracker)
                    .set_load_test_outputs(load_tester))
            }
            Ok(None) => {
                if let Err(err) = load_tester.0.kill() {
                    return Err(RunErrorResults::new(self)
                        .set_error(err.into(), "kill load tester")
                        .set_tracker_outputs(tracker)
                        .set_load_test_outputs(load_tester));
                }

                ::std::thread::sleep(Duration::from_secs(1));

                match load_tester.0.try_wait() {
                    Ok(_) => {
                        return Err(RunErrorResults::new(self)
                            .set_error_context("load tester didn't finish in time")
                            .set_load_test_outputs(load_tester))
                    }
                    Err(err) => {
                        return Err(RunErrorResults::new(self)
                            .set_error(err.into(), "wait for load tester after kill")
                            .set_tracker_outputs(tracker));
                    }
                }
            }
            Err(err) => {
                return Err(RunErrorResults::new(self)
                    .set_error(err.into(), "wait for load tester")
                    .set_tracker_outputs(tracker)
                    .set_load_test_outputs(load_tester))
            }
        };

        let load_test_stdout = if let Some(load_test_stdout) = load_test_stdout {
            load_test_stdout
        } else {
            return Err(RunErrorResults::new(self)
                .set_error_context("couldn't read load tester stdout")
                .set_tracker_outputs(tracker)
                .set_load_test_stderr(load_test_stderr));
        };

        let avg_responses = {
            static RE: Lazy<Regex> =
                Lazy::new(|| Regex::new(r"Average responses per second: ([0-9]+)").unwrap());

            let opt_avg_responses = RE
                .captures_iter(&load_test_stdout)
                .next()
                .map(|c| {
                    let (_, [avg_responses]) = c.extract();

                    avg_responses.to_string()
                })
                .and_then(|v| v.parse::<u64>().ok());

            if let Some(avg_responses) = opt_avg_responses {
                avg_responses
            } else {
                return Err(RunErrorResults::new(self)
                    .set_error_context("couldn't extract avg_responses")
                    .set_tracker_outputs(tracker)
                    .set_load_test_stdout(Some(load_test_stdout))
                    .set_load_test_stderr(load_test_stderr));
            }
        };

        let results = RunSuccessResults {
            tracker_process_stats,
            avg_responses,
        };

        Ok(results)
    }
}

pub struct RunSuccessResults {
    pub tracker_process_stats: ProcessStats,
    pub avg_responses: u64,
}

#[derive(Debug)]
pub struct RunErrorResults<C> {
    pub run_config: RunConfig<C>,
    pub tracker_stdout: Option<String>,
    pub tracker_stderr: Option<String>,
    pub load_test_stdout: Option<String>,
    pub load_test_stderr: Option<String>,
    pub error: Option<anyhow::Error>,
    pub error_context: Option<String>,
}

impl<C> RunErrorResults<C> {
    fn new(run_config: RunConfig<C>) -> Self {
        Self {
            run_config,
            tracker_stdout: Default::default(),
            tracker_stderr: Default::default(),
            load_test_stdout: Default::default(),
            load_test_stderr: Default::default(),
            error: Default::default(),
            error_context: Default::default(),
        }
    }

    fn set_tracker_outputs(mut self, tracker: ChildWrapper) -> Self {
        let (stdout, stderr) = read_child_outputs(tracker);

        self.tracker_stdout = stdout;
        self.tracker_stderr = stderr;

        self
    }

    fn set_load_test_outputs(mut self, load_test: ChildWrapper) -> Self {
        let (stdout, stderr) = read_child_outputs(load_test);

        self.load_test_stdout = stdout;
        self.load_test_stderr = stderr;

        self
    }

    fn set_load_test_stdout(mut self, stdout: Option<String>) -> Self {
        self.load_test_stdout = stdout;

        self
    }

    fn set_load_test_stderr(mut self, stderr: Option<String>) -> Self {
        self.load_test_stderr = stderr;

        self
    }

    fn set_error(mut self, error: anyhow::Error, context: &str) -> Self {
        self.error = Some(error);
        self.error_context = Some(context.to_string());

        self
    }

    fn set_error_context(mut self, context: &str) -> Self {
        self.error_context = Some(context.to_string());

        self
    }
}

impl<C> std::fmt::Display for RunErrorResults<C> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(t) = self.error_context.as_ref() {
            writeln!(f, "- {}", t)?;
        }
        if let Some(err) = self.error.as_ref() {
            writeln!(f, "- {:#}", err)?;
        }

        writeln!(f, "- tracker_runner: {:?}", self.run_config.tracker_runner)?;
        writeln!(
            f,
            "- load_test_runner: {:?}",
            self.run_config.load_test_runner
        )?;
        writeln!(
            f,
            "- tracker_vcpus: {}",
            self.run_config.tracker_vcpus.as_cpu_list()
        )?;
        writeln!(
            f,
            "- load_test_vcpus: {}",
            self.run_config.load_test_vcpus.as_cpu_list()
        )?;

        if let Some(t) = self.tracker_stdout.as_ref() {
            writeln!(f, "- tracker stdout:\n```\n{}\n```", t)?;
        }
        if let Some(t) = self.tracker_stderr.as_ref() {
            writeln!(f, "- tracker stderr:\n```\n{}\n```", t)?;
        }
        if let Some(t) = self.load_test_stdout.as_ref() {
            writeln!(f, "- load test stdout:\n```\n{}\n```", t)?;
        }
        if let Some(t) = self.load_test_stderr.as_ref() {
            writeln!(f, "- load test stderr:\n```\n{}\n```", t)?;
        }

        Ok(())
    }
}

#[derive(Debug, Clone, Copy)]
pub struct ProcessStats {
    pub avg_cpu_utilization: f32,
    pub peak_rss_bytes: u64,
}

impl FromStr for ProcessStats {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.split_whitespace();

        let avg_cpu_utilization = parts.next().ok_or(())?.parse().map_err(|_| ())?;
        let peak_rss_kb: f32 = parts.next().ok_or(())?.parse().map_err(|_| ())?;

        Ok(Self {
            avg_cpu_utilization,
            peak_rss_bytes: (peak_rss_kb * 1000.0) as u64,
        })
    }
}

struct ChildWrapper(Child);

impl Drop for ChildWrapper {
    fn drop(&mut self) {
        let _ = self.0.kill();

        ::std::thread::sleep(Duration::from_secs(1));

        let _ = self.0.try_wait();
    }
}

fn read_child_outputs(mut child: ChildWrapper) -> (Option<String>, Option<String>) {
    let stdout = child.0.stdout.take().and_then(|stdout| {
        let mut buf = String::new();

        let mut reader = NonBlockingReader::from_fd(stdout).unwrap();

        reader.read_available_to_string(&mut buf).unwrap();

        (!buf.is_empty()).then_some(buf)
    });
    let stderr = child.0.stderr.take().and_then(|stderr| {
        let mut buf = String::new();

        let mut reader = NonBlockingReader::from_fd(stderr).unwrap();

        reader.read_available_to_string(&mut buf).unwrap();

        (!buf.is_empty()).then_some(buf)
    });

    (stdout, stderr)
}
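The FromStr impl above parses the two columns requested from ps ("%cpu,rss"). A small sketch of the expected round trip (not part of this commit; note that ps reports RSS in kibibytes while the parser scales by 1000, so the stored byte count is a close approximation rather than exact):

#[test]
fn parses_ps_cpu_and_rss_columns() {
    // Typical `ps -p <pid> -o %cpu,rss --noheader` output
    let stats: ProcessStats = " 87.5 204800".parse().unwrap();

    assert_eq!(stats.avg_cpu_utilization, 87.5);
    assert_eq!(stats.peak_rss_bytes, 204_800_000);
}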
290
apps/aquatic/crates/bencher/src/set.rs
Normal file
@ -0,0 +1,290 @@
use std::rc::Rc;

use humanize_bytes::humanize_bytes_binary;
use indexmap::IndexMap;
use num_format::{Locale, ToFormattedString};

use crate::{
    common::{CpuDirection, CpuMode, Priority, TaskSetCpuList},
    html::{html_all_runs, html_best_results},
    run::{ProcessRunner, ProcessStats, RunConfig},
};

#[derive(Debug, Clone, Copy)]
pub struct LoadTestRunnerParameters {
    pub workers: usize,
    pub duration: usize,
    pub summarize_last: usize,
}

pub trait Tracker: ::std::fmt::Debug + Copy + Clone + ::std::hash::Hash {
    fn name(&self) -> String;
}

pub struct SetConfig<C, I> {
    pub implementations: IndexMap<I, Vec<Rc<dyn ProcessRunner<Command = C>>>>,
    pub load_test_runs: Vec<(usize, Priority, TaskSetCpuList)>,
}

#[allow(clippy::too_many_arguments)]
pub fn run_sets<C, F, I>(
    command: &C,
    cpu_mode: CpuMode,
    min_cores: Option<usize>,
    max_cores: Option<usize>,
    min_priority: Priority,
    duration: usize,
    summarize_last: usize,
    mut set_configs: IndexMap<usize, SetConfig<C, I>>,
    load_test_gen: F,
) where
    C: ::std::fmt::Debug,
    I: Tracker,
    F: Fn(LoadTestRunnerParameters) -> Box<dyn ProcessRunner<Command = C>>,
{
    if let Some(min_cores) = min_cores {
        set_configs.retain(|cores, _| *cores >= min_cores);
    }
    if let Some(max_cores) = max_cores {
        set_configs.retain(|cores, _| *cores <= max_cores);
    }

    for set_config in set_configs.values_mut() {
        for runners in set_config.implementations.values_mut() {
            runners.retain(|r| r.priority() >= min_priority);
        }

        set_config
            .load_test_runs
            .retain(|(_, priority, _)| *priority >= min_priority);
    }

    println!("# Benchmark report");

    let total_num_runs = set_configs
        .values()
        .map(|set| {
            set.implementations.values().map(Vec::len).sum::<usize>() * set.load_test_runs.len()
        })
        .sum::<usize>();

    let (estimated_hours, estimated_minutes) = {
        let minutes = (total_num_runs * (duration + 7)) / 60;

        (minutes / 60, minutes % 60)
    };

    println!();
    println!("Total number of load test runs: {}", total_num_runs);
    println!(
        "Estimated duration: {} hours, {} minutes",
        estimated_hours, estimated_minutes
    );
    println!();

    let results = set_configs
        .into_iter()
        .map(|(tracker_core_count, set_config)| {
            let tracker_vcpus =
                TaskSetCpuList::new(cpu_mode, CpuDirection::Asc, tracker_core_count).unwrap();

            println!(
                "## Tracker cores: {} (cpus: {})",
                tracker_core_count,
                tracker_vcpus.as_cpu_list()
            );

            let tracker_results = set_config
                .implementations
                .into_iter()
                .map(|(implementation, tracker_runs)| {
                    let tracker_run_results = tracker_runs
                        .iter()
                        .map(|tracker_run| {
                            let load_test_run_results = set_config
                                .load_test_runs
                                .clone()
                                .into_iter()
                                .map(|(workers, _, load_test_vcpus)| {
                                    let load_test_parameters = LoadTestRunnerParameters {
                                        workers,
                                        duration,
                                        summarize_last,
                                    };
                                    LoadTestRunResults::produce(
                                        command,
                                        &load_test_gen,
                                        load_test_parameters,
                                        implementation,
                                        tracker_run,
                                        tracker_vcpus.clone(),
                                        load_test_vcpus,
                                    )
                                })
                                .collect();

                            TrackerConfigurationResults {
                                load_tests: load_test_run_results,
                            }
                        })
                        .collect();

                    ImplementationResults {
                        name: implementation.name(),
                        configurations: tracker_run_results,
                    }
                })
                .collect();

            TrackerCoreCountResults {
                core_count: tracker_core_count,
                implementations: tracker_results,
            }
        })
        .collect::<Vec<_>>();

    println!("{}", html_all_runs(&results));
    println!("{}", html_best_results(&results));
}

pub struct TrackerCoreCountResults {
    pub core_count: usize,
    pub implementations: Vec<ImplementationResults>,
}

pub struct ImplementationResults {
    pub name: String,
    pub configurations: Vec<TrackerConfigurationResults>,
}

impl ImplementationResults {
    pub fn best_result(&self) -> Option<LoadTestRunResultsSuccess> {
        self.configurations
            .iter()
            .filter_map(|c| c.best_result())
            .reduce(|acc, r| {
                if r.average_responses > acc.average_responses {
                    r
                } else {
                    acc
                }
            })
    }
}

pub struct TrackerConfigurationResults {
    pub load_tests: Vec<LoadTestRunResults>,
}

impl TrackerConfigurationResults {
    fn best_result(&self) -> Option<LoadTestRunResultsSuccess> {
        self.load_tests
            .iter()
            .filter_map(|r| match r {
                LoadTestRunResults::Success(r) => Some(r.clone()),
                LoadTestRunResults::Failure(_) => None,
            })
            .reduce(|acc, r| {
                if r.average_responses > acc.average_responses {
                    r
                } else {
                    acc
                }
            })
    }
}

pub enum LoadTestRunResults {
    Success(LoadTestRunResultsSuccess),
    Failure(LoadTestRunResultsFailure),
}

impl LoadTestRunResults {
    pub fn produce<C, F, I>(
        command: &C,
        load_test_gen: &F,
        load_test_parameters: LoadTestRunnerParameters,
        implementation: I,
        tracker_process: &Rc<dyn ProcessRunner<Command = C>>,
        tracker_vcpus: TaskSetCpuList,
        load_test_vcpus: TaskSetCpuList,
    ) -> Self
    where
        C: ::std::fmt::Debug,
        I: Tracker,
        F: Fn(LoadTestRunnerParameters) -> Box<dyn ProcessRunner<Command = C>>,
    {
        println!(
            "### {} run ({}) (load test workers: {}, cpus: {})",
            implementation.name(),
            tracker_process.info(),
            load_test_parameters.workers,
            load_test_vcpus.as_cpu_list()
        );

        let load_test_runner = load_test_gen(load_test_parameters);
        let load_test_keys = load_test_runner.keys();

        let run_config = RunConfig {
            tracker_runner: tracker_process.clone(),
            tracker_vcpus: tracker_vcpus.clone(),
            load_test_runner,
            load_test_vcpus: load_test_vcpus.clone(),
        };

        match run_config.run(command, load_test_parameters.duration) {
            Ok(r) => {
                println!(
                    "- Average responses per second: {}",
                    r.avg_responses.to_formatted_string(&Locale::en)
                );
                println!(
                    "- Average tracker CPU utilization: {}%",
                    r.tracker_process_stats.avg_cpu_utilization,
                );
                println!(
                    "- Peak tracker RSS: {}",
                    humanize_bytes_binary!(r.tracker_process_stats.peak_rss_bytes)
                );

                LoadTestRunResults::Success(LoadTestRunResultsSuccess {
                    average_responses: r.avg_responses,
                    tracker_keys: tracker_process.keys(),
                    tracker_info: tracker_process.info(),
                    tracker_process_stats: r.tracker_process_stats,
                    tracker_vcpus,
                    load_test_keys,
                    load_test_vcpus,
                })
            }
            Err(results) => {
                println!("\nRun failed:\n{:#}\n", results);

                LoadTestRunResults::Failure(LoadTestRunResultsFailure {
                    tracker_keys: tracker_process.keys(),
                    tracker_vcpus,
                    load_test_keys,
                    load_test_vcpus,
                })
            }
        }
    }
}

#[derive(Clone)]
pub struct LoadTestRunResultsSuccess {
    pub average_responses: u64,
    pub tracker_keys: IndexMap<String, String>,
    pub tracker_info: String,
    pub tracker_process_stats: ProcessStats,
    pub tracker_vcpus: TaskSetCpuList,
    pub load_test_keys: IndexMap<String, String>,
    pub load_test_vcpus: TaskSetCpuList,
}

pub struct LoadTestRunResultsFailure {
    pub tracker_keys: IndexMap<String, String>,
    pub tracker_vcpus: TaskSetCpuList,
    pub load_test_keys: IndexMap<String, String>,
    pub load_test_vcpus: TaskSetCpuList,
}
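The duration estimate above allows duration + 7 seconds per run; the 7 seconds of overhead roughly match the fixed sleeps in run.rs (1 s after tracker startup, 5 s before results are collected, plus process teardown). For example, 100 runs at --duration 30 give 100 × 37 s = 3700 s, which integer division turns into 61 minutes, printed as "Estimated duration: 1 hours, 1 minutes".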
21
apps/aquatic/crates/combined_binary/Cargo.toml
Normal file
@ -0,0 +1,21 @@
[package]
name = "aquatic"
description = "High-performance open BitTorrent tracker (UDP, HTTP, WebTorrent)"
keywords = ["bittorrent", "torrent", "webtorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[[bin]]
name = "aquatic"

[dependencies]
aquatic_common.workspace = true
aquatic_http.workspace = true
aquatic_udp.workspace = true
aquatic_ws.workspace = true
mimalloc = { version = "0.1", default-features = false }
91
apps/aquatic/crates/combined_binary/src/main.rs
Normal file
@ -0,0 +1,91 @@
use aquatic_common::cli::{print_help, run_app_with_cli_and_config, Options};
use aquatic_http::config::Config as HttpConfig;
use aquatic_udp::config::Config as UdpConfig;
use aquatic_ws::config::Config as WsConfig;

#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

const APP_NAME: &str = "aquatic: BitTorrent tracker";

fn main() {
    ::std::process::exit(match run() {
        Ok(()) => 0,
        Err(None) => {
            print_help(gen_info, None);

            0
        }
        Err(opt_err @ Some(_)) => {
            print_help(gen_info, opt_err);

            1
        }
    })
}

fn run() -> Result<(), Option<String>> {
    let mut arg_iter = ::std::env::args().skip(1);

    let protocol = if let Some(protocol) = arg_iter.next() {
        protocol
    } else {
        return Err(None);
    };

    let options = match Options::parse_args(arg_iter) {
        Ok(options) => options,
        Err(opt_err) => {
            return Err(opt_err);
        }
    };

    match protocol.as_str() {
        "udp" => run_app_with_cli_and_config::<UdpConfig>(
            aquatic_udp::APP_NAME,
            aquatic_udp::APP_VERSION,
            aquatic_udp::run,
            Some(options),
        ),
        "http" => run_app_with_cli_and_config::<HttpConfig>(
            aquatic_http::APP_NAME,
            aquatic_http::APP_VERSION,
            aquatic_http::run,
            Some(options),
        ),
        "ws" => run_app_with_cli_and_config::<WsConfig>(
            aquatic_ws::APP_NAME,
            aquatic_ws::APP_VERSION,
            aquatic_ws::run,
            Some(options),
        ),
        arg => {
            let opt_err = if arg == "-h" || arg == "--help" {
                None
            } else if arg.starts_with('-') {
                Some("First argument must be protocol".to_string())
            } else {
                Some("Invalid protocol".to_string())
            };

            return Err(opt_err);
        }
    }

    Ok(())
}

fn gen_info() -> String {
    let mut info = String::new();

    info.push_str(APP_NAME);

    let app_path = ::std::env::args().next().unwrap();
    info.push_str(&format!("\n\nUsage: {} PROTOCOL [OPTIONS]", app_path));
    info.push_str("\n\nAvailable protocols:");
    info.push_str("\n  udp  BitTorrent over UDP");
    info.push_str("\n  http BitTorrent over HTTP");
    info.push_str("\n  ws   WebTorrent");

    info
}
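As gen_info above documents, the combined binary dispatches on its first argument, so it is invoked as "aquatic PROTOCOL [OPTIONS]", e.g. "aquatic udp -c <config.toml>" to start the UDP tracker with a config file (the remaining options, presumably including the config-file flag that populates Options::config_file, are parsed by Options::parse_args in aquatic_common's cli module further down in this diff).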
51
apps/aquatic/crates/common/Cargo.toml
Normal file
@ -0,0 +1,51 @@
[package]
name = "aquatic_common"
description = "aquatic BitTorrent tracker common code"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[lib]
name = "aquatic_common"

[features]
rustls = ["dep:rustls", "rustls-pemfile"]
prometheus = ["dep:metrics", "dep:metrics-util", "dep:metrics-exporter-prometheus", "dep:tokio"]
# Experimental CPU pinning support. Requires hwloc (apt-get install libhwloc-dev)
cpu-pinning = ["dep:hwloc"]

[dependencies]
aquatic_toml_config.workspace = true

ahash = "0.8"
anyhow = "1"
arc-swap = "1"
duplicate = "2"
git-testament = "0.2"
hashbrown = "0.15"
hex = "0.4"
indexmap = "2"
libc = "0.2"
log = "0.4"
privdrop = "0.5"
rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] }
simplelog = { version = "0.12" }
toml = "0.5"

# rustls feature
rustls = { version = "0.23", optional = true }
rustls-pemfile = { version = "2", optional = true }

# prometheus feature
metrics = { version = "0.24", optional = true }
metrics-util = { version = "0.19", optional = true }
metrics-exporter-prometheus = { version = "0.16", optional = true, default-features = false, features = ["http-listener"] }
tokio = { version = "1", optional = true, features = ["rt", "net", "time"] }

# cpu pinning feature
hwloc = { version = "0.5", optional = true }
197
apps/aquatic/crates/common/src/access_list.rs
Normal file
@ -0,0 +1,197 @@
|
||||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use arc_swap::{ArcSwap, Cache};
|
||||
use hashbrown::HashSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Access list mode. Available modes are allow, deny and off.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, TomlConfig, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum AccessListMode {
|
||||
/// Only serve torrents with info hash present in file
|
||||
Allow,
|
||||
/// Do not serve torrents if info hash present in file
|
||||
Deny,
|
||||
/// Turn off access list functionality
|
||||
Off,
|
||||
}
|
||||
|
||||
impl AccessListMode {
|
||||
pub fn is_on(&self) -> bool {
|
||||
!matches!(self, Self::Off)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
|
#[serde(default, deny_unknown_fields)]
pub struct AccessListConfig {
    pub mode: AccessListMode,
    /// Path to access list file consisting of newline-separated hex-encoded info hashes.
    ///
    /// If using chroot mode, path must be relative to new root.
    pub path: PathBuf,
}

impl Default for AccessListConfig {
    fn default() -> Self {
        Self {
            path: "./access-list.txt".into(),
            mode: AccessListMode::Off,
        }
    }
}

#[derive(Default, Clone)]
pub struct AccessList(HashSet<[u8; 20]>);

impl AccessList {
    pub fn insert_from_line(&mut self, line: &str) -> anyhow::Result<()> {
        self.0.insert(parse_info_hash(line)?);

        Ok(())
    }

    pub fn create_from_path(path: &PathBuf) -> anyhow::Result<Self> {
        let file = File::open(path)?;
        let reader = BufReader::new(file);

        let mut new_list = Self::default();

        for line in reader.lines() {
            let line = line?;
            let line = line.trim();

            if line.is_empty() {
                continue;
            }

            new_list
                .insert_from_line(line)
                .with_context(|| format!("Invalid line in access list: {}", line))?;
        }

        Ok(new_list)
    }

    pub fn allows(&self, mode: AccessListMode, info_hash: &[u8; 20]) -> bool {
        match mode {
            AccessListMode::Allow => self.0.contains(info_hash),
            AccessListMode::Deny => !self.0.contains(info_hash),
            AccessListMode::Off => true,
        }
    }

    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.0.len()
    }
}

pub trait AccessListQuery {
    fn update(&self, config: &AccessListConfig) -> anyhow::Result<()>;
    fn allows(&self, list_mode: AccessListMode, info_hash_bytes: &[u8; 20]) -> bool;
}

pub type AccessListArcSwap = ArcSwap<AccessList>;
pub type AccessListCache = Cache<Arc<AccessListArcSwap>, Arc<AccessList>>;

impl AccessListQuery for AccessListArcSwap {
    fn update(&self, config: &AccessListConfig) -> anyhow::Result<()> {
        self.store(Arc::new(AccessList::create_from_path(&config.path)?));

        Ok(())
    }

    fn allows(&self, mode: AccessListMode, info_hash_bytes: &[u8; 20]) -> bool {
        match mode {
            AccessListMode::Allow => self.load().0.contains(info_hash_bytes),
            AccessListMode::Deny => !self.load().0.contains(info_hash_bytes),
            AccessListMode::Off => true,
        }
    }
}

pub fn create_access_list_cache(arc_swap: &Arc<AccessListArcSwap>) -> AccessListCache {
    Cache::from(Arc::clone(arc_swap))
}

pub fn update_access_list(
    config: &AccessListConfig,
    access_list: &Arc<AccessListArcSwap>,
) -> anyhow::Result<()> {
    if config.mode.is_on() {
        match access_list.update(config) {
            Ok(()) => {
                ::log::info!("Access list updated")
            }
            Err(err) => {
                ::log::error!("Updating access list failed: {:#}", err);

                return Err(err);
            }
        }
    }

    Ok(())
}

fn parse_info_hash(line: &str) -> anyhow::Result<[u8; 20]> {
    let mut bytes = [0u8; 20];

    hex::decode_to_slice(line, &mut bytes)?;

    Ok(bytes)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_info_hash() {
        let f = parse_info_hash;

        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeee").is_ok());
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeeef").is_err());
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeee").is_err());
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeeö").is_err());
    }

    #[test]
    fn test_cache_allows() {
        let mut access_list = AccessList::default();

        let a = parse_info_hash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let b = parse_info_hash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap();
        let c = parse_info_hash("cccccccccccccccccccccccccccccccccccccccc").unwrap();

        access_list.0.insert(a);
        access_list.0.insert(b);

        let access_list = Arc::new(ArcSwap::new(Arc::new(access_list)));

        let mut access_list_cache = Cache::new(Arc::clone(&access_list));

        assert!(access_list_cache.load().allows(AccessListMode::Allow, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Allow, &b));
        assert!(!access_list_cache.load().allows(AccessListMode::Allow, &c));

        assert!(!access_list_cache.load().allows(AccessListMode::Deny, &a));
        assert!(!access_list_cache.load().allows(AccessListMode::Deny, &b));
        assert!(access_list_cache.load().allows(AccessListMode::Deny, &c));

        assert!(access_list_cache.load().allows(AccessListMode::Off, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Off, &b));
        assert!(access_list_cache.load().allows(AccessListMode::Off, &c));

        access_list.store(Arc::new(AccessList::default()));

        assert!(access_list_cache.load().allows(AccessListMode::Deny, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Deny, &b));
    }
}
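For orientation, a minimal sketch (not part of the commit) of how the pieces above fit together: the list file holds one 40-character hex-encoded info hash per line, and consumers keep an `AccessListCache` for cheap repeated lookups. The file path and hash value are illustrative.

```rust
use std::sync::Arc;

use arc_swap::ArcSwap;

fn sketch() -> anyhow::Result<()> {
    // Illustrative config: allow only the hashes listed in the file
    let config = AccessListConfig {
        mode: AccessListMode::Allow,
        path: "./access-list.txt".into(), // one hex info hash per line
    };

    // Shared list, atomically replaceable on reload (e.g. on SIGUSR1)
    let list: Arc<AccessListArcSwap> = Arc::new(ArcSwap::from_pointee(AccessList::default()));
    update_access_list(&config, &list)?;

    // Per-consumer cache that avoids a fresh atomic load on every request
    let mut cache = create_access_list_cache(&list);
    let info_hash = [0u8; 20]; // hypothetical info hash
    let _allowed = cache.load().allows(config.mode, &info_hash);

    Ok(())
}
```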
255 apps/aquatic/crates/common/src/cli.rs Normal file
@ -0,0 +1,255 @@
use std::fs::File;
use std::io::Read;

use anyhow::Context;
use aquatic_toml_config::TomlConfig;
use git_testament::{git_testament, CommitKind};
use log::LevelFilter;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use simplelog::{ColorChoice, TermLogger, TerminalMode, ThreadLogMode};

/// Log level. Available values are off, error, warn, info, debug and trace.
#[derive(Debug, Clone, Copy, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
    Off,
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}

impl Default for LogLevel {
    fn default() -> Self {
        Self::Warn
    }
}

pub trait Config: Default + TomlConfig + DeserializeOwned + std::fmt::Debug {
    fn get_log_level(&self) -> Option<LogLevel> {
        None
    }
}

#[derive(Debug, Default)]
pub struct Options {
    config_file: Option<String>,
    print_config: bool,
    print_parsed_config: bool,
    print_version: bool,
}

impl Options {
    pub fn parse_args<I>(mut arg_iter: I) -> Result<Options, Option<String>>
    where
        I: Iterator<Item = String>,
    {
        let mut options = Options::default();

        #[allow(clippy::while_let_loop)] // False positive
        loop {
            if let Some(arg) = arg_iter.next() {
                match arg.as_str() {
                    "-c" | "--config-file" => {
                        if let Some(path) = arg_iter.next() {
                            options.config_file = Some(path);
                        } else {
                            return Err(Some("No config file path given".to_string()));
                        }
                    }
                    "-p" | "--print-config" => {
                        options.print_config = true;
                    }
                    "-P" => {
                        options.print_parsed_config = true;
                    }
                    "-v" | "--version" => {
                        options.print_version = true;
                    }
                    "-h" | "--help" => {
                        return Err(None);
                    }
                    "" => (),
                    _ => {
                        return Err(Some("Unrecognized argument".to_string()));
                    }
                }
            } else {
                break;
            }
        }

        Ok(options)
    }
}

pub fn run_app_with_cli_and_config<T>(
    app_title: &str,
    crate_version: &str,
    // Function that takes config file and runs application
    app_fn: fn(T) -> anyhow::Result<()>,
    opts: Option<Options>,
) where
    T: Config,
{
    ::std::process::exit(match run_inner(app_title, crate_version, app_fn, opts) {
        Ok(()) => 0,
        Err(err) => {
            eprintln!("Error: {:#}", err);

            1
        }
    })
}

fn run_inner<T>(
    app_title: &str,
    crate_version: &str,
    // Function that takes config file and runs application
    app_fn: fn(T) -> anyhow::Result<()>,
    // Possibly preparsed options
    options: Option<Options>,
) -> anyhow::Result<()>
where
    T: Config,
{
    let options = if let Some(options) = options {
        options
    } else {
        let mut arg_iter = ::std::env::args();

        let app_path = arg_iter.next().unwrap();

        match Options::parse_args(arg_iter) {
            Ok(options) => options,
            Err(opt_err) => {
                let gen_info = || format!("{}\n\nUsage: {} [OPTIONS]", app_title, app_path);

                print_help(gen_info, opt_err);

                return Ok(());
            }
        }
    };

    if options.print_version {
        let commit_info = get_commit_info();

        println!("{}{}", crate_version, commit_info);

        Ok(())
    } else if options.print_config {
        print!("{}", default_config_as_toml::<T>());

        Ok(())
    } else {
        let config = if let Some(path) = options.config_file {
            config_from_toml_file(path)?
        } else {
            T::default()
        };

        if let Some(log_level) = config.get_log_level() {
            start_logger(log_level)?;
        }

        if options.print_parsed_config {
            println!("Running with configuration: {:#?}", config);
        }

        app_fn(config)
    }
}

pub fn print_help<F>(info_generator: F, opt_error: Option<String>)
where
    F: FnOnce() -> String,
{
    println!("{}", info_generator());

    println!("\nOptions:");
    println!("    -c, --config-file     Load config from this path");
    println!("    -h, --help            Print this help message");
    println!("    -p, --print-config    Print default config");
    println!("    -P                    Print parsed config");
    println!("    -v, --version         Print version information");

    if let Some(error) = opt_error {
        println!("\nError: {}.", error);
    }
}

fn config_from_toml_file<T>(path: String) -> anyhow::Result<T>
where
    T: DeserializeOwned,
{
    let mut file = File::open(path.clone())
        .with_context(|| format!("Couldn't open config file {}", path.clone()))?;

    let mut data = String::new();

    file.read_to_string(&mut data)
        .with_context(|| format!("Couldn't read config file {}", path.clone()))?;

    toml::from_str(&data).with_context(|| format!("Couldn't parse config file {}", path.clone()))
}

fn default_config_as_toml<T>() -> String
where
    T: Default + TomlConfig,
{
    <T as TomlConfig>::default_to_string()
}

fn start_logger(log_level: LogLevel) -> ::anyhow::Result<()> {
    let mut builder = simplelog::ConfigBuilder::new();

    builder
        .set_thread_mode(ThreadLogMode::Both)
        .set_thread_level(LevelFilter::Error)
        .set_target_level(LevelFilter::Error)
        .set_location_level(LevelFilter::Off);

    let config = match builder.set_time_offset_to_local() {
        Ok(builder) => builder.build(),
        Err(builder) => builder.build(),
    };

    let level_filter = match log_level {
        LogLevel::Off => LevelFilter::Off,
        LogLevel::Error => LevelFilter::Error,
        LogLevel::Warn => LevelFilter::Warn,
        LogLevel::Info => LevelFilter::Info,
        LogLevel::Debug => LevelFilter::Debug,
        LogLevel::Trace => LevelFilter::Trace,
    };

    TermLogger::init(
        level_filter,
        config,
        TerminalMode::Stderr,
        ColorChoice::Auto,
    )
    .context("Couldn't initialize logger")?;

    Ok(())
}

fn get_commit_info() -> String {
    git_testament!(TESTAMENT);

    match TESTAMENT.commit {
        CommitKind::NoTags(hash, date) => {
            format!(" ({} - {})", first_8_chars(hash), date)
        }
        CommitKind::FromTag(_tag, hash, date, _tag_distance) => {
            format!(" ({} - {})", first_8_chars(hash), date)
        }
        _ => String::new(),
    }
}

fn first_8_chars(input: &str) -> String {
    input.chars().take(8).collect()
}
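A quick sketch (hypothetical argument values, same crate so the private `Options` fields are in scope) of the argument flow above:

```rust
// Build an argument list as the OS would pass it, minus the program name,
// which run_inner already consumes before calling parse_args.
let args = ["-c", "aquatic-config.toml", "-P"]
    .into_iter()
    .map(String::from);

// Yields Options with config_file set and print_parsed_config = true;
// "-h" would instead produce Err(None), which triggers print_help.
let options = Options::parse_args(args).expect("arguments should parse");
```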
240 apps/aquatic/crates/common/src/cpu_pinning.rs Normal file
@ -0,0 +1,240 @@
//! Experimental CPU pinning

use aquatic_toml_config::TomlConfig;
use serde::{Deserialize, Serialize};

#[derive(Clone, Copy, Debug, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CpuPinningDirection {
    Ascending,
    Descending,
}

impl Default for CpuPinningDirection {
    fn default() -> Self {
        Self::Ascending
    }
}

pub trait CpuPinningConfig {
    fn active(&self) -> bool;
    fn direction(&self) -> CpuPinningDirection;
    fn core_offset(&self) -> usize;
}

// Do these shenanigans for compatibility with aquatic_toml_config
#[duplicate::duplicate_item(
    mod_name struct_name cpu_pinning_direction;
    [asc] [CpuPinningConfigAsc] [CpuPinningDirection::Ascending];
    [desc] [CpuPinningConfigDesc] [CpuPinningDirection::Descending];
)]
pub mod mod_name {
    use super::*;

    /// Experimental cpu pinning
    #[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
    pub struct struct_name {
        pub active: bool,
        pub direction: CpuPinningDirection,
        pub core_offset: usize,
    }

    impl Default for struct_name {
        fn default() -> Self {
            Self {
                active: false,
                direction: cpu_pinning_direction,
                core_offset: 0,
            }
        }
    }
    impl CpuPinningConfig for struct_name {
        fn active(&self) -> bool {
            self.active
        }
        fn direction(&self) -> CpuPinningDirection {
            self.direction
        }
        fn core_offset(&self) -> usize {
            self.core_offset
        }
    }
}

#[derive(Clone, Copy, Debug)]
pub enum WorkerIndex {
    SocketWorker(usize),
    SwarmWorker(usize),
    Util,
}

impl WorkerIndex {
    pub fn get_core_index<C: CpuPinningConfig>(
        &self,
        config: &C,
        socket_workers: usize,
        swarm_workers: usize,
        num_cores: usize,
    ) -> usize {
        let ascending_index = match self {
            Self::SocketWorker(index) => config.core_offset() + index,
            Self::SwarmWorker(index) => config.core_offset() + socket_workers + index,
            Self::Util => config.core_offset() + socket_workers + swarm_workers,
        };

        let max_core_index = num_cores - 1;

        let ascending_index = ascending_index.min(max_core_index);

        match config.direction() {
            CpuPinningDirection::Ascending => ascending_index,
            CpuPinningDirection::Descending => max_core_index - ascending_index,
        }
    }
}

/// Pin current thread to a suitable core
///
/// Requires hwloc (`apt-get install libhwloc-dev`)
pub fn pin_current_if_configured_to<C: CpuPinningConfig>(
    config: &C,
    socket_workers: usize,
    swarm_workers: usize,
    worker_index: WorkerIndex,
) {
    use hwloc::{CpuSet, ObjectType, Topology, CPUBIND_THREAD};

    if config.active() {
        let mut topology = Topology::new();

        let core_cpu_sets: Vec<CpuSet> = topology
            .objects_with_type(&ObjectType::Core)
            .expect("hwloc: list cores")
            .into_iter()
            .map(|core| core.allowed_cpuset().expect("hwloc: get core cpu set"))
            .collect();

        let num_cores = core_cpu_sets.len();

        let core_index =
            worker_index.get_core_index(config, socket_workers, swarm_workers, num_cores);

        let cpu_set = core_cpu_sets
            .get(core_index)
            .unwrap_or_else(|| panic!("get cpu set for core {}", core_index))
            .to_owned();

        topology
            .set_cpubind(cpu_set, CPUBIND_THREAD)
            .unwrap_or_else(|err| panic!("bind thread to core {}: {:?}", core_index, err));

        ::log::info!(
            "Pinned worker {:?} to cpu core {}",
            worker_index,
            core_index
        );
    }
}

/// Tell Linux that incoming messages should be handled by the socket worker
/// with the same index as the CPU core receiving the interrupt.
///
/// Requires that sockets are actually bound in order, so waiting has to be done
/// in socket workers.
///
/// It might make sense to first enable RSS or RPS (if hardware doesn't support
/// RSS) and enable sending interrupts to all CPUs that have socket workers
/// running on them. Possibly, CPU 0 should be excluded.
///
/// More information:
/// - https://talawah.io/blog/extreme-http-performance-tuning-one-point-two-million/
/// - https://www.kernel.org/doc/Documentation/networking/scaling.txt
/// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/performance_tuning_guide/network-rps
#[cfg(target_os = "linux")]
pub fn socket_attach_cbpf<S: ::std::os::unix::prelude::AsRawFd>(
    socket: &S,
    _num_sockets: usize,
) -> ::std::io::Result<()> {
    use std::mem::size_of;
    use std::os::raw::c_void;

    use libc::{setsockopt, sock_filter, sock_fprog, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF};

    // Good BPF documentation: https://man.openbsd.org/bpf.4

    // Values of constants were copied from the following Linux source files:
    // - include/uapi/linux/bpf_common.h
    // - include/uapi/linux/filter.h

    // Instruction
    const BPF_LD: u16 = 0x00; // Load into A
    // const BPF_LDX: u16 = 0x01; // Load into X
    // const BPF_ALU: u16 = 0x04; // ALU operation on A
    const BPF_RET: u16 = 0x06; // Return value
    // const BPF_MOD: u16 = 0x90; // Run modulo on A

    // Size
    const BPF_W: u16 = 0x00; // 32-bit width

    // Source
    // const BPF_IMM: u16 = 0x00; // Use constant (k)
    const BPF_ABS: u16 = 0x20;

    // Registers
    // const BPF_K: u16 = 0x00;
    const BPF_A: u16 = 0x10;

    // k
    const SKF_AD_OFF: i32 = -0x1000; // Activate extensions
    const SKF_AD_CPU: i32 = 36; // Extension for getting CPU

    // Return index of socket that should receive packet
    let mut filter = [
        // Store index of CPU receiving packet in register A
        sock_filter {
            code: BPF_LD | BPF_W | BPF_ABS,
            jt: 0,
            jf: 0,
            k: u32::from_ne_bytes((SKF_AD_OFF + SKF_AD_CPU).to_ne_bytes()),
        },
        /* Disabled, because it doesn't make a lot of sense
        // Run A = A % socket_workers
        sock_filter {
            code: BPF_ALU | BPF_MOD,
            jt: 0,
            jf: 0,
            k: num_sockets as u32,
        },
        */
        // Return A
        sock_filter {
            code: BPF_RET | BPF_A,
            jt: 0,
            jf: 0,
            k: 0,
        },
    ];

    let program = sock_fprog {
        filter: filter.as_mut_ptr(),
        len: filter.len() as u16,
    };

    let program_ptr: *const sock_fprog = &program;

    unsafe {
        let result = setsockopt(
            socket.as_raw_fd(),
            SOL_SOCKET,
            SO_ATTACH_REUSEPORT_CBPF,
            program_ptr as *const c_void,
            size_of::<sock_fprog>() as u32,
        );

        if result != 0 {
            Err(::std::io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
}
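To make the index arithmetic above concrete, a small worked example (assumed worker and core counts, ascending direction, default offset):

```rust
// With 2 socket workers, 1 swarm worker and 8 cores, ascending from offset 0:
//   SocketWorker(0) -> core 0, SocketWorker(1) -> core 1,
//   SwarmWorker(0)  -> core 2 (offset + socket_workers + index),
//   Util            -> core 3 (offset + socket_workers + swarm_workers)
let config = asc::CpuPinningConfigAsc::default();

assert_eq!(WorkerIndex::SocketWorker(1).get_core_index(&config, 2, 1, 8), 1);
assert_eq!(WorkerIndex::SwarmWorker(0).get_core_index(&config, 2, 1, 8), 2);
assert_eq!(WorkerIndex::Util.get_core_index(&config, 2, 1, 8), 3);
```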
185 apps/aquatic/crates/common/src/lib.rs Normal file
@ -0,0 +1,185 @@
use std::fmt::Display;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::time::Instant;

use ahash::RandomState;

pub mod access_list;
pub mod cli;
#[cfg(feature = "cpu-pinning")]
pub mod cpu_pinning;
pub mod privileges;
#[cfg(feature = "rustls")]
pub mod rustls_config;

/// IndexMap using AHash hasher
pub type IndexMap<K, V> = indexmap::IndexMap<K, V, RandomState>;

/// Peer, connection or similar valid until this instant
#[derive(Debug, Clone, Copy)]
pub struct ValidUntil(SecondsSinceServerStart);

impl ValidUntil {
    #[inline]
    pub fn new(start_instant: ServerStartInstant, offset_seconds: u32) -> Self {
        Self(SecondsSinceServerStart(
            start_instant.seconds_elapsed().0 + offset_seconds,
        ))
    }
    pub fn new_with_now(now: SecondsSinceServerStart, offset_seconds: u32) -> Self {
        Self(SecondsSinceServerStart(now.0 + offset_seconds))
    }
    pub fn valid(&self, now: SecondsSinceServerStart) -> bool {
        self.0 .0 > now.0
    }
}

#[derive(Debug, Clone, Copy)]
pub struct ServerStartInstant(Instant);

impl ServerStartInstant {
    #[allow(clippy::new_without_default)] // I prefer ::new here
    pub fn new() -> Self {
        Self(Instant::now())
    }
    pub fn seconds_elapsed(&self) -> SecondsSinceServerStart {
        SecondsSinceServerStart(
            self.0
                .elapsed()
                .as_secs()
                .try_into()
                .expect("server ran for more seconds than what fits in a u32"),
        )
    }
}

#[derive(Debug, Clone, Copy)]
pub struct SecondsSinceServerStart(u32);

/// SocketAddr that is not an IPv6-mapped IPv4 address
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct CanonicalSocketAddr(SocketAddr);

impl CanonicalSocketAddr {
    pub fn new(addr: SocketAddr) -> Self {
        match addr {
            addr @ SocketAddr::V4(_) => Self(addr),
            SocketAddr::V6(addr) => {
                match addr.ip().octets() {
                    // Convert IPv4-mapped address (available in std but nightly-only)
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => Self(SocketAddr::V4(
                        SocketAddrV4::new(Ipv4Addr::new(a, b, c, d), addr.port()),
                    )),
                    _ => Self(addr.into()),
                }
            }
        }
    }

    pub fn get_ipv6_mapped(self) -> SocketAddr {
        match self.0 {
            SocketAddr::V4(addr) => {
                let ip = addr.ip().to_ipv6_mapped();

                SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0))
            }
            addr => addr,
        }
    }

    pub fn get(self) -> SocketAddr {
        self.0
    }

    pub fn get_ipv4(self) -> Option<SocketAddr> {
        match self.0 {
            addr @ SocketAddr::V4(_) => Some(addr),
            _ => None,
        }
    }

    pub fn is_ipv4(&self) -> bool {
        self.0.is_ipv4()
    }
}

#[cfg(feature = "prometheus")]
pub fn spawn_prometheus_endpoint(
    addr: SocketAddr,
    timeout: Option<::std::time::Duration>,
    timeout_mask: Option<metrics_util::MetricKindMask>,
) -> anyhow::Result<::std::thread::JoinHandle<anyhow::Result<()>>> {
    use std::thread::Builder;
    use std::time::Duration;

    use anyhow::Context;

    let handle = Builder::new()
        .name("prometheus".into())
        .spawn(move || {
            use metrics_exporter_prometheus::PrometheusBuilder;
            use metrics_util::MetricKindMask;

            let rt = ::tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .context("build prometheus tokio runtime")?;

            rt.block_on(async {
                let mask = timeout_mask.unwrap_or(MetricKindMask::ALL);

                let (recorder, exporter) = PrometheusBuilder::new()
                    .idle_timeout(mask, timeout)
                    .with_http_listener(addr)
                    .build()
                    .context("build prometheus recorder and exporter")?;

                let recorder_handle = recorder.handle();

                ::metrics::set_global_recorder(recorder).context("set global metrics recorder")?;

                ::tokio::spawn(async move {
                    let mut interval = ::tokio::time::interval(Duration::from_secs(5));

                    loop {
                        interval.tick().await;

                        // Periodically render metrics to make sure
                        // idles are cleaned up
                        recorder_handle.render();
                    }
                });

                exporter
                    .await
                    .map_err(|err| anyhow::anyhow!("run prometheus exporter: {:#?}", err))
            })
        })
        .context("spawn prometheus endpoint")?;

    Ok(handle)
}

pub enum WorkerType {
    Swarm(usize),
    Socket(usize),
    Statistics,
    Signals,
    Cleaning,
    #[cfg(feature = "prometheus")]
    Prometheus,
}

impl Display for WorkerType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Swarm(index) => f.write_fmt(format_args!("Swarm worker {}", index + 1)),
            Self::Socket(index) => f.write_fmt(format_args!("Socket worker {}", index + 1)),
            Self::Statistics => f.write_str("Statistics worker"),
            Self::Signals => f.write_str("Signals worker"),
            Self::Cleaning => f.write_str("Cleaning worker"),
            #[cfg(feature = "prometheus")]
            Self::Prometheus => f.write_str("Prometheus worker"),
        }
    }
}
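The normalization done by `CanonicalSocketAddr::new` can be illustrated with a small standalone example (addresses are arbitrary):

```rust
use std::net::SocketAddr;

// An IPv4-mapped IPv6 peer address collapses to the plain IPv4 form,
// so both representations compare (and hash) as the same peer.
let mapped: SocketAddr = "[::ffff:10.0.0.1]:6881".parse().unwrap();
let plain: SocketAddr = "10.0.0.1:6881".parse().unwrap();

assert_eq!(CanonicalSocketAddr::new(mapped), CanonicalSocketAddr::new(plain));
assert!(CanonicalSocketAddr::new(mapped).is_ipv4());
```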
62 apps/aquatic/crates/common/src/privileges.rs Normal file
@ -0,0 +1,62 @@
use std::{
    path::PathBuf,
    sync::{Arc, Barrier},
};

use anyhow::Context;
use privdrop::PrivDrop;
use serde::{Deserialize, Serialize};

use aquatic_toml_config::TomlConfig;

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct PrivilegeConfig {
    /// Chroot and switch group and user after binding to sockets
    pub drop_privileges: bool,
    /// Chroot to this path
    pub chroot_path: PathBuf,
    /// Group to switch to after chrooting
    pub group: String,
    /// User to switch to after chrooting
    pub user: String,
}

impl Default for PrivilegeConfig {
    fn default() -> Self {
        Self {
            drop_privileges: false,
            chroot_path: ".".into(),
            user: "nobody".to_string(),
            group: "nogroup".to_string(),
        }
    }
}

#[derive(Clone)]
pub struct PrivilegeDropper {
    barrier: Arc<Barrier>,
    config: Arc<PrivilegeConfig>,
}

impl PrivilegeDropper {
    pub fn new(config: PrivilegeConfig, num_sockets: usize) -> Self {
        Self {
            barrier: Arc::new(Barrier::new(num_sockets)),
            config: Arc::new(config),
        }
    }

    pub fn after_socket_creation(self) -> anyhow::Result<()> {
        if self.config.drop_privileges && self.barrier.wait().is_leader() {
            PrivDrop::default()
                .chroot(self.config.chroot_path.clone())
                .group(self.config.group.clone())
                .user(self.config.user.clone())
                .apply()
                .with_context(|| "couldn't drop privileges after socket creation")?;
        }

        Ok(())
    }
}
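The intended call pattern is one `PrivilegeDropper` clone per socket; a hedged sketch (thread bodies and socket setup elided):

```rust
// With drop_privileges enabled and two sockets expected, the barrier waits
// for two calls to after_socket_creation. Exactly one of the waiting threads
// (the barrier leader) then performs the chroot and user/group switch, which
// is enough since those operations affect the whole process.
let config = PrivilegeConfig {
    drop_privileges: true,
    ..Default::default()
};
let dropper = PrivilegeDropper::new(config, 2);

for _ in 0..2 {
    let dropper = dropper.clone();
    std::thread::spawn(move || {
        // ... bind this worker's socket here, then:
        dropper.after_socket_creation().expect("privilege drop failed");
    });
}
```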
59 apps/aquatic/crates/common/src/rustls_config.rs Normal file
@ -0,0 +1,59 @@
use std::{fs::File, io::BufReader, path::Path};

use anyhow::Context;

pub type RustlsConfig = rustls::ServerConfig;

pub fn create_rustls_config(
    tls_certificate_path: &Path,
    tls_private_key_path: &Path,
) -> anyhow::Result<RustlsConfig> {
    let certs = {
        let f = File::open(tls_certificate_path).with_context(|| {
            format!(
                "open tls certificate file at {}",
                tls_certificate_path.to_string_lossy()
            )
        })?;
        let mut f = BufReader::new(f);

        let mut certs = Vec::new();

        for cert in rustls_pemfile::certs(&mut f) {
            match cert {
                Ok(cert) => {
                    certs.push(cert);
                }
                Err(err) => {
                    ::log::error!("error parsing certificate: {:#?}", err)
                }
            }
        }

        certs
    };

    let private_key = {
        let f = File::open(tls_private_key_path).with_context(|| {
            format!(
                "open tls private key file at {}",
                tls_private_key_path.to_string_lossy()
            )
        })?;
        let mut f = BufReader::new(f);

        let key = rustls_pemfile::pkcs8_private_keys(&mut f)
            .next()
            .ok_or(anyhow::anyhow!("No private keys in file"))??;

        #[allow(clippy::let_and_return)] // Using temporary variable fixes lifetime issue
        key
    };

    let tls_config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(certs, rustls::pki_types::PrivateKeyDer::Pkcs8(private_key))
        .with_context(|| "create rustls config")?;

    Ok(tls_config)
}
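A minimal sketch of wiring this into a reloadable handle, as `aquatic_http` does further below (file names are placeholders; the function reads PEM files, with the key in PKCS#8 form):

```rust
use std::{path::Path, sync::Arc};

use arc_swap::ArcSwap;

fn load_shared_tls_config() -> anyhow::Result<Arc<ArcSwap<RustlsConfig>>> {
    // Parse certificate chain and PKCS#8 private key, build a ServerConfig
    let tls_config = create_rustls_config(Path::new("cert.pem"), Path::new("key.pem"))?;

    // Behind ArcSwap, a signal handler can atomically swap in a reloaded
    // config without disturbing connections already in flight
    Ok(Arc::new(ArcSwap::from_pointee(tls_config)))
}
```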
67 apps/aquatic/crates/http/Cargo.toml Normal file
@ -0,0 +1,67 @@
[package]
name = "aquatic_http"
description = "High-performance open HTTP BitTorrent tracker (with optional TLS)"
keywords = ["http", "server", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "./README.md"

[lib]
name = "aquatic_http"

[[bin]]
name = "aquatic_http"

[features]
default = ["prometheus", "mimalloc"]
prometheus = ["aquatic_common/prometheus", "metrics", "dep:metrics-util"]
metrics = ["dep:metrics"]
# Use mimalloc allocator for much better performance.
#
# Requires cmake and a C compiler
mimalloc = ["dep:mimalloc"]

[dependencies]
aquatic_common = { workspace = true, features = ["rustls"] }
aquatic_http_protocol.workspace = true
aquatic_toml_config.workspace = true

anyhow = "1"
arrayvec = "0.7"
arc-swap = "1"
cfg-if = "1"
either = "1"
futures = "0.3"
futures-lite = "1"
futures-rustls = "0.26"
glommio = "0.9"
httparse = "1"
itoa = "1"
libc = "0.2"
log = "0.4"
memchr = "2"
privdrop = "0.5"
once_cell = "1"
rand = { version = "0.8", features = ["small_rng"] }
rustls-pemfile = "2"
serde = { version = "1", features = ["derive"] }
signal-hook = { version = "0.3" }
slotmap = "1"
socket2 = { version = "0.5", features = ["all"] }
thiserror = "2"

# metrics feature
metrics = { version = "0.24", optional = true }
metrics-util = { version = "0.19", optional = true }

# mimalloc feature
mimalloc = { version = "0.1", default-features = false, optional = true }

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"
121 apps/aquatic/crates/http/README.md Normal file
@ -0,0 +1,121 @@
# aquatic_http: high-performance open HTTP BitTorrent tracker

[CI status](https://github.com/greatest-ape/aquatic/actions/workflows/ci.yml)

High-performance open HTTP BitTorrent tracker for Linux 5.8 or later.

Features at a glance:

- Multithreaded design for handling large amounts of traffic
- All data is stored in-memory (no database needed)
- IPv4 and IPv6 support
- Supports forbidding/allowing info hashes
- Prometheus metrics
- Automated CI testing of full file transfers

## Performance

*(Benchmark comparison graph.)*

More benchmark details are available [here](../../documents/aquatic-http-load-test-2023-01-25.pdf).

## Usage

### Compiling

- Install Rust with [rustup](https://rustup.rs/) (latest stable release is recommended)
- Install build dependencies with your package manager (e.g., `apt-get install cmake build-essential`)
- Clone this git repository and build the application:

```sh
git clone https://github.com/greatest-ape/aquatic.git && cd aquatic

# Recommended: tell Rust to enable support for all SIMD extensions present on
# current CPU except for those relating to AVX-512. (If you run a processor
# that doesn't clock down when using AVX-512, you can enable those instructions
# too.)
. ./scripts/env-native-cpu-without-avx-512

cargo build --release -p aquatic_http
```

### Configuring

Generate the configuration file:

```sh
./target/release/aquatic_http -p > "aquatic-http-config.toml"
```

Make necessary adjustments to the file. You will likely want to adjust
listening addresses under the `network` section.
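For reference, a sketch of that section with the compiled-in defaults (the generated file may format values slightly differently):

```toml
[network]
use_ipv4 = true
use_ipv6 = true
address_ipv4 = "0.0.0.0:3000"
address_ipv6 = "[::]:3000"
enable_tls = false
```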
To run over TLS, configure certificate and private key files.

Running behind a reverse proxy is supported. Please refer to the config file
for details.

### Running

Make sure locked memory limits are sufficient:

- If you're using a systemd service file, add `LimitMEMLOCK=65536000` to it
- Otherwise, add the following lines to
  `/etc/security/limits.conf`, and then log out and back in:

```
* hard memlock 65536
* soft memlock 65536
```

Once done, start the application:

```sh
./target/release/aquatic_http -c "aquatic-http-config.toml"
```

If your server is pointed to by domain `example.com` and you configured the
tracker to run on port 3000, people can now use it by adding the URL
`https://example.com:3000/announce` to their torrent files or magnet links.

### Load testing

A load test application is available. It supports generation and loading of
configuration files in a similar manner to the tracker application.

After starting the tracker, run the load tester:

```sh
. ./scripts/env-native-cpu-without-avx-512 # Optional

cargo run --release -p aquatic_http_load_test -- --help
```

## Details

[BEP 003]: https://www.bittorrent.org/beps/bep_0003.html
[BEP 007]: https://www.bittorrent.org/beps/bep_0007.html
[BEP 023]: https://www.bittorrent.org/beps/bep_0023.html
[BEP 048]: https://www.bittorrent.org/beps/bep_0048.html

Implements:

* [BEP 003]: HTTP BitTorrent protocol ([more details](https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol)). Exceptions:
  * Doesn't track the number of torrent downloads (0 is always sent)
  * Only compact responses are supported
* [BEP 023]: Compact HTTP responses
* [BEP 007]: IPv6 support
* [BEP 048]: HTTP scrape support. Notes:
  * Doesn't allow full scrapes, i.e. of all registered info hashes

`aquatic_http` has not been tested as much as `aquatic_udp`, but likely works
fine in production.

## Architectural overview

*(Architecture diagram.)*

## Copyright and license

Copyright (c) Joakim Frostegård

Distributed under the terms of the Apache License, Version 2.0. Please refer to
the `LICENSE` file in the repository root directory for details.
40 apps/aquatic/crates/http/src/common.rs Normal file
@ -0,0 +1,40 @@
use std::sync::Arc;

use aquatic_common::access_list::AccessListArcSwap;
use aquatic_common::CanonicalSocketAddr;

pub use aquatic_common::ValidUntil;

use aquatic_http_protocol::{
    request::{AnnounceRequest, ScrapeRequest},
    response::{AnnounceResponse, ScrapeResponse},
};
use glommio::channels::shared_channel::SharedSender;
use slotmap::new_key_type;

#[allow(dead_code)]
#[derive(Copy, Clone, Debug)]
pub struct ConsumerId(pub usize);

new_key_type! {
    pub struct ConnectionId;
}

#[derive(Debug)]
pub enum ChannelRequest {
    Announce {
        request: AnnounceRequest,
        peer_addr: CanonicalSocketAddr,
        response_sender: SharedSender<AnnounceResponse>,
    },
    Scrape {
        request: ScrapeRequest,
        peer_addr: CanonicalSocketAddr,
        response_sender: SharedSender<ScrapeResponse>,
    },
}

#[derive(Default, Clone)]
pub struct State {
    pub access_list: Arc<AccessListArcSwap>,
}
231 apps/aquatic/crates/http/src/config.rs Normal file
@ -0,0 +1,231 @@
use std::{
    net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
    path::PathBuf,
};

use aquatic_common::{access_list::AccessListConfig, privileges::PrivilegeConfig};
use aquatic_toml_config::TomlConfig;
use serde::{Deserialize, Serialize};

use aquatic_common::cli::LogLevel;

#[derive(Clone, Copy, Debug, PartialEq, Serialize, TomlConfig, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum ReverseProxyPeerIpHeaderFormat {
    #[default]
    LastAddress,
}

/// aquatic_http configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Number of socket workers. One per physical core is recommended.
    ///
    /// Socket workers receive requests from the socket, parse them and send
    /// them on to the swarm workers. They then receive responses from the
    /// swarm workers, encode them and send them back over the socket.
    pub socket_workers: usize,
    /// Number of swarm workers. One is enough in almost all cases
    ///
    /// Swarm workers receive a number of requests from socket workers,
    /// generate responses and send them back to the socket workers.
    pub swarm_workers: usize,
    pub log_level: LogLevel,
    pub network: NetworkConfig,
    pub protocol: ProtocolConfig,
    pub cleaning: CleaningConfig,
    pub privileges: PrivilegeConfig,
    /// Access list configuration
    ///
    /// The file is read on start and when the program receives `SIGUSR1`. If
    /// initial parsing fails, the program exits. Later failures result in an
    /// error-level log message, while successful updates of the access list
    /// result in an info-level log message.
    pub access_list: AccessListConfig,
    #[cfg(feature = "metrics")]
    pub metrics: MetricsConfig,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            socket_workers: 1,
            swarm_workers: 1,
            log_level: LogLevel::default(),
            network: NetworkConfig::default(),
            protocol: ProtocolConfig::default(),
            cleaning: CleaningConfig::default(),
            privileges: PrivilegeConfig::default(),
            access_list: AccessListConfig::default(),
            #[cfg(feature = "metrics")]
            metrics: Default::default(),
        }
    }
}

impl aquatic_common::cli::Config for Config {
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// Use IPv4
    pub use_ipv4: bool,
    /// Use IPv6
    pub use_ipv6: bool,
    /// IPv4 address and port
    ///
    /// Examples:
    /// - Use 0.0.0.0:3000 to bind to all interfaces on port 3000
    /// - Use 127.0.0.1:3000 to bind to the loopback interface (localhost) on
    ///   port 3000
    pub address_ipv4: SocketAddrV4,
    /// IPv6 address and port
    ///
    /// Examples:
    /// - Use [::]:3000 to bind to all interfaces on port 3000
    /// - Use [::1]:3000 to bind to the loopback interface (localhost) on
    ///   port 3000
    pub address_ipv6: SocketAddrV6,
    /// Maximum number of pending TCP connections
    pub tcp_backlog: i32,
    /// Enable TLS
    ///
    /// The TLS files are read on start and when the program receives `SIGUSR1`.
    /// If initial parsing fails, the program exits. Later failures result in
    /// an error-level log message, while successful updates result in an
    /// info-level log message. Updates only affect new connections.
    pub enable_tls: bool,
    /// Path to TLS certificate (DER-encoded X.509)
    pub tls_certificate_path: PathBuf,
    /// Path to TLS private key (DER-encoded ASN.1 in PKCS#8 or PKCS#1 format)
    pub tls_private_key_path: PathBuf,
    /// Keep connections alive after sending a response
    pub keep_alive: bool,
    /// Does the tracker run behind a reverse proxy?
    ///
    /// MUST be set to false if not running behind a reverse proxy.
    ///
    /// If set to true, make sure that reverse_proxy_ip_header_name and
    /// reverse_proxy_ip_header_format are set to match your reverse proxy
    /// setup.
    ///
    /// More info on what can go wrong when running behind reverse proxies:
    /// https://adam-p.ca/blog/2022/03/x-forwarded-for/
    pub runs_behind_reverse_proxy: bool,
    /// Name of header set by reverse proxy to indicate peer ip
    pub reverse_proxy_ip_header_name: String,
    /// How to extract peer IP from header field
    ///
    /// Options:
    /// - last_address: use the last address in the last instance of the
    ///   header. Works with typical multi-IP setups (e.g., "X-Forwarded-For")
    ///   as well as for single-IP setups (e.g., nginx "X-Real-IP")
    pub reverse_proxy_ip_header_format: ReverseProxyPeerIpHeaderFormat,
    /// Set flag on IPv6 socket to only accept IPv6 traffic.
    ///
    /// This should typically be set to true unless your OS does not support
    /// double-stack sockets (that is, sockets that receive both IPv4 and IPv6
    /// packets).
    pub set_only_ipv6: bool,
}

impl Default for NetworkConfig {
    fn default() -> Self {
        Self {
            use_ipv4: true,
            use_ipv6: true,
            address_ipv4: SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 3000),
            address_ipv6: SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 3000, 0, 0),
            enable_tls: false,
            tls_certificate_path: "".into(),
            tls_private_key_path: "".into(),
            tcp_backlog: 1024,
            keep_alive: true,
            runs_behind_reverse_proxy: false,
            reverse_proxy_ip_header_name: "X-Forwarded-For".into(),
            reverse_proxy_ip_header_format: Default::default(),
            set_only_ipv6: true,
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProtocolConfig {
    /// Maximum number of torrents to accept in scrape request
    pub max_scrape_torrents: usize,
    /// Maximum number of requested peers to accept in announce request
    pub max_peers: usize,
    /// Ask peers to announce this often (seconds)
    pub peer_announce_interval: usize,
}

impl Default for ProtocolConfig {
    fn default() -> Self {
        Self {
            max_scrape_torrents: 100,
            max_peers: 50,
            peer_announce_interval: 120,
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct CleaningConfig {
    /// Clean peers this often (seconds)
    pub torrent_cleaning_interval: u64,
    /// Clean connections this often (seconds)
    pub connection_cleaning_interval: u64,
    /// Remove peers that have not announced for this long (seconds)
    pub max_peer_age: u32,
    /// Remove connections that haven't seen valid requests for this long (seconds)
    pub max_connection_idle: u32,
}

impl Default for CleaningConfig {
    fn default() -> Self {
        Self {
            torrent_cleaning_interval: 30,
            connection_cleaning_interval: 60,
            max_peer_age: 1800,
            max_connection_idle: 180,
        }
    }
}

#[cfg(feature = "metrics")]
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct MetricsConfig {
    /// Run a prometheus endpoint
    pub run_prometheus_endpoint: bool,
    /// Address to run prometheus endpoint on
    pub prometheus_endpoint_address: SocketAddr,
    /// Update metrics for torrent count this often (seconds)
    pub torrent_count_update_interval: u64,
}

#[cfg(feature = "metrics")]
impl Default for MetricsConfig {
    fn default() -> Self {
        Self {
            run_prometheus_endpoint: false,
            prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
            torrent_count_update_interval: 10,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Config;

    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
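Since `Config` derives serde `Deserialize` with `#[serde(default)]`, a partial TOML file fills in the rest from the defaults above; a minimal illustrative sketch (not part of the commit):

```rust
// Unspecified fields fall back to the Default impls above;
// deny_unknown_fields makes a typo in a key a hard error.
let config: Config = toml::from_str(
    "socket_workers = 2\nlog_level = 'info'\n",
)
.expect("valid config");

assert_eq!(config.socket_workers, 2);
```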
199 apps/aquatic/crates/http/src/lib.rs Normal file
@ -0,0 +1,199 @@
use anyhow::Context;
use aquatic_common::{
    access_list::update_access_list, privileges::PrivilegeDropper,
    rustls_config::create_rustls_config, ServerStartInstant, WorkerType,
};
use arc_swap::ArcSwap;
use common::State;
use glommio::{channels::channel_mesh::MeshBuilder, prelude::*};
use signal_hook::{consts::SIGUSR1, iterator::Signals};
use std::{
    sync::Arc,
    thread::{sleep, Builder, JoinHandle},
    time::Duration,
};

use crate::config::Config;

mod common;
pub mod config;
mod workers;

pub const APP_NAME: &str = "aquatic_http: HTTP BitTorrent tracker";
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");

const SHARED_CHANNEL_SIZE: usize = 1024;

pub fn run(config: Config) -> ::anyhow::Result<()> {
    let mut signals = Signals::new([SIGUSR1])?;

    if !(config.network.use_ipv4 || config.network.use_ipv6) {
        return Result::Err(anyhow::anyhow!(
            "use_ipv4 and use_ipv6 cannot both be set to false"
        ));
    }

    let state = State::default();

    update_access_list(&config.access_list, &state.access_list)?;

    let request_mesh_builder = MeshBuilder::partial(
        config.socket_workers + config.swarm_workers,
        SHARED_CHANNEL_SIZE,
    );

    let num_sockets_per_worker =
        if config.network.use_ipv4 { 1 } else { 0 } + if config.network.use_ipv6 { 1 } else { 0 };

    let priv_dropper = PrivilegeDropper::new(
        config.privileges.clone(),
        config.socket_workers * num_sockets_per_worker,
    );

    let opt_tls_config = if config.network.enable_tls {
        Some(Arc::new(ArcSwap::from_pointee(create_rustls_config(
            &config.network.tls_certificate_path,
            &config.network.tls_private_key_path,
        )?)))
    } else {
        None
    };

    let server_start_instant = ServerStartInstant::new();

    let mut join_handles = Vec::new();

    for i in 0..(config.socket_workers) {
        let config = config.clone();
        let state = state.clone();
        let opt_tls_config = opt_tls_config.clone();
        let request_mesh_builder = request_mesh_builder.clone();

        let mut priv_droppers = Vec::new();

        for _ in 0..num_sockets_per_worker {
            priv_droppers.push(priv_dropper.clone());
        }

        let handle = Builder::new()
            .name(format!("socket-{:02}", i + 1))
            .spawn(move || {
                LocalExecutorBuilder::default()
                    .make()
                    .map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?
                    .run(workers::socket::run_socket_worker(
                        config,
                        state,
                        opt_tls_config,
                        request_mesh_builder,
                        priv_droppers,
                        server_start_instant,
                        i,
                    ))
            })
            .context("spawn socket worker")?;

        join_handles.push((WorkerType::Socket(i), handle));
    }

    for i in 0..(config.swarm_workers) {
        let config = config.clone();
        let state = state.clone();
        let request_mesh_builder = request_mesh_builder.clone();

        let handle = Builder::new()
            .name(format!("swarm-{:02}", i + 1))
            .spawn(move || {
                LocalExecutorBuilder::default()
                    .make()
                    .map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?
                    .run(workers::swarm::run_swarm_worker(
                        config,
                        state,
                        request_mesh_builder,
                        server_start_instant,
                        i,
                    ))
            })
            .context("spawn swarm worker")?;

        join_handles.push((WorkerType::Swarm(i), handle));
    }

    #[cfg(feature = "prometheus")]
    if config.metrics.run_prometheus_endpoint {
        let idle_timeout = config
            .cleaning
            .connection_cleaning_interval
            .max(config.cleaning.torrent_cleaning_interval)
            .max(config.metrics.torrent_count_update_interval)
            * 2;

        let handle = aquatic_common::spawn_prometheus_endpoint(
            config.metrics.prometheus_endpoint_address,
            Some(Duration::from_secs(idle_timeout)),
            Some(metrics_util::MetricKindMask::GAUGE),
        )?;

        join_handles.push((WorkerType::Prometheus, handle));
    }

    // Spawn signal handler thread
    {
        let handle: JoinHandle<anyhow::Result<()>> = Builder::new()
            .name("signals".into())
            .spawn(move || {
                for signal in &mut signals {
                    match signal {
                        SIGUSR1 => {
                            let _ = update_access_list(&config.access_list, &state.access_list);

                            if let Some(tls_config) = opt_tls_config.as_ref() {
                                match create_rustls_config(
                                    &config.network.tls_certificate_path,
                                    &config.network.tls_private_key_path,
                                ) {
                                    Ok(config) => {
                                        tls_config.store(Arc::new(config));

                                        ::log::info!("successfully updated tls config");
                                    }
                                    Err(err) => {
                                        ::log::error!("could not update tls config: {:#}", err)
                                    }
                                }
                            }
                        }
                        _ => unreachable!(),
                    }
                }

                Ok(())
            })
            .context("spawn signal worker")?;

        join_handles.push((WorkerType::Signals, handle));
    }

    loop {
        for (i, (_, handle)) in join_handles.iter().enumerate() {
            if handle.is_finished() {
                let (worker_type, handle) = join_handles.remove(i);

                match handle.join() {
                    Ok(Ok(())) => {
                        return Err(anyhow::anyhow!("{} stopped", worker_type));
                    }
                    Ok(Err(err)) => {
                        return Err(err.context(format!("{} stopped", worker_type)));
                    }
                    Err(_) => {
                        return Err(anyhow::anyhow!("{} panicked", worker_type));
                    }
                }
            }
        }

        sleep(Duration::from_secs(5));
    }
}
15 apps/aquatic/crates/http/src/main.rs Normal file
@ -0,0 +1,15 @@
use aquatic_common::cli::run_app_with_cli_and_config;
use aquatic_http::config::Config;

#[cfg(feature = "mimalloc")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

fn main() {
    run_app_with_cli_and_config::<Config>(
        aquatic_http::APP_NAME,
        aquatic_http::APP_VERSION,
        aquatic_http::run,
        None,
    )
}
2 apps/aquatic/crates/http/src/workers/mod.rs Normal file
@ -0,0 +1,2 @@
pub mod socket;
pub mod swarm;
466 apps/aquatic/crates/http/src/workers/socket/connection.rs Normal file
@ -0,0 +1,466 @@
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::rc::Rc;
use std::sync::Arc;

use anyhow::Context;
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
use aquatic_common::rustls_config::RustlsConfig;
use aquatic_common::{CanonicalSocketAddr, ServerStartInstant};
use aquatic_http_protocol::common::InfoHash;
use aquatic_http_protocol::request::{Request, ScrapeRequest};
use aquatic_http_protocol::response::{
    FailureResponse, Response, ScrapeResponse, ScrapeStatistics,
};
use arc_swap::ArcSwap;
use futures::stream::FuturesUnordered;
use futures_lite::{AsyncReadExt, AsyncWriteExt, StreamExt};
use futures_rustls::TlsAcceptor;
use glommio::channels::channel_mesh::Senders;
use glommio::channels::shared_channel::{self, SharedReceiver};
use glommio::net::TcpStream;
use once_cell::sync::Lazy;

use crate::common::*;
use crate::config::Config;

#[cfg(feature = "metrics")]
use super::peer_addr_to_ip_version_str;
use super::request::{parse_request, RequestParseError};

const REQUEST_BUFFER_SIZE: usize = 2048;
const RESPONSE_BUFFER_SIZE: usize = 4096;

const RESPONSE_HEADER_A: &[u8] = b"HTTP/1.1 200 OK\r\nContent-Length: ";
const RESPONSE_HEADER_B: &[u8] = b" ";
const RESPONSE_HEADER_C: &[u8] = b"\r\n\r\n";

static RESPONSE_HEADER: Lazy<Vec<u8>> =
    Lazy::new(|| [RESPONSE_HEADER_A, RESPONSE_HEADER_B, RESPONSE_HEADER_C].concat());

struct PendingScrapeResponse {
    pending_worker_responses: usize,
    stats: BTreeMap<InfoHash, ScrapeStatistics>,
}

#[derive(Debug, thiserror::Error)]
pub enum ConnectionError {
    #[error("inactive")]
    Inactive,
    #[error("socket peer addr extraction failed")]
    NoSocketPeerAddr(String),
    #[error("request buffer full")]
    RequestBufferFull,
    #[error("response buffer full")]
    ResponseBufferFull,
    #[error("response buffer write error: {0}")]
    ResponseBufferWrite(::std::io::Error),
    #[error("peer closed")]
    PeerClosed,
    #[error("response sender closed")]
    ResponseSenderClosed,
    #[error("scrape channel error: {0}")]
    ScrapeChannelError(&'static str),
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

#[allow(clippy::too_many_arguments)]
pub(super) async fn run_connection(
    config: Rc<Config>,
    access_list: Arc<AccessListArcSwap>,
    request_senders: Rc<Senders<ChannelRequest>>,
    server_start_instant: ServerStartInstant,
    opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
    valid_until: Rc<RefCell<ValidUntil>>,
    stream: TcpStream,
    worker_index: usize,
) -> Result<(), ConnectionError> {
    let access_list_cache = create_access_list_cache(&access_list);
    let request_buffer = Box::new([0u8; REQUEST_BUFFER_SIZE]);

    let mut response_buffer = Box::new([0; RESPONSE_BUFFER_SIZE]);

    response_buffer[..RESPONSE_HEADER.len()].copy_from_slice(&RESPONSE_HEADER);

    let remote_addr = stream
        .peer_addr()
        .map_err(|err| ConnectionError::NoSocketPeerAddr(err.to_string()))?;

    let opt_peer_addr = if config.network.runs_behind_reverse_proxy {
        None
    } else {
        Some(CanonicalSocketAddr::new(remote_addr))
    };

    let peer_port = remote_addr.port();

    if let Some(tls_config) = opt_tls_config {
        let tls_acceptor: TlsAcceptor = tls_config.load_full().into();
        let stream = tls_acceptor
            .accept(stream)
            .await
            .with_context(|| "tls accept")?;

        let mut conn = Connection {
            config,
            access_list_cache,
            request_senders,
            valid_until,
            server_start_instant,
            peer_port,
            request_buffer,
            request_buffer_position: 0,
            response_buffer,
            stream,
            worker_index_string: worker_index.to_string(),
        };

        conn.run(opt_peer_addr).await
    } else {
        let mut conn = Connection {
            config,
            access_list_cache,
            request_senders,
            valid_until,
            server_start_instant,
            peer_port,
            request_buffer,
            request_buffer_position: 0,
            response_buffer,
            stream,
            worker_index_string: worker_index.to_string(),
        };

        conn.run(opt_peer_addr).await
    }
}

struct Connection<S> {
    config: Rc<Config>,
    access_list_cache: AccessListCache,
    request_senders: Rc<Senders<ChannelRequest>>,
    valid_until: Rc<RefCell<ValidUntil>>,
    server_start_instant: ServerStartInstant,
    peer_port: u16,
    request_buffer: Box<[u8; REQUEST_BUFFER_SIZE]>,
    request_buffer_position: usize,
    response_buffer: Box<[u8; RESPONSE_BUFFER_SIZE]>,
    stream: S,
    worker_index_string: String,
}

impl<S> Connection<S>
where
    S: futures::AsyncRead + futures::AsyncWrite + Unpin + 'static,
{
    async fn run(
        &mut self,
        // Set unless running behind reverse proxy
        opt_stable_peer_addr: Option<CanonicalSocketAddr>,
    ) -> Result<(), ConnectionError> {
        loop {
            let (request, opt_peer_addr) = self.read_request().await?;

            let peer_addr = opt_stable_peer_addr
                .or(opt_peer_addr)
                .ok_or(anyhow::anyhow!("Could not extract peer addr"))?;

            let response = self.handle_request(request, peer_addr).await?;

            self.write_response(&response, peer_addr).await?;

            if !self.config.network.keep_alive {
                break;
            }
        }

        Ok(())
    }

    async fn read_request(
        &mut self,
    ) -> Result<(Request, Option<CanonicalSocketAddr>), ConnectionError> {
        self.request_buffer_position = 0;

        loop {
            if self.request_buffer_position == self.request_buffer.len() {
                return Err(ConnectionError::RequestBufferFull);
            }

            let bytes_read = self
                .stream
                .read(&mut self.request_buffer[self.request_buffer_position..])
                .await
                .with_context(|| "read")?;

            if bytes_read == 0 {
                return Err(ConnectionError::PeerClosed);
            }

            self.request_buffer_position += bytes_read;

            let buffer_slice = &self.request_buffer[..self.request_buffer_position];

            match parse_request(&self.config, buffer_slice) {
                Ok((request, opt_peer_ip)) => {
                    let opt_peer_addr = if self.config.network.runs_behind_reverse_proxy {
                        let peer_ip = opt_peer_ip
                            .expect("logic error: peer ip must have been extracted at this point");

                        Some(CanonicalSocketAddr::new(SocketAddr::new(
                            peer_ip,
                            self.peer_port,
                        )))
                    } else {
                        None
                    };

                    return Ok((request, opt_peer_addr));
                }
                Err(RequestParseError::MoreDataNeeded) => continue,
                Err(RequestParseError::RequiredPeerIpHeaderMissing(err)) => {
                    panic!("Tracker configured as running behind reverse proxy, but no corresponding IP header set in request. Please check your reverse proxy setup as well as your aquatic configuration. Error: {:#}", err);
                }
                Err(RequestParseError::Other(err)) => {
                    ::log::debug!("Failed parsing request: {:#}", err);
                }
            }
        }
    }

    /// Take a request and:
    /// - Update connection ValidUntil
    /// - Return error response if request is not allowed
    /// - If it is an announce request, send it to a swarm worker and await a
    ///   response
    /// - If it is a scrape request, split it up, pass on the parts to the
    ///   relevant swarm workers and await a response
    async fn handle_request(
        &mut self,
        request: Request,
        peer_addr: CanonicalSocketAddr,
    ) -> Result<Response, ConnectionError> {
        *self.valid_until.borrow_mut() = ValidUntil::new(
            self.server_start_instant,
            self.config.cleaning.max_connection_idle,
        );

        match request {
            Request::Announce(request) => {
                #[cfg(feature = "metrics")]
                ::metrics::counter!(
                    "aquatic_requests_total",
                    "type" => "announce",
                    "ip_version" => peer_addr_to_ip_version_str(&peer_addr),
                    "worker_index" => self.worker_index_string.clone(),
                )
                .increment(1);

                let info_hash = request.info_hash;

                if self
                    .access_list_cache
                    .load()
                    .allows(self.config.access_list.mode, &info_hash.0)
                {
                    let (response_sender, response_receiver) = shared_channel::new_bounded(1);

                    let request = ChannelRequest::Announce {
                        request,
                        peer_addr,
                        response_sender,
                    };

                    let consumer_index = calculate_request_consumer_index(&self.config, info_hash);

                    // Only fails when receiver is closed
                    self.request_senders
                        .send_to(consumer_index, request)
                        .await
                        .unwrap();

                    response_receiver
                        .connect()
                        .await
                        .recv()
                        .await
                        .ok_or(ConnectionError::ResponseSenderClosed)
                        .map(Response::Announce)
                } else {
                    let response = Response::Failure(FailureResponse {
                        failure_reason: "Info hash not allowed".into(),
                    });

                    Ok(response)
                }
            }
            Request::Scrape(ScrapeRequest { info_hashes }) => {
                #[cfg(feature = "metrics")]
                ::metrics::counter!(
                    "aquatic_requests_total",
                    "type" => "scrape",
                    "ip_version" => peer_addr_to_ip_version_str(&peer_addr),
                    "worker_index" => self.worker_index_string.clone(),
                )
                .increment(1);

                let mut info_hashes_by_worker: BTreeMap<usize, Vec<InfoHash>> = BTreeMap::new();

                for info_hash in info_hashes.into_iter() {
                    let info_hashes = info_hashes_by_worker
                        .entry(calculate_request_consumer_index(&self.config, info_hash))
|
||||
.or_default();
|
||||
|
||||
info_hashes.push(info_hash);
|
||||
}
|
||||
|
||||
let pending_worker_responses = info_hashes_by_worker.len();
|
||||
let mut response_receivers = Vec::with_capacity(pending_worker_responses);
|
||||
|
||||
for (consumer_index, info_hashes) in info_hashes_by_worker {
|
||||
let (response_sender, response_receiver) = shared_channel::new_bounded(1);
|
||||
|
||||
response_receivers.push(response_receiver);
|
||||
|
||||
let request = ChannelRequest::Scrape {
|
||||
request: ScrapeRequest { info_hashes },
|
||||
peer_addr,
|
||||
response_sender,
|
||||
};
|
||||
|
||||
// Only fails when receiver is closed
|
||||
self.request_senders
|
||||
.send_to(consumer_index, request)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let pending_scrape_response = PendingScrapeResponse {
|
||||
pending_worker_responses,
|
||||
stats: Default::default(),
|
||||
};
|
||||
|
||||
self.wait_for_scrape_responses(response_receivers, pending_scrape_response)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wait for partial scrape responses to arrive,
|
||||
/// return full response
|
||||
async fn wait_for_scrape_responses(
|
||||
&self,
|
||||
response_receivers: Vec<SharedReceiver<ScrapeResponse>>,
|
||||
mut pending: PendingScrapeResponse,
|
||||
) -> Result<Response, ConnectionError> {
|
||||
let mut responses = response_receivers
|
||||
.into_iter()
|
||||
.map(|receiver| async { receiver.connect().await.recv().await })
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
loop {
|
||||
let response = responses
|
||||
.next()
|
||||
.await
|
||||
.ok_or_else(|| {
|
||||
ConnectionError::ScrapeChannelError(
|
||||
"stream ended before all partial scrape responses received",
|
||||
)
|
||||
})?
|
||||
.ok_or_else(|| ConnectionError::ScrapeChannelError("sender is closed"))?;
|
||||
|
||||
pending.stats.extend(response.files);
|
||||
pending.pending_worker_responses -= 1;
|
||||
|
||||
if pending.pending_worker_responses == 0 {
|
||||
let response = Response::Scrape(ScrapeResponse {
|
||||
files: pending.stats,
|
||||
});
|
||||
|
||||
break Ok(response);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn write_response(
|
||||
&mut self,
|
||||
response: &Response,
|
||||
peer_addr: CanonicalSocketAddr,
|
||||
) -> Result<(), ConnectionError> {
|
||||
// Write body and final newline to response buffer
|
||||
|
||||
let mut position = RESPONSE_HEADER.len();
|
||||
|
||||
let body_len = response
|
||||
.write_bytes(&mut &mut self.response_buffer[position..])
|
||||
.map_err(ConnectionError::ResponseBufferWrite)?;
|
||||
|
||||
position += body_len;
|
||||
|
||||
if position + 2 > self.response_buffer.len() {
|
||||
return Err(ConnectionError::ResponseBufferFull);
|
||||
}
|
||||
|
||||
self.response_buffer[position..position + 2].copy_from_slice(b"\r\n");
|
||||
|
||||
position += 2;
|
||||
|
||||
let content_len = body_len + 2;
|
||||
|
||||
// Clear content-len header value
|
||||
|
||||
{
|
||||
let start = RESPONSE_HEADER_A.len();
|
||||
let end = start + RESPONSE_HEADER_B.len();
|
||||
|
||||
self.response_buffer[start..end].copy_from_slice(RESPONSE_HEADER_B);
|
||||
}
|
||||
|
||||
// Set content-len header value
|
||||
|
||||
{
|
||||
let mut buf = ::itoa::Buffer::new();
|
||||
let content_len_bytes = buf.format(content_len).as_bytes();
|
||||
|
||||
let start = RESPONSE_HEADER_A.len();
|
||||
let end = start + content_len_bytes.len();
|
||||
|
||||
self.response_buffer[start..end].copy_from_slice(content_len_bytes);
|
||||
}
|
||||
|
||||
// Write buffer to stream
|
||||
|
||||
self.stream
|
||||
.write(&self.response_buffer[..position])
|
||||
.await
|
||||
.with_context(|| "write")?;
|
||||
self.stream.flush().await.with_context(|| "flush")?;
|
||||
|
||||
#[cfg(feature = "metrics")]
|
||||
{
|
||||
let response_type = match response {
|
||||
Response::Announce(_) => "announce",
|
||||
Response::Scrape(_) => "scrape",
|
||||
Response::Failure(_) => "error",
|
||||
};
|
||||
|
||||
let ip_version_str = peer_addr_to_ip_version_str(&peer_addr);
|
||||
|
||||
::metrics::counter!(
|
||||
"aquatic_responses_total",
|
||||
"type" => response_type,
|
||||
"ip_version" => ip_version_str,
|
||||
"worker_index" => self.worker_index_string.clone(),
|
||||
)
|
||||
.increment(1);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn calculate_request_consumer_index(config: &Config, info_hash: InfoHash) -> usize {
|
||||
(info_hash.0[0] as usize) % config.swarm_workers
|
||||
}
|
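The doc comment on `handle_request` above describes splitting scrape requests across swarm workers by info hash. A minimal, self-contained sketch of that fan-out grouping, using the same first-byte sharding as `calculate_request_consumer_index`; the `num_workers` value and the `main` harness here are illustrative only, not part of the tracker:

```rust
use std::collections::BTreeMap;

// Hypothetical stand-in for the tracker's InfoHash newtype
type InfoHash = [u8; 20];

// Same scheme as calculate_request_consumer_index above:
// shard on the first info hash byte
fn consumer_index(info_hash: &InfoHash, num_workers: usize) -> usize {
    (info_hash[0] as usize) % num_workers
}

fn group_by_worker(
    info_hashes: Vec<InfoHash>,
    num_workers: usize,
) -> BTreeMap<usize, Vec<InfoHash>> {
    let mut by_worker: BTreeMap<usize, Vec<InfoHash>> = BTreeMap::new();

    for info_hash in info_hashes {
        by_worker
            .entry(consumer_index(&info_hash, num_workers))
            .or_default()
            .push(info_hash);
    }

    by_worker
}

fn main() {
    let hashes = vec![[0u8; 20], [1u8; 20], [4u8; 20]];

    // With 2 workers: first bytes 0 and 4 map to worker 0, 1 to worker 1,
    // so this prints "worker 0: 2 info hashes" and "worker 1: 1 info hashes"
    for (worker, group) in group_by_worker(hashes, 2) {
        println!("worker {}: {} info hashes", worker, group.len());
    }
}
```

One partial scrape response is then awaited per non-empty group, which is why `pending_worker_responses` above is the number of groups rather than the number of info hashes.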
302
apps/aquatic/crates/http/src/workers/socket/mod.rs
Normal file
@@ -0,0 +1,302 @@
mod connection;
mod request;

use std::cell::RefCell;
use std::net::SocketAddr;
use std::os::unix::prelude::{FromRawFd, IntoRawFd};
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context;
use aquatic_common::access_list::AccessList;
use aquatic_common::privileges::PrivilegeDropper;
use aquatic_common::rustls_config::RustlsConfig;
use aquatic_common::{CanonicalSocketAddr, ServerStartInstant};
use arc_swap::{ArcSwap, ArcSwapAny};
use futures_lite::future::race;
use futures_lite::StreamExt;
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
use glommio::channels::local_channel::{new_bounded, LocalReceiver, LocalSender};
use glommio::net::{TcpListener, TcpStream};
use glommio::timer::TimerActionRepeat;
use glommio::{enclose, prelude::*};
use slotmap::HopSlotMap;

use crate::common::*;
use crate::config::Config;
use crate::workers::socket::connection::{run_connection, ConnectionError};

struct ConnectionHandle {
    close_conn_sender: LocalSender<()>,
    valid_until: Rc<RefCell<ValidUntil>>,
}

#[allow(clippy::too_many_arguments)]
pub async fn run_socket_worker(
    config: Config,
    state: State,
    opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
    request_mesh_builder: MeshBuilder<ChannelRequest, Partial>,
    mut priv_droppers: Vec<PrivilegeDropper>,
    server_start_instant: ServerStartInstant,
    worker_index: usize,
) -> anyhow::Result<()> {
    let config = Rc::new(config);

    let tcp_listeners = {
        let opt_listener_ipv4 = if config.network.use_ipv4 {
            let priv_dropper = priv_droppers
                .pop()
                .ok_or(anyhow::anyhow!("not enough priv droppers"))?;
            let socket =
                create_tcp_listener(&config, priv_dropper, config.network.address_ipv4.into())
                    .context("create tcp listener")?;

            Some(socket)
        } else {
            None
        };
        let opt_listener_ipv6 = if config.network.use_ipv6 {
            let priv_dropper = priv_droppers
                .pop()
                .ok_or(anyhow::anyhow!("not enough priv droppers"))?;
            let socket =
                create_tcp_listener(&config, priv_dropper, config.network.address_ipv6.into())
                    .context("create tcp listener")?;

            Some(socket)
        } else {
            None
        };

        [opt_listener_ipv4, opt_listener_ipv6]
            .into_iter()
            .flatten()
            .collect::<Vec<_>>()
    };

    let (request_senders, _) = request_mesh_builder
        .join(Role::Producer)
        .await
        .map_err(|err| anyhow::anyhow!("join request mesh: {:#}", err))?;
    let request_senders = Rc::new(request_senders);

    let connection_handles = Rc::new(RefCell::new(HopSlotMap::with_key()));

    TimerActionRepeat::repeat(enclose!((config, connection_handles) move || {
        clean_connections(
            config.clone(),
            connection_handles.clone(),
            server_start_instant,
        )
    }));

    let tasks = tcp_listeners
        .into_iter()
        .map(|tcp_listener| {
            let listener_state = ListenerState {
                config: config.clone(),
                access_list: state.access_list.clone(),
                opt_tls_config: opt_tls_config.clone(),
                server_start_instant,
                connection_handles: connection_handles.clone(),
                request_senders: request_senders.clone(),
                worker_index,
            };

            spawn_local(listener_state.accept_connections(tcp_listener))
        })
        .collect::<Vec<_>>();

    for task in tasks {
        task.await;
    }

    Ok(())
}

#[derive(Clone)]
struct ListenerState {
    config: Rc<Config>,
    access_list: Arc<ArcSwapAny<Arc<AccessList>>>,
    opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
    server_start_instant: ServerStartInstant,
    connection_handles: Rc<RefCell<HopSlotMap<ConnectionId, ConnectionHandle>>>,
    request_senders: Rc<Senders<ChannelRequest>>,
    worker_index: usize,
}

impl ListenerState {
    async fn accept_connections(self, listener: TcpListener) {
        let mut incoming = listener.incoming();

        while let Some(stream) = incoming.next().await {
            match stream {
                Ok(stream) => {
                    let (close_conn_sender, close_conn_receiver) = new_bounded(1);

                    let valid_until = Rc::new(RefCell::new(ValidUntil::new(
                        self.server_start_instant,
                        self.config.cleaning.max_connection_idle,
                    )));

                    let connection_id =
                        self.connection_handles
                            .borrow_mut()
                            .insert(ConnectionHandle {
                                close_conn_sender,
                                valid_until: valid_until.clone(),
                            });

                    spawn_local(self.clone().handle_connection(
                        close_conn_receiver,
                        valid_until,
                        connection_id,
                        stream,
                    ))
                    .detach();
                }
                Err(err) => {
                    ::log::error!("accept connection: {:?}", err);
                }
            }
        }
    }

    async fn handle_connection(
        self,
        close_conn_receiver: LocalReceiver<()>,
        valid_until: Rc<RefCell<ValidUntil>>,
        connection_id: ConnectionId,
        stream: TcpStream,
    ) {
        #[cfg(feature = "metrics")]
        let active_connections_gauge = ::metrics::gauge!(
            "aquatic_active_connections",
            "worker_index" => self.worker_index.to_string(),
        );

        #[cfg(feature = "metrics")]
        active_connections_gauge.increment(1.0);

        let f1 = async {
            run_connection(
                self.config,
                self.access_list,
                self.request_senders,
                self.server_start_instant,
                self.opt_tls_config,
                valid_until.clone(),
                stream,
                self.worker_index,
            )
            .await
        };
        let f2 = async {
            close_conn_receiver.recv().await;

            Err(ConnectionError::Inactive)
        };

        let result = race(f1, f2).await;

        #[cfg(feature = "metrics")]
        active_connections_gauge.decrement(1.0);

        match result {
            Ok(()) => (),
            Err(
                err @ (ConnectionError::ResponseBufferWrite(_)
                | ConnectionError::ResponseBufferFull
                | ConnectionError::ScrapeChannelError(_)
                | ConnectionError::ResponseSenderClosed),
            ) => {
                ::log::error!("connection closed: {:#}", err);
            }
            Err(err @ ConnectionError::RequestBufferFull) => {
                ::log::info!("connection closed: {:#}", err);
            }
            Err(err) => {
                ::log::debug!("connection closed: {:#}", err);
            }
        }

        self.connection_handles.borrow_mut().remove(connection_id);
    }
}

async fn clean_connections(
    config: Rc<Config>,
    connection_slab: Rc<RefCell<HopSlotMap<ConnectionId, ConnectionHandle>>>,
    server_start_instant: ServerStartInstant,
) -> Option<Duration> {
    let now = server_start_instant.seconds_elapsed();

    connection_slab.borrow_mut().retain(|_, handle| {
        if handle.valid_until.borrow().valid(now) {
            true
        } else {
            let _ = handle.close_conn_sender.try_send(());

            false
        }
    });

    Some(Duration::from_secs(
        config.cleaning.connection_cleaning_interval,
    ))
}

fn create_tcp_listener(
    config: &Config,
    priv_dropper: PrivilegeDropper,
    address: SocketAddr,
) -> anyhow::Result<TcpListener> {
    let socket = if address.is_ipv4() {
        socket2::Socket::new(
            socket2::Domain::IPV4,
            socket2::Type::STREAM,
            Some(socket2::Protocol::TCP),
        )?
    } else {
        let socket = socket2::Socket::new(
            socket2::Domain::IPV6,
            socket2::Type::STREAM,
            Some(socket2::Protocol::TCP),
        )?;

        if config.network.set_only_ipv6 {
            socket
                .set_only_v6(true)
                .with_context(|| "socket: set only ipv6")?;
        }

        socket
    };

    socket
        .set_reuse_port(true)
        .with_context(|| "socket: set reuse port")?;

    socket
        .bind(&address.into())
        .with_context(|| format!("socket: bind to {}", address))?;

    socket
        .listen(config.network.tcp_backlog)
        .with_context(|| format!("socket: listen on {}", address))?;

    priv_dropper.after_socket_creation()?;

    Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) })
}

#[cfg(feature = "metrics")]
fn peer_addr_to_ip_version_str(addr: &CanonicalSocketAddr) -> &'static str {
    if addr.is_ipv4() {
        "4"
    } else {
        "6"
    }
}
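`create_tcp_listener` above sets SO_REUSEPORT before handing the raw fd to glommio, which is what lets every socket worker bind its own listener to the same address while the kernel spreads incoming connections between them. A minimal sketch of that pattern with `std::net` instead of glommio, assuming the socket2 and anyhow crates; the address and backlog are illustrative:

```rust
use std::net::TcpListener;
use std::os::unix::prelude::{FromRawFd, IntoRawFd};

fn reuse_port_listener(addr: std::net::SocketAddr) -> anyhow::Result<TcpListener> {
    let socket = socket2::Socket::new(
        socket2::Domain::IPV4,
        socket2::Type::STREAM,
        Some(socket2::Protocol::TCP),
    )?;

    // With SO_REUSEPORT, each worker can create its own listener on the
    // same address; the kernel distributes accepted connections
    socket.set_reuse_port(true)?;
    socket.bind(&addr.into())?;
    socket.listen(128)?;

    Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) })
}

fn main() -> anyhow::Result<()> {
    let addr = "127.0.0.1:3000".parse()?;

    // Both binds succeed thanks to SO_REUSEPORT
    let _worker_a = reuse_port_listener(addr)?;
    let _worker_b = reuse_port_listener(addr)?;

    Ok(())
}
```

The privilege dropper is invoked only after the socket is created, since binding to privileged ports may require the elevated privileges that are dropped afterwards.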
147
apps/aquatic/crates/http/src/workers/socket/request.rs
Normal file
@@ -0,0 +1,147 @@
use std::net::IpAddr;

use anyhow::Context;
use aquatic_http_protocol::request::Request;

use crate::config::{Config, ReverseProxyPeerIpHeaderFormat};

#[derive(Debug, thiserror::Error)]
pub enum RequestParseError {
    #[error("required peer ip header missing or invalid")]
    RequiredPeerIpHeaderMissing(anyhow::Error),
    #[error("more data needed")]
    MoreDataNeeded,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub fn parse_request(
    config: &Config,
    buffer: &[u8],
) -> Result<(Request, Option<IpAddr>), RequestParseError> {
    let mut headers = [httparse::EMPTY_HEADER; 16];
    let mut http_request = httparse::Request::new(&mut headers);

    match http_request.parse(buffer).with_context(|| "httparse")? {
        httparse::Status::Complete(_) => {
            let path = http_request.path.ok_or(anyhow::anyhow!("no http path"))?;
            let request = Request::parse_http_get_path(path)?;

            let opt_peer_ip = if config.network.runs_behind_reverse_proxy {
                let header_name = &config.network.reverse_proxy_ip_header_name;
                let header_format = config.network.reverse_proxy_ip_header_format;

                match parse_forwarded_header(header_name, header_format, http_request.headers) {
                    Ok(peer_ip) => Some(peer_ip),
                    Err(err) => {
                        return Err(RequestParseError::RequiredPeerIpHeaderMissing(err));
                    }
                }
            } else {
                None
            };

            Ok((request, opt_peer_ip))
        }
        httparse::Status::Partial => Err(RequestParseError::MoreDataNeeded),
    }
}

fn parse_forwarded_header(
    header_name: &str,
    header_format: ReverseProxyPeerIpHeaderFormat,
    headers: &[httparse::Header<'_>],
) -> anyhow::Result<IpAddr> {
    for header in headers.iter().rev() {
        if header.name == header_name {
            match header_format {
                ReverseProxyPeerIpHeaderFormat::LastAddress => {
                    return ::std::str::from_utf8(header.value)?
                        .split(',')
                        .last()
                        .ok_or(anyhow::anyhow!("no header value"))?
                        .trim()
                        .parse::<IpAddr>()
                        .with_context(|| "parse ip");
                }
            }
        }
    }

    Err(anyhow::anyhow!("header not present"))
}

#[cfg(test)]
mod tests {
    use super::*;

    const REQUEST_START: &str = "GET /announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-ABC940-5ert69muw5t8&port=12345&uploaded=1&downloaded=2&left=3&numwant=0&key=4ab4b877&compact=1&supportcrypto=1&event=started HTTP/1.1\r\nHost: example.com\r\n";

    #[test]
    fn test_parse_peer_ip_header_multiple() {
        let mut config = Config::default();

        config.network.runs_behind_reverse_proxy = true;
        config.network.reverse_proxy_ip_header_name = "X-Forwarded-For".into();
        config.network.reverse_proxy_ip_header_format = ReverseProxyPeerIpHeaderFormat::LastAddress;

        let mut request = REQUEST_START.to_string();

        request.push_str("X-Forwarded-For: 200.0.0.1\r\n");
        request.push_str("X-Forwarded-For: 1.2.3.4, 5.6.7.8,9.10.11.12\r\n");
        request.push_str("\r\n");

        let expected_ip = IpAddr::from([9, 10, 11, 12]);

        assert_eq!(
            parse_request(&config, request.as_bytes())
                .unwrap()
                .1
                .unwrap(),
            expected_ip
        )
    }

    #[test]
    fn test_parse_peer_ip_header_single() {
        let mut config = Config::default();

        config.network.runs_behind_reverse_proxy = true;
        config.network.reverse_proxy_ip_header_name = "X-Forwarded-For".into();
        config.network.reverse_proxy_ip_header_format = ReverseProxyPeerIpHeaderFormat::LastAddress;

        let mut request = REQUEST_START.to_string();

        request.push_str("X-Forwarded-For: 1.2.3.4, 5.6.7.8,9.10.11.12\r\n");
        request.push_str("X-Forwarded-For: 200.0.0.1\r\n");
        request.push_str("\r\n");

        let expected_ip = IpAddr::from([200, 0, 0, 1]);

        assert_eq!(
            parse_request(&config, request.as_bytes())
                .unwrap()
                .1
                .unwrap(),
            expected_ip
        )
    }

    #[test]
    fn test_parse_peer_ip_header_no_header() {
        let mut config = Config::default();

        config.network.runs_behind_reverse_proxy = true;

        let mut request = REQUEST_START.to_string();

        request.push_str("\r\n");

        let res = parse_request(&config, request.as_bytes());

        assert!(matches!(
            res,
            Err(RequestParseError::RequiredPeerIpHeaderMissing(_))
        ));
    }
}
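One note on the `LastAddress` strategy in `parse_forwarded_header` above: clients can freely prepend entries to `X-Forwarded-For`, but the rightmost entry of the last header occurrence is generally the one appended by the operator's own reverse proxy, so the code iterates headers in reverse and takes the last comma-separated element. A minimal sketch of just that selection step, outside the tracker:

```rust
// Sketch of the LastAddress selection used by parse_forwarded_header
fn last_forwarded_ip(header_value: &str) -> Option<std::net::IpAddr> {
    header_value
        .split(',')
        .last()? // rightmost entry: appended by the trusted proxy
        .trim()
        .parse()
        .ok()
}

fn main() {
    let ip = last_forwarded_ip("1.2.3.4, 5.6.7.8,9.10.11.12").unwrap();

    assert_eq!(ip, std::net::IpAddr::from([9, 10, 11, 12]));
}
```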
135
apps/aquatic/crates/http/src/workers/swarm/mod.rs
Normal file
@@ -0,0 +1,135 @@
mod storage;

use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;

use futures_lite::{Stream, StreamExt};
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role};
use glommio::timer::TimerActionRepeat;
use glommio::{enclose, prelude::*};
use rand::prelude::SmallRng;
use rand::SeedableRng;

use aquatic_common::{ServerStartInstant, ValidUntil};

use crate::common::*;
use crate::config::Config;

use self::storage::TorrentMaps;

pub async fn run_swarm_worker(
    config: Config,
    state: State,
    request_mesh_builder: MeshBuilder<ChannelRequest, Partial>,
    server_start_instant: ServerStartInstant,
    worker_index: usize,
) -> anyhow::Result<()> {
    let (_, mut request_receivers) = request_mesh_builder
        .join(Role::Consumer)
        .await
        .map_err(|err| anyhow::anyhow!("join request mesh: {:#}", err))?;

    let torrents = Rc::new(RefCell::new(TorrentMaps::new(worker_index)));
    let access_list = state.access_list;

    // Periodically clean torrents
    TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
        enclose!((config, torrents, access_list) move || async move {
            torrents.borrow_mut().clean(&config, &access_list, server_start_instant);

            Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
        })()
    }));

    let max_peer_age = config.cleaning.max_peer_age;
    let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(
        server_start_instant,
        max_peer_age,
    )));

    // Periodically update peer_valid_until
    TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
        enclose!((peer_valid_until) move || async move {
            *peer_valid_until.borrow_mut() = ValidUntil::new(server_start_instant, max_peer_age);

            Some(Duration::from_secs(1))
        })()
    }));

    // Periodically update torrent count metrics
    #[cfg(feature = "metrics")]
    TimerActionRepeat::repeat(enclose!((config, torrents) move || {
        enclose!((config, torrents) move || async move {
            torrents.borrow_mut().update_torrent_metrics();

            Some(Duration::from_secs(config.metrics.torrent_count_update_interval))
        })()
    }));

    let mut handles = Vec::new();

    for (_, receiver) in request_receivers.streams() {
        let handle = spawn_local(handle_request_stream(
            config.clone(),
            torrents.clone(),
            peer_valid_until.clone(),
            receiver,
        ))
        .detach();

        handles.push(handle);
    }

    for handle in handles {
        handle.await;
    }

    Ok(())
}

async fn handle_request_stream<S>(
    config: Config,
    torrents: Rc<RefCell<TorrentMaps>>,
    peer_valid_until: Rc<RefCell<ValidUntil>>,
    mut stream: S,
) where
    S: Stream<Item = ChannelRequest> + ::std::marker::Unpin,
{
    let mut rng = SmallRng::from_entropy();

    while let Some(channel_request) = stream.next().await {
        match channel_request {
            ChannelRequest::Announce {
                request,
                peer_addr,
                response_sender,
            } => {
                let response = torrents.borrow_mut().handle_announce_request(
                    &config,
                    &mut rng,
                    peer_valid_until.borrow().to_owned(),
                    peer_addr,
                    request,
                );

                if let Err(err) = response_sender.connect().await.send(response).await {
                    ::log::error!("swarm worker could not send announce response: {:#}", err);
                }
            }
            ChannelRequest::Scrape {
                request,
                peer_addr,
                response_sender,
            } => {
                let response = torrents
                    .borrow_mut()
                    .handle_scrape_request(&config, peer_addr, request);

                if let Err(err) = response_sender.connect().await.send(response).await {
                    ::log::error!("swarm worker could not send scrape response: {:#}", err);
                }
            }
        };
    }
}
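The three `TimerActionRepeat` blocks above share one contract: the closure returns a future whose `Option<Duration>` output decides whether, and after how long, the task runs again, and returning `None` stops it. A minimal sketch of that rescheduling loop, assuming glommio's API as used above; the five-iteration counter is a hypothetical stand-in for the cleaning work:

```rust
use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;

use glommio::timer::TimerActionRepeat;
use glommio::{enclose, LocalExecutor};

fn main() {
    let ex = LocalExecutor::default();

    ex.run(async {
        let runs = Rc::new(RefCell::new(0u32));

        let action = TimerActionRepeat::repeat(enclose!((runs) move || {
            enclose!((runs) move || async move {
                *runs.borrow_mut() += 1;

                // Some(..) reschedules the task, None stops it
                if *runs.borrow() < 5 {
                    Some(Duration::from_millis(10))
                } else {
                    None
                }
            })()
        }));

        action.join().await;

        assert_eq!(*runs.borrow(), 5);
    });
}
```

The doubled `enclose!` in the worker above exists because the outer closure must be callable repeatedly, so each invocation clones the shared `Rc` handles into a fresh `async move` block.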
543
apps/aquatic/crates/http/src/workers/swarm/storage.rs
Normal file
@@ -0,0 +1,543 @@
use std::collections::BTreeMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::sync::Arc;

use arrayvec::ArrayVec;
use rand::Rng;

use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
use aquatic_common::{
    CanonicalSocketAddr, IndexMap, SecondsSinceServerStart, ServerStartInstant, ValidUntil,
};
use aquatic_http_protocol::common::*;
use aquatic_http_protocol::request::*;
use aquatic_http_protocol::response::ResponsePeer;
use aquatic_http_protocol::response::*;

use crate::config::Config;

const SMALL_PEER_MAP_CAPACITY: usize = 4;

pub trait Ip: ::std::fmt::Debug + Copy + Eq + ::std::hash::Hash {}

impl Ip for Ipv4Addr {}
impl Ip for Ipv6Addr {}

pub struct TorrentMaps {
    pub ipv4: TorrentMap<Ipv4Addr>,
    pub ipv6: TorrentMap<Ipv6Addr>,
}

impl TorrentMaps {
    pub fn new(worker_index: usize) -> Self {
        Self {
            ipv4: TorrentMap::new(worker_index, true),
            ipv6: TorrentMap::new(worker_index, false),
        }
    }

    pub fn handle_announce_request(
        &mut self,
        config: &Config,
        rng: &mut impl Rng,
        valid_until: ValidUntil,
        peer_addr: CanonicalSocketAddr,
        request: AnnounceRequest,
    ) -> AnnounceResponse {
        match peer_addr.get().ip() {
            IpAddr::V4(peer_ip_address) => {
                let (seeders, leechers, response_peers) =
                    self.ipv4.upsert_peer_and_get_response_peers(
                        config,
                        rng,
                        valid_until,
                        peer_ip_address,
                        request,
                    );

                AnnounceResponse {
                    complete: seeders,
                    incomplete: leechers,
                    announce_interval: config.protocol.peer_announce_interval,
                    peers: ResponsePeerListV4(response_peers),
                    peers6: ResponsePeerListV6(vec![]),
                    warning_message: None,
                }
            }
            IpAddr::V6(peer_ip_address) => {
                let (seeders, leechers, response_peers) =
                    self.ipv6.upsert_peer_and_get_response_peers(
                        config,
                        rng,
                        valid_until,
                        peer_ip_address,
                        request,
                    );

                AnnounceResponse {
                    complete: seeders,
                    incomplete: leechers,
                    announce_interval: config.protocol.peer_announce_interval,
                    peers: ResponsePeerListV4(vec![]),
                    peers6: ResponsePeerListV6(response_peers),
                    warning_message: None,
                }
            }
        }
    }

    pub fn handle_scrape_request(
        &mut self,
        config: &Config,
        peer_addr: CanonicalSocketAddr,
        request: ScrapeRequest,
    ) -> ScrapeResponse {
        if peer_addr.get().ip().is_ipv4() {
            self.ipv4.handle_scrape_request(config, request)
        } else {
            self.ipv6.handle_scrape_request(config, request)
        }
    }

    #[cfg(feature = "metrics")]
    pub fn update_torrent_metrics(&self) {
        self.ipv4.torrent_gauge.set(self.ipv4.torrents.len() as f64);
        self.ipv6.torrent_gauge.set(self.ipv6.torrents.len() as f64);
    }

    pub fn clean(
        &mut self,
        config: &Config,
        access_list: &Arc<AccessListArcSwap>,
        server_start_instant: ServerStartInstant,
    ) {
        let mut access_list_cache = create_access_list_cache(access_list);

        let now = server_start_instant.seconds_elapsed();

        self.ipv4.clean(config, &mut access_list_cache, now);
        self.ipv6.clean(config, &mut access_list_cache, now);
    }
}

pub struct TorrentMap<I: Ip> {
    torrents: IndexMap<InfoHash, TorrentData<I>>,
    #[cfg(feature = "metrics")]
    peer_gauge: ::metrics::Gauge,
    #[cfg(feature = "metrics")]
    torrent_gauge: ::metrics::Gauge,
}

impl<I: Ip> TorrentMap<I> {
    fn new(worker_index: usize, ipv4: bool) -> Self {
        #[cfg(feature = "metrics")]
        let peer_gauge = if ipv4 {
            ::metrics::gauge!(
                "aquatic_peers",
                "ip_version" => "4",
                "worker_index" => worker_index.to_string(),
            )
        } else {
            ::metrics::gauge!(
                "aquatic_peers",
                "ip_version" => "6",
                "worker_index" => worker_index.to_string(),
            )
        };
        #[cfg(feature = "metrics")]
        let torrent_gauge = if ipv4 {
            ::metrics::gauge!(
                "aquatic_torrents",
                "ip_version" => "4",
                "worker_index" => worker_index.to_string(),
            )
        } else {
            ::metrics::gauge!(
                "aquatic_torrents",
                "ip_version" => "6",
                "worker_index" => worker_index.to_string(),
            )
        };

        Self {
            torrents: Default::default(),
            #[cfg(feature = "metrics")]
            peer_gauge,
            #[cfg(feature = "metrics")]
            torrent_gauge,
        }
    }

    fn upsert_peer_and_get_response_peers(
        &mut self,
        config: &Config,
        rng: &mut impl Rng,
        valid_until: ValidUntil,
        peer_ip_address: I,
        request: AnnounceRequest,
    ) -> (usize, usize, Vec<ResponsePeer<I>>) {
        self.torrents
            .entry(request.info_hash)
            .or_default()
            .upsert_peer_and_get_response_peers(
                config,
                rng,
                request,
                peer_ip_address,
                valid_until,
                #[cfg(feature = "metrics")]
                &self.peer_gauge,
            )
    }

    fn handle_scrape_request(&mut self, config: &Config, request: ScrapeRequest) -> ScrapeResponse {
        let num_to_take = request
            .info_hashes
            .len()
            .min(config.protocol.max_scrape_torrents);

        let mut response = ScrapeResponse {
            files: BTreeMap::new(),
        };

        for info_hash in request.info_hashes.into_iter().take(num_to_take) {
            let stats = self
                .torrents
                .get(&info_hash)
                .map(|torrent_data| torrent_data.scrape_statistics())
                .unwrap_or(ScrapeStatistics {
                    complete: 0,
                    incomplete: 0,
                    downloaded: 0,
                });

            response.files.insert(info_hash, stats);
        }

        response
    }

    fn clean(
        &mut self,
        config: &Config,
        access_list_cache: &mut AccessListCache,
        now: SecondsSinceServerStart,
    ) {
        let mut total_num_peers = 0;

        self.torrents.retain(|info_hash, torrent_data| {
            if !access_list_cache
                .load()
                .allows(config.access_list.mode, &info_hash.0)
            {
                return false;
            }

            let num_peers = match torrent_data {
                TorrentData::Small(t) => t.clean_and_get_num_peers(now),
                TorrentData::Large(t) => t.clean_and_get_num_peers(now),
            };

            total_num_peers += num_peers as u64;

            num_peers > 0
        });

        self.torrents.shrink_to_fit();

        #[cfg(feature = "metrics")]
        self.peer_gauge.set(total_num_peers as f64);
    }
}

pub enum TorrentData<I: Ip> {
    Small(SmallPeerMap<I>),
    Large(LargePeerMap<I>),
}

impl<I: Ip> TorrentData<I> {
    fn upsert_peer_and_get_response_peers(
        &mut self,
        config: &Config,
        rng: &mut impl Rng,
        request: AnnounceRequest,
        ip_address: I,
        valid_until: ValidUntil,
        #[cfg(feature = "metrics")] peer_gauge: &::metrics::Gauge,
    ) -> (usize, usize, Vec<ResponsePeer<I>>) {
        let max_num_peers_to_take = match request.numwant {
            Some(0) | None => config.protocol.max_peers,
            Some(numwant) => numwant.min(config.protocol.max_peers),
        };

        let status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);

        let peer_map_key = ResponsePeer {
            ip_address,
            port: request.port,
        };

        // Create the response before inserting the peer. This means that we
        // don't have to filter it out from the response peers, and that the
        // reported number of seeders/leechers will not include it
        let (response_data, opt_removed_peer) = match self {
            Self::Small(peer_map) => {
                let opt_removed_peer = peer_map.remove(&peer_map_key);

                let (seeders, leechers) = peer_map.num_seeders_leechers();
                let response_peers = peer_map.extract_response_peers(max_num_peers_to_take);

                // Convert peer map to large variant if it is full and
                // announcing peer is not stopped and will therefore be
                // inserted
                if peer_map.is_full() && status != PeerStatus::Stopped {
                    *self = Self::Large(peer_map.to_large());
                }

                ((seeders, leechers, response_peers), opt_removed_peer)
            }
            Self::Large(peer_map) => {
                let opt_removed_peer = peer_map.remove_peer(&peer_map_key);

                let (seeders, leechers) = peer_map.num_seeders_leechers();
                let response_peers = peer_map.extract_response_peers(rng, max_num_peers_to_take);

                // Try shrinking the map if announcing peer is stopped and
                // will therefore not be inserted
                if status == PeerStatus::Stopped {
                    if let Some(peer_map) = peer_map.try_shrink() {
                        *self = Self::Small(peer_map);
                    }
                }

                ((seeders, leechers, response_peers), opt_removed_peer)
            }
        };

        match status {
            PeerStatus::Leeching | PeerStatus::Seeding => {
                #[cfg(feature = "metrics")]
                if opt_removed_peer.is_none() {
                    peer_gauge.increment(1.0);
                }

                let peer = Peer {
                    is_seeder: status == PeerStatus::Seeding,
                    valid_until,
                };

                match self {
                    Self::Small(peer_map) => peer_map.insert(peer_map_key, peer),
                    Self::Large(peer_map) => peer_map.insert(peer_map_key, peer),
                }
            }
            PeerStatus::Stopped =>
            {
                #[cfg(feature = "metrics")]
                if opt_removed_peer.is_some() {
                    peer_gauge.decrement(1.0);
                }
            }
        };

        response_data
    }

    fn scrape_statistics(&self) -> ScrapeStatistics {
        let (seeders, leechers) = match self {
            Self::Small(peer_map) => peer_map.num_seeders_leechers(),
            Self::Large(peer_map) => peer_map.num_seeders_leechers(),
        };

        ScrapeStatistics {
            complete: seeders,
            incomplete: leechers,
            downloaded: 0,
        }
    }
}

impl<I: Ip> Default for TorrentData<I> {
    fn default() -> Self {
        Self::Small(SmallPeerMap(ArrayVec::default()))
    }
}

/// Store torrents with very few peers without an extra heap allocation
///
/// On public open trackers, this is likely to be the majority of torrents.
#[derive(Default, Debug)]
pub struct SmallPeerMap<I: Ip>(ArrayVec<(ResponsePeer<I>, Peer), SMALL_PEER_MAP_CAPACITY>);

impl<I: Ip> SmallPeerMap<I> {
    fn is_full(&self) -> bool {
        self.0.is_full()
    }

    fn num_seeders_leechers(&self) -> (usize, usize) {
        let seeders = self.0.iter().filter(|(_, p)| p.is_seeder).count();
        let leechers = self.0.len() - seeders;

        (seeders, leechers)
    }

    fn insert(&mut self, key: ResponsePeer<I>, peer: Peer) {
        self.0.push((key, peer));
    }

    fn remove(&mut self, key: &ResponsePeer<I>) -> Option<Peer> {
        for (i, (k, _)) in self.0.iter().enumerate() {
            if k == key {
                return Some(self.0.remove(i).1);
            }
        }

        None
    }

    fn extract_response_peers(&self, max_num_peers_to_take: usize) -> Vec<ResponsePeer<I>> {
        Vec::from_iter(self.0.iter().take(max_num_peers_to_take).map(|(k, _)| *k))
    }

    fn clean_and_get_num_peers(&mut self, now: SecondsSinceServerStart) -> usize {
        self.0.retain(|(_, peer)| peer.valid_until.valid(now));

        self.0.len()
    }

    fn to_large(&self) -> LargePeerMap<I> {
        let (num_seeders, _) = self.num_seeders_leechers();
        let peers = self.0.iter().copied().collect();

        LargePeerMap { peers, num_seeders }
    }
}

#[derive(Default)]
pub struct LargePeerMap<I: Ip> {
    peers: IndexMap<ResponsePeer<I>, Peer>,
    num_seeders: usize,
}

impl<I: Ip> LargePeerMap<I> {
    fn num_seeders_leechers(&self) -> (usize, usize) {
        (self.num_seeders, self.peers.len() - self.num_seeders)
    }

    fn insert(&mut self, key: ResponsePeer<I>, peer: Peer) {
        if peer.is_seeder {
            self.num_seeders += 1;
        }

        self.peers.insert(key, peer);
    }

    fn remove_peer(&mut self, key: &ResponsePeer<I>) -> Option<Peer> {
        let opt_removed_peer = self.peers.swap_remove(key);

        if let Some(Peer {
            is_seeder: true, ..
        }) = opt_removed_peer
        {
            self.num_seeders -= 1;
        }

        opt_removed_peer
    }

    /// Extract response peers
    ///
    /// If there are more peers in the map than `max_num_peers_to_take`, do a
    /// random selection of peers from the first and second halves of the map,
    /// in order to avoid returning an overly homogeneous set of peers.
    ///
    /// Does NOT filter out the announcing peer.
    pub fn extract_response_peers(
        &self,
        rng: &mut impl Rng,
        max_num_peers_to_take: usize,
    ) -> Vec<ResponsePeer<I>> {
        if self.peers.len() <= max_num_peers_to_take {
            self.peers.keys().copied().collect()
        } else {
            let middle_index = self.peers.len() / 2;
            let num_to_take_per_half = max_num_peers_to_take / 2;

            let offset_half_one = {
                let from = 0;
                let to = usize::max(1, middle_index - num_to_take_per_half);

                rng.gen_range(from..to)
            };
            let offset_half_two = {
                let from = middle_index;
                let to = usize::max(middle_index + 1, self.peers.len() - num_to_take_per_half);

                rng.gen_range(from..to)
            };

            let end_half_one = offset_half_one + num_to_take_per_half;
            let end_half_two = offset_half_two + num_to_take_per_half;

            let mut peers = Vec::with_capacity(max_num_peers_to_take);

            if let Some(slice) = self.peers.get_range(offset_half_one..end_half_one) {
                peers.extend(slice.keys());
            }
            if let Some(slice) = self.peers.get_range(offset_half_two..end_half_two) {
                peers.extend(slice.keys());
            }

            peers
        }
    }

    fn clean_and_get_num_peers(&mut self, now: SecondsSinceServerStart) -> usize {
        self.peers.retain(|_, peer| {
            let keep = peer.valid_until.valid(now);

            if !keep && peer.is_seeder {
                self.num_seeders -= 1;
            }

            keep
        });

        self.peers.shrink_to_fit();

        self.peers.len()
    }

    fn try_shrink(&mut self) -> Option<SmallPeerMap<I>> {
        (self.peers.len() <= SMALL_PEER_MAP_CAPACITY).then(|| {
            SmallPeerMap(ArrayVec::from_iter(
                self.peers.iter().map(|(k, v)| (*k, *v)),
            ))
        })
    }
}

#[derive(Debug, Clone, Copy)]
struct Peer {
    pub valid_until: ValidUntil,
    pub is_seeder: bool,
}

#[derive(PartialEq, Eq, Clone, Copy, Debug)]
enum PeerStatus {
    Seeding,
    Leeching,
    Stopped,
}

impl PeerStatus {
    fn from_event_and_bytes_left(event: AnnounceEvent, bytes_left: usize) -> Self {
        if let AnnounceEvent::Stopped = event {
            Self::Stopped
        } else if bytes_left == 0 {
            Self::Seeding
        } else {
            Self::Leeching
        }
    }
}
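`SmallPeerMap` and `LargePeerMap` above implement a small-map optimization: swarms of up to `SMALL_PEER_MAP_CAPACITY` peers live inline in an `ArrayVec`, and only larger swarms pay for a hash map. A minimal sketch of the promotion step, assuming the arrayvec and indexmap crates, with `u16` ports as hypothetical stand-ins for full `(ResponsePeer, Peer)` entries:

```rust
use arrayvec::ArrayVec;
use indexmap::IndexMap;

enum PeerSet {
    Small(ArrayVec<u16, 4>), // capacity matches SMALL_PEER_MAP_CAPACITY
    Large(IndexMap<u16, ()>),
}

impl PeerSet {
    fn insert(&mut self, peer: u16) {
        match self {
            Self::Small(v) if v.is_full() => {
                // Promote: copy the inline entries into a heap map
                let mut map: IndexMap<u16, ()> = v.iter().map(|p| (*p, ())).collect();
                map.insert(peer, ());
                *self = Self::Large(map);
            }
            Self::Small(v) => v.push(peer),
            Self::Large(m) => {
                m.insert(peer, ());
            }
        }
    }
}

fn main() {
    let mut set = PeerSet::Small(ArrayVec::new());

    for port in 0..5u16 {
        set.insert(port);
    }

    // The fifth insert triggers promotion to the Large variant
    assert!(matches!(set, PeerSet::Large(ref m) if m.len() == 5));
}
```

The real code also demotes via `try_shrink` when a stopped announce leaves few enough peers, so long-lived torrents that shrink back down stop paying for the map.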
37
apps/aquatic/crates/http_load_test/Cargo.toml
Normal file
@@ -0,0 +1,37 @@
[package]
name = "aquatic_http_load_test"
description = "BitTorrent (HTTP over TLS) load tester"
keywords = ["http", "benchmark", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "README.md"

[[bin]]
name = "aquatic_http_load_test"

[dependencies]
aquatic_common.workspace = true
aquatic_http_protocol.workspace = true
aquatic_toml_config.workspace = true

anyhow = "1"
futures = "0.3"
futures-lite = "1"
futures-rustls = "0.26"
hashbrown = "0.15"
glommio = "0.9"
log = "0.4"
mimalloc = { version = "0.1", default-features = false }
rand = { version = "0.8", features = ["small_rng"] }
rand_distr = "0.4"
rustls = { version = "0.23", default-features = false, features = ["logging"] } # TLS 1.2 disabled
serde = { version = "1", features = ["derive"] }

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"
55
apps/aquatic/crates/http_load_test/README.md
Normal file
@@ -0,0 +1,55 @@
# aquatic_http_load_test: HTTP BitTorrent tracker load tester

[CI](https://github.com/greatest-ape/aquatic/actions/workflows/ci.yml)

Load tester for HTTP BitTorrent trackers. Requires Linux 5.8 or later.

## Usage

### Compiling

- Install Rust with [rustup](https://rustup.rs/) (latest stable release is recommended)
- Install build dependencies with your package manager (e.g., `apt-get install cmake build-essential`)
- Clone this git repository and build the application:

```sh
git clone https://github.com/greatest-ape/aquatic.git && cd aquatic

# Recommended: tell Rust to enable support for all SIMD extensions present on
# current CPU except for those relating to AVX-512. (If you run a processor
# that doesn't clock down when using AVX-512, you can enable those instructions
# too.)
. ./scripts/env-native-cpu-without-avx-512

cargo build --release -p aquatic_http_load_test
```

### Configuring and running

Generate the configuration file:

```sh
./target/release/aquatic_http_load_test -p > "load-test-config.toml"
```

Make necessary adjustments to the file.
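The generated file contains the settings defined in the crate's `src/config.rs` (shown later in this diff). As a non-authoritative illustration, an excerpt with the default values, which you would typically tune for your test:

```toml
server_address = '127.0.0.1:3000'
num_workers = 1

# Maximum number of connections to keep open
num_connections = 128
connection_creation_interval_ms = 10

# 0 means run until interrupted
duration = 0
keep_alive = true
enable_tls = true

[torrents]
number_of_torrents = 10_000
peer_seeder_probability = 0.25
```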
Make sure locked memory limits are sufficient:

```sh
ulimit -l 65536
```

First, start the tracker application that you want to test. Then
start the load tester:

```sh
./target/release/aquatic_http_load_test -c "load-test-config.toml"
```

## Copyright and license

Copyright (c) Joakim Frostegård

Distributed under the terms of the Apache License, Version 2.0. Please refer to
the `LICENSE` file in the repository root directory for details.
38
apps/aquatic/crates/http_load_test/src/common.rs
Normal file
@@ -0,0 +1,38 @@
use std::sync::{atomic::AtomicUsize, Arc};

use rand_distr::Gamma;

pub use aquatic_http_protocol::common::*;
pub use aquatic_http_protocol::request::*;

#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
    pub info_hash: InfoHash,
    pub scrape_hash_indeces: Vec<usize>,
    pub peer_id: PeerId,
    pub port: u16,
}

#[derive(Default)]
pub struct Statistics {
    pub requests: AtomicUsize,
    pub response_peers: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_failure: AtomicUsize,
    pub bytes_sent: AtomicUsize,
    pub bytes_received: AtomicUsize,
}

#[derive(Clone)]
pub struct LoadTestState {
    pub info_hashes: Arc<Vec<InfoHash>>,
    pub statistics: Arc<Statistics>,
    pub gamma: Arc<Gamma<f64>>,
}

#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Scrape,
}
88
apps/aquatic/crates/http_load_test/src/config.rs
Normal file
@@ -0,0 +1,88 @@
use std::net::SocketAddr;

use aquatic_common::cli::LogLevel;
use aquatic_toml_config::TomlConfig;
use serde::Deserialize;

/// aquatic_http_load_test configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    pub num_workers: usize,
    /// Maximum number of connections to keep open
    pub num_connections: usize,
    /// How often to check whether num_connections connections are open,
    /// opening a new one if not. A value of 0 means that connections are
    /// opened as quickly as possible, which is useful when the tracker
    /// does not keep connections alive.
    pub connection_creation_interval_ms: u64,
    /// Announce/scrape url suffix. Use `/my_token/` to get `/announce/my_token/`
    pub url_suffix: String,
    pub duration: usize,
    pub keep_alive: bool,
    pub enable_tls: bool,
    pub torrents: TorrentConfig,
}

impl aquatic_common::cli::Config for Config {
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}

impl Default for Config {
    fn default() -> Self {
        Self {
            server_address: "127.0.0.1:3000".parse().unwrap(),
            log_level: LogLevel::Error,
            num_workers: 1,
            num_connections: 128,
            connection_creation_interval_ms: 10,
            url_suffix: "".into(),
            duration: 0,
            keep_alive: true,
            enable_tls: true,
            torrents: TorrentConfig::default(),
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct TorrentConfig {
    pub number_of_torrents: usize,
    /// Probability that a generated peer is a seeder
    pub peer_seeder_probability: f64,
    /// Probability that a generated request is an announce request, as a
    /// share of the sum of the various weight arguments.
    pub weight_announce: usize,
    /// Probability that a generated request is a scrape request, as a
    /// share of the sum of the various weight arguments.
    pub weight_scrape: usize,
    /// Peers choose torrents according to this Gamma distribution shape
    pub torrent_gamma_shape: f64,
    /// Peers choose torrents according to this Gamma distribution scale
    pub torrent_gamma_scale: f64,
}

impl Default for TorrentConfig {
    fn default() -> Self {
        Self {
            number_of_torrents: 10_000,
            peer_seeder_probability: 0.25,
            weight_announce: 5,
            weight_scrape: 0,
            torrent_gamma_shape: 0.2,
            torrent_gamma_scale: 100.0,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Config;

    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
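The weight fields above act as relative frequencies: a generated request is an announce with probability `weight_announce / (weight_announce + weight_scrape)`. A minimal sketch of that weighted draw (`pick_request_type` is a hypothetical helper name, not part of the crate):

```rust
use rand::Rng;

#[derive(Debug, PartialEq)]
enum RequestType {
    Announce,
    Scrape,
}

fn pick_request_type(
    rng: &mut impl Rng,
    weight_announce: usize,
    weight_scrape: usize,
) -> RequestType {
    let total = weight_announce + weight_scrape;

    // A uniform draw in 0..total lands in the announce bucket with
    // probability weight_announce / total
    if rng.gen_range(0..total) < weight_announce {
        RequestType::Announce
    } else {
        RequestType::Scrape
    }
}

fn main() {
    let mut rng = rand::thread_rng();

    // With the defaults (5 and 0), every request is an announce
    assert_eq!(pick_request_type(&mut rng, 5, 0), RequestType::Announce);
}
```

This is also why `run` in `main.rs` below panics when both weights are zero: the draw would have an empty range.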
225
apps/aquatic/crates/http_load_test/src/main.rs
Normal file
@@ -0,0 +1,225 @@
use std::sync::{atomic::Ordering, Arc};
use std::thread;
use std::time::{Duration, Instant};

use ::glommio::LocalExecutorBuilder;
use rand::prelude::*;
use rand_distr::Gamma;

mod common;
mod config;
mod network;
mod utils;

use common::*;
use config::*;
use network::*;

#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

/// Multiply bytes during a second with this to get Mbit/s
const MBITS_FACTOR: f64 = 1.0 / ((1024.0 * 1024.0) / 8.0);

pub fn main() {
    aquatic_common::cli::run_app_with_cli_and_config::<Config>(
        "aquatic_http_load_test: BitTorrent load tester",
        env!("CARGO_PKG_VERSION"),
        run,
        None,
    )
}

fn run(config: Config) -> ::anyhow::Result<()> {
    if config.torrents.weight_announce + config.torrents.weight_scrape == 0 {
        panic!("Error: at least one weight must be larger than zero.");
    }

    println!("Starting client with config: {:#?}", config);

    let mut info_hashes = Vec::with_capacity(config.torrents.number_of_torrents);

    let mut rng = SmallRng::from_entropy();

    for _ in 0..config.torrents.number_of_torrents {
        info_hashes.push(InfoHash(rng.gen()));
    }

    let gamma = Gamma::new(
        config.torrents.torrent_gamma_shape,
        config.torrents.torrent_gamma_scale,
    )
    .unwrap();

    let state = LoadTestState {
        info_hashes: Arc::new(info_hashes),
        statistics: Arc::new(Statistics::default()),
        gamma: Arc::new(gamma),
    };

    let opt_tls_config = if config.enable_tls {
        Some(create_tls_config().unwrap())
    } else {
        None
    };

    for _ in 0..config.num_workers {
        let config = config.clone();
        let opt_tls_config = opt_tls_config.clone();
        let state = state.clone();

        LocalExecutorBuilder::default()
            .name("load-test")
            .spawn(move || async move {
                run_socket_thread(config, opt_tls_config, state)
                    .await
                    .unwrap();
            })
            .unwrap();
    }

    monitor_statistics(state, &config);

    Ok(())
}

fn monitor_statistics(state: LoadTestState, config: &Config) {
    let start_time = Instant::now();
    let mut report_avg_response_vec: Vec<f64> = Vec::new();

    let interval = 5;
    let interval_f64 = interval as f64;

    loop {
        thread::sleep(Duration::from_secs(interval));

        let statistics = state.statistics.as_ref();

        let responses_announce = statistics
            .responses_announce
            .fetch_and(0, Ordering::Relaxed) as f64;
        // let response_peers = statistics.response_peers
        //     .fetch_and(0, Ordering::SeqCst) as f64;

        let requests_per_second =
            statistics.requests.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_scrape_per_second =
            statistics.responses_scrape.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_failure_per_second =
            statistics.responses_failure.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;

        let bytes_sent_per_second =
            statistics.bytes_sent.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
        let bytes_received_per_second =
            statistics.bytes_received.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;

        let responses_announce_per_second = responses_announce / interval_f64;

        let responses_per_second = responses_announce_per_second
            + responses_scrape_per_second
            + responses_failure_per_second;

        report_avg_response_vec.push(responses_per_second);

        println!();
        println!("Requests out: {:.2}/second", requests_per_second);
        println!("Responses in: {:.2}/second", responses_per_second);
        println!(
            " - Announce responses: {:.2}",
            responses_announce_per_second
        );
        println!(" - Scrape responses: {:.2}", responses_scrape_per_second);
        println!(
            " - Failure responses: {:.2}",
            responses_failure_per_second
        );
        //println!("Peers per announce response: {:.2}", response_peers / responses_announce);
        println!(
            "Bandwidth out: {:.2}Mbit/s",
            bytes_sent_per_second * MBITS_FACTOR
        );
        println!(
            "Bandwidth in: {:.2}Mbit/s",
            bytes_received_per_second * MBITS_FACTOR
        );

        let time_elapsed = start_time.elapsed();
        let duration = Duration::from_secs(config.duration as u64);

        if config.duration != 0 && time_elapsed >= duration {
            let report_len = report_avg_response_vec.len() as f64;
            let report_sum: f64 = report_avg_response_vec.into_iter().sum();
            let report_avg: f64 = report_sum / report_len;

            println!(
                concat!(
                    "\n# aquatic load test report\n\n",
                    "Test ran for {} seconds.\n",
                    "Average responses per second: {:.2}\n\nConfig: {:#?}\n"
                ),
                time_elapsed.as_secs(),
                report_avg,
                config
            );

            break;
        }
    }
}

#[derive(Debug)]
struct FakeCertificateVerifier;

impl rustls::client::danger::ServerCertVerifier for FakeCertificateVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::pki_types::CertificateDer<'_>,
        _intermediates: &[rustls::pki_types::CertificateDer<'_>],
        _server_name: &rustls::pki_types::ServerName<'_>,
        _ocsp_response: &[u8],
        _now: rustls::pki_types::UnixTime,
    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
        Ok(rustls::client::danger::ServerCertVerified::assertion())
    }

    fn verify_tls12_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    fn verify_tls13_signature(
        &self,
        _message: &[u8],
        _cert: &rustls::pki_types::CertificateDer<'_>,
        _dss: &rustls::DigitallySignedStruct,
    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
        Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
    }

    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
        vec![
            rustls::SignatureScheme::ECDSA_NISTP384_SHA384,
            rustls::SignatureScheme::ECDSA_NISTP256_SHA256,
            rustls::SignatureScheme::RSA_PSS_SHA512,
            rustls::SignatureScheme::RSA_PSS_SHA384,
            rustls::SignatureScheme::RSA_PSS_SHA256,
            rustls::SignatureScheme::ED25519,
        ]
    }
}

fn create_tls_config() -> anyhow::Result<Arc<rustls::ClientConfig>> {
    let mut config = rustls::ClientConfig::builder()
        .with_root_certificates(rustls::RootCertStore::empty())
        .with_no_client_auth();

    config
        .dangerous()
        .set_certificate_verifier(Arc::new(FakeCertificateVerifier));

    Ok(Arc::new(config))
}
277
apps/aquatic/crates/http_load_test/src/network.rs
Normal file
@ -0,0 +1,277 @@
use std::{
    cell::RefCell,
    convert::TryInto,
    io::Cursor,
    rc::Rc,
    sync::{atomic::Ordering, Arc},
    time::Duration,
};

use aquatic_http_protocol::response::Response;
use futures_lite::{AsyncReadExt, AsyncWriteExt};
use futures_rustls::TlsConnector;
use glommio::net::TcpStream;
use glommio::{prelude::*, timer::TimerActionRepeat};
use rand::{prelude::SmallRng, SeedableRng};

use crate::{common::LoadTestState, config::Config, utils::create_random_request};

pub async fn run_socket_thread(
    config: Config,
    opt_tls_config: Option<Arc<rustls::ClientConfig>>,
    load_test_state: LoadTestState,
) -> anyhow::Result<()> {
    let config = Rc::new(config);
    let num_active_connections = Rc::new(RefCell::new(0usize));
    let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));

    let interval = config.connection_creation_interval_ms;

    if interval == 0 {
        loop {
            if *num_active_connections.borrow() < config.num_connections {
                if let Err(err) = run_connection(
                    config.clone(),
                    opt_tls_config.clone(),
                    load_test_state.clone(),
                    num_active_connections.clone(),
                    rng.clone(),
                )
                .await
                {
                    ::log::error!("connection creation error: {:?}", err);
                }
            }
        }
    } else {
        let interval = Duration::from_millis(interval);

        TimerActionRepeat::repeat(move || {
            periodically_open_connections(
                config.clone(),
                interval,
                opt_tls_config.clone(),
                load_test_state.clone(),
                num_active_connections.clone(),
                rng.clone(),
            )
        });
    }

    futures_lite::future::pending::<bool>().await;

    Ok(())
}

async fn periodically_open_connections(
    config: Rc<Config>,
    interval: Duration,
    opt_tls_config: Option<Arc<rustls::ClientConfig>>,
    load_test_state: LoadTestState,
    num_active_connections: Rc<RefCell<usize>>,
    rng: Rc<RefCell<SmallRng>>,
) -> Option<Duration> {
    if *num_active_connections.borrow() < config.num_connections {
        spawn_local(async move {
            if let Err(err) = run_connection(
                config,
                opt_tls_config,
                load_test_state,
                num_active_connections,
                rng.clone(),
            )
            .await
            {
                ::log::error!("connection creation error: {:?}", err);
            }
        })
        .detach();
    }

    Some(interval)
}

async fn run_connection(
    config: Rc<Config>,
    opt_tls_config: Option<Arc<rustls::ClientConfig>>,
    load_test_state: LoadTestState,
    num_active_connections: Rc<RefCell<usize>>,
    rng: Rc<RefCell<SmallRng>>,
) -> anyhow::Result<()> {
    let stream = TcpStream::connect(config.server_address)
        .await
        .map_err(|err| anyhow::anyhow!("connect: {:?}", err))?;

    if let Some(tls_config) = opt_tls_config {
        let stream = TlsConnector::from(tls_config)
            .connect("example.com".try_into().unwrap(), stream)
            .await?;

        let mut connection = Connection {
            config,
            load_test_state,
            rng,
            stream,
            buffer: Box::new([0; 2048]),
        };

        connection.run(num_active_connections).await?;
    } else {
        let mut connection = Connection {
            config,
            load_test_state,
            rng,
            stream,
            buffer: Box::new([0; 2048]),
        };

        connection.run(num_active_connections).await?;
    }

    Ok(())
}

struct Connection<S> {
    config: Rc<Config>,
    load_test_state: LoadTestState,
    rng: Rc<RefCell<SmallRng>>,
    stream: S,
    buffer: Box<[u8; 2048]>,
}

impl<S> Connection<S>
where
    S: futures::AsyncRead + futures::AsyncWrite + Unpin + 'static,
{
    async fn run(&mut self, num_active_connections: Rc<RefCell<usize>>) -> anyhow::Result<()> {
        *num_active_connections.borrow_mut() += 1;

        let result = self.run_connection_loop().await;

        if let Err(err) = &result {
            ::log::info!("connection error: {:?}", err);
        }

        *num_active_connections.borrow_mut() -= 1;

        result
    }

    async fn run_connection_loop(&mut self) -> anyhow::Result<()> {
        loop {
            self.send_request().await?;
            self.read_response().await?;

            if !self.config.keep_alive {
                break Ok(());
            }
        }
    }

    async fn send_request(&mut self) -> anyhow::Result<()> {
        let request = create_random_request(
            &self.config,
            &self.load_test_state,
            &mut self.rng.borrow_mut(),
        );

        let mut cursor = Cursor::new(&mut self.buffer[..]);

        request.write(&mut cursor, self.config.url_suffix.as_bytes())?;

        let cursor_position = cursor.position() as usize;

        let bytes_sent = self
            .stream
            .write(&cursor.into_inner()[..cursor_position])
            .await?;

        self.stream.flush().await?;

        self.load_test_state
            .statistics
            .bytes_sent
            .fetch_add(bytes_sent, Ordering::Relaxed);

        self.load_test_state
            .statistics
            .requests
            .fetch_add(1, Ordering::Relaxed);

        Ok(())
    }

    async fn read_response(&mut self) -> anyhow::Result<()> {
        let mut buffer_position = 0;

        loop {
            let bytes_read = self
                .stream
                .read(&mut self.buffer[buffer_position..])
                .await?;

            if bytes_read == 0 {
                break;
            }

            buffer_position += bytes_read;

            let interesting_bytes = &self.buffer[..buffer_position];

            let mut opt_body_start_index = None;

            for (i, chunk) in interesting_bytes.windows(4).enumerate() {
                if chunk == b"\r\n\r\n" {
                    opt_body_start_index = Some(i + 4);

                    break;
                }
            }

            if let Some(body_start_index) = opt_body_start_index {
                match Response::parse_bytes(&interesting_bytes[body_start_index..]) {
                    Ok(response) => {
                        match response {
                            Response::Announce(_) => {
                                self.load_test_state
                                    .statistics
                                    .responses_announce
                                    .fetch_add(1, Ordering::Relaxed);
                            }
                            Response::Scrape(_) => {
                                self.load_test_state
                                    .statistics
                                    .responses_scrape
                                    .fetch_add(1, Ordering::Relaxed);
                            }
                            Response::Failure(response) => {
                                self.load_test_state
                                    .statistics
                                    .responses_failure
                                    .fetch_add(1, Ordering::Relaxed);
                                println!("failure response: reason: {}", response.failure_reason);
                            }
                        }

                        self.load_test_state
                            .statistics
                            .bytes_received
                            .fetch_add(interesting_bytes.len(), Ordering::Relaxed);

                        break;
                    }
                    Err(err) => {
                        ::log::warn!(
                            "deserialize response error with {} bytes read: {:?}, text: {}",
                            buffer_position,
                            err,
                            interesting_bytes.escape_ascii()
                        );
                    }
                }
            }
        }

        Ok(())
    }
}
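The response reader above accumulates bytes until it sees the blank line that ends the HTTP headers, then hands the remainder to the bencode parser. A standalone sketch of the same `windows(4)` header/body split:

fn body_start(buf: &[u8]) -> Option<usize> {
    // Index just past the "\r\n\r\n" that separates headers from body
    buf.windows(4)
        .position(|chunk| chunk == b"\r\n\r\n")
        .map(|i| i + 4)
}

fn main() {
    let raw = b"HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\nd8:completei0e";
    let start = body_start(raw).unwrap();
    assert_eq!(&raw[start..], b"d8:completei0e");
}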
81
apps/aquatic/crates/http_load_test/src/utils.rs
Normal file
@ -0,0 +1,81 @@
use std::sync::Arc;

use rand::distributions::WeightedIndex;
use rand::prelude::*;
use rand_distr::Gamma;

use crate::common::*;
use crate::config::*;

pub fn create_random_request(
    config: &Config,
    state: &LoadTestState,
    rng: &mut SmallRng,
) -> Request {
    let weights = [
        config.torrents.weight_announce as u32,
        config.torrents.weight_scrape as u32,
    ];

    let items = [RequestType::Announce, RequestType::Scrape];

    let dist = WeightedIndex::new(weights).expect("random request weighted index");

    match items[dist.sample(rng)] {
        RequestType::Announce => create_announce_request(config, state, rng),
        RequestType::Scrape => create_scrape_request(config, state, rng),
    }
}

#[inline]
fn create_announce_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
    let (event, bytes_left) = {
        if rng.gen_bool(config.torrents.peer_seeder_probability) {
            (AnnounceEvent::Completed, 0)
        } else {
            (AnnounceEvent::Started, 50)
        }
    };

    let info_hash_index = select_info_hash_index(config, state, rng);

    Request::Announce(AnnounceRequest {
        info_hash: state.info_hashes[info_hash_index],
        peer_id: PeerId(rng.gen()),
        bytes_left,
        event,
        key: None,
        numwant: None,
        port: rng.gen(),
        bytes_uploaded: 0,
        bytes_downloaded: 0,
    })
}

#[inline]
fn create_scrape_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
    let mut scrape_hashes = Vec::with_capacity(5);

    for _ in 0..5 {
        let info_hash_index = select_info_hash_index(config, state, rng);

        scrape_hashes.push(state.info_hashes[info_hash_index]);
    }

    Request::Scrape(ScrapeRequest {
        info_hashes: scrape_hashes,
    })
}

#[inline]
fn select_info_hash_index(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> usize {
    gamma_usize(rng, &state.gamma, config.torrents.number_of_torrents - 1)
}

#[inline]
fn gamma_usize(rng: &mut impl Rng, gamma: &Arc<Gamma<f64>>, max: usize) -> usize {
    let p: f64 = gamma.sample(rng);
    let p = (p.min(101.0f64) - 1.0) / 100.0;

    (p * max as f64) as usize
}
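gamma_usize above maps a Gamma sample into a torrent index: the sample is clamped to at most 101, shifted down by 1 and divided by 100, then scaled by the maximum index (negative results saturate to index 0 when cast). This skews traffic toward low-index torrents, mimicking real popularity distributions. A standalone sketch; the shape and scale here are illustrative, the real values come from the load test config:

use rand::prelude::*;
use rand_distr::{Distribution, Gamma};

fn main() {
    // Illustrative parameters, not the crate's defaults
    let gamma = Gamma::new(2.0, 10.0).unwrap();
    let mut rng = rand::thread_rng();
    let max_index = 9_999usize;

    let p: f64 = gamma.sample(&mut rng);
    let p = (p.min(101.0) - 1.0) / 100.0;

    // Small samples dominate, so low torrent indices are picked most often
    println!("selected torrent index: {}", (p * max_index as f64) as usize);
}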
43
apps/aquatic/crates/http_protocol/Cargo.toml
Normal file
@ -0,0 +1,43 @@
[package]
name = "aquatic_http_protocol"
description = "HTTP BitTorrent tracker protocol"
keywords = ["http", "protocol", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "./README.md"

[lib]
name = "aquatic_http_protocol"

[[bench]]
name = "bench_request_from_bytes"
path = "benches/bench_request_from_bytes.rs"
harness = false

[[bench]]
name = "bench_announce_response_to_bytes"
path = "benches/bench_announce_response_to_bytes.rs"
harness = false

[dependencies]
anyhow = "1"
compact_str = { version = "0.7", features = ["serde"] }
hex = { version = "0.4", default-features = false }
httparse = "1"
itoa = "1"
log = "0.4"
memchr = "2"
serde = { version = "1", features = ["derive"] }
serde_bencode = "0.2"
urlencoding = "2"

[dev-dependencies]
bendy = { version = "0.4.0-beta.2", features = ["std", "serde"] }
criterion = "0.4"
quickcheck = "1"
quickcheck_macros = "1"
15
apps/aquatic/crates/http_protocol/README.md
Normal file
@ -0,0 +1,15 @@
# aquatic_http_protocol: HTTP BitTorrent tracker protocol

HTTP BitTorrent tracker message parsing and serialization.

[BEP 003]: https://www.bittorrent.org/beps/bep_0003.html
[BEP 007]: https://www.bittorrent.org/beps/bep_0007.html
[BEP 023]: https://www.bittorrent.org/beps/bep_0023.html
[BEP 048]: https://www.bittorrent.org/beps/bep_0048.html

Implements:
* [BEP 003]: HTTP BitTorrent protocol ([more details](https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol)). Exceptions:
  * Only compact responses are supported
* [BEP 023]: Compact HTTP responses
* [BEP 007]: IPv6 support
* [BEP 048]: HTTP scrape support
@ -0,0 +1,49 @@
use std::net::Ipv4Addr;
use std::time::Duration;

use criterion::{black_box, criterion_group, criterion_main, Criterion};

use aquatic_http_protocol::response::*;

pub fn bench(c: &mut Criterion) {
    let mut peers = Vec::new();

    for i in 0..100 {
        peers.push(ResponsePeer {
            ip_address: Ipv4Addr::new(127, 0, 0, i),
            port: i as u16,
        })
    }

    let announce_response = AnnounceResponse {
        announce_interval: 120,
        complete: 100,
        incomplete: 500,
        peers: ResponsePeerListV4(peers),
        peers6: ResponsePeerListV6(Vec::new()),
        warning_message: None,
    };

    let response = Response::Announce(announce_response);

    let mut buffer = [0u8; 4096];
    let mut buffer = ::std::io::Cursor::new(&mut buffer[..]);

    c.bench_function("announce-response-to-bytes", |b| {
        b.iter(|| {
            buffer.set_position(0);

            Response::write_bytes(black_box(&response), black_box(&mut buffer)).unwrap();
        })
    });
}

criterion_group! {
    name = benches;
    config = Criterion::default()
        .sample_size(1000)
        .measurement_time(Duration::from_secs(180))
        .significance_level(0.01);
    targets = bench
}
criterion_main!(benches);
@ -0,0 +1,22 @@
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use std::time::Duration;

use aquatic_http_protocol::request::Request;

static INPUT: &[u8] = b"GET /announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-TR2940-5ert69muw5t8&port=11000&uploaded=0&downloaded=0&left=0&numwant=0&key=3ab4b977&compact=1&supportcrypto=1&event=stopped HTTP/1.1\r\n\r\n";

pub fn bench(c: &mut Criterion) {
    c.bench_function("request-from-bytes", |b| {
        b.iter(|| Request::parse_bytes(black_box(INPUT)))
    });
}

criterion_group! {
    name = benches;
    config = Criterion::default()
        .sample_size(1000)
        .measurement_time(Duration::from_secs(180))
        .significance_level(0.01);
    targets = bench
}
criterion_main!(benches);
102
apps/aquatic/crates/http_protocol/src/common.rs
Normal file
@ -0,0 +1,102 @@
use std::str::FromStr;

use serde::{Deserialize, Serialize};

use super::utils::*;

#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct PeerId(
    #[serde(
        serialize_with = "serialize_20_bytes",
        deserialize_with = "deserialize_20_bytes"
    )]
    pub [u8; 20],
);

#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(transparent)]
pub struct InfoHash(
    #[serde(
        serialize_with = "serialize_20_bytes",
        deserialize_with = "deserialize_20_bytes"
    )]
    pub [u8; 20],
);

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AnnounceEvent {
    Started,
    Stopped,
    Completed,
    Empty,
}

impl Default for AnnounceEvent {
    fn default() -> Self {
        Self::Empty
    }
}

impl FromStr for AnnounceEvent {
    type Err = String;

    fn from_str(value: &str) -> std::result::Result<Self, String> {
        match value {
            "started" => Ok(Self::Started),
            "stopped" => Ok(Self::Stopped),
            "completed" => Ok(Self::Completed),
            "empty" => Ok(Self::Empty),
            value => Err(format!("Unknown value: {}", value)),
        }
    }
}

impl AnnounceEvent {
    pub fn as_str(&self) -> Option<&str> {
        match self {
            Self::Started => Some("started"),
            Self::Stopped => Some("stopped"),
            Self::Completed => Some("completed"),
            Self::Empty => None,
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for InfoHash {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        let mut arr = [b'0'; 20];

        for byte in arr.iter_mut() {
            *byte = u8::arbitrary(g);
        }

        Self(arr)
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for PeerId {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        let mut arr = [b'0'; 20];

        for byte in arr.iter_mut() {
            *byte = u8::arbitrary(g);
        }

        Self(arr)
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for AnnounceEvent {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        // Two coin flips cover all four variants
        match (bool::arbitrary(g), bool::arbitrary(g)) {
            (false, false) => Self::Started,
            (true, false) => Self::Stopped,
            (false, true) => Self::Completed,
            (true, true) => Self::Empty,
        }
    }
}
4
apps/aquatic/crates/http_protocol/src/lib.rs
Normal file
@ -0,0 +1,4 @@
pub mod common;
pub mod request;
pub mod response;
mod utils;
450
apps/aquatic/crates/http_protocol/src/request.rs
Normal file
@ -0,0 +1,450 @@
use std::io::Write;

use anyhow::Context;
use compact_str::CompactString;

use super::common::*;
use super::utils::*;

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AnnounceRequest {
    pub info_hash: InfoHash,
    pub peer_id: PeerId,
    pub port: u16,
    pub bytes_uploaded: usize,
    pub bytes_downloaded: usize,
    pub bytes_left: usize,
    pub event: AnnounceEvent,
    /// Number of response peers wanted
    pub numwant: Option<usize>,
    pub key: Option<CompactString>,
}

impl AnnounceRequest {
    fn write_bytes<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
        output.write_all(b"GET /announce")?;
        output.write_all(url_suffix)?;
        output.write_all(b"?info_hash=")?;
        urlencode_20_bytes(self.info_hash.0, output)?;

        output.write_all(b"&peer_id=")?;
        urlencode_20_bytes(self.peer_id.0, output)?;

        output.write_all(b"&port=")?;
        output.write_all(itoa::Buffer::new().format(self.port).as_bytes())?;

        output.write_all(b"&uploaded=")?;
        output.write_all(itoa::Buffer::new().format(self.bytes_uploaded).as_bytes())?;

        output.write_all(b"&downloaded=")?;
        output.write_all(itoa::Buffer::new().format(self.bytes_downloaded).as_bytes())?;

        output.write_all(b"&left=")?;
        output.write_all(itoa::Buffer::new().format(self.bytes_left).as_bytes())?;

        match self.event {
            AnnounceEvent::Started => output.write_all(b"&event=started")?,
            AnnounceEvent::Stopped => output.write_all(b"&event=stopped")?,
            AnnounceEvent::Completed => output.write_all(b"&event=completed")?,
            AnnounceEvent::Empty => (),
        };

        if let Some(numwant) = self.numwant {
            output.write_all(b"&numwant=")?;
            output.write_all(itoa::Buffer::new().format(numwant).as_bytes())?;
        }

        if let Some(ref key) = self.key {
            output.write_all(b"&key=")?;
            output.write_all(::urlencoding::encode(key.as_str()).as_bytes())?;
        }

        // Always ask for compact responses to ease load testing of non-aquatic trackers
        output.write_all(b"&compact=1")?;

        output.write_all(b" HTTP/1.1\r\nHost: localhost\r\n\r\n")?;

        Ok(())
    }

    pub fn parse_query_string(query_string: &str) -> anyhow::Result<Self> {
        // -- Parse key-value pairs

        let mut opt_info_hash = None;
        let mut opt_peer_id = None;
        let mut opt_port = None;
        let mut opt_bytes_left = None;
        let mut opt_bytes_uploaded = None;
        let mut opt_bytes_downloaded = None;
        let mut event = AnnounceEvent::default();
        let mut opt_numwant = None;
        let mut opt_key = None;

        let query_string_bytes = query_string.as_bytes();

        let mut ampersand_iter = ::memchr::memchr_iter(b'&', query_string_bytes);
        let mut position = 0usize;

        for equal_sign_index in ::memchr::memchr_iter(b'=', query_string_bytes) {
            let segment_end = ampersand_iter.next().unwrap_or(query_string.len());

            let key = query_string
                .get(position..equal_sign_index)
                .with_context(|| format!("no key at {}..{}", position, equal_sign_index))?;
            let value = query_string
                .get(equal_sign_index + 1..segment_end)
                .with_context(|| {
                    format!("no value at {}..{}", equal_sign_index + 1, segment_end)
                })?;

            match key {
                "info_hash" => {
                    let value = urldecode_20_bytes(value)?;

                    opt_info_hash = Some(InfoHash(value));
                }
                "peer_id" => {
                    let value = urldecode_20_bytes(value)?;

                    opt_peer_id = Some(PeerId(value));
                }
                "port" => {
                    opt_port = Some(value.parse::<u16>().with_context(|| "parse port")?);
                }
                "left" => {
                    opt_bytes_left = Some(value.parse::<usize>().with_context(|| "parse left")?);
                }
                "uploaded" => {
                    opt_bytes_uploaded =
                        Some(value.parse::<usize>().with_context(|| "parse uploaded")?);
                }
                "downloaded" => {
                    opt_bytes_downloaded =
                        Some(value.parse::<usize>().with_context(|| "parse downloaded")?);
                }
                "event" => {
                    event = value
                        .parse::<AnnounceEvent>()
                        .map_err(|err| anyhow::anyhow!("invalid event: {}", err))?;
                }
                "compact" => {
                    if value != "1" {
                        return Err(anyhow::anyhow!("compact set, but not to 1"));
                    }
                }
                "numwant" => {
                    opt_numwant = Some(value.parse::<usize>().with_context(|| "parse numwant")?);
                }
                "key" => {
                    if value.len() > 100 {
                        return Err(anyhow::anyhow!("'key' is too long"));
                    }
                    opt_key = Some(::urlencoding::decode(value)?.into());
                }
                k => {
                    ::log::debug!("ignored unrecognized key: {}", k)
                }
            }

            if segment_end == query_string.len() {
                break;
            } else {
                position = segment_end + 1;
            }
        }

        Ok(AnnounceRequest {
            info_hash: opt_info_hash.with_context(|| "no info_hash")?,
            peer_id: opt_peer_id.with_context(|| "no peer_id")?,
            port: opt_port.with_context(|| "no port")?,
            bytes_uploaded: opt_bytes_uploaded.with_context(|| "no uploaded")?,
            bytes_downloaded: opt_bytes_downloaded.with_context(|| "no downloaded")?,
            bytes_left: opt_bytes_left.with_context(|| "no left")?,
            event,
            numwant: opt_numwant,
            key: opt_key,
        })
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ScrapeRequest {
    pub info_hashes: Vec<InfoHash>,
}

impl ScrapeRequest {
    fn write_bytes<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
        output.write_all(b"GET /scrape")?;
        output.write_all(url_suffix)?;
        output.write_all(b"?")?;

        let mut first = true;

        for info_hash in self.info_hashes.iter() {
            if !first {
                output.write_all(b"&")?;
            }

            output.write_all(b"info_hash=")?;
            urlencode_20_bytes(info_hash.0, output)?;

            first = false;
        }

        output.write_all(b" HTTP/1.1\r\nHost: localhost\r\n\r\n")?;

        Ok(())
    }

    pub fn parse_query_string(query_string: &str) -> anyhow::Result<Self> {
        // -- Parse key-value pairs

        let mut info_hashes = Vec::new();

        let query_string_bytes = query_string.as_bytes();

        let mut ampersand_iter = ::memchr::memchr_iter(b'&', query_string_bytes);
        let mut position = 0usize;

        for equal_sign_index in ::memchr::memchr_iter(b'=', query_string_bytes) {
            let segment_end = ampersand_iter.next().unwrap_or(query_string.len());

            let key = query_string
                .get(position..equal_sign_index)
                .with_context(|| format!("no key at {}..{}", position, equal_sign_index))?;
            let value = query_string
                .get(equal_sign_index + 1..segment_end)
                .with_context(|| {
                    format!("no value at {}..{}", equal_sign_index + 1, segment_end)
                })?;

            match key {
                "info_hash" => {
                    let value = urldecode_20_bytes(value)?;

                    info_hashes.push(InfoHash(value));
                }
                k => {
                    ::log::debug!("ignored unrecognized key: {}", k)
                }
            }

            if segment_end == query_string.len() {
                break;
            } else {
                position = segment_end + 1;
            }
        }

        if info_hashes.is_empty() {
            return Err(anyhow::anyhow!("No info hashes sent"));
        }

        Ok(ScrapeRequest { info_hashes })
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Request {
    Announce(AnnounceRequest),
    Scrape(ScrapeRequest),
}

impl Request {
    /// Parse Request from HTTP request bytes
    pub fn parse_bytes(bytes: &[u8]) -> anyhow::Result<Option<Self>> {
        let mut headers = [httparse::EMPTY_HEADER; 16];
        let mut http_request = httparse::Request::new(&mut headers);

        match http_request.parse(bytes) {
            Ok(httparse::Status::Complete(_)) => {
                if let Some(path) = http_request.path {
                    Self::parse_http_get_path(path).map(Some)
                } else {
                    Err(anyhow::anyhow!("no http path"))
                }
            }
            Ok(httparse::Status::Partial) => Ok(None),
            Err(err) => Err(anyhow::Error::from(err)),
        }
    }

    /// Parse Request from http GET path (`/announce?info_hash=...`)
    ///
    /// Existing serde-url decode crates were insufficient, so the decision was
    /// made to create a custom parser. serde_urlencoded doesn't support multiple
    /// values with the same key, and serde_qs pulls in lots of dependencies. Both
    /// would need preprocessing for the binary format used for info_hash and
    /// peer_id.
    ///
    /// The info hashes and peer ids that are received are url-encoded byte
    /// by byte, e.g., %fa for byte 0xfa. However, they need to be parsed as
    /// a UTF-8 string, meaning that non-ascii bytes are invalid characters.
    /// Therefore, these bytes must be converted to their equivalent multi-byte
    /// UTF-8 encodings.
    pub fn parse_http_get_path(path: &str) -> anyhow::Result<Self> {
        ::log::debug!("request GET path: {}", path);

        let mut split_parts = path.splitn(2, '?');

        let location = split_parts.next().with_context(|| "no location")?;
        let query_string = split_parts.next().with_context(|| "no query string")?;

        if location == "/announce" {
            Ok(Request::Announce(AnnounceRequest::parse_query_string(
                query_string,
            )?))
        } else if location == "/scrape" {
            Ok(Request::Scrape(ScrapeRequest::parse_query_string(
                query_string,
            )?))
        } else {
            Err(anyhow::anyhow!("Path must be /announce or /scrape"))
        }
    }

    pub fn write<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
        match self {
            Self::Announce(r) => r.write_bytes(output, url_suffix),
            Self::Scrape(r) => r.write_bytes(output, url_suffix),
        }
    }
}

#[cfg(test)]
mod tests {
    use quickcheck::{quickcheck, Arbitrary, Gen, TestResult};

    use super::*;

    static ANNOUNCE_REQUEST_PATH: &str = "/announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-ABC940-5ert69muw5t8&port=12345&uploaded=1&downloaded=2&left=3&numwant=0&key=4ab4b877&compact=1&supportcrypto=1&event=started";
    static SCRAPE_REQUEST_PATH: &str =
        "/scrape?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9";
    static REFERENCE_INFO_HASH: [u8; 20] = [
        0x04, 0x0b, b'k', b'V', 0x3f, 0x5c, b'r', 0x14, 0xa6, 0xb7, 0x98, 0xad, b'C', 0xc3, 0xc9,
        b'.', 0x40, 0x24, 0x00, 0xb9,
    ];
    static REFERENCE_PEER_ID: [u8; 20] = [
        b'-', b'A', b'B', b'C', b'9', b'4', b'0', b'-', b'5', b'e', b'r', b't', b'6', b'9', b'm',
        b'u', b'w', b'5', b't', b'8',
    ];

    fn get_reference_announce_request() -> Request {
        Request::Announce(AnnounceRequest {
            info_hash: InfoHash(REFERENCE_INFO_HASH),
            peer_id: PeerId(REFERENCE_PEER_ID),
            port: 12345,
            bytes_uploaded: 1,
            bytes_downloaded: 2,
            bytes_left: 3,
            event: AnnounceEvent::Started,
            numwant: Some(0),
            key: Some("4ab4b877".into()),
        })
    }

    #[test]
    fn test_announce_request_from_bytes() {
        let mut bytes = Vec::new();

        bytes.extend_from_slice(b"GET ");
        bytes.extend_from_slice(ANNOUNCE_REQUEST_PATH.as_bytes());
        bytes.extend_from_slice(b" HTTP/1.1\r\n\r\n");

        let parsed_request = Request::parse_bytes(&bytes[..]).unwrap().unwrap();
        let reference_request = get_reference_announce_request();

        assert_eq!(parsed_request, reference_request);
    }

    #[test]
    fn test_scrape_request_from_bytes() {
        let mut bytes = Vec::new();

        bytes.extend_from_slice(b"GET ");
        bytes.extend_from_slice(SCRAPE_REQUEST_PATH.as_bytes());
        bytes.extend_from_slice(b" HTTP/1.1\r\n\r\n");

        let parsed_request = Request::parse_bytes(&bytes[..]).unwrap().unwrap();
        let reference_request = Request::Scrape(ScrapeRequest {
            info_hashes: vec![InfoHash(REFERENCE_INFO_HASH)],
        });

        assert_eq!(parsed_request, reference_request);
    }

    impl Arbitrary for AnnounceRequest {
        fn arbitrary(g: &mut Gen) -> Self {
            let key: Option<String> = Arbitrary::arbitrary(g);

            AnnounceRequest {
                info_hash: Arbitrary::arbitrary(g),
                peer_id: Arbitrary::arbitrary(g),
                port: Arbitrary::arbitrary(g),
                bytes_uploaded: Arbitrary::arbitrary(g),
                bytes_downloaded: Arbitrary::arbitrary(g),
                bytes_left: Arbitrary::arbitrary(g),
                event: Arbitrary::arbitrary(g),
                numwant: Arbitrary::arbitrary(g),
                key: key.map(|key| key.into()),
            }
        }
    }

    impl Arbitrary for ScrapeRequest {
        fn arbitrary(g: &mut Gen) -> Self {
            ScrapeRequest {
                info_hashes: Arbitrary::arbitrary(g),
            }
        }
    }

    impl Arbitrary for Request {
        fn arbitrary(g: &mut Gen) -> Self {
            if Arbitrary::arbitrary(g) {
                Self::Announce(Arbitrary::arbitrary(g))
            } else {
                Self::Scrape(Arbitrary::arbitrary(g))
            }
        }
    }

    #[test]
    fn quickcheck_serde_identity_request() {
        fn prop(request: Request) -> TestResult {
            match request {
                Request::Announce(AnnounceRequest {
                    key: Some(ref key), ..
                }) => {
                    if key.len() > 30 {
                        return TestResult::discard();
                    }
                }
                Request::Scrape(ScrapeRequest { ref info_hashes }) => {
                    if info_hashes.is_empty() {
                        return TestResult::discard();
                    }
                }
                _ => {}
            }

            let mut bytes = Vec::new();

            request.write(&mut bytes, &[]).unwrap();

            let parsed_request = Request::parse_bytes(&bytes[..]).unwrap().unwrap();

            let success = request == parsed_request;

            if !success {
                println!("request: {:?}", request);
                println!("parsed request: {:?}", parsed_request);
                println!("bytes as str: {}", String::from_utf8_lossy(&bytes));
            }

            TestResult::from_bool(success)
        }

        quickcheck(prop as fn(Request) -> TestResult);
    }
}
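The query-string loops above avoid allocation by pairing each '=' located by memchr with the next '&' (or the end of the string) to delimit key and value. A standalone sketch of the same index walk, outside the crate:

fn main() {
    let qs = "port=6881&left=0&compact=1";
    let bytes = qs.as_bytes();

    let mut ampersands = memchr::memchr_iter(b'&', bytes);
    let mut position = 0usize;

    for equal_sign_index in memchr::memchr_iter(b'=', bytes) {
        let segment_end = ampersands.next().unwrap_or(qs.len());

        // Prints: port=6881, left=0, compact=1
        println!(
            "{}={}",
            &qs[position..equal_sign_index],
            &qs[equal_sign_index + 1..segment_end]
        );

        if segment_end == qs.len() {
            break;
        }

        position = segment_end + 1;
    }
}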
335
apps/aquatic/crates/http_protocol/src/response.rs
Normal file
@ -0,0 +1,335 @@
use std::borrow::Cow;
use std::io::Write;
use std::net::{Ipv4Addr, Ipv6Addr};

use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;

use super::common::*;
use super::utils::*;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct ResponsePeer<I: Eq> {
    pub ip_address: I,
    pub port: u16,
}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(transparent)]
pub struct ResponsePeerListV4(
    #[serde(
        serialize_with = "serialize_response_peers_ipv4",
        deserialize_with = "deserialize_response_peers_ipv4"
    )]
    pub Vec<ResponsePeer<Ipv4Addr>>,
);

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(transparent)]
pub struct ResponsePeerListV6(
    #[serde(
        serialize_with = "serialize_response_peers_ipv6",
        deserialize_with = "deserialize_response_peers_ipv6"
    )]
    pub Vec<ResponsePeer<Ipv6Addr>>,
);

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScrapeStatistics {
    pub complete: usize,
    pub incomplete: usize,
    pub downloaded: usize,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnnounceResponse {
    #[serde(rename = "interval")]
    pub announce_interval: usize,
    pub complete: usize,
    pub incomplete: usize,
    #[serde(default)]
    pub peers: ResponsePeerListV4,
    #[serde(default)]
    pub peers6: ResponsePeerListV6,
    // Serialize as string if Some, otherwise skip
    #[serde(
        rename = "warning message",
        skip_serializing_if = "Option::is_none",
        serialize_with = "serialize_optional_string"
    )]
    pub warning_message: Option<String>,
}

impl AnnounceResponse {
    pub fn write_bytes<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
        let mut bytes_written = 0usize;

        bytes_written += output.write(b"d8:completei")?;
        bytes_written += output.write(itoa::Buffer::new().format(self.complete).as_bytes())?;

        bytes_written += output.write(b"e10:incompletei")?;
        bytes_written += output.write(itoa::Buffer::new().format(self.incomplete).as_bytes())?;

        bytes_written += output.write(b"e8:intervali")?;
        bytes_written += output.write(
            itoa::Buffer::new()
                .format(self.announce_interval)
                .as_bytes(),
        )?;

        bytes_written += output.write(b"e5:peers")?;
        bytes_written += output.write(
            itoa::Buffer::new()
                .format(self.peers.0.len() * 6)
                .as_bytes(),
        )?;
        bytes_written += output.write(b":")?;
        for peer in self.peers.0.iter() {
            bytes_written += output.write(&u32::from(peer.ip_address).to_be_bytes())?;
            bytes_written += output.write(&peer.port.to_be_bytes())?;
        }

        bytes_written += output.write(b"6:peers6")?;
        bytes_written += output.write(
            itoa::Buffer::new()
                .format(self.peers6.0.len() * 18)
                .as_bytes(),
        )?;
        bytes_written += output.write(b":")?;
        for peer in self.peers6.0.iter() {
            bytes_written += output.write(&u128::from(peer.ip_address).to_be_bytes())?;
            bytes_written += output.write(&peer.port.to_be_bytes())?;
        }

        if let Some(ref warning_message) = self.warning_message {
            let message_bytes = warning_message.as_bytes();

            bytes_written += output.write(b"15:warning message")?;
            bytes_written +=
                output.write(itoa::Buffer::new().format(message_bytes.len()).as_bytes())?;
            bytes_written += output.write(b":")?;
            bytes_written += output.write(message_bytes)?;
        }

        bytes_written += output.write(b"e")?;

        Ok(bytes_written)
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScrapeResponse {
    /// BTreeMap instead of HashMap since keys need to be serialized in order
    pub files: BTreeMap<InfoHash, ScrapeStatistics>,
}

impl ScrapeResponse {
    pub fn write_bytes<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
        let mut bytes_written = 0usize;

        bytes_written += output.write(b"d5:filesd")?;

        for (info_hash, statistics) in self.files.iter() {
            bytes_written += output.write(b"20:")?;
            bytes_written += output.write(&info_hash.0)?;
            bytes_written += output.write(b"d8:completei")?;
            bytes_written +=
                output.write(itoa::Buffer::new().format(statistics.complete).as_bytes())?;
            // The downloaded count is always written as zero
            bytes_written += output.write(b"e10:downloadedi0e10:incompletei")?;
            bytes_written +=
                output.write(itoa::Buffer::new().format(statistics.incomplete).as_bytes())?;
            bytes_written += output.write(b"ee")?;
        }

        bytes_written += output.write(b"ee")?;

        Ok(bytes_written)
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FailureResponse {
    #[serde(rename = "failure reason")]
    pub failure_reason: Cow<'static, str>,
}

impl FailureResponse {
    pub fn new<S: Into<Cow<'static, str>>>(reason: S) -> Self {
        Self {
            failure_reason: reason.into(),
        }
    }

    pub fn write_bytes<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
        let mut bytes_written = 0usize;

        let reason_bytes = self.failure_reason.as_bytes();

        bytes_written += output.write(b"d14:failure reason")?;
        bytes_written += output.write(itoa::Buffer::new().format(reason_bytes.len()).as_bytes())?;
        bytes_written += output.write(b":")?;
        bytes_written += output.write(reason_bytes)?;
        bytes_written += output.write(b"e")?;

        Ok(bytes_written)
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Response {
    Announce(AnnounceResponse),
    Scrape(ScrapeResponse),
    Failure(FailureResponse),
}

impl Response {
    pub fn write_bytes<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
        match self {
            Response::Announce(r) => r.write_bytes(output),
            Response::Failure(r) => r.write_bytes(output),
            Response::Scrape(r) => r.write_bytes(output),
        }
    }
    pub fn parse_bytes(bytes: &[u8]) -> Result<Self, ::serde_bencode::Error> {
        ::serde_bencode::from_bytes(bytes)
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv4Addr> {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            ip_address: Ipv4Addr::arbitrary(g),
            port: u16::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv6Addr> {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            ip_address: Ipv6Addr::arbitrary(g),
            port: u16::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV4 {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self(Vec::arbitrary(g))
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV6 {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self(Vec::arbitrary(g))
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeStatistics {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            complete: usize::arbitrary(g),
            incomplete: usize::arbitrary(g),
            downloaded: 0,
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for AnnounceResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            announce_interval: usize::arbitrary(g),
            complete: usize::arbitrary(g),
            incomplete: usize::arbitrary(g),
            peers: ResponsePeerListV4::arbitrary(g),
            peers6: ResponsePeerListV6::arbitrary(g),
            warning_message: quickcheck::Arbitrary::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            files: BTreeMap::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for FailureResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            failure_reason: String::arbitrary(g).into(),
        }
    }
}

#[cfg(test)]
mod tests {
    use quickcheck_macros::*;

    use super::*;

    #[quickcheck]
    fn test_announce_response_to_bytes(response: AnnounceResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Announce(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write_bytes(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }

    #[quickcheck]
    fn test_scrape_response_to_bytes(response: ScrapeResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Scrape(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write_bytes(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }

    #[quickcheck]
    fn test_failure_response_to_bytes(response: FailureResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Failure(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write_bytes(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }
}
335
apps/aquatic/crates/http_protocol/src/utils.rs
Normal file
@ -0,0 +1,335 @@
use std::io::Write;
use std::net::{Ipv4Addr, Ipv6Addr};

use anyhow::Context;
use serde::{de::Visitor, Deserializer, Serializer};

use super::response::ResponsePeer;

pub fn urlencode_20_bytes(input: [u8; 20], output: &mut impl Write) -> ::std::io::Result<()> {
    let mut tmp = [b'%'; 60];

    for i in 0..input.len() {
        hex::encode_to_slice(&input[i..i + 1], &mut tmp[i * 3 + 1..i * 3 + 3]).unwrap();
    }

    output.write_all(&tmp)?;

    Ok(())
}

pub fn urldecode_20_bytes(value: &str) -> anyhow::Result<[u8; 20]> {
    let mut out_arr = [0u8; 20];

    let mut chars = value.chars();

    for i in 0..20 {
        let c = chars.next().with_context(|| "less than 20 chars")?;

        if c as u32 > 255 {
            return Err(anyhow::anyhow!(
                "character not in single byte range: {:#?}",
                c
            ));
        }

        if c == '%' {
            let first = chars
                .next()
                .with_context(|| "missing first urldecode char in pair")?;
            let second = chars
                .next()
                .with_context(|| "missing second urldecode char in pair")?;

            let hex = [first as u8, second as u8];

            hex::decode_to_slice(hex, &mut out_arr[i..i + 1])
                .map_err(|err| anyhow::anyhow!("hex decode error: {:?}", err))?;
        } else {
            out_arr[i] = c as u8;
        }
    }

    if chars.next().is_some() {
        return Err(anyhow::anyhow!("more than 20 chars"));
    }

    Ok(out_arr)
}

#[inline]
pub fn serialize_optional_string<S>(v: &Option<String>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    match v {
        Some(s) => serializer.serialize_str(s.as_str()),
        None => Err(serde::ser::Error::custom("use skip_serializing_if")),
    }
}

#[inline]
pub fn serialize_20_bytes<S>(bytes: &[u8; 20], serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    serializer.serialize_bytes(bytes)
}

struct TwentyByteVisitor;

impl<'de> Visitor<'de> for TwentyByteVisitor {
    type Value = [u8; 20];

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("20 bytes")
    }

    #[inline]
    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
    where
        E: ::serde::de::Error,
    {
        if value.len() != 20 {
            return Err(::serde::de::Error::custom("not 20 bytes"));
        }

        let mut arr = [0u8; 20];

        arr.copy_from_slice(value);

        Ok(arr)
    }
}

#[inline]
pub fn deserialize_20_bytes<'de, D>(deserializer: D) -> Result<[u8; 20], D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(TwentyByteVisitor)
}

pub fn serialize_response_peers_ipv4<S>(
    response_peers: &[ResponsePeer<Ipv4Addr>],
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let mut bytes = Vec::with_capacity(response_peers.len() * 6);

    for peer in response_peers {
        bytes.extend_from_slice(&u32::from(peer.ip_address).to_be_bytes());
        bytes.extend_from_slice(&peer.port.to_be_bytes())
    }

    serializer.serialize_bytes(&bytes)
}

pub fn serialize_response_peers_ipv6<S>(
    response_peers: &[ResponsePeer<Ipv6Addr>],
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    // 18 bytes per IPv6 peer: 16 address bytes plus 2 port bytes
    let mut bytes = Vec::with_capacity(response_peers.len() * 18);

    for peer in response_peers {
        bytes.extend_from_slice(&u128::from(peer.ip_address).to_be_bytes());
        bytes.extend_from_slice(&peer.port.to_be_bytes())
    }

    serializer.serialize_bytes(&bytes)
}

struct ResponsePeersIpv4Visitor;

impl<'de> Visitor<'de> for ResponsePeersIpv4Visitor {
    type Value = Vec<ResponsePeer<Ipv4Addr>>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("byte-encoded ipv4 address-port pairs")
    }

    #[inline]
    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
    where
        E: ::serde::de::Error,
    {
        let chunks = value.chunks_exact(6);

        if !chunks.remainder().is_empty() {
            return Err(::serde::de::Error::custom("trailing bytes"));
        }

        let mut ip_bytes = [0u8; 4];
        let mut port_bytes = [0u8; 2];

        let peers = chunks
            .into_iter()
            .map(|chunk| {
                ip_bytes.copy_from_slice(&chunk[0..4]);
                port_bytes.copy_from_slice(&chunk[4..6]);

                ResponsePeer {
                    ip_address: Ipv4Addr::from(u32::from_be_bytes(ip_bytes)),
                    port: u16::from_be_bytes(port_bytes),
                }
            })
            .collect();

        Ok(peers)
    }
}

#[inline]
pub fn deserialize_response_peers_ipv4<'de, D>(
    deserializer: D,
) -> Result<Vec<ResponsePeer<Ipv4Addr>>, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(ResponsePeersIpv4Visitor)
}

struct ResponsePeersIpv6Visitor;

impl<'de> Visitor<'de> for ResponsePeersIpv6Visitor {
    type Value = Vec<ResponsePeer<Ipv6Addr>>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("byte-encoded ipv6 address-port pairs")
    }

    #[inline]
    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
    where
        E: ::serde::de::Error,
    {
        let chunks = value.chunks_exact(18);

        if !chunks.remainder().is_empty() {
            return Err(::serde::de::Error::custom("trailing bytes"));
        }

        let mut ip_bytes = [0u8; 16];
        let mut port_bytes = [0u8; 2];

        let peers = chunks
            .into_iter()
            .map(|chunk| {
                ip_bytes.copy_from_slice(&chunk[0..16]);
                port_bytes.copy_from_slice(&chunk[16..18]);

                ResponsePeer {
                    ip_address: Ipv6Addr::from(u128::from_be_bytes(ip_bytes)),
                    port: u16::from_be_bytes(port_bytes),
                }
            })
            .collect();

        Ok(peers)
    }
}

#[inline]
pub fn deserialize_response_peers_ipv6<'de, D>(
    deserializer: D,
) -> Result<Vec<ResponsePeer<Ipv6Addr>>, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(ResponsePeersIpv6Visitor)
}

#[cfg(test)]
mod tests {
    use quickcheck_macros::*;

    use crate::common::InfoHash;

    use super::*;

    #[test]
    fn test_urlencode_20_bytes() {
        let mut input = [0u8; 20];

        for (i, b) in input.iter_mut().enumerate() {
            *b = i as u8 % 10;
        }

        let mut output = Vec::new();

        urlencode_20_bytes(input, &mut output).unwrap();

        assert_eq!(output.len(), 60);

        for (i, chunk) in output.chunks_exact(3).enumerate() {
            // Not perfect but should do the job
            let reference = [b'%', b'0', input[i] + 48];

            let success = chunk == reference;

            if !success {
                println!("failing index: {}", i);
            }

            assert_eq!(chunk, reference);
        }
    }

    #[allow(clippy::too_many_arguments)]
    #[quickcheck]
    fn test_urlencode_urldecode_20_bytes(
        a: u8,
        b: u8,
        c: u8,
        d: u8,
        e: u8,
        f: u8,
        g: u8,
        h: u8,
    ) -> bool {
        let input: [u8; 20] = [a, b, c, d, e, f, g, h, b, c, d, a, e, f, g, h, a, b, d, c];

        let mut output = Vec::new();

        urlencode_20_bytes(input, &mut output).unwrap();

        let s = ::std::str::from_utf8(&output).unwrap();

        let decoded = urldecode_20_bytes(s).unwrap();

        assert_eq!(input, decoded);

        input == decoded
    }

    #[quickcheck]
    fn test_serde_response_peers_ipv4(peers: Vec<ResponsePeer<Ipv4Addr>>) -> bool {
        let serialized = bendy::serde::to_bytes(&peers).unwrap();
        let deserialized: Vec<ResponsePeer<Ipv4Addr>> =
            ::bendy::serde::from_bytes(&serialized).unwrap();

        peers == deserialized
    }

    #[quickcheck]
    fn test_serde_response_peers_ipv6(peers: Vec<ResponsePeer<Ipv6Addr>>) -> bool {
        let serialized = bendy::serde::to_bytes(&peers).unwrap();
        let deserialized: Vec<ResponsePeer<Ipv6Addr>> =
            ::bendy::serde::from_bytes(&serialized).unwrap();

        peers == deserialized
    }

    #[quickcheck]
    fn test_serde_info_hash(info_hash: InfoHash) -> bool {
        let serialized = bendy::serde::to_bytes(&info_hash).unwrap();
        let deserialized: InfoHash = ::bendy::serde::from_bytes(&serialized).unwrap();

        info_hash == deserialized
    }
}
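Each of the 20 input bytes becomes a three-byte '%XX' triple, so urlencode_20_bytes always emits exactly 60 bytes. A quick standalone check of the same layout (the helpers above are private to the crate, so this uses the hex crate directly):

fn main() {
    let input: [u8; 20] = [0xfa; 20];

    // Same layout as the encoder above: '%' plus two lowercase hex digits per byte
    let mut out = [b'%'; 60];

    for i in 0..input.len() {
        hex::encode_to_slice(&input[i..i + 1], &mut out[i * 3 + 1..i * 3 + 3]).unwrap();
    }

    assert_eq!(&out[..6], b"%fa%fa");
}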
@ -0,0 +1 @@
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}
@ -0,0 +1 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6033.211414448978,"upper_bound":6077.812796004471},"point_estimate":6054.625623439862,"standard_error":11.387162302248655},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5978.799232230455,"upper_bound":6005.189535363421},"point_estimate":5992.745967541798,"standard_error":6.185398365563177},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":157.08470879401094,"upper_bound":190.1634482791119},"point_estimate":175.51713287349847,"standard_error":8.3821979113297},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6052.909623777413,"upper_bound":6106.324900686703},"point_estimate":6078.257114077077,"standard_error":13.648790489926581},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":285.8045348063516,"upper_bound":442.7497149360172},"point_estimate":363.44843558752416,"standard_error":40.16921333191484}}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -0,0 +1 @@
[5184.137608004838,5534.60305616611,6469.177584596171,6819.643032757444]
@ -0,0 +1 @@
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}
@ -0,0 +1 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":816.5793263757998,"upper_bound":829.8277072322014},"point_estimate":823.0324170546021,"standard_error":3.3713205895235987},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":785.8508214740125,"upper_bound":790.3983678702459},"point_estimate":787.3168084640594,"standard_error":1.2374611050301572},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":34.7791109454705,"upper_bound":44.243901222281416},"point_estimate":40.0754205033,"standard_error":2.42022909705503},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":811.6440256190905,"upper_bound":823.2086243755138},"point_estimate":817.2846212085899,"standard_error":2.95472132616886},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":92.90279248590167,"upper_bound":121.73387529852707},"point_estimate":107.2944955313405,"standard_error":7.401429548815175}}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -0,0 +1 @@
[565.2398956433274,665.7749574634894,933.8684556505881,1034.40351747075]
@ -0,0 +1 @@
{"group_id":"request-from-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"request-from-bytes","directory_name":"request-from-bytes","title":"request-from-bytes"}
@ -0,0 +1 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":791.6783637138329,"upper_bound":798.2060382161882},"point_estimate":794.7777653239414,"standard_error":1.670679553768017},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":786.1377247215969,"upper_bound":789.3747173913043},"point_estimate":788.2154281612928,"standard_error":0.9080984924572599},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":34.47577000388577,"upper_bound":38.99231743541378},"point_estimate":37.25560574108035,"standard_error":1.1689453074940308},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":791.1964524096214,"upper_bound":798.189227060581},"point_estimate":794.5503586699593,"standard_error":1.785366051793957},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":41.22148757811178,"upper_bound":64.85026519223337},"point_estimate":52.942361554527636,"standard_error":6.055601310575156}}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -0,0 +1 @@
[635.6000013134935,698.449239826088,866.0472091930068,928.8964477056013]
25
apps/aquatic/crates/peer_id/Cargo.toml
Normal file
@ -0,0 +1,25 @@
[package]
name = "aquatic_peer_id"
description = "BitTorrent peer ID handling"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "./README.md"

[lib]
name = "aquatic_peer_id"

[features]
default = ["quickcheck"]

[dependencies]
compact_str = "0.8"
hex = "0.4"
regex = "1"
serde = { version = "1", features = ["derive"] }
quickcheck = { version = "1", optional = true }
zerocopy = { version = "0.7", features = ["derive"] }
3
apps/aquatic/crates/peer_id/README.md
Normal file
@ -0,0 +1,3 @@
# aquatic_peer_id

Extract BitTorrent client information from announce request peer IDs.
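A minimal usage sketch based on the API in the source file below (the peer ID bytes follow the Azureus-style `-lt1234-` convention that this crate parses):

```rust
use aquatic_peer_id::{PeerClient, PeerId};

fn main() {
    // Peer ID as a libtorrent (rakshasa) client would send it in an
    // announce request: Azureus-style prefix, then arbitrary bytes
    let mut bytes = [0u8; 20];
    bytes[..8].copy_from_slice(b"-lt1234-");

    let client = PeerClient::from_peer_id(&PeerId(bytes));

    // The Display implementation prints "lt (rakshasa) 1.23.4"
    println!("{}", client);
}
```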
293
apps/aquatic/crates/peer_id/src/lib.rs
Normal file
@ -0,0 +1,293 @@
use std::{borrow::Cow, fmt::Display, sync::OnceLock};

use compact_str::{format_compact, CompactString};
use regex::bytes::Regex;
use serde::{Deserialize, Serialize};
use zerocopy::{AsBytes, FromBytes, FromZeroes};

#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Serialize,
    Deserialize,
    AsBytes,
    FromBytes,
    FromZeroes,
)]
#[repr(transparent)]
pub struct PeerId(pub [u8; 20]);

impl PeerId {
    pub fn client(&self) -> PeerClient {
        PeerClient::from_peer_id(self)
    }
    pub fn first_8_bytes_hex(&self) -> CompactString {
        let mut buf = [0u8; 16];

        hex::encode_to_slice(&self.0[..8], &mut buf)
            .expect("PeerId.first_8_bytes_hex buffer too small");

        CompactString::from_utf8_lossy(&buf)
    }
}

#[non_exhaustive]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PeerClient {
    BitTorrent(CompactString),
    Deluge(CompactString),
    LibTorrentRakshasa(CompactString),
    LibTorrentRasterbar(CompactString),
    QBitTorrent(CompactString),
    Transmission(CompactString),
    UTorrent(CompactString),
    UTorrentEmbedded(CompactString),
    UTorrentMac(CompactString),
    UTorrentWeb(CompactString),
    Vuze(CompactString),
    WebTorrent(CompactString),
    WebTorrentDesktop(CompactString),
    Mainline(CompactString),
    OtherWithPrefixAndVersion {
        prefix: CompactString,
        version: CompactString,
    },
    OtherWithPrefix(CompactString),
    Other,
}

impl PeerClient {
    pub fn from_prefix_and_version(prefix: &[u8], version: &[u8]) -> Self {
        fn three_digits_plus_prerelease(v1: char, v2: char, v3: char, v4: char) -> CompactString {
            let prerelease: Cow<str> = match v4 {
                'd' | 'D' => " dev".into(),
                'a' | 'A' => " alpha".into(),
                'b' | 'B' => " beta".into(),
                'r' | 'R' => " rc".into(),
                's' | 'S' => " stable".into(),
                other => format_compact!("{}", other).into(),
            };

            format_compact!("{}.{}.{}{}", v1, v2, v3, prerelease)
        }

        fn webtorrent(v1: char, v2: char, v3: char, v4: char) -> CompactString {
            let major = if v1 == '0' {
                format_compact!("{}", v2)
            } else {
                format_compact!("{}{}", v1, v2)
            };

            let minor = if v3 == '0' {
                format_compact!("{}", v4)
            } else {
                format_compact!("{}{}", v3, v4)
            };

            format_compact!("{}.{}", major, minor)
        }

        if let [v1, v2, v3, v4] = version {
            let (v1, v2, v3, v4) = (*v1 as char, *v2 as char, *v3 as char, *v4 as char);

            match prefix {
                b"AZ" => Self::Vuze(format_compact!("{}.{}.{}.{}", v1, v2, v3, v4)),
                b"BT" => Self::BitTorrent(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"DE" => Self::Deluge(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"lt" => Self::LibTorrentRakshasa(format_compact!("{}.{}{}.{}", v1, v2, v3, v4)),
                b"LT" => Self::LibTorrentRasterbar(format_compact!("{}.{}{}.{}", v1, v2, v3, v4)),
                b"qB" => Self::QBitTorrent(format_compact!("{}.{}.{}", v1, v2, v3)),
                b"TR" => {
                    let v = match (v1, v2, v3, v4) {
                        ('0', '0', '0', v4) => format_compact!("0.{}", v4),
                        ('0', '0', v3, v4) => format_compact!("0.{}{}", v3, v4),
                        _ => format_compact!("{}.{}{}", v1, v2, v3),
                    };

                    Self::Transmission(v)
                }
                b"UE" => Self::UTorrentEmbedded(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"UM" => Self::UTorrentMac(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"UT" => Self::UTorrent(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"UW" => Self::UTorrentWeb(three_digits_plus_prerelease(v1, v2, v3, v4)),
                b"WD" => Self::WebTorrentDesktop(webtorrent(v1, v2, v3, v4)),
                b"WW" => Self::WebTorrent(webtorrent(v1, v2, v3, v4)),
                _ => Self::OtherWithPrefixAndVersion {
                    prefix: CompactString::from_utf8_lossy(prefix),
                    version: CompactString::from_utf8_lossy(version),
                },
            }
        } else {
            match (prefix, version) {
                (b"M", &[major, b'-', minor, b'-', patch, b'-']) => Self::Mainline(
                    format_compact!("{}.{}.{}", major as char, minor as char, patch as char),
                ),
                (b"M", &[major, b'-', minor1, minor2, b'-', patch]) => {
                    Self::Mainline(format_compact!(
                        "{}.{}{}.{}",
                        major as char,
                        minor1 as char,
                        minor2 as char,
                        patch as char
                    ))
                }
                _ => Self::OtherWithPrefixAndVersion {
                    prefix: CompactString::from_utf8_lossy(prefix),
                    version: CompactString::from_utf8_lossy(version),
                },
            }
        }
    }

    pub fn from_peer_id(peer_id: &PeerId) -> Self {
        static AZ_RE: OnceLock<Regex> = OnceLock::new();

        if let Some(caps) = AZ_RE
            .get_or_init(|| {
                Regex::new(r"^\-(?P<name>[a-zA-Z]{2})(?P<version>[0-9]{3}[0-9a-zA-Z])")
                    .expect("compile AZ_RE regex")
            })
            .captures(&peer_id.0)
        {
            return Self::from_prefix_and_version(&caps["name"], &caps["version"]);
        }

        static MAINLINE_RE: OnceLock<Regex> = OnceLock::new();

        if let Some(caps) = MAINLINE_RE
            .get_or_init(|| {
                Regex::new(r"^(?P<name>[a-zA-Z])(?P<version>[0-9\-]{6})\-")
                    .expect("compile MAINLINE_RE regex")
            })
            .captures(&peer_id.0)
        {
            return Self::from_prefix_and_version(&caps["name"], &caps["version"]);
        }

        static PREFIX_RE: OnceLock<Regex> = OnceLock::new();

        if let Some(caps) = PREFIX_RE
            .get_or_init(|| {
                Regex::new(r"^(?P<prefix>[a-zA-Z0-9\-]+)\-").expect("compile PREFIX_RE regex")
            })
            .captures(&peer_id.0)
        {
            return Self::OtherWithPrefix(CompactString::from_utf8_lossy(&caps["prefix"]));
        }

        Self::Other
    }
}

impl Display for PeerClient {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::BitTorrent(v) => write!(f, "BitTorrent {}", v.as_str()),
            Self::Deluge(v) => write!(f, "Deluge {}", v.as_str()),
            Self::LibTorrentRakshasa(v) => write!(f, "lt (rakshasa) {}", v.as_str()),
            Self::LibTorrentRasterbar(v) => write!(f, "lt (rasterbar) {}", v.as_str()),
            Self::QBitTorrent(v) => write!(f, "QBitTorrent {}", v.as_str()),
            Self::Transmission(v) => write!(f, "Transmission {}", v.as_str()),
            Self::UTorrent(v) => write!(f, "µTorrent {}", v.as_str()),
            Self::UTorrentEmbedded(v) => write!(f, "µTorrent Emb. {}", v.as_str()),
            Self::UTorrentMac(v) => write!(f, "µTorrent Mac {}", v.as_str()),
            Self::UTorrentWeb(v) => write!(f, "µTorrent Web {}", v.as_str()),
            Self::Vuze(v) => write!(f, "Vuze {}", v.as_str()),
            Self::WebTorrent(v) => write!(f, "WebTorrent {}", v.as_str()),
            Self::WebTorrentDesktop(v) => write!(f, "WebTorrent Desktop {}", v.as_str()),
            Self::Mainline(v) => write!(f, "Mainline {}", v.as_str()),
            Self::OtherWithPrefixAndVersion { prefix, version } => {
                write!(f, "Other ({}) ({})", prefix.as_str(), version.as_str())
            }
            Self::OtherWithPrefix(prefix) => write!(f, "Other ({})", prefix.as_str()),
            Self::Other => f.write_str("Other"),
        }
    }
}

#[cfg(feature = "quickcheck")]
impl quickcheck::Arbitrary for PeerId {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        let mut bytes = [0u8; 20];

        for byte in bytes.iter_mut() {
            *byte = u8::arbitrary(g);
        }

        Self(bytes)
    }
}

#[cfg(feature = "quickcheck")]
#[cfg(test)]
mod tests {
    use super::*;

    fn create_peer_id(bytes: &[u8]) -> PeerId {
        let mut peer_id = PeerId([0; 20]);

        let len = bytes.len();

        peer_id.0[..len].copy_from_slice(bytes);

        peer_id
    }

    #[test]
    fn test_client_from_peer_id() {
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-lt1234-k/asdh3")),
            PeerClient::LibTorrentRakshasa("1.23.4".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-DE123s-k/asdh3")),
            PeerClient::Deluge("1.2.3 stable".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-DE123r-k/asdh3")),
            PeerClient::Deluge("1.2.3 rc".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-UT123A-k/asdh3")),
            PeerClient::UTorrent("1.2.3 alpha".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-TR0012-k/asdh3")),
            PeerClient::Transmission("0.12".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-TR1212-k/asdh3")),
            PeerClient::Transmission("1.21".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-WW0102-k/asdh3")),
            PeerClient::WebTorrent("1.2".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-WW1302-k/asdh3")),
            PeerClient::WebTorrent("13.2".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"-WW1324-k/asdh3")),
            PeerClient::WebTorrent("13.24".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"M1-2-3--k/asdh3")),
            PeerClient::Mainline("1.2.3".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"M1-23-4-k/asdh3")),
            PeerClient::Mainline("1.23.4".into())
        );
        assert_eq!(
            PeerClient::from_peer_id(&create_peer_id(b"S3-k/asdh3")),
            PeerClient::OtherWithPrefix("S3".into())
        );
    }
}
23
apps/aquatic/crates/toml_config/Cargo.toml
Normal file
@ -0,0 +1,23 @@
[package]
name = "aquatic_toml_config"
description = "Serialize toml with comments"
keywords = ["toml"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[lib]
name = "aquatic_toml_config"

[dependencies]
toml = "0.5"
aquatic_toml_config_derive.workspace = true

[dev-dependencies]
serde = { version = "1.0", features = ["derive"] }
quickcheck = "1"
quickcheck_macros = "1"
128
apps/aquatic/crates/toml_config/src/lib.rs
Normal file
@ -0,0 +1,128 @@
pub use aquatic_toml_config_derive::TomlConfig;
pub use toml;

/// Run this on your struct implementing TomlConfig to generate a
/// serialization/deserialization test for it.
#[macro_export]
macro_rules! gen_serialize_deserialize_test {
    ($ident:ident) => {
        #[test]
        fn test_cargo_toml_serialize_deserialize() {
            use ::aquatic_toml_config::TomlConfig;
            let serialized = $ident::default_to_string();
            let deserialized = ::aquatic_toml_config::toml::de::from_str(&serialized).unwrap();

            assert_eq!($ident::default(), deserialized);
        }
    };
}

/// Export structs to toml, converting Rust doc strings to comments.
///
/// Supports one level of nesting. Fields containing structs must come
/// after regular fields.
///
/// Usage:
/// ```
/// use aquatic_toml_config::TomlConfig;
///
/// #[derive(TomlConfig)]
/// struct SubConfig {
///     /// A
///     a: usize,
///     /// B
///     b: String,
/// }
///
/// impl Default for SubConfig {
///     fn default() -> Self {
///         Self {
///             a: 200,
///             b: "subconfig hello".into(),
///         }
///     }
/// }
///
/// #[derive(TomlConfig)]
/// struct Config {
///     /// A
///     a: usize,
///     /// B
///     b: String,
///     /// C
///     c: SubConfig,
/// }
///
/// impl Default for Config {
///     fn default() -> Self {
///         Self {
///             a: 100,
///             b: "hello".into(),
///             c: Default::default(),
///         }
///     }
/// }
///
/// let expected = "# A\na = 100\n# B\nb = \"hello\"\n\n# C\n[c]\n# A\na = 200\n# B\nb = \"subconfig hello\"\n";
///
/// assert_eq!(
///     Config::default_to_string(),
///     expected,
/// );
/// ```
pub trait TomlConfig: Default {
    fn default_to_string() -> String;
}

pub mod __private {
    use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
    use std::path::PathBuf;

    pub trait Private {
        fn __to_string(&self, comment: Option<String>, field_name: String) -> String;
    }

    macro_rules! impl_trait {
        ($ident:ident) => {
            impl Private for $ident {
                fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
                    let mut output = String::new();

                    if let Some(comment) = comment {
                        output.push_str(&comment);
                    }

                    let value = crate::toml::ser::to_string(self).unwrap();

                    output.push_str(&format!("{} = {}\n", field_name, value));

                    output
                }
            }
        };
    }

    impl_trait!(isize);
    impl_trait!(i8);
    impl_trait!(i16);
    impl_trait!(i32);
    impl_trait!(i64);

    impl_trait!(usize);
    impl_trait!(u8);
    impl_trait!(u16);
    impl_trait!(u32);
    impl_trait!(u64);

    impl_trait!(f32);
    impl_trait!(f64);

    impl_trait!(bool);

    impl_trait!(String);

    impl_trait!(PathBuf);
    impl_trait!(SocketAddr);
    impl_trait!(SocketAddrV4);
    impl_trait!(SocketAddrV6);
}
46
apps/aquatic/crates/toml_config/tests/test.rs
Normal file
@ -0,0 +1,46 @@
use serde::Deserialize;

use aquatic_toml_config::{gen_serialize_deserialize_test, TomlConfig};

#[derive(Clone, Debug, PartialEq, Eq, TomlConfig, Deserialize)]
struct TestConfigInnerA {
    /// Comment for a
    a: String,
    /// Comment for b
    b: usize,
}

impl Default for TestConfigInnerA {
    fn default() -> Self {
        Self {
            a: "Inner hello world".into(),
            b: 100,
        }
    }
}

/// Comment for TestConfig
#[derive(Clone, Debug, PartialEq, Eq, TomlConfig, Deserialize)]
struct TestConfig {
    /// Comment for a that stretches over
    /// multiple lines
    a: String,
    /// Comment for b
    b: usize,
    c: bool,
    /// Comment for TestConfigInnerA
    inner_a: TestConfigInnerA,
}

impl Default for TestConfig {
    fn default() -> Self {
        Self {
            a: "Hello, world!".into(),
            b: 100,
            c: true,
            inner_a: Default::default(),
        }
    }
}

gen_serialize_deserialize_test!(TestConfig);
20
apps/aquatic/crates/toml_config_derive/Cargo.toml
Normal file
@ -0,0 +1,20 @@
[package]
name = "aquatic_toml_config_derive"
description = "Serialize toml with comments"
exclude = ["target"]
keywords = ["toml"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[lib]
proc-macro = true

[dependencies]
proc-macro2 = "1"
quote = "1"
syn = "1"
174
apps/aquatic/crates/toml_config_derive/src/lib.rs
Normal file
@ -0,0 +1,174 @@
use proc_macro2::{TokenStream, TokenTree};
use quote::quote;
use syn::{parse_macro_input, Attribute, Data, DataStruct, DeriveInput, Fields, Ident, Type};

#[proc_macro_derive(TomlConfig)]
pub fn derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    let input = parse_macro_input!(input as DeriveInput);

    let comment = extract_comment_string(input.attrs);
    let ident = input.ident;

    match input.data {
        Data::Struct(struct_data) => {
            let mut output_stream = quote! {
                let mut output = String::new();
            };

            extract_from_struct(ident.clone(), struct_data, &mut output_stream);

            proc_macro::TokenStream::from(quote! {
                impl ::aquatic_toml_config::TomlConfig for #ident {
                    fn default_to_string() -> String {
                        let mut output = String::new();

                        let comment: Option<String> = #comment;

                        if let Some(comment) = comment {
                            output.push_str(&comment);
                            output.push('\n');
                        }

                        let body = {
                            #output_stream

                            output
                        };

                        output.push_str(&body);

                        output
                    }
                }
                impl ::aquatic_toml_config::__private::Private for #ident {
                    fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
                        let mut output = String::new();

                        output.push('\n');

                        if let Some(comment) = comment {
                            output.push_str(&comment);
                        }
                        output.push_str(&format!("[{}]\n", field_name));

                        let body = {
                            #output_stream

                            output
                        };

                        output.push_str(&body);

                        output
                    }
                }
            })
        }
        Data::Enum(_) => proc_macro::TokenStream::from(quote! {
            impl ::aquatic_toml_config::__private::Private for #ident {
                fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
                    let mut output = String::new();
                    let wrapping_comment: Option<String> = #comment;

                    if let Some(comment) = wrapping_comment {
                        output.push_str(&comment);
                    }

                    if let Some(comment) = comment {
                        output.push_str(&comment);
                    }

                    let value = match ::aquatic_toml_config::toml::ser::to_string(self) {
                        Ok(value) => value,
                        Err(err) => panic!("Couldn't serialize enum to toml: {:#}", err),
                    };

                    output.push_str(&format!("{} = {}\n", field_name, value));

                    output
                }
            }
        }),
        Data::Union(_) => panic!("Unions are not supported"),
    }
}

fn extract_from_struct(
    struct_ty_ident: Ident,
    struct_data: DataStruct,
    output_stream: &mut TokenStream,
) {
    let fields = if let Fields::Named(fields) = struct_data.fields {
        fields
    } else {
        panic!("Fields are not named");
    };

    output_stream.extend(::std::iter::once(quote! {
        let struct_default = #struct_ty_ident::default();
    }));

    for field in fields.named.into_iter() {
        let ident = field.ident.expect("Encountered unnamed field");
        let ident_string = format!("{}", ident);
        let comment = extract_comment_string(field.attrs);

        if let Type::Path(path) = field.ty {
            output_stream.extend(::std::iter::once(quote! {
                {
                    let comment: Option<String> = #comment;
                    let field_default: #path = struct_default.#ident;

                    let s: String = ::aquatic_toml_config::__private::Private::__to_string(
                        &field_default,
                        comment,
                        #ident_string.to_string()
                    );
                    output.push_str(&s);
                }
            }));
        }
    }
}

fn extract_comment_string(attrs: Vec<Attribute>) -> TokenStream {
    let mut output = String::new();

    for attr in attrs.into_iter() {
        let path_ident = if let Some(path_ident) = attr.path.get_ident() {
            path_ident
        } else {
            continue;
        };

        if format!("{}", path_ident) != "doc" {
            continue;
        }

        for token_tree in attr.tokens {
            if let TokenTree::Literal(literal) = token_tree {
                let mut comment = format!("{}", literal);

                // Strip leading and trailing quotation marks
                comment.remove(comment.len() - 1);
                comment.remove(0);

                // Add toml comment indicator
                comment.insert(0, '#');

                output.push_str(&comment);
                output.push('\n');
            }
        }
    }

    if output.is_empty() {
        quote! {
            None
        }
    } else {
        quote! {
            Some(#output.to_string())
        }
    }
}
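Tying the two crates together, here is a minimal sketch of how the derive is used end to end (the `Example` struct is hypothetical; the expected output shape follows the trait documentation and derive logic above):

```rust
use aquatic_toml_config::TomlConfig;

/// Tracker settings
#[derive(TomlConfig)]
struct Example {
    /// Port to listen on
    port: u16,
}

impl Default for Example {
    fn default() -> Self {
        Self { port: 3000 }
    }
}

fn main() {
    // Doc comments are rendered as TOML comments; per the derive logic
    // above, this should print roughly:
    //
    //   # Tracker settings
    //
    //   # Port to listen on
    //   port = 3000
    print!("{}", Example::default_to_string());
}
```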
73
apps/aquatic/crates/udp/Cargo.toml
Normal file
@ -0,0 +1,73 @@
[package]
name = "aquatic_udp"
description = "High-performance open UDP BitTorrent tracker"
keywords = ["udp", "server", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true

readme = "./README.md"

[lib]
name = "aquatic_udp"

[[bin]]
name = "aquatic_udp"

[features]
default = ["prometheus", "mimalloc"]
# Export prometheus metrics
prometheus = ["metrics", "aquatic_common/prometheus"]
# Experimental io_uring support (Linux 6.0 or later required)
io-uring = ["dep:io-uring"]
# Use mimalloc allocator for much better performance.
#
# Requires cmake and a C compiler
mimalloc = ["dep:mimalloc"]

[dependencies]
aquatic_common.workspace = true
aquatic_toml_config.workspace = true
aquatic_udp_protocol.workspace = true

anyhow = "1"
arrayvec = "0.7"
blake3 = "1"
cfg-if = "1"
compact_str = "0.8"
constant_time_eq = "0.3"
crossbeam-channel = "0.5"
crossbeam-utils = "0.8"
getrandom = "0.2"
hashbrown = { version = "0.15", default-features = false }
hdrhistogram = "7"
hex = "0.4"
libc = "0.2"
log = "0.4"
mio = { version = "1", features = ["net", "os-poll"] }
num-format = "0.4"
parking_lot = "0.12"
rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] }
signal-hook = { version = "0.3" }
slab = "0.4"
socket2 = { version = "0.5", features = ["all"] }
time = { version = "0.3", features = ["formatting"] }
tinytemplate = "1"

# prometheus feature
metrics = { version = "0.24", optional = true }

# io-uring feature
io-uring = { version = "0.7", optional = true }

# mimalloc feature
mimalloc = { version = "0.1", default-features = false, optional = true }

[dev-dependencies]
tempfile = "3"
quickcheck = "1"
quickcheck_macros = "1"
94
apps/aquatic/crates/udp/README.md
Normal file
@ -0,0 +1,94 @@
# aquatic_udp: high-performance open UDP BitTorrent tracker

[CI](https://github.com/greatest-ape/aquatic/actions/workflows/ci.yml)

High-performance open UDP BitTorrent tracker for Unix-like operating systems.

Features at a glance:

- Multithreaded design for handling large amounts of traffic
- All data is stored in-memory (no database needed)
- IPv4 and IPv6 support
- Supports forbidding/allowing info hashes
- Prometheus metrics
- Automated CI testing of full file transfers

Known users:

- [explodie.org public tracker](https://explodie.org/opentracker.html) (`udp://explodie.org:6969`), typically [serving ~100,000 requests per second](https://explodie.org/tracker-stats.html)

This is the most mature implementation in the aquatic family. I consider it fully ready for production use.

## Performance

*(Load test benchmark graph omitted in this view.)*

More benchmark details are available [here](../../documents/aquatic-udp-load-test-2024-02-10.md).

## Usage

### Compiling

- Install Rust with [rustup](https://rustup.rs/) (latest stable release is recommended)
- Install build dependencies with your package manager (e.g., `apt-get install cmake build-essential`)
- Clone this git repository and build the application:

```sh
git clone https://github.com/greatest-ape/aquatic.git && cd aquatic

# Recommended: tell Rust to enable support for all SIMD extensions present on
# current CPU except for those relating to AVX-512. (If you run a processor
# that doesn't clock down when using AVX-512, you can enable those instructions
# too.)
. ./scripts/env-native-cpu-without-avx-512

cargo build --release -p aquatic_udp
```

### Configuring and running

Generate the configuration file:

```sh
./target/release/aquatic_udp -p > "aquatic-udp-config.toml"
```

Make necessary adjustments to the file. You will likely want to adjust
listening addresses under the `network` section.

Once done, start the application:

```sh
./target/release/aquatic_udp -c "aquatic-udp-config.toml"
```

If your server is pointed to by domain `example.com` and you configured the
tracker to run on port 3000, people can now use it by adding the URL
`udp://example.com:3000` to their torrent files or magnet links.

### Load testing

A load test application is available. It supports generation and loading of
configuration files in a similar manner to the tracker application.

After starting the tracker, run the load tester:

```sh
. ./scripts/env-native-cpu-without-avx-512 # Optional

cargo run --release -p aquatic_udp_load_test -- --help
```

## Details

Implements [BEP 015](https://www.bittorrent.org/beps/bep_0015.html) ([more details](https://libtorrent.org/udp_tracker_protocol.html)) with the following exceptions:

- Ignores IP addresses sent in announce requests. The packet source IP is always used.
- Doesn't track the number of torrent downloads (0 is always sent).

## Copyright and license

Copyright (c) Joakim Frostegård

Distributed under the terms of the Apache License, Version 2.0. Please refer to
the `LICENSE` file in the repository root directory for details.
148
apps/aquatic/crates/udp/src/common.rs
Normal file
@ -0,0 +1,148 @@
use std::iter::repeat_with;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;

use aquatic_common::access_list::AccessListArcSwap;
use aquatic_common::ServerStartInstant;
use aquatic_udp_protocol::*;
use crossbeam_utils::CachePadded;
use hdrhistogram::Histogram;

use crate::config::Config;
use crate::swarm::TorrentMaps;

pub const BUFFER_SIZE: usize = 8192;

#[derive(Clone, Copy, Debug)]
pub enum IpVersion {
    V4,
    V6,
}

#[cfg(feature = "prometheus")]
impl IpVersion {
    pub fn prometheus_str(&self) -> &'static str {
        match self {
            Self::V4 => "4",
            Self::V6 => "6",
        }
    }
}

#[derive(Clone)]
pub struct Statistics {
    pub socket: Vec<CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>>,
    pub swarm: CachePaddedArc<IpVersionStatistics<SwarmWorkerStatistics>>,
}

impl Statistics {
    pub fn new(config: &Config) -> Self {
        Self {
            socket: repeat_with(Default::default)
                .take(config.socket_workers)
                .collect(),
            swarm: Default::default(),
        }
    }
}

#[derive(Default)]
pub struct IpVersionStatistics<T> {
    pub ipv4: T,
    pub ipv6: T,
}

impl<T> IpVersionStatistics<T> {
    pub fn by_ip_version(&self, ip_version: IpVersion) -> &T {
        match ip_version {
            IpVersion::V4 => &self.ipv4,
            IpVersion::V6 => &self.ipv6,
        }
    }
}

#[derive(Default)]
pub struct SocketWorkerStatistics {
    pub requests: AtomicUsize,
    pub responses_connect: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_error: AtomicUsize,
    pub bytes_received: AtomicUsize,
    pub bytes_sent: AtomicUsize,
}

pub type CachePaddedArc<T> = CachePadded<Arc<CachePadded<T>>>;

#[derive(Default)]
pub struct SwarmWorkerStatistics {
    pub torrents: AtomicUsize,
    pub peers: AtomicUsize,
}

pub enum StatisticsMessage {
    Ipv4PeerHistogram(Histogram<u64>),
    Ipv6PeerHistogram(Histogram<u64>),
    PeerAdded(PeerId),
    PeerRemoved(PeerId),
}

#[derive(Clone)]
pub struct State {
    pub access_list: Arc<AccessListArcSwap>,
    pub torrent_maps: TorrentMaps,
    pub server_start_instant: ServerStartInstant,
}

impl Default for State {
    fn default() -> Self {
        Self {
            access_list: Arc::new(AccessListArcSwap::default()),
            torrent_maps: TorrentMaps::default(),
            server_start_instant: ServerStartInstant::new(),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::{net::Ipv6Addr, num::NonZeroU16};

    use crate::config::Config;

    use super::*;

    // Assumes that announce response with maximum amount of ipv6 peers will
    // be the longest
    #[test]
    fn test_buffer_size() {
        use aquatic_udp_protocol::*;

        let config = Config::default();

        let peers = ::std::iter::repeat(ResponsePeer {
            ip_address: Ipv6AddrBytes(Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1).octets()),
            port: Port::new(NonZeroU16::new(1).unwrap()),
        })
        .take(config.protocol.max_response_peers)
        .collect();

        let response = Response::AnnounceIpv6(AnnounceResponse {
            fixed: AnnounceResponseFixedData {
                transaction_id: TransactionId::new(1),
                announce_interval: AnnounceInterval::new(1),
                seeders: NumberOfPeers::new(1),
                leechers: NumberOfPeers::new(1),
            },
            peers,
        });

        let mut buf = Vec::new();

        response.write_bytes(&mut buf).unwrap();

        println!("Buffer len: {}", buf.len());

        assert!(buf.len() <= BUFFER_SIZE);
    }
}
262
apps/aquatic/crates/udp/src/config.rs
Normal file
@ -0,0 +1,262 @@
use std::{
    net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
    path::PathBuf,
};

use aquatic_common::{access_list::AccessListConfig, privileges::PrivilegeConfig};
use cfg_if::cfg_if;
use serde::{Deserialize, Serialize};

use aquatic_common::cli::LogLevel;
use aquatic_toml_config::TomlConfig;

/// aquatic_udp configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Number of socket workers
    ///
    /// 0 = automatically set to number of available virtual CPUs
    pub socket_workers: usize,
    pub log_level: LogLevel,
    pub network: NetworkConfig,
    pub protocol: ProtocolConfig,
    pub statistics: StatisticsConfig,
    pub cleaning: CleaningConfig,
    pub privileges: PrivilegeConfig,
    /// Access list configuration
    ///
    /// The file is read on start and when the program receives `SIGUSR1`. If
    /// initial parsing fails, the program exits. Later failures result in
    /// emitting of an error-level log message, while successful updates of the
    /// access list result in emitting of an info-level log message.
    pub access_list: AccessListConfig,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            socket_workers: 1,
            log_level: LogLevel::Error,
            network: NetworkConfig::default(),
            protocol: ProtocolConfig::default(),
            statistics: StatisticsConfig::default(),
            cleaning: CleaningConfig::default(),
            privileges: PrivilegeConfig::default(),
            access_list: AccessListConfig::default(),
        }
    }
}

impl aquatic_common::cli::Config for Config {
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// Use IPv4
    pub use_ipv4: bool,
    /// Use IPv6
    pub use_ipv6: bool,
    /// IPv4 address and port
    ///
    /// Examples:
    /// - Use 0.0.0.0:3000 to bind to all interfaces on port 3000
    /// - Use 127.0.0.1:3000 to bind to the loopback interface (localhost) on
    ///   port 3000
    pub address_ipv4: SocketAddrV4,
    /// IPv6 address and port
    ///
    /// Examples:
    /// - Use [::]:3000 to bind to all interfaces on port 3000
    /// - Use [::1]:3000 to bind to the loopback interface (localhost) on
    ///   port 3000
    pub address_ipv6: SocketAddrV6,
    /// Size of socket recv buffer. Use 0 for OS default.
    ///
    /// This setting can have a big impact on dropped packets. It might
    /// require changing system defaults. Some examples of commands to set
    /// values for different operating systems:
    ///
    /// macOS:
    /// $ sudo sysctl net.inet.udp.recvspace=8000000
    ///
    /// Linux:
    /// $ sudo sysctl -w net.core.rmem_max=8000000
    /// $ sudo sysctl -w net.core.rmem_default=8000000
    pub socket_recv_buffer_size: usize,
    /// Poll timeout in milliseconds (mio backend only)
    pub poll_timeout_ms: u64,
    /// Store this many responses at most for retrying (once) on send failure
    /// (mio backend only)
    ///
    /// Useful on operating systems that do not provide a UDP send buffer,
    /// such as FreeBSD. Setting the value to zero disables resending
    /// functionality.
    pub resend_buffer_max_len: usize,
    /// Set flag on IPv6 socket to only accept IPv6 traffic.
    ///
    /// This should typically be set to true unless your OS does not support
    /// double-stack sockets (that is, sockets that receive both IPv4 and IPv6
    /// packets).
    pub set_only_ipv6: bool,
    #[cfg(feature = "io-uring")]
    pub use_io_uring: bool,
    /// Number of ring entries (io_uring backend only)
    ///
    /// Will be rounded to next power of two if not already one.
    #[cfg(feature = "io-uring")]
    pub ring_size: u16,
}

impl NetworkConfig {
    pub fn ipv4_active(&self) -> bool {
        self.use_ipv4
    }
    pub fn ipv6_active(&self) -> bool {
        self.use_ipv6
    }
}

impl Default for NetworkConfig {
    fn default() -> Self {
        Self {
            use_ipv4: true,
            use_ipv6: true,
            address_ipv4: SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 3000),
            address_ipv6: SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 3000, 0, 0),
            socket_recv_buffer_size: 8_000_000,
            poll_timeout_ms: 50,
            resend_buffer_max_len: 0,
            set_only_ipv6: true,
            #[cfg(feature = "io-uring")]
            use_io_uring: true,
            #[cfg(feature = "io-uring")]
            ring_size: 128,
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProtocolConfig {
    /// Maximum number of torrents to allow in scrape request
    pub max_scrape_torrents: u8,
    /// Maximum number of peers to return in announce response
    pub max_response_peers: usize,
    /// Ask peers to announce this often (seconds)
    pub peer_announce_interval: i32,
}

impl Default for ProtocolConfig {
    fn default() -> Self {
        Self {
            max_scrape_torrents: 70,
            max_response_peers: 30,
            peer_announce_interval: 60 * 15,
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct StatisticsConfig {
    /// Collect and print/write statistics this often (seconds)
    pub interval: u64,
    /// Collect statistics on number of peers per torrent
    ///
    /// Will increase time taken for torrent cleaning.
    pub torrent_peer_histograms: bool,
    /// Collect statistics on peer clients.
    ///
    /// Also, see `prometheus_peer_id_prefixes`.
    ///
    /// Expect a certain CPU hit (maybe 5% higher consumption) and a bit higher
    /// memory use
    pub peer_clients: bool,
    /// Print statistics to standard output
    pub print_to_stdout: bool,
    /// Save statistics as HTML to a file
    pub write_html_to_file: bool,
    /// Path to save HTML file to
    pub html_file_path: PathBuf,
    /// Run a prometheus endpoint
    #[cfg(feature = "prometheus")]
    pub run_prometheus_endpoint: bool,
    /// Address to run prometheus endpoint on
    #[cfg(feature = "prometheus")]
    pub prometheus_endpoint_address: SocketAddr,
    /// Serve information on all peer id prefixes on the prometheus endpoint.
    ///
    /// Requires `peer_clients` to be activated.
    ///
    /// May consume quite a bit of CPU and RAM, since data on every single peer
    /// client will be reported continuously on the endpoint
    #[cfg(feature = "prometheus")]
    pub prometheus_peer_id_prefixes: bool,
}

impl StatisticsConfig {
    cfg_if! {
        if #[cfg(feature = "prometheus")] {
            pub fn active(&self) -> bool {
                (self.interval != 0) &
                    (self.print_to_stdout | self.write_html_to_file | self.run_prometheus_endpoint)
            }
        } else {
            pub fn active(&self) -> bool {
                (self.interval != 0) & (self.print_to_stdout | self.write_html_to_file)
            }
        }
    }
}

impl Default for StatisticsConfig {
    fn default() -> Self {
        Self {
            interval: 5,
            torrent_peer_histograms: false,
            peer_clients: false,
            print_to_stdout: false,
            write_html_to_file: false,
            html_file_path: "tmp/statistics.html".into(),
            #[cfg(feature = "prometheus")]
            run_prometheus_endpoint: false,
            #[cfg(feature = "prometheus")]
            prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
            #[cfg(feature = "prometheus")]
            prometheus_peer_id_prefixes: false,
        }
    }
}

#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct CleaningConfig {
    /// Clean torrents this often (seconds)
    pub torrent_cleaning_interval: u64,
    /// Allow clients to use a connection token for this long (seconds)
    pub max_connection_age: u32,
    /// Remove peers who have not announced for this long (seconds)
    pub max_peer_age: u32,
}

impl Default for CleaningConfig {
    fn default() -> Self {
        Self {
            torrent_cleaning_interval: 60 * 2,
            max_connection_age: 60 * 2,
            max_peer_age: 60 * 20,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Config;

    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
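Since `Config` derives `TomlConfig`, the commented default configuration can also be produced programmatically; a minimal sketch, equivalent in spirit to running the binary with `-p`:

```rust
use aquatic_toml_config::TomlConfig;
use aquatic_udp::config::Config;

fn main() {
    // Prints the default configuration, with the doc comments above
    // rendered as TOML comments
    print!("{}", Config::default_to_string());
}
```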
188
apps/aquatic/crates/udp/src/lib.rs
Normal file
@ -0,0 +1,188 @@
pub mod common;
pub mod config;
pub mod swarm;
pub mod workers;

use std::thread::{available_parallelism, sleep, Builder, JoinHandle};
use std::time::Duration;

use anyhow::Context;
use aquatic_common::WorkerType;
use crossbeam_channel::unbounded;
use signal_hook::consts::SIGUSR1;
use signal_hook::iterator::Signals;

use aquatic_common::access_list::update_access_list;
use aquatic_common::privileges::PrivilegeDropper;

use common::{State, Statistics};
use config::Config;
use workers::socket::ConnectionValidator;

pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker";
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");

pub fn run(mut config: Config) -> ::anyhow::Result<()> {
    let mut signals = Signals::new([SIGUSR1])?;

    if !(config.network.use_ipv4 || config.network.use_ipv6) {
        return Result::Err(anyhow::anyhow!(
            "Both use_ipv4 and use_ipv6 can not be set to false"
        ));
    }

    if config.socket_workers == 0 {
        config.socket_workers = available_parallelism().map(Into::into).unwrap_or(1);
    };

    let num_sockets_per_worker =
        if config.network.use_ipv4 { 1 } else { 0 } + if config.network.use_ipv6 { 1 } else { 0 };

    let state = State::default();
    let statistics = Statistics::new(&config);
    let connection_validator = ConnectionValidator::new(&config)?;
    let priv_dropper = PrivilegeDropper::new(
        config.privileges.clone(),
        config.socket_workers * num_sockets_per_worker,
    );
    let (statistics_sender, statistics_receiver) = unbounded();

    update_access_list(&config.access_list, &state.access_list)?;

    let mut join_handles = Vec::new();

    // Spawn socket worker threads
    for i in 0..config.socket_workers {
        let state = state.clone();
        let config = config.clone();
        let connection_validator = connection_validator.clone();
        let statistics = statistics.socket[i].clone();
        let statistics_sender = statistics_sender.clone();

        let mut priv_droppers = Vec::new();

        for _ in 0..num_sockets_per_worker {
            priv_droppers.push(priv_dropper.clone());
        }

        let handle = Builder::new()
            .name(format!("socket-{:02}", i + 1))
            .spawn(move || {
                workers::socket::run_socket_worker(
                    config,
                    state,
                    statistics,
                    statistics_sender,
                    connection_validator,
                    priv_droppers,
                )
            })
            .with_context(|| "spawn socket worker")?;

        join_handles.push((WorkerType::Socket(i), handle));
    }

    // Spawn cleaning thread
    {
        let state = state.clone();
        let config = config.clone();
        let statistics = statistics.swarm.clone();
        let statistics_sender = statistics_sender.clone();

        let handle = Builder::new().name("cleaning".into()).spawn(move || loop {
            sleep(Duration::from_secs(
                config.cleaning.torrent_cleaning_interval,
            ));

            state.torrent_maps.clean_and_update_statistics(
                &config,
                &statistics,
                &statistics_sender,
                &state.access_list,
                state.server_start_instant,
            );
        })?;

        join_handles.push((WorkerType::Cleaning, handle));
    }

    // Spawn statistics thread
    if config.statistics.active() {
        let state = state.clone();
        let config = config.clone();

        let handle = Builder::new()
            .name("statistics".into())
            .spawn(move || {
                workers::statistics::run_statistics_worker(
                    config,
                    state,
                    statistics,
                    statistics_receiver,
                )
            })
            .with_context(|| "spawn statistics worker")?;

        join_handles.push((WorkerType::Statistics, handle));
    }

    // Spawn prometheus endpoint thread
    #[cfg(feature = "prometheus")]
    if config.statistics.active() && config.statistics.run_prometheus_endpoint {
        let handle = aquatic_common::spawn_prometheus_endpoint(
            config.statistics.prometheus_endpoint_address,
            Some(Duration::from_secs(
                config.cleaning.torrent_cleaning_interval * 2,
            )),
            None,
        )?;

        join_handles.push((WorkerType::Prometheus, handle));
    }

    // Spawn signal handler thread
    {
        let config = config.clone();

        let handle: JoinHandle<anyhow::Result<()>> = Builder::new()
            .name("signals".into())
            .spawn(move || {
                for signal in &mut signals {
                    match signal {
                        SIGUSR1 => {
                            let _ = update_access_list(&config.access_list, &state.access_list);
                        }
                        _ => unreachable!(),
                    }
                }

                Ok(())
            })
            .context("spawn signal worker")?;

        join_handles.push((WorkerType::Signals, handle));
    }

    // Quit application if any worker returns or panics
    loop {
        for (i, (_, handle)) in join_handles.iter().enumerate() {
            if handle.is_finished() {
                let (worker_type, handle) = join_handles.remove(i);

                match handle.join() {
                    Ok(Ok(())) => {
                        return Err(anyhow::anyhow!("{} stopped", worker_type));
                    }
                    Ok(Err(err)) => {
                        return Err(err.context(format!("{} stopped", worker_type)));
                    }
                    Err(_) => {
                        return Err(anyhow::anyhow!("{} panicked", worker_type));
                    }
                }
            }
        }

        sleep(Duration::from_secs(5));
    }
}
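For embedding the tracker in another program rather than going through the CLI helper in `main.rs` below, a minimal sketch (assuming the default configuration is acceptable and `anyhow` is a dependency of the embedding crate):

```rust
fn main() -> anyhow::Result<()> {
    // Run with default settings; run() blocks until a worker
    // returns or panics
    aquatic_udp::run(aquatic_udp::config::Config::default())
}
```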
12
apps/aquatic/crates/udp/src/main.rs
Normal file
@ -0,0 +1,12 @@
#[cfg(feature = "mimalloc")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

fn main() {
    aquatic_common::cli::run_app_with_cli_and_config::<aquatic_udp::config::Config>(
        aquatic_udp::APP_NAME,
        aquatic_udp::APP_VERSION,
        aquatic_udp::run,
        None,
    )
}
706
apps/aquatic/crates/udp/src/swarm.rs
Normal file
@ -0,0 +1,706 @@
|
||||
use std::iter::repeat_with;
|
||||
use std::net::IpAddr;
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use aquatic_common::SecondsSinceServerStart;
|
||||
use aquatic_common::ServerStartInstant;
|
||||
use aquatic_common::{
|
||||
access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache, AccessListMode},
|
||||
ValidUntil,
|
||||
};
|
||||
use aquatic_common::{CanonicalSocketAddr, IndexMap};
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
use arrayvec::ArrayVec;
|
||||
use crossbeam_channel::Sender;
|
||||
use hashbrown::HashMap;
|
||||
use hdrhistogram::Histogram;
|
||||
use parking_lot::RwLockUpgradableReadGuard;
|
||||
use rand::prelude::SmallRng;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
const SMALL_PEER_MAP_CAPACITY: usize = 2;
|
||||
|
||||
use aquatic_udp_protocol::InfoHash;
|
||||
use parking_lot::RwLock;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TorrentMaps {
|
||||
ipv4: TorrentMapShards<Ipv4AddrBytes>,
|
||||
ipv6: TorrentMapShards<Ipv6AddrBytes>,
|
||||
}
|
||||
|
||||
impl Default for TorrentMaps {
|
||||
fn default() -> Self {
|
||||
const NUM_SHARDS: usize = 16;
|
||||
|
||||
Self {
|
||||
ipv4: TorrentMapShards::new(NUM_SHARDS),
|
||||
ipv6: TorrentMapShards::new(NUM_SHARDS),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TorrentMaps {
|
||||
pub fn announce(
|
||||
&self,
|
||||
config: &Config,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
rng: &mut SmallRng,
|
||||
request: &AnnounceRequest,
|
||||
src: CanonicalSocketAddr,
|
||||
valid_until: ValidUntil,
|
||||
) -> Response {
|
||||
match src.get().ip() {
|
||||
IpAddr::V4(ip_address) => Response::AnnounceIpv4(self.ipv4.announce(
|
||||
config,
|
||||
statistics_sender,
|
||||
rng,
|
||||
request,
|
||||
ip_address.into(),
|
||||
valid_until,
|
||||
)),
|
||||
IpAddr::V6(ip_address) => Response::AnnounceIpv6(self.ipv6.announce(
|
||||
config,
|
||||
statistics_sender,
|
||||
rng,
|
||||
request,
|
||||
ip_address.into(),
|
||||
valid_until,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scrape(&self, request: ScrapeRequest, src: CanonicalSocketAddr) -> ScrapeResponse {
|
||||
if src.is_ipv4() {
|
||||
self.ipv4.scrape(request)
|
||||
} else {
|
||||
self.ipv6.scrape(request)
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove forbidden or inactive torrents, reclaim space and update statistics
|
||||
pub fn clean_and_update_statistics(
|
||||
&self,
|
||||
config: &Config,
|
||||
statistics: &CachePaddedArc<IpVersionStatistics<SwarmWorkerStatistics>>,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
access_list: &Arc<AccessListArcSwap>,
|
||||
server_start_instant: ServerStartInstant,
|
||||
) {
|
||||
let mut cache = create_access_list_cache(access_list);
|
||||
let mode = config.access_list.mode;
|
||||
let now = server_start_instant.seconds_elapsed();
|
||||
|
||||
let mut statistics_messages = Vec::new();
|
||||
|
||||
let ipv4 = self.ipv4.clean_and_get_statistics(
|
||||
config,
|
||||
&mut statistics_messages,
|
||||
&mut cache,
|
||||
mode,
|
||||
now,
|
||||
);
|
||||
let ipv6 = self.ipv6.clean_and_get_statistics(
|
||||
config,
|
||||
&mut statistics_messages,
|
||||
&mut cache,
|
||||
mode,
|
||||
now,
|
||||
);
|
||||
|
||||
if config.statistics.active() {
|
||||
statistics.ipv4.torrents.store(ipv4.0, Ordering::Relaxed);
|
||||
statistics.ipv6.torrents.store(ipv6.0, Ordering::Relaxed);
|
||||
statistics.ipv4.peers.store(ipv4.1, Ordering::Relaxed);
|
||||
statistics.ipv6.peers.store(ipv6.1, Ordering::Relaxed);
|
||||
|
||||
if let Some(message) = ipv4.2 {
|
||||
statistics_messages.push(StatisticsMessage::Ipv4PeerHistogram(message));
|
||||
}
|
||||
if let Some(message) = ipv6.2 {
|
||||
statistics_messages.push(StatisticsMessage::Ipv6PeerHistogram(message));
|
||||
}
|
||||
|
||||
for message in statistics_messages {
|
||||
if let Err(err) = statistics_sender.try_send(message) {
|
||||
::log::error!("couldn't send statistics message: {:#}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TorrentMapShards<I: Ip>(Arc<[RwLock<TorrentMapShard<I>>]>);
|
||||
|
||||
impl<I: Ip> TorrentMapShards<I> {
|
||||
fn new(num_shards: usize) -> Self {
|
||||
Self(
|
||||
repeat_with(Default::default)
|
||||
.take(num_shards)
|
||||
.collect::<Vec<_>>()
|
||||
.into_boxed_slice()
|
||||
.into(),
|
||||
)
|
||||
}
|
||||
|
||||
fn announce(
|
||||
&self,
|
||||
config: &Config,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
rng: &mut SmallRng,
|
||||
request: &AnnounceRequest,
|
||||
ip_address: I,
|
||||
valid_until: ValidUntil,
|
||||
) -> AnnounceResponse<I> {
|
||||
let torrent_data = {
|
||||
let torrent_map_shard = self.get_shard(&request.info_hash).upgradable_read();
|
||||
|
||||
// Clone Arc here to avoid keeping lock on whole shard
|
||||
if let Some(torrent_data) = torrent_map_shard.get(&request.info_hash) {
|
||||
torrent_data.clone()
|
||||
} else {
|
||||
// Don't overwrite entry if created in the meantime
|
||||
RwLockUpgradableReadGuard::upgrade(torrent_map_shard)
|
||||
.entry(request.info_hash)
|
||||
.or_default()
|
||||
.clone()
|
||||
}
|
||||
};
|
||||
|
||||
let mut peer_map = torrent_data.peer_map.write();
|
||||
|
||||
peer_map.announce(
|
||||
config,
|
||||
statistics_sender,
|
||||
rng,
|
||||
request,
|
||||
ip_address,
|
||||
valid_until,
|
||||
)
|
||||
}

    fn scrape(&self, request: ScrapeRequest) -> ScrapeResponse {
        let mut response = ScrapeResponse {
            transaction_id: request.transaction_id,
            torrent_stats: Vec::with_capacity(request.info_hashes.len()),
        };

        for info_hash in request.info_hashes {
            let torrent_map_shard = self.get_shard(&info_hash);

            let statistics = if let Some(torrent_data) = torrent_map_shard.read().get(&info_hash) {
                torrent_data.peer_map.read().scrape_statistics()
            } else {
                TorrentScrapeStatistics {
                    seeders: NumberOfPeers::new(0),
                    leechers: NumberOfPeers::new(0),
                    completed: NumberOfDownloads::new(0),
                }
            };

            response.torrent_stats.push(statistics);
        }

        response
    }

    fn clean_and_get_statistics(
        &self,
        config: &Config,
        statistics_messages: &mut Vec<StatisticsMessage>,
        access_list_cache: &mut AccessListCache,
        access_list_mode: AccessListMode,
        now: SecondsSinceServerStart,
    ) -> (usize, usize, Option<Histogram<u64>>) {
        let mut total_num_torrents = 0;
        let mut total_num_peers = 0;

        let mut opt_histogram: Option<Histogram<u64>> = config
            .statistics
            .torrent_peer_histograms
            .then(|| Histogram::new(3).expect("create peer histogram"));

        for torrent_map_shard in self.0.iter() {
            for torrent_data in torrent_map_shard.read().values() {
                let mut peer_map = torrent_data.peer_map.write();

                let num_peers = match peer_map.deref_mut() {
                    PeerMap::Small(small_peer_map) => {
                        small_peer_map.clean_and_get_num_peers(config, statistics_messages, now)
                    }
                    PeerMap::Large(large_peer_map) => {
                        let num_peers = large_peer_map.clean_and_get_num_peers(
                            config,
                            statistics_messages,
                            now,
                        );

                        if let Some(small_peer_map) = large_peer_map.try_shrink() {
                            *peer_map = PeerMap::Small(small_peer_map);
                        }

                        num_peers
                    }
                };

                drop(peer_map);

                match opt_histogram.as_mut() {
                    Some(histogram) if num_peers > 0 => {
                        if let Err(err) = histogram.record(num_peers as u64) {
                            ::log::error!("Couldn't record {} to histogram: {:#}", num_peers, err);
                        }
                    }
                    _ => (),
                }

                total_num_peers += num_peers;

                torrent_data
                    .pending_removal
                    .store(num_peers == 0, Ordering::Release);
            }

            let mut torrent_map_shard = torrent_map_shard.write();

            torrent_map_shard.retain(|info_hash, torrent_data| {
                if !access_list_cache
                    .load()
                    .allows(access_list_mode, &info_hash.0)
                {
                    return false;
                }

                // Check pending_removal flag set in previous cleaning step. This
                // prevents us from removing TorrentData entries that were just
                // added but do not yet contain any peers. Also double-check that
                // no peers have been added since we last checked.
                if torrent_data
                    .pending_removal
                    .fetch_and(false, Ordering::Acquire)
                    && torrent_data.peer_map.read().is_empty()
                {
                    return false;
                }

                true
            });

            torrent_map_shard.shrink_to_fit();

            total_num_torrents += torrent_map_shard.len();
        }

        (total_num_torrents, total_num_peers, opt_histogram)
    }

    fn get_shard(&self, info_hash: &InfoHash) -> &RwLock<TorrentMapShard<I>> {
        self.0.get(info_hash.0[0] as usize % self.0.len()).unwrap()
    }
}
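
// Shard selection in `get_shard` above is a simple modulo over the first
// info-hash byte. Illustration (hypothetical values): with 4 shards, an info
// hash whose first byte is 0x2a (42) maps to shard 42 % 4 == 2. Since info
// hashes are effectively uniformly distributed, this spreads torrents evenly
// across shards without any extra hashing.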

/// Use HashMap instead of IndexMap for better lookup performance
type TorrentMapShard<T> = HashMap<InfoHash, Arc<TorrentData<T>>>;

pub struct TorrentData<T: Ip> {
    peer_map: RwLock<PeerMap<T>>,
    pending_removal: AtomicBool,
}

impl<I: Ip> Default for TorrentData<I> {
    fn default() -> Self {
        Self {
            peer_map: Default::default(),
            pending_removal: Default::default(),
        }
    }
}

pub enum PeerMap<I: Ip> {
    Small(SmallPeerMap<I>),
    Large(LargePeerMap<I>),
}

impl<I: Ip> PeerMap<I> {
    fn announce(
        &mut self,
        config: &Config,
        statistics_sender: &Sender<StatisticsMessage>,
        rng: &mut SmallRng,
        request: &AnnounceRequest,
        ip_address: I,
        valid_until: ValidUntil,
    ) -> AnnounceResponse<I> {
        let max_num_peers_to_take: usize = if request.peers_wanted.0.get() <= 0 {
            config.protocol.max_response_peers
        } else {
            ::std::cmp::min(
                config.protocol.max_response_peers,
                request.peers_wanted.0.get().try_into().unwrap(),
            )
        };

        let status =
            PeerStatus::from_event_and_bytes_left(request.event.into(), request.bytes_left);

        let peer_map_key = ResponsePeer {
            ip_address,
            port: request.port,
        };

        // Create the response before inserting the peer. This means that we
        // don't have to filter it out from the response peers, and that the
        // reported number of seeders/leechers will not include it
        let (response, opt_removed_peer) = match self {
            Self::Small(peer_map) => {
                let opt_removed_peer = peer_map.remove(&peer_map_key);

                let (seeders, leechers) = peer_map.num_seeders_leechers();

                let response = AnnounceResponse {
                    fixed: AnnounceResponseFixedData {
                        transaction_id: request.transaction_id,
                        announce_interval: AnnounceInterval::new(
                            config.protocol.peer_announce_interval,
                        ),
                        leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
                        seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
                    },
                    peers: peer_map.extract_response_peers(max_num_peers_to_take),
                };

                // Convert peer map to large variant if it is full and
                // announcing peer is not stopped and will therefore be
                // inserted
                if peer_map.is_full() && status != PeerStatus::Stopped {
                    *self = Self::Large(peer_map.to_large());
                }

                (response, opt_removed_peer)
            }
            Self::Large(peer_map) => {
                let opt_removed_peer = peer_map.remove_peer(&peer_map_key);

                let (seeders, leechers) = peer_map.num_seeders_leechers();

                let response = AnnounceResponse {
                    fixed: AnnounceResponseFixedData {
                        transaction_id: request.transaction_id,
                        announce_interval: AnnounceInterval::new(
                            config.protocol.peer_announce_interval,
                        ),
                        leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
                        seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
                    },
                    peers: peer_map.extract_response_peers(rng, max_num_peers_to_take),
                };

                // Try shrinking the map if announcing peer is stopped and
                // will therefore not be inserted
                if status == PeerStatus::Stopped {
                    if let Some(peer_map) = peer_map.try_shrink() {
                        *self = Self::Small(peer_map);
                    }
                }

                (response, opt_removed_peer)
            }
        };

        match status {
            PeerStatus::Leeching | PeerStatus::Seeding => {
                let peer = Peer {
                    peer_id: request.peer_id,
                    is_seeder: status == PeerStatus::Seeding,
                    valid_until,
                };

                match self {
                    Self::Small(peer_map) => peer_map.insert(peer_map_key, peer),
                    Self::Large(peer_map) => peer_map.insert(peer_map_key, peer),
                }

                if config.statistics.peer_clients && opt_removed_peer.is_none() {
                    statistics_sender
                        .try_send(StatisticsMessage::PeerAdded(request.peer_id))
                        .expect("statistics channel should be unbounded");
                }
            }
            PeerStatus::Stopped => {
                if config.statistics.peer_clients && opt_removed_peer.is_some() {
                    statistics_sender
                        .try_send(StatisticsMessage::PeerRemoved(request.peer_id))
                        .expect("statistics channel should be unbounded");
                }
            }
        };

        response
    }

    fn scrape_statistics(&self) -> TorrentScrapeStatistics {
        let (seeders, leechers) = match self {
            Self::Small(peer_map) => peer_map.num_seeders_leechers(),
            Self::Large(peer_map) => peer_map.num_seeders_leechers(),
        };

        TorrentScrapeStatistics {
            seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
            leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
            completed: NumberOfDownloads::new(0),
        }
    }

    fn is_empty(&self) -> bool {
        match self {
            Self::Small(peer_map) => peer_map.0.is_empty(),
            Self::Large(peer_map) => peer_map.peers.is_empty(),
        }
    }
}

impl<I: Ip> Default for PeerMap<I> {
    fn default() -> Self {
        Self::Small(SmallPeerMap(ArrayVec::default()))
    }
}

/// Store torrents with up to two peers without an extra heap allocation
///
/// On public open trackers, this is likely to be the majority of torrents.
#[derive(Default, Debug)]
pub struct SmallPeerMap<I: Ip>(ArrayVec<(ResponsePeer<I>, Peer), SMALL_PEER_MAP_CAPACITY>);

impl<I: Ip> SmallPeerMap<I> {
    fn is_full(&self) -> bool {
        self.0.is_full()
    }

    fn num_seeders_leechers(&self) -> (usize, usize) {
        let seeders = self.0.iter().filter(|(_, p)| p.is_seeder).count();
        let leechers = self.0.len() - seeders;

        (seeders, leechers)
    }

    fn insert(&mut self, key: ResponsePeer<I>, peer: Peer) {
        self.0.push((key, peer));
    }

    fn remove(&mut self, key: &ResponsePeer<I>) -> Option<Peer> {
        for (i, (k, _)) in self.0.iter().enumerate() {
            if k == key {
                return Some(self.0.remove(i).1);
            }
        }

        None
    }

    fn extract_response_peers(&self, max_num_peers_to_take: usize) -> Vec<ResponsePeer<I>> {
        Vec::from_iter(self.0.iter().take(max_num_peers_to_take).map(|(k, _)| *k))
    }

    fn clean_and_get_num_peers(
        &mut self,
        config: &Config,
        statistics_messages: &mut Vec<StatisticsMessage>,
        now: SecondsSinceServerStart,
    ) -> usize {
        self.0.retain(|(_, peer)| {
            let keep = peer.valid_until.valid(now);

            if !keep && config.statistics.peer_clients {
                statistics_messages.push(StatisticsMessage::PeerRemoved(peer.peer_id));
            }

            keep
        });

        self.0.len()
    }

    fn to_large(&self) -> LargePeerMap<I> {
        let (num_seeders, _) = self.num_seeders_leechers();
        let peers = self.0.iter().copied().collect();

        LargePeerMap { peers, num_seeders }
    }
}

#[derive(Default)]
pub struct LargePeerMap<I: Ip> {
    peers: IndexMap<ResponsePeer<I>, Peer>,
    num_seeders: usize,
}
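
// Invariant note: `num_seeders` is a cached count kept in sync by `insert`,
// `remove_peer` and `clean_and_get_num_peers` below, so seeder/leecher counts
// for announce and scrape responses are O(1) instead of requiring a scan over
// the whole peer map.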

impl<I: Ip> LargePeerMap<I> {
    fn num_seeders_leechers(&self) -> (usize, usize) {
        (self.num_seeders, self.peers.len() - self.num_seeders)
    }

    fn insert(&mut self, key: ResponsePeer<I>, peer: Peer) {
        if peer.is_seeder {
            self.num_seeders += 1;
        }

        self.peers.insert(key, peer);
    }

    fn remove_peer(&mut self, key: &ResponsePeer<I>) -> Option<Peer> {
        let opt_removed_peer = self.peers.swap_remove(key);

        if let Some(Peer {
            is_seeder: true, ..
        }) = opt_removed_peer
        {
            self.num_seeders -= 1;
        }

        opt_removed_peer
    }

    /// Extract response peers
    ///
    /// If there are more peers in map than `max_num_peers_to_take`, do a
    /// random selection of peers from first and second halves of map in
    /// order to avoid returning too homogeneous peers. This is a lot more
    /// cache-friendly than doing a fully random selection.
    fn extract_response_peers(
        &self,
        rng: &mut impl Rng,
        max_num_peers_to_take: usize,
    ) -> Vec<ResponsePeer<I>> {
        if self.peers.len() <= max_num_peers_to_take {
            self.peers.keys().copied().collect()
        } else {
            let middle_index = self.peers.len() / 2;
            let num_to_take_per_half = max_num_peers_to_take / 2;

            let offset_half_one = {
                let from = 0;
                let to = usize::max(1, middle_index - num_to_take_per_half);

                rng.gen_range(from..to)
            };
            let offset_half_two = {
                let from = middle_index;
                let to = usize::max(middle_index + 1, self.peers.len() - num_to_take_per_half);

                rng.gen_range(from..to)
            };

            let end_half_one = offset_half_one + num_to_take_per_half;
            let end_half_two = offset_half_two + num_to_take_per_half;

            let mut peers = Vec::with_capacity(max_num_peers_to_take);

            if let Some(slice) = self.peers.get_range(offset_half_one..end_half_one) {
                peers.extend(slice.keys().copied());
            }
            if let Some(slice) = self.peers.get_range(offset_half_two..end_half_two) {
                peers.extend(slice.keys().copied());
            }

            peers
        }
    }
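
    // Worked example for the selection above (illustrative numbers): with
    // 100 peers and max_num_peers_to_take = 20, num_to_take_per_half is 10
    // and middle_index is 50. offset_half_one is drawn from 0..40 and
    // offset_half_two from 50..90, so each half contributes one contiguous
    // run of 10 peers. Reading contiguous runs out of the IndexMap is what
    // makes this cheaper than 20 independent random lookups.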

    fn clean_and_get_num_peers(
        &mut self,
        config: &Config,
        statistics_messages: &mut Vec<StatisticsMessage>,
        now: SecondsSinceServerStart,
    ) -> usize {
        self.peers.retain(|_, peer| {
            let keep = peer.valid_until.valid(now);

            if !keep {
                if peer.is_seeder {
                    self.num_seeders -= 1;
                }
                if config.statistics.peer_clients {
                    statistics_messages.push(StatisticsMessage::PeerRemoved(peer.peer_id));
                }
            }

            keep
        });

        if !self.peers.is_empty() {
            self.peers.shrink_to_fit();
        }

        self.peers.len()
    }

    fn try_shrink(&mut self) -> Option<SmallPeerMap<I>> {
        (self.peers.len() <= SMALL_PEER_MAP_CAPACITY).then(|| {
            SmallPeerMap(ArrayVec::from_iter(
                self.peers.iter().map(|(k, v)| (*k, *v)),
            ))
        })
    }
}

#[derive(Clone, Copy, Debug)]
struct Peer {
    peer_id: PeerId,
    is_seeder: bool,
    valid_until: ValidUntil,
}

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum PeerStatus {
    Seeding,
    Leeching,
    Stopped,
}

impl PeerStatus {
    /// Determine peer status from announce event and number of bytes left.
    ///
    /// Likely, the last branch will be taken most of the time.
    #[inline]
    pub fn from_event_and_bytes_left(event: AnnounceEvent, bytes_left: NumberOfBytes) -> Self {
        if event == AnnounceEvent::Stopped {
            Self::Stopped
        } else if bytes_left.0.get() == 0 {
            Self::Seeding
        } else {
            Self::Leeching
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_peer_status_from_event_and_bytes_left() {
        use PeerStatus::*;

        let f = PeerStatus::from_event_and_bytes_left;

        assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes::new(0)));
        assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes::new(1)));

        assert_eq!(Seeding, f(AnnounceEvent::Started, NumberOfBytes::new(0)));
        assert_eq!(Leeching, f(AnnounceEvent::Started, NumberOfBytes::new(1)));

        assert_eq!(Seeding, f(AnnounceEvent::Completed, NumberOfBytes::new(0)));
        assert_eq!(Leeching, f(AnnounceEvent::Completed, NumberOfBytes::new(1)));

        assert_eq!(Seeding, f(AnnounceEvent::None, NumberOfBytes::new(0)));
        assert_eq!(Leeching, f(AnnounceEvent::None, NumberOfBytes::new(1)));
    }
}
2
apps/aquatic/crates/udp/src/workers/mod.rs
Normal file
@ -0,0 +1,2 @@
pub mod socket;
pub mod statistics;
194
apps/aquatic/crates/udp/src/workers/socket/mio/mod.rs
Normal file
@ -0,0 +1,194 @@
mod socket;

use std::time::Duration;

use anyhow::Context;
use aquatic_common::access_list::AccessListCache;
use crossbeam_channel::Sender;
use mio::{Events, Interest, Poll, Token};

use aquatic_common::{
    access_list::create_access_list_cache, privileges::PrivilegeDropper, CanonicalSocketAddr,
    ValidUntil,
};
use aquatic_udp_protocol::*;
use rand::rngs::SmallRng;
use rand::SeedableRng;

use crate::common::*;
use crate::config::Config;

use socket::Socket;

use super::validator::ConnectionValidator;
use super::{EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};

const TOKEN_V4: Token = Token(0);
const TOKEN_V6: Token = Token(1);

pub fn run(
    config: Config,
    shared_state: State,
    statistics: CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>,
    statistics_sender: Sender<StatisticsMessage>,
    validator: ConnectionValidator,
    mut priv_droppers: Vec<PrivilegeDropper>,
) -> anyhow::Result<()> {
    let mut opt_socket_ipv4 = if config.network.use_ipv4 {
        let priv_dropper = priv_droppers.pop().expect("not enough privilege droppers");

        Some(Socket::<self::socket::Ipv4>::create(&config, priv_dropper)?)
    } else {
        None
    };
    let mut opt_socket_ipv6 = if config.network.use_ipv6 {
        let priv_dropper = priv_droppers.pop().expect("not enough privilege droppers");

        Some(Socket::<self::socket::Ipv6>::create(&config, priv_dropper)?)
    } else {
        None
    };

    let access_list_cache = create_access_list_cache(&shared_state.access_list);
    let peer_valid_until = ValidUntil::new(
        shared_state.server_start_instant,
        config.cleaning.max_peer_age,
    );

    let mut shared = WorkerSharedData {
        config,
        shared_state,
        statistics,
        statistics_sender,
        validator,
        access_list_cache,
        buffer: [0; BUFFER_SIZE],
        rng: SmallRng::from_entropy(),
        peer_valid_until,
    };

    let mut events = Events::with_capacity(2);
    let mut poll = Poll::new().context("create poll")?;

    if let Some(socket) = opt_socket_ipv4.as_mut() {
        poll.registry()
            .register(&mut socket.socket, TOKEN_V4, Interest::READABLE)
            .context("register poll")?;
    }
    if let Some(socket) = opt_socket_ipv6.as_mut() {
        poll.registry()
            .register(&mut socket.socket, TOKEN_V6, Interest::READABLE)
            .context("register poll")?;
    }

    let poll_timeout = Duration::from_millis(shared.config.network.poll_timeout_ms);

    let mut iter_counter = 0u64;

    loop {
        poll.poll(&mut events, Some(poll_timeout)).context("poll")?;

        for event in events.iter() {
            if event.is_readable() {
                match event.token() {
                    TOKEN_V4 => {
                        if let Some(socket) = opt_socket_ipv4.as_mut() {
                            socket.read_and_handle_requests(&mut shared);
                        }
                    }
                    TOKEN_V6 => {
                        if let Some(socket) = opt_socket_ipv6.as_mut() {
                            socket.read_and_handle_requests(&mut shared);
                        }
                    }
                    _ => (),
                }
            }
        }

        if let Some(socket) = opt_socket_ipv4.as_mut() {
            socket.resend_failed(&mut shared);
        }
        if let Some(socket) = opt_socket_ipv6.as_mut() {
            socket.resend_failed(&mut shared);
        }

        if iter_counter % 256 == 0 {
            shared.validator.update_elapsed();

            shared.peer_valid_until = ValidUntil::new(
                shared.shared_state.server_start_instant,
                shared.config.cleaning.max_peer_age,
            );
        }

        iter_counter = iter_counter.wrapping_add(1);
    }
}
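
// Timekeeping note: the validator and peer_valid_until refreshes above run on
// every 256th iteration of the poll loop, i.e. at most roughly
// 256 * poll_timeout apart when idle (sooner under load), which keeps clock
// reads out of the per-iteration hot path.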

pub struct WorkerSharedData {
    config: Config,
    shared_state: State,
    statistics: CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>,
    statistics_sender: Sender<StatisticsMessage>,
    access_list_cache: AccessListCache,
    validator: ConnectionValidator,
    buffer: [u8; BUFFER_SIZE],
    rng: SmallRng,
    peer_valid_until: ValidUntil,
}

impl WorkerSharedData {
    fn handle_request(&mut self, request: Request, src: CanonicalSocketAddr) -> Option<Response> {
        let access_list_mode = self.config.access_list.mode;

        match request {
            Request::Connect(request) => {
                return Some(Response::Connect(ConnectResponse {
                    connection_id: self.validator.create_connection_id(src),
                    transaction_id: request.transaction_id,
                }));
            }
            Request::Announce(request) => {
                if self
                    .validator
                    .connection_id_valid(src, request.connection_id)
                {
                    if self
                        .access_list_cache
                        .load()
                        .allows(access_list_mode, &request.info_hash.0)
                    {
                        let response = self.shared_state.torrent_maps.announce(
                            &self.config,
                            &self.statistics_sender,
                            &mut self.rng,
                            &request,
                            src,
                            self.peer_valid_until,
                        );

                        return Some(response);
                    } else {
                        return Some(Response::Error(ErrorResponse {
                            transaction_id: request.transaction_id,
                            message: "Info hash not allowed".into(),
                        }));
                    }
                }
            }
            Request::Scrape(request) => {
                if self
                    .validator
                    .connection_id_valid(src, request.connection_id)
                {
                    return Some(Response::Scrape(
                        self.shared_state.torrent_maps.scrape(request, src),
                    ));
                }
            }
        }

        None
    }
}
323
apps/aquatic/crates/udp/src/workers/socket/mio/socket.rs
Normal file
@ -0,0 +1,323 @@
use std::io::{Cursor, ErrorKind};
use std::marker::PhantomData;
use std::sync::atomic::Ordering;

use anyhow::Context;
use mio::net::UdpSocket;
use socket2::{Domain, Protocol, Type};

use aquatic_common::{privileges::PrivilegeDropper, CanonicalSocketAddr};
use aquatic_udp_protocol::*;

use crate::config::Config;

use super::{WorkerSharedData, EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};

pub trait IpVersion {
    fn is_v4() -> bool;
}

#[derive(Clone, Copy, Debug)]
pub struct Ipv4;

impl IpVersion for Ipv4 {
    fn is_v4() -> bool {
        true
    }
}

#[derive(Clone, Copy, Debug)]
pub struct Ipv6;

impl IpVersion for Ipv6 {
    fn is_v4() -> bool {
        false
    }
}

pub struct Socket<V> {
    pub socket: UdpSocket,
    opt_resend_buffer: Option<Vec<(CanonicalSocketAddr, Response)>>,
    phantom_data: PhantomData<V>,
}

impl Socket<Ipv4> {
    pub fn create(config: &Config, priv_dropper: PrivilegeDropper) -> anyhow::Result<Self> {
        let socket = socket2::Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?;

        socket
            .set_reuse_port(true)
            .with_context(|| "socket: set reuse port")?;
        socket
            .set_nonblocking(true)
            .with_context(|| "socket: set nonblocking")?;

        let recv_buffer_size = config.network.socket_recv_buffer_size;

        if recv_buffer_size != 0 {
            if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) {
                ::log::error!(
                    "socket: failed setting recv buffer to {}: {:?}",
                    recv_buffer_size,
                    err
                );
            }
        }

        socket
            .bind(&config.network.address_ipv4.into())
            .with_context(|| format!("socket: bind to {}", config.network.address_ipv4))?;

        priv_dropper.after_socket_creation()?;

        let mut s = Self {
            socket: UdpSocket::from_std(::std::net::UdpSocket::from(socket)),
            opt_resend_buffer: None,
            phantom_data: Default::default(),
        };

        if config.network.resend_buffer_max_len > 0 {
            s.opt_resend_buffer = Some(Vec::new());
        }

        Ok(s)
    }
}

impl Socket<Ipv6> {
    pub fn create(config: &Config, priv_dropper: PrivilegeDropper) -> anyhow::Result<Self> {
        let socket = socket2::Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))?;

        if config.network.set_only_ipv6 {
            socket
                .set_only_v6(true)
                .with_context(|| "socket: set only ipv6")?;
        }
        socket
            .set_reuse_port(true)
            .with_context(|| "socket: set reuse port")?;
        socket
            .set_nonblocking(true)
            .with_context(|| "socket: set nonblocking")?;

        let recv_buffer_size = config.network.socket_recv_buffer_size;

        if recv_buffer_size != 0 {
            if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) {
                ::log::error!(
                    "socket: failed setting recv buffer to {}: {:?}",
                    recv_buffer_size,
                    err
                );
            }
        }

        socket
            .bind(&config.network.address_ipv6.into())
            .with_context(|| format!("socket: bind to {}", config.network.address_ipv6))?;

        priv_dropper.after_socket_creation()?;

        let mut s = Self {
            socket: UdpSocket::from_std(::std::net::UdpSocket::from(socket)),
            opt_resend_buffer: None,
            phantom_data: Default::default(),
        };

        if config.network.resend_buffer_max_len > 0 {
            s.opt_resend_buffer = Some(Vec::new());
        }

        Ok(s)
    }
}

impl<V: IpVersion> Socket<V> {
    pub fn read_and_handle_requests(&mut self, shared: &mut WorkerSharedData) {
        let max_scrape_torrents = shared.config.protocol.max_scrape_torrents;

        loop {
            match self.socket.recv_from(&mut shared.buffer[..]) {
                Ok((bytes_read, src)) => {
                    let src_port = src.port();
                    let src = CanonicalSocketAddr::new(src);

                    // Use canonical address for statistics
                    let opt_statistics = if shared.config.statistics.active() {
                        if src.is_ipv4() {
                            let statistics = &shared.statistics.ipv4;

                            statistics
                                .bytes_received
                                .fetch_add(bytes_read + EXTRA_PACKET_SIZE_IPV4, Ordering::Relaxed);

                            Some(statistics)
                        } else {
                            let statistics = &shared.statistics.ipv6;

                            statistics
                                .bytes_received
                                .fetch_add(bytes_read + EXTRA_PACKET_SIZE_IPV6, Ordering::Relaxed);

                            Some(statistics)
                        }
                    } else {
                        None
                    };

                    if src_port == 0 {
                        ::log::debug!("Ignored request because source port is zero");

                        continue;
                    }

                    match Request::parse_bytes(&shared.buffer[..bytes_read], max_scrape_torrents) {
                        Ok(request) => {
                            if let Some(statistics) = opt_statistics {
                                statistics.requests.fetch_add(1, Ordering::Relaxed);
                            }

                            if let Some(response) = shared.handle_request(request, src) {
                                self.send_response(shared, src, response, false);
                            }
                        }
                        Err(RequestParseError::Sendable {
                            connection_id,
                            transaction_id,
                            err,
                        }) if shared.validator.connection_id_valid(src, connection_id) => {
                            let response = ErrorResponse {
                                transaction_id,
                                message: err.into(),
                            };

                            self.send_response(shared, src, Response::Error(response), false);

                            ::log::debug!("request parse error (sent error response): {:?}", err);
                        }
                        Err(err) => {
                            ::log::debug!(
                                "request parse error (didn't send error response): {:?}",
                                err
                            );
                        }
                    };
                }
                Err(err) if err.kind() == ErrorKind::WouldBlock => {
                    break;
                }
                Err(err) => {
                    ::log::warn!("recv_from error: {:#}", err);
                }
            }
        }
    }

    pub fn send_response(
        &mut self,
        shared: &mut WorkerSharedData,
        canonical_addr: CanonicalSocketAddr,
        response: Response,
        disable_resend_buffer: bool,
    ) {
        let mut buffer = Cursor::new(&mut shared.buffer[..]);

        if let Err(err) = response.write_bytes(&mut buffer) {
            ::log::error!("failed writing response to buffer: {:#}", err);

            return;
        }

        let bytes_written = buffer.position() as usize;

        let addr = if V::is_v4() {
            canonical_addr
                .get_ipv4()
                .expect("found peer ipv6 address while running bound to ipv4 address")
        } else {
            canonical_addr.get_ipv6_mapped()
        };

        match self
            .socket
            .send_to(&buffer.into_inner()[..bytes_written], addr)
        {
            Ok(bytes_sent) if shared.config.statistics.active() => {
                let stats = if canonical_addr.is_ipv4() {
                    let stats = &shared.statistics.ipv4;

                    stats
                        .bytes_sent
                        .fetch_add(bytes_sent + EXTRA_PACKET_SIZE_IPV4, Ordering::Relaxed);

                    stats
                } else {
                    let stats = &shared.statistics.ipv6;

                    stats
                        .bytes_sent
                        .fetch_add(bytes_sent + EXTRA_PACKET_SIZE_IPV6, Ordering::Relaxed);

                    stats
                };

                match response {
                    Response::Connect(_) => {
                        stats.responses_connect.fetch_add(1, Ordering::Relaxed);
                    }
                    Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => {
                        stats.responses_announce.fetch_add(1, Ordering::Relaxed);
                    }
                    Response::Scrape(_) => {
                        stats.responses_scrape.fetch_add(1, Ordering::Relaxed);
                    }
                    Response::Error(_) => {
                        stats.responses_error.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
            Ok(_) => (),
            Err(err) => match self.opt_resend_buffer.as_mut() {
                Some(resend_buffer)
                    if !disable_resend_buffer
                        && ((err.raw_os_error() == Some(libc::ENOBUFS))
                            || (err.kind() == ErrorKind::WouldBlock)) =>
                {
                    if resend_buffer.len() < shared.config.network.resend_buffer_max_len {
                        ::log::debug!("Adding response to resend queue, since sending it to {} failed with: {:#}", addr, err);

                        resend_buffer.push((canonical_addr, response));
                    } else {
                        ::log::warn!("Response resend buffer full, dropping response");
                    }
                }
                _ => {
                    ::log::warn!("Sending response to {} failed: {:#}", addr, err);
                }
            },
        }

        ::log::debug!("send response fn finished");
    }

    /// If resend buffer is enabled, send any responses in it
    pub fn resend_failed(&mut self, shared: &mut WorkerSharedData) {
        if self.opt_resend_buffer.is_some() {
            let mut tmp_resend_buffer = Vec::new();

            // Do memory swap shenanigans to get around false positive in
            // borrow checker regarding double mut borrowing of self

            if let Some(resend_buffer) = self.opt_resend_buffer.as_mut() {
                ::std::mem::swap(resend_buffer, &mut tmp_resend_buffer);
            }

            for (addr, response) in tmp_resend_buffer.drain(..) {
                self.send_response(shared, addr, response, true);
            }

            if let Some(resend_buffer) = self.opt_resend_buffer.as_mut() {
                ::std::mem::swap(resend_buffer, &mut tmp_resend_buffer);
            }
        }
    }
}
71
apps/aquatic/crates/udp/src/workers/socket/mod.rs
Normal file
@ -0,0 +1,71 @@
mod mio;
#[cfg(all(target_os = "linux", feature = "io-uring"))]
mod uring;
mod validator;

use aquatic_common::privileges::PrivilegeDropper;
use crossbeam_channel::Sender;

use crate::{
    common::{
        CachePaddedArc, IpVersionStatistics, SocketWorkerStatistics, State, StatisticsMessage,
    },
    config::Config,
};

pub use self::validator::ConnectionValidator;

#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
compile_error!("io_uring feature is only supported on Linux");

/// Bytes of data transmitted when sending an IPv4 UDP packet, in addition to payload size
///
/// Consists of:
/// - 8 byte ethernet preamble
/// - 14 + 4 byte MAC header and checksum
/// - 20 byte IPv4 header
/// - 8 byte UDP header
const EXTRA_PACKET_SIZE_IPV4: usize = 8 + 18 + 20 + 8;

/// Bytes of data transmitted when sending an IPv6 UDP packet, in addition to payload size
///
/// Consists of:
/// - 8 byte ethernet preamble
/// - 14 + 4 byte MAC header and checksum
/// - 40 byte IPv6 header
/// - 8 byte UDP header
const EXTRA_PACKET_SIZE_IPV6: usize = 8 + 18 + 40 + 8;
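
// Arithmetic check for the constants above: IPv4 overhead is
// 8 + 18 + 20 + 8 = 54 bytes per packet, IPv6 overhead is
// 8 + 18 + 40 + 8 = 74 bytes. These are added to payload sizes when
// recording bytes_sent/bytes_received statistics, so reported traffic
// approximates on-the-wire usage rather than raw UDP payload volume.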

pub fn run_socket_worker(
    config: Config,
    shared_state: State,
    statistics: CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>,
    statistics_sender: Sender<StatisticsMessage>,
    validator: ConnectionValidator,
    priv_droppers: Vec<PrivilegeDropper>,
) -> anyhow::Result<()> {
    #[cfg(all(target_os = "linux", feature = "io-uring"))]
    if config.network.use_io_uring {
        use anyhow::Context;

        self::uring::supported_on_current_kernel().context("check for io_uring compatibility")?;

        return self::uring::SocketWorker::run(
            config,
            shared_state,
            statistics,
            statistics_sender,
            validator,
            priv_droppers,
        );
    }

    self::mio::run(
        config,
        shared_state,
        statistics,
        statistics_sender,
        validator,
        priv_droppers,
    )
}
947
apps/aquatic/crates/udp/src/workers/socket/uring/buf_ring.rs
Normal file
@ -0,0 +1,947 @@
// Copyright (c) 2021 Carl Lerche
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Copied (with slight modifications) from
// - https://github.com/FrankReh/tokio-uring/tree/9387c92c98138451f7d760432a04b0b95a406f22/src/buf/bufring
// - https://github.com/FrankReh/tokio-uring/blob/9387c92c98138451f7d760432a04b0b95a406f22/src/buf/bufgroup/mod.rs

//! Module for the io_uring device's buf_ring feature.

// Developer's note about io_uring return codes when a buf_ring is used:
//
// While a buf_ring pool is exhausted, new calls to read that are, or are not, ready to read will
// fail with the 105 error, "no buffers", while existing calls that were waiting to become ready to
// read will not fail. Only when the data becomes ready to read will they fail, if the buffer ring
// is still empty at that time. This makes sense when thinking about it from how the kernel
// implements the start of a read command; it can be confusing when first working with these
// commands from the userland perspective.

// The file! calls below trigger a clippy false positive.
#![allow(clippy::print_literal)]

use io_uring::types;
use std::cell::Cell;
use std::io;
use std::rc::Rc;
use std::sync::atomic::{self, AtomicU16};

use super::CurrentRing;

/// The buffer group ID.
///
/// The creator of a buffer group is responsible for picking a buffer group id
/// that does not conflict with other buffer group ids also being registered with the uring
/// interface.
pub(crate) type Bgid = u16;

// Future: Maybe create a bgid module with a trivial implementation of a type that tracks the next
// bgid to use. The crate's driver could do that perhaps, but there could be a benefit to tracking
// them across multiple thread's drivers. So there is flexibility in not building it into the
// driver.

/// The buffer ID. Buffer ids are assigned and used by the crate and probably are not visible
/// to the crate user.
pub(crate) type Bid = u16;

/// This tracks a buffer that has been filled in by the kernel, having gotten the memory
/// from a buffer ring, and returned to userland via a cqe entry.
pub struct BufX {
    bgroup: BufRing,
    bid: Bid,
    len: usize,
}

impl BufX {
    // # Safety
    //
    // The bid must be the buffer id supplied by the kernel as having been chosen and written to.
    // The length of the buffer must represent the length written to by the kernel.
    pub(crate) unsafe fn new(bgroup: BufRing, bid: Bid, len: usize) -> Self {
        // len will already have been checked against the buf_capacity
        // so it is guaranteed that len <= bgroup.buf_capacity.

        Self { bgroup, bid, len }
    }

    /// Return the number of bytes initialized.
    ///
    /// This value initially came from the kernel, as reported in the cqe. This value may have been
    /// modified with a call to the IoBufMut::set_init method.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Return true if this represents an empty buffer. The length reported by the kernel was 0.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the capacity of this buffer.
    #[inline]
    pub fn cap(&self) -> usize {
        self.bgroup.buf_capacity(self.bid)
    }

    /// Return a byte slice reference.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        let p = self.bgroup.stable_ptr(self.bid);
        // Safety: the pointer returned by stable_ptr is valid for the lifetime of self,
        // and self's len is set when the kernel reports the amount of data that was
        // written into the buffer.
        unsafe { std::slice::from_raw_parts(p, self.len) }
    }

    /// Return a mutable byte slice reference.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let p = self.bgroup.stable_mut_ptr(self.bid);
        // Safety: the pointer returned by stable_mut_ptr is valid for the lifetime of self,
        // and self's len is set when the kernel reports the amount of data that was
        // written into the buffer. In addition, we hold a &mut reference to self.
        unsafe { std::slice::from_raw_parts_mut(p, self.len) }
    }

    // Future: provide access to the uninit space between len and cap if the buffer is being
    // repurposed before being dropped. The set_init below does that too.
}

impl Drop for BufX {
    fn drop(&mut self) {
        // Add the buffer back to the bgroup, for the kernel to reuse.
        // Safety: this function may only be called by the buffer's drop function.
        unsafe { self.bgroup.dropping_bid(self.bid) };
    }
}

/*
unsafe impl crate::buf::IoBuf for BufX {
    fn stable_ptr(&self) -> *const u8 {
        self.bgroup.stable_ptr(self.bid)
    }

    fn bytes_init(&self) -> usize {
        self.len
    }

    fn bytes_total(&self) -> usize {
        self.cap()
    }
}

unsafe impl crate::buf::IoBufMut for BufX {
    fn stable_mut_ptr(&mut self) -> *mut u8 {
        self.bgroup.stable_mut_ptr(self.bid)
    }

    unsafe fn set_init(&mut self, init_len: usize) {
        if self.len < init_len {
            let cap = self.bgroup.buf_capacity(self.bid);
            assert!(init_len <= cap);
            self.len = init_len;
        }
    }
}
*/

impl From<BufX> for Vec<u8> {
    fn from(item: BufX) -> Self {
        item.as_slice().to_vec()
    }
}

/// A `BufRing` represents the ring and the buffers used with the kernel's io_uring buf_ring
/// feature.
///
/// In this implementation, it is both the ring of buffer entries and the actual buffer
/// allocations.
///
/// A BufRing is created through the [`Builder`] and can be registered automatically by the
/// builder's `build` step or at a later time by the user. Registration involves informing the
/// kernel of the ring's dimensions and its identifier (its buffer group id, which goes by the name
/// `bgid`).
///
/// Multiple buf_rings, here multiple BufRings, can be created and registered. BufRings are
/// reference counted to ensure their memory is live while their BufX buffers are live. When a BufX
/// buffer is dropped, it releases itself back to the BufRing from which it came, allowing it to be
/// reused by the kernel.
///
/// It is perhaps worth pointing out that it is the ring itself that is registered with the kernel,
/// not the buffers per se. While a given buf_ring cannot have its size changed dynamically, the
/// buffers that are pushed to the ring by userland, and later potentially re-pushed in the ring,
/// can change. The buffers can be of different sizes and they could come from different allocation
/// blocks. This implementation does not provide that flexibility. Each BufRing comes with its own
/// equal length buffer allocation. And when a BufRing buffer, a BufX, is dropped, its id is pushed
/// back to the ring.
///
/// This is the one and only `Provided Buffers` implementation in `tokio_uring` at the moment and
/// in this version, is a purely concrete type, with a concrete BufX type for buffers that are
/// returned by operations like `recv_provbuf` to the userland application.
///
/// Aside from the register and unregister steps, there are no syscalls used to pass buffers to the
/// kernel. The ring contains a tail memory address that this userland type updates as buffers are
/// added to the ring and which the kernel reads when it needs to pull a buffer from the ring. The
/// kernel does not have a head pointer address that it updates for the userland. The userland
/// (this type) is expected to avoid overwriting the head of the circular ring by keeping track of
/// how many buffers were added to the ring and how many have been returned through the CQE
/// mechanism. This particular implementation does not track the count because all buffers are
/// allocated at the beginning, by the builder, and only its own buffers that came back via a CQE
/// are ever added back to the ring, so it should be impossible to overflow the ring.
#[derive(Clone, Debug)]
pub struct BufRing {
    // RawBufRing uses cell for fields where necessary.
    raw: Rc<RawBufRing>,
}

// Methods the BufX needs.

impl BufRing {
    pub(crate) fn buf_capacity(&self, _: Bid) -> usize {
        self.raw.buf_capacity_i()
    }

    pub(crate) fn stable_ptr(&self, bid: Bid) -> *const u8 {
        // Will panic if bid is out of range.
        self.raw.stable_ptr_i(bid)
    }

    pub(crate) fn stable_mut_ptr(&mut self, bid: Bid) -> *mut u8 {
        // Safety: self is &mut, we're good.
        unsafe { self.raw.stable_mut_ptr_i(bid) }
    }

    // # Safety
    //
    // `dropping_bid` should only be called by the buffer's drop function because once called, the
    // buffer may be given back to the kernel for reuse.
    pub(crate) unsafe fn dropping_bid(&self, bid: Bid) {
        self.raw.dropping_bid_i(bid);
    }
}

// Methods the io operations need.

impl BufRing {
    pub(crate) fn bgid(&self) -> Bgid {
        self.raw.bgid()
    }

    // # Safety
    //
    // The res and flags values are used to lookup a buffer and set its initialized length.
    // The caller is responsible for these being correct. This is expected to be called
    // when these two values are received from the kernel via a CQE and we rely on the kernel to
    // give us correct information.
    pub(crate) unsafe fn get_buf(&self, res: u32, flags: u32) -> io::Result<Option<BufX>> {
        let bid = match io_uring::cqueue::buffer_select(flags) {
            Some(bid) => bid,
            None => {
                // Have seen res == 0, flags == 4 with a TCP socket. res == 0 we take to mean the
                // socket is empty so return None to show there is no buffer returned, which should
                // be interpreted to mean there is no more data to read from this file or socket.
                if res == 0 {
                    return Ok(None);
                }

                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "BufRing::get_buf failed as the buffer bit, IORING_CQE_F_BUFFER, was missing from flags, res = {}, flags = {}",
                        res, flags)
                ));
            }
        };

        let len = res as usize;

        /*
        let flags = flags & !io_uring::sys::IORING_CQE_F_BUFFER; // for tracing flags
        println!(
            "{}:{}: get_buf res({res})=len({len}) flags({:#x})->bid({bid})\n\n",
            file!(),
            line!(),
            flags
        );
        */

        assert!(len <= self.raw.buf_len);

        // TODO maybe later
        // #[cfg(any(debug, feature = "cautious"))]
        // {
        //     let mut debug_bitmap = self.debug_bitmap.borrow_mut();
        //     let m = 1 << (bid % 8);
        //     assert!(debug_bitmap[(bid / 8) as usize] & m == m);
        //     debug_bitmap[(bid / 8) as usize] &= !m;
        // }

        self.raw.metric_getting_another();
        /*
        println!(
            "{}:{}: get_buf cur {}, min {}",
            file!(),
            line!(),
            self.possible_cur.get(),
            self.possible_min.get(),
        );
        */

        // Safety: the len provided to BufX::new is given to us from the kernel.
        Ok(Some(unsafe { BufX::new(self.clone(), bid, len) }))
    }
}

#[derive(Debug, Copy, Clone)]
/// Build the arguments to call build() that returns a [`BufRing`].
///
/// Refer to the method descriptions for details.
#[allow(dead_code)]
pub struct Builder {
    page_size: usize,
    bgid: Bgid,
    ring_entries: u16,
    buf_cnt: u16,
    buf_len: usize,
    buf_align: usize,
    ring_pad: usize,
    bufend_align: usize,

    skip_register: bool,
}

#[allow(dead_code)]
impl Builder {
    /// Create a new Builder with the given buffer group ID and defaults.
    ///
    /// The buffer group ID, `bgid`, is the id the kernel's io_uring device uses to identify the
    /// provided buffer pool to use by operations that are posted to the device.
    ///
    /// The user is responsible for picking a bgid that does not conflict with other buffer groups
    /// that have been registered with the same uring interface.
    pub fn new(bgid: Bgid) -> Builder {
        Builder {
            page_size: 4096,
            bgid,
            ring_entries: 128,
            buf_cnt: 0,
            buf_len: 4096,
            buf_align: 0,
            ring_pad: 0,
            bufend_align: 0,
            skip_register: false,
        }
    }

    /// The page size of the kernel. Defaults to 4096.
    ///
    /// The io_uring device requires the BufRing is allocated on the start of a page, i.e. with a
    /// page size alignment.
    ///
    /// The caller should determine the page size, and may want to cache the info if multiple buf
    /// rings are to be created. Crates are available to get this information or the user may want
    /// to call the libc sysconf directly:
    ///
    ///     use libc::{_SC_PAGESIZE, sysconf};
    ///     let page_size: usize = unsafe { sysconf(_SC_PAGESIZE) as usize };
    pub fn page_size(mut self, page_size: usize) -> Builder {
        self.page_size = page_size;
        self
    }

    /// The number of ring entries to create for the buffer ring.
    ///
    /// This defaults to 128 or the `buf_cnt`, whichever is larger.
    ///
    /// The number will be made a power of 2, and will be the maximum of the ring_entries setting
    /// and the buf_cnt setting. The interface will enforce a maximum of 2^15 (32768) so it can do
    /// rollover calculation.
    ///
    /// Each ring entry is 16 bytes.
    pub fn ring_entries(mut self, ring_entries: u16) -> Builder {
        self.ring_entries = ring_entries;
        self
    }

    /// The number of buffers to allocate. If left zero, the ring_entries value will be used and
    /// that value defaults to 128.
    pub fn buf_cnt(mut self, buf_cnt: u16) -> Builder {
        self.buf_cnt = buf_cnt;
        self
    }

    /// The length of each allocated buffer. Defaults to 4096.
    ///
    /// Non-aligned values are possible and `buf_align` can be used to allocate each buffer on
    /// an alignment boundary, even if the buffer length is not desired to equal the alignment.
    pub fn buf_len(mut self, buf_len: usize) -> Builder {
        self.buf_len = buf_len;
        self
    }

    /// The alignment of the first buffer allocated.
    ///
    /// Generally not needed.
    ///
    /// The buffers are allocated right after the ring unless `ring_pad` is used, and generally the
    /// buffers are allocated contiguous to one another unless the `buf_len` is set to something
    /// different.
    pub fn buf_align(mut self, buf_align: usize) -> Builder {
        self.buf_align = buf_align;
        self
    }

    /// Pad to place after ring to ensure separation between rings and first buffer.
    ///
    /// Generally not needed but may be useful if the ring's end and the buffers' start are to have
    /// some separation, perhaps for cacheline reasons.
    pub fn ring_pad(mut self, ring_pad: usize) -> Builder {
        self.ring_pad = ring_pad;
        self
    }

    /// The alignment of the end of the buffer allocated. To keep other things out of a cache line
    /// or out of a page, if that's desired.
    pub fn bufend_align(mut self, bufend_align: usize) -> Builder {
        self.bufend_align = bufend_align;
        self
    }

    /// Skip automatic registration. The caller can manually invoke the buf_ring.register()
    /// function later. Regardless, the unregister() method will be called automatically when the
    /// BufRing goes out of scope if the caller hadn't manually called buf_ring.unregister()
    /// already.
    pub fn skip_auto_register(mut self, skip: bool) -> Builder {
        self.skip_register = skip;
        self
    }

    /// Return a BufRing, having computed the layout for the single aligned allocation
    /// of both the buffer ring elements and the buffers themselves.
    ///
    /// If auto_register was left enabled, register the BufRing with the driver.
    pub fn build(&self) -> io::Result<BufRing> {
        let mut b: Builder = *self;

        // Two cases where both buf_cnt and ring_entries are set to the max of the two.
        if b.buf_cnt == 0 || b.ring_entries < b.buf_cnt {
            let max = std::cmp::max(b.ring_entries, b.buf_cnt);
            b.buf_cnt = max;
            b.ring_entries = max;
        }

        // Don't allow the next_power_of_two calculation to be done if already larger than 2^15
        // because 2^16 reads back as 0 in a u16. And the interface doesn't allow for ring_entries
        // larger than 2^15 anyway, so this is a good place to catch it. Here we return a unique
        // error that is more descriptive than the InvalidArg that would come from the interface.
        if b.ring_entries > (1 << 15) {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "ring_entries exceeded 32768",
            ));
        }

        // Requirement of the interface is the ring entries is a power of two, making its and our
        // mask calculation trivial.
        b.ring_entries = b.ring_entries.next_power_of_two();

        Ok(BufRing {
            raw: Rc::new(RawBufRing::new(NewArgs {
                page_size: b.page_size,
                bgid: b.bgid,
                ring_entries: b.ring_entries,
                buf_cnt: b.buf_cnt,
                buf_len: b.buf_len,
                buf_align: b.buf_align,
                ring_pad: b.ring_pad,
                bufend_align: b.bufend_align,
                auto_register: !b.skip_register,
            })?),
        })
    }
}
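
// Illustrative Builder usage (hypothetical values, against the API above):
// create and register a ring with buffer group id 0 and 256 entries of
// 2048 bytes each. ring_entries would be rounded up to a power of two if it
// weren't one already, and buf_cnt defaults to the entry count when left at
// zero.
//
//     let ring = Builder::new(0)
//         .ring_entries(256)
//         .buf_len(2048)
//         .build()?;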

// Trivial helper struct for this module.
struct NewArgs {
    page_size: usize,
    bgid: Bgid,
    ring_entries: u16,
    buf_cnt: u16,
    buf_len: usize,
    buf_align: usize,
    ring_pad: usize,
    bufend_align: usize,
    auto_register: bool,
}

#[derive(Debug)]
struct RawBufRing {
    bgid: Bgid,

    // Keep mask rather than ring size because mask is used often, ring size not.
    //ring_entries: u16, // Invariants: > 0, power of 2, max 2^15 (32768).
    ring_entries_mask: u16, // Invariant: one less than ring_entries, which is > 0, power of 2, max 2^15 (32768).

    buf_cnt: u16,   // Invariants: > 0, <= ring_entries.
    buf_len: usize, // Invariant: > 0.
    layout: std::alloc::Layout,
    ring_addr: *const types::BufRingEntry, // Invariant: constant.
    buffers_addr: *mut u8,                 // Invariant: constant.
    local_tail: Cell<u16>,
    tail_addr: *const AtomicU16,
    registered: Cell<bool>,

    // The first `possible` field is a best effort at tracking the current buffer pool usage and
    // from that, tracking the lowest level that has been reached. The two are an attempt at
    // letting the user check the sizing needs of their buf_ring pool.
    //
    // We don't really know how deep the uring device has gone into the pool because we never see
    // its head value and it can be taking buffers from the ring, in-flight, while we add buffers
    // back to the ring. All we know is when a CQE arrives and a buffer lookup is performed, a
    // buffer has already been taken from the pool, and when the buffer is dropped, we add it back
    // to the ring and it is about to be considered part of the pool again.
    possible_cur: Cell<u16>,
    possible_min: Cell<u16>,
    //
    // TODO maybe later
    // #[cfg(any(debug, feature = "cautious"))]
    // debug_bitmap: RefCell<std::vec::Vec<u8>>,
}

impl RawBufRing {
    fn new(new_args: NewArgs) -> io::Result<RawBufRing> {
        #[allow(non_upper_case_globals)]
        const trace: bool = false;

        let NewArgs {
            page_size,
            bgid,
            ring_entries,
            buf_cnt,
            buf_len,
            buf_align,
            ring_pad,
            bufend_align,
            auto_register,
        } = new_args;

        // Check that none of the important args are zero and the ring_entries is at least large
        // enough to hold all the buffers and that ring_entries is a power of 2.

        if (buf_cnt == 0)
            || (buf_cnt > ring_entries)
            || (buf_len == 0)
            || ((ring_entries & (ring_entries - 1)) != 0)
        {
            return Err(io::Error::from(io::ErrorKind::InvalidInput));
        }

        // entry_size is 16 bytes.
        let entry_size = std::mem::size_of::<types::BufRingEntry>();
        let mut ring_size = entry_size * (ring_entries as usize);
        if trace {
            println!(
                "{}:{}: entry_size {} * ring_entries {} = ring_size {} {:#x}",
                file!(),
                line!(),
                entry_size,
                ring_entries,
                ring_size,
                ring_size,
            );
        }

        ring_size += ring_pad;

        if trace {
            println!(
                "{}:{}: after +ring_pad {} ring_size {} {:#x}",
                file!(),
                line!(),
                ring_pad,
                ring_size,
                ring_size,
            );
        }

        if buf_align > 0 {
            let buf_align = buf_align.next_power_of_two();
            ring_size = (ring_size + (buf_align - 1)) & !(buf_align - 1);
            if trace {
                println!(
                    "{}:{}: after buf_align ring_size {} {:#x}",
                    file!(),
                    line!(),
                    ring_size,
                    ring_size,
                );
            }
        }
        let buf_size = buf_len * (buf_cnt as usize);
        assert!(ring_size != 0);
        assert!(buf_size != 0);
        let mut tot_size: usize = ring_size + buf_size;
        if trace {
            println!(
                "{}:{}: ring_size {} {:#x} + buf_size {} {:#x} = tot_size {} {:#x}",
                file!(),
                line!(),
                ring_size,
                ring_size,
                buf_size,
                buf_size,
                tot_size,
                tot_size
            );
        }
        if bufend_align > 0 {
            // For example, if bufend_align is 4096, this would make the total size a multiple of pages.
            let bufend_align = bufend_align.next_power_of_two();
            tot_size = (tot_size + (bufend_align - 1)) & !(bufend_align - 1);
            if trace {
                println!(
                    "{}:{}: after bufend_align tot_size {} {:#x}",
                    file!(),
                    line!(),
                    tot_size,
                    tot_size,
                );
            }
        }

        let align: usize = page_size; // alignment must be at least the page size
        let align = align.next_power_of_two();
        let layout = std::alloc::Layout::from_size_align(tot_size, align).unwrap();

        assert!(layout.size() >= ring_size);
        // Safety: we are assured layout has nonzero size, we passed the assert just above.
        let ring_addr: *mut u8 = unsafe { std::alloc::alloc_zeroed(layout) };

        // Buffers start after the ring_size.
        // Safety: we are assured the address and the offset are in bounds because the ring_addr is
        // the value we got from the alloc call, and the layout.size was shown to be at least as
        // large as the ring_size.
        let buffers_addr: *mut u8 = unsafe { ring_addr.add(ring_size) };
        if trace {
            println!(
                "{}:{}: ring_addr {} {:#x}, layout: size {} align {}",
                file!(),
                line!(),
                ring_addr as u64,
                ring_addr as u64,
                layout.size(),
                layout.align()
            );
            println!(
                "{}:{}: buffers_addr {} {:#x}",
                file!(),
                line!(),
                buffers_addr as u64,
                buffers_addr as u64,
            );
        }

        let ring_addr: *const types::BufRingEntry = ring_addr as _;

        // Safety: the ring_addr passed into tail is the start of the ring. It is both the start of
        // the ring and the first entry in the ring.
        let tail_addr = unsafe { types::BufRingEntry::tail(ring_addr) } as *const AtomicU16;

        let ring_entries_mask = ring_entries - 1;
        assert!((ring_entries & ring_entries_mask) == 0);

        let buf_ring = RawBufRing {
            bgid,
            ring_entries_mask,
            buf_cnt,
            buf_len,
            layout,
            ring_addr,
            buffers_addr,
            local_tail: Cell::new(0),
            tail_addr,
            registered: Cell::new(false),
            possible_cur: Cell::new(0),
            possible_min: Cell::new(buf_cnt),
            //
            // TODO maybe later
            // #[cfg(any(debug, feature = "cautious"))]
            // debug_bitmap: RefCell::new(std::vec![0; ((buf_cnt+7)/8) as usize]),
        };

        // Question had come up: where should the initial buffers be added to the ring?
|
||||
// Here when the ring is created, even before it is registered potentially?
|
||||
// Or after registration?
|
||||
//
|
||||
// For this type, BufRing, we are adding the buffers to the ring as the last part of creating the BufRing,
|
||||
// even before registration is optionally performed.
|
||||
//
|
||||
// We've seen the registration to be successful, even when the ring starts off empty.
|
||||
|
||||
// Add the buffers here where the ring is created.
|
||||
|
||||
for bid in 0..buf_cnt {
|
||||
buf_ring.buf_ring_add(bid);
|
||||
}
|
||||
buf_ring.buf_ring_sync();
|
||||
|
||||
// The default is to register the buffer ring right here. There is usually no reason the
|
||||
// caller should want to register it some time later.
|
||||
//
|
||||
// Perhaps the caller wants to allocate the buffer ring before the CONTEXT driver is in
|
||||
// place - that would be a reason to delay the register call until later.
|
||||
|
||||
if auto_register {
|
||||
buf_ring.register()?;
|
||||
}
|
||||
Ok(buf_ring)
|
||||
}
|
||||
|
||||
/// Register the buffer ring with the kernel.
|
||||
/// Normally this is done automatically when building a BufRing.
|
||||
///
|
||||
/// This method must be called in the context of a `tokio-uring` runtime.
|
||||
/// The registration persists for the lifetime of the runtime, unless
|
||||
/// revoked by the [`unregister`] method. Dropping the
|
||||
/// instance this method has been called on does revoke
|
||||
/// the registration and deallocate the buffer space.
|
||||
///
|
||||
/// [`unregister`]: Self::unregister
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If a `Provided Buffers` group with the same `bgid` is already registered, the function
|
||||
/// returns an error.
|
||||
fn register(&self) -> io::Result<()> {
|
||||
let bgid = self.bgid;
|
||||
//println!("{}:{}: register bgid {bgid}", file!(), line!());
|
||||
|
||||
// Future: move to separate public function so other buf_ring implementations
|
||||
// can register, and unregister, the same way.
|
||||
|
||||
let res = CurrentRing::with(|ring| unsafe {
|
||||
ring.submitter()
|
||||
.register_buf_ring(self.ring_addr as _, self.ring_entries(), bgid)
|
||||
});
|
||||
// println!("{}:{}: res {:?}", file!(), line!(), res);
|
||||
|
||||
if let Err(e) = res {
|
||||
match e.raw_os_error() {
|
||||
Some(22) => {
|
||||
// using buf_ring requires kernel 5.19 or greater.
|
||||
// TODO turn these eprintln into new, more expressive error being returned.
|
||||
// TODO what convention should we follow in this crate for adding information
|
||||
// onto an error?
|
||||
eprintln!(
|
||||
"buf_ring.register returned {e}, most likely indicating this kernel is not 5.19+",
|
||||
);
|
||||
}
|
||||
Some(17) => {
|
||||
// Registering a duplicate bgid is not allowed. There is an `unregister`
|
||||
// operations that can remove the first.
|
||||
eprintln!(
|
||||
"buf_ring.register returned `{e}`, indicating the attempted buffer group id {bgid} was already registered",
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
eprintln!("buf_ring.register returned `{e}` for group id {bgid}");
|
||||
}
|
||||
}
|
||||
return Err(e);
|
||||
};
|
||||
|
||||
self.registered.set(true);
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Unregister the buffer ring from the io_uring.
|
||||
/// Normally this is done automatically when the BufRing goes out of scope.
|
||||
///
|
||||
/// Warning: requires the CONTEXT driver is already in place or will panic.
|
||||
fn unregister(&self) -> io::Result<()> {
|
||||
// If not registered, make this a no-op.
|
||||
if !self.registered.get() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.registered.set(false);
|
||||
|
||||
let bgid = self.bgid;
|
||||
|
||||
CurrentRing::with(|ring| ring.submitter().unregister_buf_ring(bgid))
|
||||
}
|
||||
|
||||
/// Returns the buffer group id.
|
||||
#[inline]
|
||||
fn bgid(&self) -> Bgid {
|
||||
self.bgid
|
||||
}
|
||||
|
||||
fn metric_getting_another(&self) {
|
||||
self.possible_cur.set(self.possible_cur.get() - 1);
|
||||
self.possible_min.set(std::cmp::min(
|
||||
self.possible_min.get(),
|
||||
self.possible_cur.get(),
|
||||
));
|
||||
}
|
||||
|
||||
// # Safety
|
||||
//
|
||||
// Dropping a duplicate bid is likely to cause undefined behavior
|
||||
// as the kernel uses the same buffer for different data concurrently.
|
||||
unsafe fn dropping_bid_i(&self, bid: Bid) {
|
||||
self.buf_ring_add(bid);
|
||||
self.buf_ring_sync();
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn buf_capacity_i(&self) -> usize {
|
||||
self.buf_len as _
|
||||
}
|
||||
|
||||
#[inline]
|
||||
// # Panic
|
||||
//
|
||||
// This function will panic if given a bid that is not within the valid range 0..self.buf_cnt.
|
||||
fn stable_ptr_i(&self, bid: Bid) -> *const u8 {
|
||||
assert!(bid < self.buf_cnt);
|
||||
let offset: usize = self.buf_len * (bid as usize);
|
||||
// Safety: buffers_addr is an u8 pointer and was part of an allocation large enough to hold
|
||||
// buf_cnt number of buf_len buffers. buffers_addr, buf_cnt and buf_len are treated as
|
||||
// constants and bid was just asserted to be less than buf_cnt.
|
||||
unsafe { self.buffers_addr.add(offset) }
|
||||
}
|
||||
|
||||
// # Safety
|
||||
//
|
||||
// This may only be called by an owned or &mut object.
|
||||
//
|
||||
// # Panic
|
||||
// This will panic if bid is out of range.
|
||||
#[inline]
|
||||
unsafe fn stable_mut_ptr_i(&self, bid: Bid) -> *mut u8 {
|
||||
assert!(bid < self.buf_cnt);
|
||||
let offset: usize = self.buf_len * (bid as usize);
|
||||
// Safety: buffers_addr is an u8 pointer and was part of an allocation large enough to hold
|
||||
// buf_cnt number of buf_len buffers. buffers_addr, buf_cnt and buf_len are treated as
|
||||
// constants and bid was just asserted to be less than buf_cnt.
|
||||
self.buffers_addr.add(offset)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn ring_entries(&self) -> u16 {
|
||||
self.ring_entries_mask + 1
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn mask(&self) -> u16 {
|
||||
self.ring_entries_mask
|
||||
}
|
||||
|
||||
// Writes to a ring entry and updates our local copy of the tail.
|
||||
//
|
||||
// Adds the buffer known by its buffer id to the buffer ring. The buffer's address and length
|
||||
// are known given its bid.
|
||||
//
|
||||
// This does not sync the new tail value. The caller should use `buf_ring_sync` for that.
|
||||
//
|
||||
// Panics if the bid is out of range.
|
||||
fn buf_ring_add(&self, bid: Bid) {
|
||||
// Compute address of current tail position, increment the local copy of the tail. Then
|
||||
// write the buffer's address, length and bid into the current tail entry.
|
||||
|
||||
let cur_tail = self.local_tail.get();
|
||||
self.local_tail.set(cur_tail.wrapping_add(1));
|
||||
let ring_idx = cur_tail & self.mask();
|
||||
|
||||
let ring_addr = self.ring_addr as *mut types::BufRingEntry;
|
||||
|
||||
// Safety:
|
||||
// 1. the pointer address (ring_addr), is set and const at self creation time,
|
||||
// and points to a block of memory at least as large as the number of ring_entries,
|
||||
// 2. the mask used to create ring_idx is one less than
|
||||
// the number of ring_entries, and ring_entries was tested to be a power of two,
|
||||
// So the address gotten by adding ring_idx entries to ring_addr is guaranteed to
|
||||
// be a valid address of a ring entry.
|
||||
let entry = unsafe { &mut *ring_addr.add(ring_idx as usize) };
|
||||
|
||||
entry.set_addr(self.stable_ptr_i(bid) as _);
|
||||
entry.set_len(self.buf_len as _);
|
||||
entry.set_bid(bid);
|
||||
|
||||
// Update accounting.
|
||||
self.possible_cur.set(self.possible_cur.get() + 1);
|
||||
|
||||
// TODO maybe later
|
||||
// #[cfg(any(debug, feature = "cautious"))]
|
||||
// {
|
||||
// let mut debug_bitmap = self.debug_bitmap.borrow_mut();
|
||||
// let m = 1 << (bid % 8);
|
||||
// assert!(debug_bitmap[(bid / 8) as usize] & m == 0);
|
||||
// debug_bitmap[(bid / 8) as usize] |= m;
|
||||
// }
|
||||
}
|
||||
|
||||
// Make 'count' new buffers visible to the kernel. Called after
|
||||
// io_uring_buf_ring_add() has been called 'count' times to fill in new
|
||||
// buffers.
|
||||
#[inline]
|
||||
fn buf_ring_sync(&self) {
|
||||
// Safety: dereferencing this raw pointer is safe. The tail_addr was computed once at init
|
||||
// to refer to the tail address in the ring and is held const for self's lifetime.
|
||||
unsafe {
|
||||
(*self.tail_addr).store(self.local_tail.get(), atomic::Ordering::Release);
|
||||
}
|
||||
// The liburing code did io_uring_smp_store_release(&br.tail, local_tail);
|
||||
}
|
||||
|
||||
// Return the possible_min buffer pool size.
|
||||
#[allow(dead_code)]
|
||||
fn possible_min(&self) -> u16 {
|
||||
self.possible_min.get()
|
||||
}
|
||||
|
||||
// Return the possible_min buffer pool size and reset to allow fresh counting going forward.
|
||||
#[allow(dead_code)]
|
||||
fn possible_min_and_reset(&self) -> u16 {
|
||||
let res = self.possible_min.get();
|
||||
self.possible_min.set(self.buf_cnt);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RawBufRing {
|
||||
fn drop(&mut self) {
|
||||
if self.registered.get() {
|
||||
_ = self.unregister();
|
||||
}
|
||||
// Safety: the ptr and layout are treated as constant, and ptr (ring_addr) was assigned by
|
||||
// a call to std::alloc::alloc_zeroed using the same layout.
|
||||
unsafe { std::alloc::dealloc(self.ring_addr as *mut u8, self.layout) };
|
||||
}
|
||||
}
|
618
apps/aquatic/crates/udp/src/workers/socket/uring/mod.rs
Normal file
@ -0,0 +1,618 @@
mod buf_ring;
mod recv_helper;
mod send_buffers;

use std::cell::RefCell;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::net::UdpSocket;
use std::ops::DerefMut;
use std::os::fd::AsRawFd;
use std::sync::atomic::Ordering;

use anyhow::Context;
use aquatic_common::access_list::AccessListCache;
use crossbeam_channel::Sender;
use io_uring::opcode::Timeout;
use io_uring::types::{Fixed, Timespec};
use io_uring::{IoUring, Probe};
use recv_helper::RecvHelper;
use socket2::{Domain, Protocol, Socket, Type};

use aquatic_common::{
    access_list::create_access_list_cache, privileges::PrivilegeDropper, CanonicalSocketAddr,
    ValidUntil,
};
use aquatic_udp_protocol::*;
use rand::rngs::SmallRng;
use rand::SeedableRng;

use crate::common::*;
use crate::config::Config;

use self::buf_ring::BufRing;
use self::recv_helper::{RecvHelperV4, RecvHelperV6};
use self::send_buffers::{ResponseType, SendBuffers};

use super::validator::ConnectionValidator;
use super::{EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};

/// Size of each request buffer
///
/// Needs to fit recvmsg metadata in addition to the payload.
///
/// The payload of a scrape request with 20 info hashes fits in 256 bytes.
const REQUEST_BUF_LEN: usize = 512;

/// Size of each response buffer
///
/// Enough for:
/// - IPv6 announce response with 112 peers
/// - scrape response for 170 info hashes
const RESPONSE_BUF_LEN: usize = 2048;
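// Size arithmetic (BEP 15 wire format): an announce response has a 20 byte
// header (action, transaction_id, interval, leechers, seeders) and 18 bytes
// per IPv6 peer, so 20 + 112 * 18 = 2036 <= 2048. A scrape response has an
// 8 byte header and 12 bytes per info hash, so 8 + 170 * 12 = 2048 exactly.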

const USER_DATA_RECV_V4: u64 = u64::MAX;
const USER_DATA_RECV_V6: u64 = u64::MAX - 1;
const USER_DATA_PULSE_TIMEOUT: u64 = u64::MAX - 2;
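// Every other user_data value on a CQE is interpreted as the index of the
// send buffer that the completed SendMsg entry used (see handle_cqe below).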

const SOCKET_IDENTIFIER_V4: Fixed = Fixed(0);
const SOCKET_IDENTIFIER_V6: Fixed = Fixed(1);

thread_local! {
    /// Store IoUring instance here so that it can be accessed in BufRing::drop
    pub static CURRENT_RING: CurrentRing = CurrentRing(RefCell::new(None));
}

pub struct CurrentRing(RefCell<Option<IoUring>>);

impl CurrentRing {
    fn with<F, T>(mut f: F) -> T
    where
        F: FnMut(&mut IoUring) -> T,
    {
        CURRENT_RING.with(|r| {
            let mut opt_ring = r.0.borrow_mut();

            f(Option::as_mut(opt_ring.deref_mut()).expect("IoUring not set"))
        })
    }
}
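// Usage sketch (illustrative only): run a closure against this thread's ring,
// panicking if SocketWorker::run has not stored one yet:
//
//     let sq_len = CurrentRing::with(|ring| ring.submission().len());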

pub struct SocketWorker {
    config: Config,
    shared_state: State,
    statistics: CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>,
    statistics_sender: Sender<StatisticsMessage>,
    access_list_cache: AccessListCache,
    validator: ConnectionValidator,
    #[allow(dead_code)]
    opt_socket_ipv4: Option<UdpSocket>,
    #[allow(dead_code)]
    opt_socket_ipv6: Option<UdpSocket>,
    buf_ring: BufRing,
    send_buffers: SendBuffers,
    recv_helper_v4: RecvHelperV4,
    recv_helper_v6: RecvHelperV6,
    local_responses: VecDeque<(CanonicalSocketAddr, Response)>,
    resubmittable_sqe_buf: Vec<io_uring::squeue::Entry>,
    recv_sqe_ipv4: io_uring::squeue::Entry,
    recv_sqe_ipv6: io_uring::squeue::Entry,
    pulse_timeout_sqe: io_uring::squeue::Entry,
    peer_valid_until: ValidUntil,
    rng: SmallRng,
}

impl SocketWorker {
    pub fn run(
        config: Config,
        shared_state: State,
        statistics: CachePaddedArc<IpVersionStatistics<SocketWorkerStatistics>>,
        statistics_sender: Sender<StatisticsMessage>,
        validator: ConnectionValidator,
        mut priv_droppers: Vec<PrivilegeDropper>,
    ) -> anyhow::Result<()> {
        let ring_entries = config.network.ring_size.next_power_of_two();
        // Try to fill up the ring with send requests
        let send_buffer_entries = ring_entries;

        let opt_socket_ipv4 = if config.network.use_ipv4 {
            let priv_dropper = priv_droppers.pop().expect("not enough priv droppers");

            Some(
                create_socket(&config, priv_dropper, config.network.address_ipv4.into())
                    .context("create ipv4 socket")?,
            )
        } else {
            None
        };
        let opt_socket_ipv6 = if config.network.use_ipv6 {
            let priv_dropper = priv_droppers.pop().expect("not enough priv droppers");

            Some(
                create_socket(&config, priv_dropper, config.network.address_ipv6.into())
                    .context("create ipv6 socket")?,
            )
        } else {
            None
        };

        let access_list_cache = create_access_list_cache(&shared_state.access_list);

        let send_buffers = SendBuffers::new(send_buffer_entries as usize);
        let recv_helper_v4 = RecvHelperV4::new(&config);
        let recv_helper_v6 = RecvHelperV6::new(&config);

        let ring = IoUring::builder()
            .setup_coop_taskrun()
            .setup_single_issuer()
            .setup_submit_all()
            .build(ring_entries.into())
            .unwrap();

        ring.submitter()
            .register_files(&[
                opt_socket_ipv4
                    .as_ref()
                    .map(|s| s.as_raw_fd())
                    .unwrap_or(-1),
                opt_socket_ipv6
                    .as_ref()
                    .map(|s| s.as_raw_fd())
                    .unwrap_or(-1),
            ])
            .unwrap();

        // Store ring in thread local storage before creating BufRing
        CURRENT_RING.with(|r| *r.0.borrow_mut() = Some(ring));

        let buf_ring = buf_ring::Builder::new(0)
            .ring_entries(ring_entries)
            .buf_len(REQUEST_BUF_LEN)
            .build()
            .unwrap();

        // This timeout enables regular updates of ConnectionValidator and
        // peer_valid_until
        let pulse_timeout_sqe = {
            let timespec_ptr = Box::into_raw(Box::new(Timespec::new().sec(5))) as *const _;

            Timeout::new(timespec_ptr)
                .build()
                .user_data(USER_DATA_PULSE_TIMEOUT)
        };

        let mut resubmittable_sqe_buf = vec![pulse_timeout_sqe.clone()];

        let recv_sqe_ipv4 = recv_helper_v4.create_entry(buf_ring.bgid());
        let recv_sqe_ipv6 = recv_helper_v6.create_entry(buf_ring.bgid());

        if opt_socket_ipv4.is_some() {
            resubmittable_sqe_buf.push(recv_sqe_ipv4.clone());
        }
        if opt_socket_ipv6.is_some() {
            resubmittable_sqe_buf.push(recv_sqe_ipv6.clone());
        }

        let peer_valid_until = ValidUntil::new(
            shared_state.server_start_instant,
            config.cleaning.max_peer_age,
        );

        let mut worker = Self {
            config,
            shared_state,
            statistics,
            statistics_sender,
            validator,
            access_list_cache,
            opt_socket_ipv4,
            opt_socket_ipv6,
            send_buffers,
            recv_helper_v4,
            recv_helper_v6,
            local_responses: Default::default(),
            buf_ring,
            recv_sqe_ipv4,
            recv_sqe_ipv6,
            pulse_timeout_sqe,
            resubmittable_sqe_buf,
            peer_valid_until,
            rng: SmallRng::from_entropy(),
        };

        CurrentRing::with(|ring| worker.run_inner(ring));

        Ok(())
    }

    fn run_inner(&mut self, ring: &mut IoUring) {
        loop {
            for sqe in self.resubmittable_sqe_buf.drain(..) {
                unsafe { ring.submission().push(&sqe).unwrap() };
            }

            let sq_space = {
                let sq = ring.submission();

                sq.capacity() - sq.len()
            };

            let mut num_send_added = 0;

            // Enqueue local responses
            for _ in 0..sq_space {
                if let Some((addr, response)) = self.local_responses.pop_front() {
                    let send_to_ipv4_socket = if addr.is_ipv4() {
                        if self.opt_socket_ipv4.is_some() {
                            true
                        } else if self.opt_socket_ipv6.is_some() {
                            false
                        } else {
                            panic!("No socket open")
                        }
                    } else if self.opt_socket_ipv6.is_some() {
                        false
                    } else {
                        panic!("IPv6 response with no IPv6 socket")
                    };

                    match self
                        .send_buffers
                        .prepare_entry(send_to_ipv4_socket, response, addr)
                    {
                        Ok(entry) => {
                            unsafe { ring.submission().push(&entry).unwrap() };

                            num_send_added += 1;
                        }
                        Err(send_buffers::Error::NoBuffers(response)) => {
                            self.local_responses.push_front((addr, response));

                            break;
                        }
                        Err(send_buffers::Error::SerializationFailed(err)) => {
                            ::log::error!("Failed serializing response: {:#}", err);
                        }
                    }
                } else {
                    break;
                }
            }

            // Wait for all sendmsg entries to complete. If none were added,
            // wait for at least one recvmsg or timeout in order to avoid
            // busy-polling if there is no incoming data.
            ring.submitter()
                .submit_and_wait(num_send_added.max(1))
                .unwrap();

            for cqe in ring.completion() {
                self.handle_cqe(cqe);
            }

            self.send_buffers.reset_likely_next_free_index();
        }
    }

    fn handle_cqe(&mut self, cqe: io_uring::cqueue::Entry) {
        match cqe.user_data() {
            USER_DATA_RECV_V4 => {
                if let Some((addr, response)) = self.handle_recv_cqe(&cqe, true) {
                    self.local_responses.push_back((addr, response));
                }

                if !io_uring::cqueue::more(cqe.flags()) {
                    self.resubmittable_sqe_buf.push(self.recv_sqe_ipv4.clone());
                }
            }
            USER_DATA_RECV_V6 => {
                if let Some((addr, response)) = self.handle_recv_cqe(&cqe, false) {
                    self.local_responses.push_back((addr, response));
                }

                if !io_uring::cqueue::more(cqe.flags()) {
                    self.resubmittable_sqe_buf.push(self.recv_sqe_ipv6.clone());
                }
            }
            USER_DATA_PULSE_TIMEOUT => {
                self.validator.update_elapsed();

                self.peer_valid_until = ValidUntil::new(
                    self.shared_state.server_start_instant,
                    self.config.cleaning.max_peer_age,
                );

                self.resubmittable_sqe_buf
                    .push(self.pulse_timeout_sqe.clone());
            }
            send_buffer_index => {
                let result = cqe.result();

                if result < 0 {
                    ::log::error!(
                        "Couldn't send response: {:#}",
                        ::std::io::Error::from_raw_os_error(-result)
                    );
                } else if self.config.statistics.active() {
                    let send_buffer_index = send_buffer_index as usize;

                    let (response_type, receiver_is_ipv4) =
                        self.send_buffers.response_type_and_ipv4(send_buffer_index);

                    let (statistics, extra_bytes) = if receiver_is_ipv4 {
                        (&self.statistics.ipv4, EXTRA_PACKET_SIZE_IPV4)
                    } else {
                        (&self.statistics.ipv6, EXTRA_PACKET_SIZE_IPV6)
                    };

                    statistics
                        .bytes_sent
                        .fetch_add(result as usize + extra_bytes, Ordering::Relaxed);

                    let response_counter = match response_type {
                        ResponseType::Connect => &statistics.responses_connect,
                        ResponseType::Announce => &statistics.responses_announce,
                        ResponseType::Scrape => &statistics.responses_scrape,
                        ResponseType::Error => &statistics.responses_error,
                    };

                    response_counter.fetch_add(1, Ordering::Relaxed);
                }

                // Safety: OK because the CQE using the buffer has been returned and
                // its contents will no longer be accessed by the kernel
                unsafe {
                    self.send_buffers
                        .mark_buffer_as_free(send_buffer_index as usize);
                }
            }
        }
    }

    fn handle_recv_cqe(
        &mut self,
        cqe: &io_uring::cqueue::Entry,
        received_on_ipv4_socket: bool,
    ) -> Option<(CanonicalSocketAddr, Response)> {
        let result = cqe.result();

        if result < 0 {
            if -result == libc::ENOBUFS {
                ::log::info!("recv failed due to lack of buffers, try increasing ring size");
            } else {
                ::log::warn!(
                    "recv failed: {:#}",
                    ::std::io::Error::from_raw_os_error(-result)
                );
            }

            return None;
        }

        let buffer = unsafe {
            match self.buf_ring.get_buf(result as u32, cqe.flags()) {
                Ok(Some(buffer)) => buffer,
                Ok(None) => {
                    ::log::error!("Couldn't get recv buffer");

                    return None;
                }
                Err(err) => {
                    ::log::error!("Couldn't get recv buffer: {:#}", err);

                    return None;
                }
            }
        };

        let recv_helper = if received_on_ipv4_socket {
            &self.recv_helper_v4 as &dyn RecvHelper
        } else {
            &self.recv_helper_v6 as &dyn RecvHelper
        };

        match recv_helper.parse(buffer.as_slice()) {
            Ok((request, addr)) => {
                if self.config.statistics.active() {
                    let (statistics, extra_bytes) = if addr.is_ipv4() {
                        (&self.statistics.ipv4, EXTRA_PACKET_SIZE_IPV4)
                    } else {
                        (&self.statistics.ipv6, EXTRA_PACKET_SIZE_IPV6)
                    };

                    statistics
                        .bytes_received
                        .fetch_add(buffer.len() + extra_bytes, Ordering::Relaxed);
                    statistics.requests.fetch_add(1, Ordering::Relaxed);
                }

                return self.handle_request(request, addr);
            }
            Err(self::recv_helper::Error::RequestParseError(err, addr)) => {
                if self.config.statistics.active() {
                    if addr.is_ipv4() {
                        self.statistics
                            .ipv4
                            .bytes_received
                            .fetch_add(buffer.len() + EXTRA_PACKET_SIZE_IPV4, Ordering::Relaxed);
                    } else {
                        self.statistics
                            .ipv6
                            .bytes_received
                            .fetch_add(buffer.len() + EXTRA_PACKET_SIZE_IPV6, Ordering::Relaxed);
                    }
                }

                match err {
                    RequestParseError::Sendable {
                        connection_id,
                        transaction_id,
                        err,
                    } => {
                        ::log::debug!("Couldn't parse request from {:?}: {}", addr, err);

                        if self.validator.connection_id_valid(addr, connection_id) {
                            let response = ErrorResponse {
                                transaction_id,
                                message: err.into(),
                            };

                            return Some((addr, Response::Error(response)));
                        }
                    }
                    RequestParseError::Unsendable { err } => {
                        ::log::debug!("Couldn't parse request from {:?}: {}", addr, err);
                    }
                }
            }
            Err(self::recv_helper::Error::InvalidSocketAddress) => {
                ::log::debug!("Ignored request claiming to be from port 0");
            }
            Err(self::recv_helper::Error::RecvMsgParseError) => {
                ::log::error!("RecvMsgOut::parse failed");
            }
            Err(self::recv_helper::Error::RecvMsgTruncated) => {
                ::log::warn!("RecvMsgOut::parse failed: sockaddr or payload truncated");
            }
        }

        None
    }

    fn handle_request(
        &mut self,
        request: Request,
        src: CanonicalSocketAddr,
    ) -> Option<(CanonicalSocketAddr, Response)> {
        let access_list_mode = self.config.access_list.mode;

        match request {
            Request::Connect(request) => {
                let response = Response::Connect(ConnectResponse {
                    connection_id: self.validator.create_connection_id(src),
                    transaction_id: request.transaction_id,
                });

                return Some((src, response));
            }
            Request::Announce(request) => {
                if self
                    .validator
                    .connection_id_valid(src, request.connection_id)
                {
                    if self
                        .access_list_cache
                        .load()
                        .allows(access_list_mode, &request.info_hash.0)
                    {
                        let response = self.shared_state.torrent_maps.announce(
                            &self.config,
                            &self.statistics_sender,
                            &mut self.rng,
                            &request,
                            src,
                            self.peer_valid_until,
                        );

                        return Some((src, response));
                    } else {
                        let response = Response::Error(ErrorResponse {
                            transaction_id: request.transaction_id,
                            message: "Info hash not allowed".into(),
                        });

                        return Some((src, response));
                    }
                }
            }
            Request::Scrape(request) => {
                if self
                    .validator
                    .connection_id_valid(src, request.connection_id)
                {
                    let response =
                        Response::Scrape(self.shared_state.torrent_maps.scrape(request, src));

                    return Some((src, response));
                }
            }
        }

        None
    }
}

fn create_socket(
    config: &Config,
    priv_dropper: PrivilegeDropper,
    address: SocketAddr,
) -> anyhow::Result<::std::net::UdpSocket> {
    let socket = if address.is_ipv4() {
        Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?
    } else {
        let socket = Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))?;

        if config.network.set_only_ipv6 {
            socket
                .set_only_v6(true)
                .with_context(|| "socket: set only ipv6")?;
        }

        socket
    };

    socket
        .set_reuse_port(true)
        .with_context(|| "socket: set reuse port")?;

    socket
        .set_nonblocking(true)
        .with_context(|| "socket: set nonblocking")?;

    let recv_buffer_size = config.network.socket_recv_buffer_size;

    if recv_buffer_size != 0 {
        if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) {
            ::log::error!(
                "socket: failed setting recv buffer to {}: {:?}",
                recv_buffer_size,
                err
            );
        }
    }

    socket
        .bind(&address.into())
        .with_context(|| format!("socket: bind to {}", address))?;

    priv_dropper.after_socket_creation()?;

    Ok(socket.into())
}

pub fn supported_on_current_kernel() -> anyhow::Result<()> {
    let opcodes = [
        // We can't probe for RecvMsgMulti, so we probe for SendZc, which was
        // also introduced in Linux 6.0
        io_uring::opcode::SendZc::CODE,
    ];

    let ring = IoUring::new(1).with_context(|| "create ring")?;

    let mut probe = Probe::new();

    ring.submitter()
        .register_probe(&mut probe)
        .with_context(|| "register probe")?;

    for opcode in opcodes {
        if !probe.is_supported(opcode) {
            return Err(anyhow::anyhow!(
                "io_uring opcode {:b} not supported",
                opcode
            ));
        }
    }

    Ok(())
}
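Since RecvMsgMulti cannot be probed for directly, the check above stands in for the whole Linux 6.0+ feature set this backend needs. A hedged sketch of how a caller might gate on it before spawning socket workers; the fallback behavior is illustrative, not part of this commit:

if let Err(err) = supported_on_current_kernel() {
    ::log::error!("io_uring backend unavailable (requires Linux 6.0+): {:#}", err);
    // e.g. fall back to the non-uring socket worker instead
}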
169
apps/aquatic/crates/udp/src/workers/socket/uring/recv_helper.rs
Normal file
@ -0,0 +1,169 @@
use std::{
    mem::MaybeUninit,
    net::{Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
};

use aquatic_common::CanonicalSocketAddr;
use aquatic_udp_protocol::{Request, RequestParseError};
use io_uring::{opcode::RecvMsgMulti, types::RecvMsgOut};

use crate::config::Config;

use super::{SOCKET_IDENTIFIER_V4, SOCKET_IDENTIFIER_V6, USER_DATA_RECV_V4, USER_DATA_RECV_V6};

#[allow(clippy::enum_variant_names)]
pub enum Error {
    RecvMsgParseError,
    RecvMsgTruncated,
    RequestParseError(RequestParseError, CanonicalSocketAddr),
    InvalidSocketAddress,
}

pub trait RecvHelper {
    fn parse(&self, buffer: &[u8]) -> Result<(Request, CanonicalSocketAddr), Error>;
}

// For IPv4 sockets
pub struct RecvHelperV4 {
    max_scrape_torrents: u8,
    #[allow(dead_code)]
    name_v4: *const libc::sockaddr_in,
    msghdr_v4: *const libc::msghdr,
}

impl RecvHelperV4 {
    pub fn new(config: &Config) -> Self {
        let name_v4 = Box::into_raw(Box::new(libc::sockaddr_in {
            sin_family: 0,
            sin_port: 0,
            sin_addr: libc::in_addr { s_addr: 0 },
            sin_zero: [0; 8],
        }));

        // XXX: on musl libc, msghdr contains private padding fields
        let msghdr_v4 = unsafe {
            let mut hdr = MaybeUninit::<libc::msghdr>::zeroed().assume_init();
            hdr.msg_name = name_v4 as *mut libc::c_void;
            hdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;
            Box::into_raw(Box::new(hdr))
        };

        Self {
            max_scrape_torrents: config.protocol.max_scrape_torrents,
            name_v4,
            msghdr_v4,
        }
    }

    pub fn create_entry(&self, buf_group: u16) -> io_uring::squeue::Entry {
        RecvMsgMulti::new(SOCKET_IDENTIFIER_V4, self.msghdr_v4, buf_group)
            .build()
            .user_data(USER_DATA_RECV_V4)
    }
}

impl RecvHelper for RecvHelperV4 {
    fn parse(&self, buffer: &[u8]) -> Result<(Request, CanonicalSocketAddr), Error> {
        // Safe as long as the kernel only reads from the pointer and doesn't
        // write to it. I think this is the case.
        let msghdr = unsafe { self.msghdr_v4.read() };

        let msg = RecvMsgOut::parse(buffer, &msghdr).map_err(|_| Error::RecvMsgParseError)?;

        if msg.is_name_data_truncated() | msg.is_payload_truncated() {
            return Err(Error::RecvMsgTruncated);
        }

        let name_data = unsafe { *(msg.name_data().as_ptr() as *const libc::sockaddr_in) };

        let addr = SocketAddr::V4(SocketAddrV4::new(
            u32::from_be(name_data.sin_addr.s_addr).into(),
            u16::from_be(name_data.sin_port),
        ));

        if addr.port() == 0 {
            return Err(Error::InvalidSocketAddress);
        }

        let addr = CanonicalSocketAddr::new(addr);

        let request = Request::parse_bytes(msg.payload_data(), self.max_scrape_torrents)
            .map_err(|err| Error::RequestParseError(err, addr))?;

        Ok((request, addr))
    }
}

// For IPv6 sockets (can theoretically still receive IPv4 packets, though)
pub struct RecvHelperV6 {
    max_scrape_torrents: u8,
    #[allow(dead_code)]
    name_v6: *const libc::sockaddr_in6,
    msghdr_v6: *const libc::msghdr,
}

impl RecvHelperV6 {
    pub fn new(config: &Config) -> Self {
        let name_v6 = Box::into_raw(Box::new(libc::sockaddr_in6 {
            sin6_family: 0,
            sin6_port: 0,
            sin6_flowinfo: 0,
            sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
            sin6_scope_id: 0,
        }));

        // XXX: on musl libc, msghdr contains private padding fields
        let msghdr_v6 = unsafe {
            let mut hdr = MaybeUninit::<libc::msghdr>::zeroed().assume_init();
            hdr.msg_name = name_v6 as *mut libc::c_void;
            hdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in6>() as u32;
            Box::into_raw(Box::new(hdr))
        };

        Self {
            max_scrape_torrents: config.protocol.max_scrape_torrents,
            name_v6,
            msghdr_v6,
        }
    }

    pub fn create_entry(&self, buf_group: u16) -> io_uring::squeue::Entry {
        RecvMsgMulti::new(SOCKET_IDENTIFIER_V6, self.msghdr_v6, buf_group)
            .build()
            .user_data(USER_DATA_RECV_V6)
    }
}

impl RecvHelper for RecvHelperV6 {
    fn parse(&self, buffer: &[u8]) -> Result<(Request, CanonicalSocketAddr), Error> {
        // Safe as long as the kernel only reads from the pointer and doesn't
        // write to it. I think this is the case.
        let msghdr = unsafe { self.msghdr_v6.read() };

        let msg = RecvMsgOut::parse(buffer, &msghdr).map_err(|_| Error::RecvMsgParseError)?;

        if msg.is_name_data_truncated() | msg.is_payload_truncated() {
            return Err(Error::RecvMsgTruncated);
        }

        let name_data = unsafe { *(msg.name_data().as_ptr() as *const libc::sockaddr_in6) };

        let addr = SocketAddr::V6(SocketAddrV6::new(
            Ipv6Addr::from(name_data.sin6_addr.s6_addr),
            u16::from_be(name_data.sin6_port),
            u32::from_be(name_data.sin6_flowinfo),
            u32::from_be(name_data.sin6_scope_id),
        ));

        if addr.port() == 0 {
            return Err(Error::InvalidSocketAddress);
        }

        let addr = CanonicalSocketAddr::new(addr);

        let request = Request::parse_bytes(msg.payload_data(), self.max_scrape_torrents)
            .map_err(|err| Error::RequestParseError(err, addr))?;

        Ok((request, addr))
    }
}
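Both helpers read the sockaddr fields through u16::from_be/u32::from_be because the kernel fills them in network byte order. A standalone illustration of the conversion, independent of the diff:

// Port 8080 as it would appear in sockaddr_in.sin_port on any host:
let sin_port: u16 = 8080u16.to_be();
assert_eq!(u16::from_be(sin_port), 8080);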
242
apps/aquatic/crates/udp/src/workers/socket/uring/send_buffers.rs
Normal file
@ -0,0 +1,242 @@
use std::{
    io::Cursor,
    iter::repeat_with,
    mem::MaybeUninit,
    net::SocketAddr,
    ptr::{addr_of_mut, null_mut},
};

use aquatic_common::CanonicalSocketAddr;
use aquatic_udp_protocol::Response;
use io_uring::opcode::SendMsg;

use super::{RESPONSE_BUF_LEN, SOCKET_IDENTIFIER_V4, SOCKET_IDENTIFIER_V6};

pub enum Error {
    NoBuffers(Response),
    SerializationFailed(std::io::Error),
}

pub struct SendBuffers {
    likely_next_free_index: usize,
    buffers: Vec<(SendBufferMetadata, *mut SendBuffer)>,
}

impl SendBuffers {
    pub fn new(capacity: usize) -> Self {
        let buffers = repeat_with(|| (Default::default(), SendBuffer::new()))
            .take(capacity)
            .collect::<Vec<_>>();

        Self {
            likely_next_free_index: 0,
            buffers,
        }
    }

    pub fn response_type_and_ipv4(&self, index: usize) -> (ResponseType, bool) {
        let meta = &self.buffers.get(index).unwrap().0;

        (meta.response_type, meta.receiver_is_ipv4)
    }

    /// # Safety
    ///
    /// Only safe to call once buffer is no longer referenced by in-flight
    /// io_uring queue entries
    pub unsafe fn mark_buffer_as_free(&mut self, index: usize) {
        self.buffers[index].0.free = true;
    }

    /// Call after going through completion queue
    pub fn reset_likely_next_free_index(&mut self) {
        self.likely_next_free_index = 0;
    }

    pub fn prepare_entry(
        &mut self,
        send_to_ipv4_socket: bool,
        response: Response,
        addr: CanonicalSocketAddr,
    ) -> Result<io_uring::squeue::Entry, Error> {
        let index = if let Some(index) = self.next_free_index() {
            index
        } else {
            return Err(Error::NoBuffers(response));
        };

        let (buffer_metadata, buffer) = self.buffers.get_mut(index).unwrap();

        // Safe as long as `mark_buffer_as_free` was used correctly
        let buffer = unsafe { &mut *(*buffer) };

        match buffer.prepare_entry(response, addr, send_to_ipv4_socket, buffer_metadata) {
            Ok(entry) => {
                buffer_metadata.free = false;

                self.likely_next_free_index = index + 1;

                Ok(entry.user_data(index as u64))
            }
            Err(err) => Err(err),
        }
    }

    fn next_free_index(&self) -> Option<usize> {
        if self.likely_next_free_index >= self.buffers.len() {
            return None;
        }

        for (i, (meta, _)) in self.buffers[self.likely_next_free_index..]
            .iter()
            .enumerate()
        {
            if meta.free {
                return Some(self.likely_next_free_index + i);
            }
        }

        None
    }
}

/// Make sure not to hold any reference to this struct while the kernel can
/// write to its contents
struct SendBuffer {
    name_v4: libc::sockaddr_in,
    name_v6: libc::sockaddr_in6,
    bytes: [u8; RESPONSE_BUF_LEN],
    iovec: libc::iovec,
    msghdr: libc::msghdr,
}

impl SendBuffer {
    fn new() -> *mut Self {
        let mut instance = Box::new(Self {
            name_v4: libc::sockaddr_in {
                sin_family: libc::AF_INET as u16,
                sin_port: 0,
                sin_addr: libc::in_addr { s_addr: 0 },
                sin_zero: [0; 8],
            },
            name_v6: libc::sockaddr_in6 {
                sin6_family: libc::AF_INET6 as u16,
                sin6_port: 0,
                sin6_flowinfo: 0,
                sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
                sin6_scope_id: 0,
            },
            bytes: [0; RESPONSE_BUF_LEN],
            iovec: libc::iovec {
                iov_base: null_mut(),
                iov_len: 0,
            },
            msghdr: unsafe { MaybeUninit::<libc::msghdr>::zeroed().assume_init() },
        });

        instance.iovec.iov_base = addr_of_mut!(instance.bytes) as *mut libc::c_void;
        instance.iovec.iov_len = instance.bytes.len();

        instance.msghdr.msg_iov = addr_of_mut!(instance.iovec);
        instance.msghdr.msg_iovlen = 1;

        // Set IPv4 initially. Will be overridden with each prepare_entry call
        instance.msghdr.msg_name = addr_of_mut!(instance.name_v4) as *mut libc::c_void;
        instance.msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;

        Box::into_raw(instance)
    }
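    // Note that `new` deliberately returns a raw pointer from Box::into_raw:
    // iovec.iov_base and msghdr.msg_name point into the struct itself, so the
    // allocation must stay at a stable heap address for as long as the kernel
    // may read from it via submitted SendMsg entries.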

    fn prepare_entry(
        &mut self,
        response: Response,
        addr: CanonicalSocketAddr,
        send_to_ipv4_socket: bool,
        metadata: &mut SendBufferMetadata,
    ) -> Result<io_uring::squeue::Entry, Error> {
        let entry_fd = if send_to_ipv4_socket {
            metadata.receiver_is_ipv4 = true;

            let addr = if let Some(SocketAddr::V4(addr)) = addr.get_ipv4() {
                addr
            } else {
                panic!("ipv6 address in ipv4 mode");
            };

            self.name_v4.sin_port = addr.port().to_be();
            self.name_v4.sin_addr.s_addr = u32::from(*addr.ip()).to_be();
            self.msghdr.msg_name = addr_of_mut!(self.name_v4) as *mut libc::c_void;
            self.msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;

            SOCKET_IDENTIFIER_V4
        } else {
            // Set receiver protocol type before calling addr.get_ipv6_mapped()
            metadata.receiver_is_ipv4 = addr.is_ipv4();

            let addr = if let SocketAddr::V6(addr) = addr.get_ipv6_mapped() {
                addr
            } else {
                panic!("ipv4 address when ipv6 or ipv6-mapped address expected");
            };

            self.name_v6.sin6_port = addr.port().to_be();
            self.name_v6.sin6_addr.s6_addr = addr.ip().octets();
            self.msghdr.msg_name = addr_of_mut!(self.name_v6) as *mut libc::c_void;
            self.msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in6>() as u32;

            SOCKET_IDENTIFIER_V6
        };

        let mut cursor = Cursor::new(&mut self.bytes[..]);

        match response.write_bytes(&mut cursor) {
            Ok(()) => {
                self.iovec.iov_len = cursor.position() as usize;

                metadata.response_type = ResponseType::from_response(&response);

                Ok(SendMsg::new(entry_fd, addr_of_mut!(self.msghdr)).build())
            }
            Err(err) => Err(Error::SerializationFailed(err)),
        }
    }
}

#[derive(Debug)]
struct SendBufferMetadata {
    free: bool,
    /// Only used for statistics
    receiver_is_ipv4: bool,
    /// Only used for statistics
    response_type: ResponseType,
}

impl Default for SendBufferMetadata {
    fn default() -> Self {
        Self {
            free: true,
            receiver_is_ipv4: true,
            response_type: Default::default(),
        }
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum ResponseType {
    #[default]
    Connect,
    Announce,
    Scrape,
    Error,
}

impl ResponseType {
    fn from_response(response: &Response) -> Self {
        match response {
            Response::Connect(_) => Self::Connect,
            Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => Self::Announce,
            Response::Scrape(_) => Self::Scrape,
            Response::Error(_) => Self::Error,
        }
    }
}
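A sketch of the buffer lifecycle as run_inner and handle_cqe in mod.rs above drive it; the names are the ones defined in this file, while `ring`, `cqe` and `local_responses` are illustrative locals:

match send_buffers.prepare_entry(send_to_ipv4_socket, response, addr) {
    // The entry carries the buffer index as user_data; push it to the
    // submission queue.
    Ok(entry) => unsafe { ring.submission().push(&entry).unwrap() },
    // All buffers in flight: put the response back and retry next loop.
    Err(send_buffers::Error::NoBuffers(response)) => {
        local_responses.push_front((addr, response))
    }
    Err(send_buffers::Error::SerializationFailed(err)) => {
        ::log::error!("Failed serializing response: {:#}", err)
    }
}

// Later, when the matching CQE arrives, the kernel is done with the buffer:
unsafe { send_buffers.mark_buffer_as_free(cqe.user_data() as usize) };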
165
apps/aquatic/crates/udp/src/workers/socket/validator.rs
Normal file
@ -0,0 +1,165 @@
use std::net::IpAddr;
use std::time::Instant;

use anyhow::Context;
use constant_time_eq::constant_time_eq;
use getrandom::getrandom;

use aquatic_common::CanonicalSocketAddr;
use aquatic_udp_protocol::ConnectionId;

use crate::config::Config;

/// HMAC (BLAKE3) based ConnectionId creator and validator
///
/// The method update_elapsed must be called at least once a minute.
///
/// The purpose of using ConnectionIds is to make IP spoofing costly, mainly to
/// prevent the tracker from being used as an amplification vector for DDoS
/// attacks. By including 32 bits of BLAKE3 keyed hash output in the Ids, an
/// attacker would have to make on average 2^31 attempts to correctly guess a
/// single hash. Furthermore, such a hash would only be valid for at most
/// `max_connection_age` seconds, a short duration in which to get value for
/// the bandwidth spent brute forcing it.
///
/// Structure of a created ConnectionId (bytes making up the inner i64):
/// - &[0..4]: ConnectionId creation time as number of seconds after the
///   ConnectionValidator instance was created, encoded as u32 bytes. A u32
///   fits around 136 years in seconds.
/// - &[4..8]: truncated keyed BLAKE3 hash of:
///   - previous 4 bytes
///   - octets of client IP address
#[derive(Clone)]
pub struct ConnectionValidator {
    start_time: Instant,
    max_connection_age: u64,
    keyed_hasher: blake3::Hasher,
    seconds_since_start: u32,
}

impl ConnectionValidator {
    /// Create a new instance. Must be created once and cloned if used in
    /// several threads.
    pub fn new(config: &Config) -> anyhow::Result<Self> {
        let mut key = [0; 32];

        getrandom(&mut key)
            .with_context(|| "Couldn't get random bytes for ConnectionValidator key")?;

        let keyed_hasher = blake3::Hasher::new_keyed(&key);

        Ok(Self {
            keyed_hasher,
            start_time: Instant::now(),
            max_connection_age: config.cleaning.max_connection_age.into(),
            seconds_since_start: 0,
        })
    }

    pub fn create_connection_id(&mut self, source_addr: CanonicalSocketAddr) -> ConnectionId {
        let elapsed = (self.seconds_since_start).to_ne_bytes();

        let hash = self.hash(elapsed, source_addr.get().ip());

        let mut connection_id_bytes = [0u8; 8];

        connection_id_bytes[..4].copy_from_slice(&elapsed);
        connection_id_bytes[4..].copy_from_slice(&hash);

        ConnectionId::new(i64::from_ne_bytes(connection_id_bytes))
    }

    pub fn connection_id_valid(
        &mut self,
        source_addr: CanonicalSocketAddr,
        connection_id: ConnectionId,
    ) -> bool {
        let bytes = connection_id.0.get().to_ne_bytes();
        let (elapsed, hash) = bytes.split_at(4);
        let elapsed: [u8; 4] = elapsed.try_into().unwrap();

        if !constant_time_eq(hash, &self.hash(elapsed, source_addr.get().ip())) {
            return false;
        }

        let seconds_since_start = self.seconds_since_start as u64;
        let client_elapsed = u64::from(u32::from_ne_bytes(elapsed));
        let client_expiration_time = client_elapsed + self.max_connection_age;

        // In addition to checking if the client connection is expired,
        // disallow client_elapsed values that are too far in the future and
        // thus could not have been sent by the tracker. This prevents brute
        // forcing with `u32::MAX` as the 'elapsed' part of the ConnectionId to
        // find a hash that works until the tracker is restarted.
        let client_not_expired = client_expiration_time > seconds_since_start;
        let client_elapsed_not_in_far_future = client_elapsed <= (seconds_since_start + 60);

        client_not_expired & client_elapsed_not_in_far_future
    }

    pub fn update_elapsed(&mut self) {
        self.seconds_since_start = self.start_time.elapsed().as_secs() as u32;
    }

    fn hash(&mut self, elapsed: [u8; 4], ip_addr: IpAddr) -> [u8; 4] {
        self.keyed_hasher.update(&elapsed);

        match ip_addr {
            IpAddr::V4(ip) => self.keyed_hasher.update(&ip.octets()),
            IpAddr::V6(ip) => self.keyed_hasher.update(&ip.octets()),
        };

        let mut hash = [0u8; 4];

        self.keyed_hasher.finalize_xof().fill(&mut hash);
        self.keyed_hasher.reset();

        hash
    }
}

#[cfg(test)]
mod tests {
    use std::net::SocketAddr;

    use quickcheck_macros::quickcheck;

    use super::*;

    #[quickcheck]
    fn test_connection_validator(
        original_addr: IpAddr,
        different_addr: IpAddr,
        max_connection_age: u32,
    ) -> quickcheck::TestResult {
        let original_addr = CanonicalSocketAddr::new(SocketAddr::new(original_addr, 0));
        let different_addr = CanonicalSocketAddr::new(SocketAddr::new(different_addr, 0));

        if original_addr == different_addr {
            return quickcheck::TestResult::discard();
        }

        let mut validator = {
            let mut config = Config::default();

            config.cleaning.max_connection_age = max_connection_age;

            ConnectionValidator::new(&config).unwrap()
        };

        let connection_id = validator.create_connection_id(original_addr);

        let original_valid = validator.connection_id_valid(original_addr, connection_id);
        let different_valid = validator.connection_id_valid(different_addr, connection_id);

        if different_valid {
            return quickcheck::TestResult::failed();
        }

        if max_connection_age == 0 {
            quickcheck::TestResult::from_bool(!original_valid)
        } else {
            quickcheck::TestResult::from_bool(original_valid)
        }
    }
}
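A round-trip sketch of the validator, condensed from the quickcheck test above; it assumes a Config whose cleaning.max_connection_age is nonzero, since an age of zero makes every id expire immediately:

let mut validator = ConnectionValidator::new(&config)?;
let addr = CanonicalSocketAddr::new("127.0.0.1:6881".parse().unwrap());

let connection_id = validator.create_connection_id(addr);
assert!(validator.connection_id_valid(addr, connection_id));
// The same id presented from a different source address fails the
// constant-time keyed-hash comparison.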
331
apps/aquatic/crates/udp/src/workers/statistics/collector.rs
Normal file
@ -0,0 +1,331 @@
use std::sync::atomic::Ordering;
use std::time::Instant;

use hdrhistogram::Histogram;
use num_format::{Locale, ToFormattedString};
use serde::Serialize;

use crate::config::Config;

use super::{IpVersion, Statistics};

#[cfg(feature = "prometheus")]
macro_rules! set_peer_histogram_gauge {
    ($ip_version:expr, $data:expr, $type_label:expr) => {
        ::metrics::gauge!(
            "aquatic_peers_per_torrent",
            "type" => $type_label,
            "ip_version" => $ip_version,
        )
        .set($data as f64);
    };
}

pub struct StatisticsCollector {
    statistics: Statistics,
    ip_version: IpVersion,
    last_update: Instant,
    last_complete_histogram: PeerHistogramStatistics,
}

impl StatisticsCollector {
    pub fn new(statistics: Statistics, ip_version: IpVersion) -> Self {
        Self {
            statistics,
            last_update: Instant::now(),
            last_complete_histogram: Default::default(),
            ip_version,
        }
    }

    pub fn add_histogram(&mut self, histogram: Histogram<u64>) {
        self.last_complete_histogram = PeerHistogramStatistics::new(histogram);
    }

    pub fn collect_from_shared(
        &mut self,
        #[cfg(feature = "prometheus")] config: &Config,
    ) -> CollectedStatistics {
        let mut requests = 0;
        let mut responses_connect: usize = 0;
        let mut responses_announce: usize = 0;
        let mut responses_scrape: usize = 0;
        let mut responses_error: usize = 0;
        let mut bytes_received: usize = 0;
        let mut bytes_sent: usize = 0;

        #[cfg(feature = "prometheus")]
        let ip_version_prometheus_str = self.ip_version.prometheus_str();

        for (i, statistics) in self
            .statistics
            .socket
            .iter()
            .map(|s| s.by_ip_version(self.ip_version))
            .enumerate()
        {
            {
                let n = statistics.requests.fetch_and(0, Ordering::Relaxed);

                requests += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_requests_total",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics.responses_connect.fetch_and(0, Ordering::Relaxed);

                responses_connect += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_responses_total",
                        "type" => "connect",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics
                    .responses_announce
                    .fetch_and(0, Ordering::Relaxed);

                responses_announce += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_responses_total",
                        "type" => "announce",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics.responses_scrape.fetch_and(0, Ordering::Relaxed);

                responses_scrape += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_responses_total",
                        "type" => "scrape",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics.responses_error.fetch_and(0, Ordering::Relaxed);

                responses_error += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_responses_total",
                        "type" => "error",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics.bytes_received.fetch_and(0, Ordering::Relaxed);

                bytes_received += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_rx_bytes",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
            {
                let n = statistics.bytes_sent.fetch_and(0, Ordering::Relaxed);

                bytes_sent += n;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::counter!(
                        "aquatic_tx_bytes",
                        "ip_version" => ip_version_prometheus_str,
                        "worker_index" => i.to_string(),
                    )
                    .increment(n.try_into().unwrap());
                }
            }
        }

        let swarm_statistics = &self.statistics.swarm.by_ip_version(self.ip_version);

        let num_torrents = {
            let num_torrents = swarm_statistics.torrents.load(Ordering::Relaxed);

            #[cfg(feature = "prometheus")]
            if config.statistics.run_prometheus_endpoint {
                ::metrics::gauge!(
                    "aquatic_torrents",
                    "ip_version" => ip_version_prometheus_str,
                )
                .set(num_torrents as f64);
            }

            num_torrents
        };

        let num_peers = {
            let num_peers = swarm_statistics.peers.load(Ordering::Relaxed);

            #[cfg(feature = "prometheus")]
            if config.statistics.run_prometheus_endpoint {
                ::metrics::gauge!(
                    "aquatic_peers",
                    "ip_version" => ip_version_prometheus_str,
                )
                .set(num_peers as f64);
            }

            num_peers
        };

        let elapsed = {
            let now = Instant::now();

            let elapsed = (now - self.last_update).as_secs_f64();

            self.last_update = now;

            elapsed
        };
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint && config.statistics.torrent_peer_histograms {
|
||||
self.last_complete_histogram
|
||||
.update_metrics(ip_version_prometheus_str);
|
||||
}
|
||||
|
||||
let requests_per_second = requests as f64 / elapsed;
|
||||
let responses_per_second_connect = responses_connect as f64 / elapsed;
|
||||
let responses_per_second_announce = responses_announce as f64 / elapsed;
|
||||
let responses_per_second_scrape = responses_scrape as f64 / elapsed;
|
||||
let responses_per_second_error = responses_error as f64 / elapsed;
|
||||
let bytes_received_per_second = bytes_received as f64 / elapsed;
|
||||
let bytes_sent_per_second = bytes_sent as f64 / elapsed;
|
||||
|
||||
let responses_per_second_total = responses_per_second_connect
|
||||
+ responses_per_second_announce
|
||||
+ responses_per_second_scrape
|
||||
+ responses_per_second_error;
|
||||
|
||||
CollectedStatistics {
|
||||
requests_per_second: (requests_per_second as usize).to_formatted_string(&Locale::en),
|
||||
responses_per_second_total: (responses_per_second_total as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_connect: (responses_per_second_connect as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_announce: (responses_per_second_announce as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_scrape: (responses_per_second_scrape as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_error: (responses_per_second_error as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
rx_mbits: format!("{:.2}", bytes_received_per_second * 8.0 / 1_000_000.0),
|
||||
tx_mbits: format!("{:.2}", bytes_sent_per_second * 8.0 / 1_000_000.0),
|
||||
num_torrents: num_torrents.to_formatted_string(&Locale::en),
|
||||
num_peers: num_peers.to_formatted_string(&Locale::en),
|
||||
peer_histogram: self.last_complete_histogram.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
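Note: the collector above drains each per-worker counter with fetch_and(0, Ordering::Relaxed), which atomically returns the previous value while zeroing it (x & 0 == 0 for any x), so each collection interval only counts events that arrived since the previous pass. A minimal self-contained sketch of that read-and-reset pattern; all names here are illustrative, not part of the crate:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

fn main() {
    // Shared counter, incremented by a worker thread.
    let requests = Arc::new(AtomicUsize::new(0));

    let worker_counter = Arc::clone(&requests);
    thread::spawn(move || loop {
        worker_counter.fetch_add(1, Ordering::Relaxed);
        thread::sleep(Duration::from_millis(1));
    });

    let mut last_update = Instant::now();

    for _ in 0..3 {
        thread::sleep(Duration::from_secs(1));

        // Atomically read and reset, so the next interval starts from zero.
        let n = requests.fetch_and(0, Ordering::Relaxed);

        let now = Instant::now();
        let elapsed = (now - last_update).as_secs_f64();
        last_update = now;

        println!("{:.0} requests/second", n as f64 / elapsed);
    }
}

Relaxed ordering is sufficient here because only the counter value itself matters; no other memory accesses are synchronized through it.
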
#[derive(Clone, Debug, Serialize)]
pub struct CollectedStatistics {
    pub requests_per_second: String,
    pub responses_per_second_total: String,
    pub responses_per_second_connect: String,
    pub responses_per_second_announce: String,
    pub responses_per_second_scrape: String,
    pub responses_per_second_error: String,
    pub rx_mbits: String,
    pub tx_mbits: String,
    pub num_torrents: String,
    pub num_peers: String,
    pub peer_histogram: PeerHistogramStatistics,
}

#[derive(Clone, Debug, Serialize, Default)]
pub struct PeerHistogramStatistics {
    pub min: u64,
    pub p10: u64,
    pub p20: u64,
    pub p30: u64,
    pub p40: u64,
    pub p50: u64,
    pub p60: u64,
    pub p70: u64,
    pub p80: u64,
    pub p90: u64,
    pub p95: u64,
    pub p99: u64,
    pub p999: u64,
    pub max: u64,
}

impl PeerHistogramStatistics {
    fn new(h: Histogram<u64>) -> Self {
        Self {
            min: h.min(),
            p10: h.value_at_percentile(10.0),
            p20: h.value_at_percentile(20.0),
            p30: h.value_at_percentile(30.0),
            p40: h.value_at_percentile(40.0),
            p50: h.value_at_percentile(50.0),
            p60: h.value_at_percentile(60.0),
            p70: h.value_at_percentile(70.0),
            p80: h.value_at_percentile(80.0),
            p90: h.value_at_percentile(90.0),
            p95: h.value_at_percentile(95.0),
            p99: h.value_at_percentile(99.0),
            p999: h.value_at_percentile(99.9),
            max: h.max(),
        }
    }

    #[cfg(feature = "prometheus")]
    fn update_metrics(&self, ip_version: &'static str) {
        set_peer_histogram_gauge!(ip_version, self.min, "min");
        set_peer_histogram_gauge!(ip_version, self.p10, "p10");
        set_peer_histogram_gauge!(ip_version, self.p20, "p20");
        set_peer_histogram_gauge!(ip_version, self.p30, "p30");
        set_peer_histogram_gauge!(ip_version, self.p40, "p40");
        set_peer_histogram_gauge!(ip_version, self.p50, "p50");
        set_peer_histogram_gauge!(ip_version, self.p60, "p60");
        set_peer_histogram_gauge!(ip_version, self.p70, "p70");
        set_peer_histogram_gauge!(ip_version, self.p80, "p80");
        set_peer_histogram_gauge!(ip_version, self.p90, "p90");
        set_peer_histogram_gauge!(ip_version, self.p95, "p95");
        set_peer_histogram_gauge!(ip_version, self.p99, "p99");
        set_peer_histogram_gauge!(ip_version, self.p999, "p999");
        set_peer_histogram_gauge!(ip_version, self.max, "max");
    }
}

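The percentile accessors above (min, max, value_at_percentile) come from the hdrhistogram crate's Histogram<u64>. A small sketch of how such a histogram could be filled and summarized; the recorded per-torrent peer counts here are fabricated:

use hdrhistogram::Histogram;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Three significant digits of value precision.
    let mut h = Histogram::<u64>::new(3)?;

    // Record a made-up peer count for each torrent.
    for peers in [1u64, 2, 2, 3, 5, 8, 40, 1500] {
        h.record(peers)?;
    }

    println!("min {}", h.min());
    println!("p50 {}", h.value_at_percentile(50.0));
    println!("p99 {}", h.value_at_percentile(99.0));
    println!("max {}", h.max());

    Ok(())
}
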
298
apps/aquatic/crates/udp/src/workers/statistics/mod.rs
Normal file
@ -0,0 +1,298 @@
mod collector;

use std::fs::File;
use std::io::Write;
use std::time::{Duration, Instant};

use anyhow::Context;
use aquatic_common::IndexMap;
use aquatic_udp_protocol::{PeerClient, PeerId};
use compact_str::CompactString;
use crossbeam_channel::Receiver;
use num_format::{Locale, ToFormattedString};
use serde::Serialize;
use time::format_description::well_known::Rfc2822;
use time::OffsetDateTime;
use tinytemplate::TinyTemplate;

use collector::{CollectedStatistics, StatisticsCollector};

use crate::common::*;
use crate::config::Config;

const TEMPLATE_KEY: &str = "statistics";
const TEMPLATE_CONTENTS: &str = include_str!("../../../templates/statistics.html");
const STYLESHEET_CONTENTS: &str = concat!(
    "<style>",
    include_str!("../../../templates/statistics.css"),
    "</style>"
);

#[derive(Debug, Serialize)]
struct TemplateData {
    stylesheet: String,
    ipv4_active: bool,
    ipv6_active: bool,
    extended_active: bool,
    ipv4: CollectedStatistics,
    ipv6: CollectedStatistics,
    last_updated: String,
    peer_update_interval: String,
    peer_clients: Vec<(String, String)>,
}

pub fn run_statistics_worker(
    config: Config,
    shared_state: State,
    statistics: Statistics,
    statistics_receiver: Receiver<StatisticsMessage>,
) -> anyhow::Result<()> {
    let process_peer_client_data = {
        let mut collect = config.statistics.write_html_to_file;

        #[cfg(feature = "prometheus")]
        {
            collect |= config.statistics.run_prometheus_endpoint;
        }

        collect && config.statistics.peer_clients
    };

    let opt_tt = if config.statistics.write_html_to_file {
        let mut tt = TinyTemplate::new();

        tt.add_template(TEMPLATE_KEY, TEMPLATE_CONTENTS)
            .context("parse statistics html template")?;

        Some(tt)
    } else {
        None
    };

    let mut ipv4_collector = StatisticsCollector::new(statistics.clone(), IpVersion::V4);
    let mut ipv6_collector = StatisticsCollector::new(statistics, IpVersion::V6);

    // Keep a per-peer reference count, so that a peer isn't dropped from the
    // totals just because it was removed from one of its torrents
    let mut peers: IndexMap<PeerId, (usize, PeerClient, CompactString)> = IndexMap::default();

    loop {
        let start_time = Instant::now();

        for message in statistics_receiver.try_iter() {
            match message {
                StatisticsMessage::Ipv4PeerHistogram(h) => ipv4_collector.add_histogram(h),
                StatisticsMessage::Ipv6PeerHistogram(h) => ipv6_collector.add_histogram(h),
                StatisticsMessage::PeerAdded(peer_id) => {
                    if process_peer_client_data {
                        peers
                            .entry(peer_id)
                            .or_insert_with(|| (0, peer_id.client(), peer_id.first_8_bytes_hex()))
                            .0 += 1;
                    }
                }
                StatisticsMessage::PeerRemoved(peer_id) => {
                    if process_peer_client_data {
                        if let Some((count, _, _)) = peers.get_mut(&peer_id) {
                            *count -= 1;

                            if *count == 0 {
                                peers.swap_remove(&peer_id);
                            }
                        }
                    }
                }
            }
        }

        let statistics_ipv4 = ipv4_collector.collect_from_shared(
            #[cfg(feature = "prometheus")]
            &config,
        );
        let statistics_ipv6 = ipv6_collector.collect_from_shared(
            #[cfg(feature = "prometheus")]
            &config,
        );

        let peer_clients = if process_peer_client_data {
            let mut clients: IndexMap<PeerClient, usize> = IndexMap::default();

            #[cfg(feature = "prometheus")]
            let mut prefixes: IndexMap<CompactString, usize> = IndexMap::default();

            // Only count peer_ids once, even if they are in multiple torrents
            for (_, peer_client, prefix) in peers.values() {
                *clients.entry(peer_client.to_owned()).or_insert(0) += 1;

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint
                    && config.statistics.prometheus_peer_id_prefixes
                {
                    *prefixes.entry(prefix.to_owned()).or_insert(0) += 1;
                }
            }

            clients.sort_unstable_by(|_, a, _, b| b.cmp(a));

            #[cfg(feature = "prometheus")]
            if config.statistics.run_prometheus_endpoint
                && config.statistics.prometheus_peer_id_prefixes
            {
                for (prefix, count) in prefixes {
                    ::metrics::gauge!(
                        "aquatic_peer_id_prefixes",
                        "prefix_hex" => prefix.to_string(),
                    )
                    .set(count as f64);
                }
            }

            let mut client_vec = Vec::with_capacity(clients.len());

            for (client, count) in clients {
                if config.statistics.write_html_to_file {
                    client_vec.push((client.to_string(), count.to_formatted_string(&Locale::en)));
                }

                #[cfg(feature = "prometheus")]
                if config.statistics.run_prometheus_endpoint {
                    ::metrics::gauge!(
                        "aquatic_peer_clients",
                        "client" => client.to_string(),
                    )
                    .set(count as f64);
                }
            }

            client_vec
        } else {
            Vec::new()
        };

        if config.statistics.print_to_stdout {
            println!("General:");
            println!(
                "  access list entries: {}",
                shared_state.access_list.load().len()
            );

            if config.network.ipv4_active() {
                println!("IPv4:");
                print_to_stdout(&config, &statistics_ipv4);
            }
            if config.network.ipv6_active() {
                println!("IPv6:");
                print_to_stdout(&config, &statistics_ipv6);
            }

            println!();
        }

        if let Some(tt) = opt_tt.as_ref() {
            let template_data = TemplateData {
                stylesheet: STYLESHEET_CONTENTS.to_string(),
                ipv4_active: config.network.ipv4_active(),
                ipv6_active: config.network.ipv6_active(),
                extended_active: config.statistics.torrent_peer_histograms,
                ipv4: statistics_ipv4,
                ipv6: statistics_ipv6,
                last_updated: OffsetDateTime::now_utc()
                    .format(&Rfc2822)
                    .unwrap_or("(formatting error)".into()),
                peer_update_interval: format!("{}", config.cleaning.torrent_cleaning_interval),
                peer_clients,
            };

            if let Err(err) = save_html_to_file(&config, tt, &template_data) {
                ::log::error!("Couldn't save statistics to file: {:#}", err)
            }
        }

        peers.shrink_to_fit();

        if let Some(time_remaining) =
            Duration::from_secs(config.statistics.interval).checked_sub(start_time.elapsed())
        {
            ::std::thread::sleep(time_remaining);
        } else {
            ::log::warn!(
                "statistics interval not long enough to process all data, output may be misleading"
            );
        }
    }
}

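Note on the peers map above: a single peer id can be present in several torrents, so the worker keeps a reference count per peer and only drops the entry once the count reaches zero. The same idea in isolation, using a plain HashMap and string ids purely for illustration:

use std::collections::HashMap;

// Hypothetical stand-in for aquatic's PeerId type.
type PeerId = &'static str;

fn peer_added(peers: &mut HashMap<PeerId, usize>, id: PeerId) {
    *peers.entry(id).or_insert(0) += 1;
}

fn peer_removed(peers: &mut HashMap<PeerId, usize>, id: PeerId) {
    if let Some(count) = peers.get_mut(id) {
        *count -= 1;
        if *count == 0 {
            peers.remove(id);
        }
    }
}

fn main() {
    let mut peers = HashMap::new();

    peer_added(&mut peers, "abc"); // peer joins torrent 1
    peer_added(&mut peers, "abc"); // same peer joins torrent 2
    peer_removed(&mut peers, "abc"); // leaves torrent 1

    // Still counted once, since it remains in torrent 2.
    assert_eq!(peers.len(), 1);

    peer_removed(&mut peers, "abc"); // leaves torrent 2
    assert!(peers.is_empty());
}
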
fn print_to_stdout(config: &Config, statistics: &CollectedStatistics) {
    println!(
        "  bandwidth: {:>7} Mbit/s in, {:>7} Mbit/s out",
        statistics.rx_mbits, statistics.tx_mbits,
    );
    println!("  requests/second: {:>10}", statistics.requests_per_second);
    println!("  responses/second");
    println!(
        "    total:    {:>10}",
        statistics.responses_per_second_total
    );
    println!(
        "    connect:  {:>10}",
        statistics.responses_per_second_connect
    );
    println!(
        "    announce: {:>10}",
        statistics.responses_per_second_announce
    );
    println!(
        "    scrape:   {:>10}",
        statistics.responses_per_second_scrape
    );
    println!(
        "    error:    {:>10}",
        statistics.responses_per_second_error
    );
    println!(
        "  torrents: {:>10} (updated every {}s)",
        statistics.num_torrents, config.cleaning.torrent_cleaning_interval
    );
    println!(
        "  peers:    {:>10} (updated every {}s)",
        statistics.num_peers, config.cleaning.torrent_cleaning_interval
    );

    if config.statistics.torrent_peer_histograms {
        println!(
            "  peers per torrent (updated every {}s)",
            config.cleaning.torrent_cleaning_interval
        );
        println!("    min   {:>10}", statistics.peer_histogram.min);
        println!("    p10   {:>10}", statistics.peer_histogram.p10);
        println!("    p20   {:>10}", statistics.peer_histogram.p20);
        println!("    p30   {:>10}", statistics.peer_histogram.p30);
        println!("    p40   {:>10}", statistics.peer_histogram.p40);
        println!("    p50   {:>10}", statistics.peer_histogram.p50);
        println!("    p60   {:>10}", statistics.peer_histogram.p60);
        println!("    p70   {:>10}", statistics.peer_histogram.p70);
        println!("    p80   {:>10}", statistics.peer_histogram.p80);
        println!("    p90   {:>10}", statistics.peer_histogram.p90);
        println!("    p95   {:>10}", statistics.peer_histogram.p95);
        println!("    p99   {:>10}", statistics.peer_histogram.p99);
        println!("    p99.9 {:>10}", statistics.peer_histogram.p999);
        println!("    max   {:>10}", statistics.peer_histogram.max);
    }
}

fn save_html_to_file(
    config: &Config,
    tt: &TinyTemplate,
    template_data: &TemplateData,
) -> anyhow::Result<()> {
    let mut file = File::create(&config.statistics.html_file_path).with_context(|| {
        format!(
            "File path: {}",
            &config.statistics.html_file_path.to_string_lossy()
        )
    })?;

    write!(file, "{}", tt.render(TEMPLATE_KEY, template_data)?)?;

    Ok(())
}
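save_html_to_file renders the statistics page through TinyTemplate. A toy example of the same render flow, using a throwaway inline template instead of templates/statistics.html and a made-up context struct:

use serde::Serialize;
use tinytemplate::TinyTemplate;

#[derive(Serialize)]
struct Context {
    num_peers: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut tt = TinyTemplate::new();
    // Illustrative template; the real one is compiled in via include_str!.
    tt.add_template("statistics", "<p>peers: {num_peers}</p>")?;

    let rendered = tt.render(
        "statistics",
        &Context {
            num_peers: "1,234".into(),
        },
    )?;

    assert_eq!(rendered, "<p>peers: 1,234</p>");
    Ok(())
}
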
22
apps/aquatic/crates/udp/templates/statistics.css
Normal file
@ -0,0 +1,22 @@
body {
    font-family: arial, sans-serif;
    font-size: 16px;
}

table {
    border-collapse: collapse
}

caption {
    caption-side: bottom;
    padding-top: 0.5rem;
}

th, td {
    padding: 0.5rem 2rem;
    border: 1px solid #ccc;
}

th {
    background-color: #eee;
}
Some files were not shown because too many files have changed in this diff.