Initial commit
Signed-off-by: Tuan-Dat Tran <tuan-dat.tran@tudattr.dev>
6  5g-uulm-network-monitoring/.gitignore  vendored  Normal file
@@ -0,0 +1,6 @@
/target
/output/*
*.pcap
*.log
/resources/video/*
!/resources/video/.gitignore
79  5g-uulm-network-monitoring/.gitlab-ci.yml  Normal file
@@ -0,0 +1,79 @@
variables:
  DOCKER_TAG_PREFIX: "uulm"
  KANIKO_IMAGE: "gcr.io/kaniko-project/executor:v1.9.0-debug"
  CI_REGISTRY: 192.168.100.2:5000
  CI_COMMIT_TAG: "develop"
  DOCKER_CONFIG: "/kaniko/.docker/"

stages:
  - build
  - deploy

.use-kaniko:
  image:
    name: $KANIKO_IMAGE
    entrypoint: [""]

.multi:
  parallel:
    matrix:
      - COMPONENT_NAME: "videoprobe"
        DOCKERFILE_PATH: "Dockerfile"
      - COMPONENT_NAME: "ffmpeg"
        DOCKERFILE_PATH: "ffmpeg.Dockerfile"
      - COMPONENT_NAME: "nginx"
        DOCKERFILE_PATH: "nginx.Dockerfile"

.branches:
  only:
    - master
    - dev

build:
  stage: build
  extends:
    - .multi
    - .use-kaniko
    - .branches
  script:
    - echo "Building $COMPONENT_NAME"
    - /kaniko/executor
      --context "${CI_PROJECT_DIR}"
      --dockerfile "${DOCKERFILE_PATH}"
      --destination "${CI_REGISTRY}/${DOCKER_TAG_PREFIX}/${COMPONENT_NAME}:${CI_COMMIT_TAG}"
      --no-push

deploy:
  before_script:
    - |
      echo "-----BEGIN CERTIFICATE-----
      MIIClDCCAf2gAwIBAgIUac+ko3JCbLKoWfsw4zZ7jmK2hWUwDQYJKoZIhvcNAQEF
      BQAwfDELMAkGA1UEBhMCWFgxDDAKBgNVBAgMA04vQTEMMAoGA1UEBwwDTi9BMSAw
      HgYDVQQKDBdTZWxmLXNpZ25lZCBjZXJ0aWZpY2F0ZTEvMC0GA1UEAwwmMTkyLjE2
      OC4xMDAuMjogU2VsZi1zaWduZWQgY2VydGlmaWNhdGUwHhcNMjMwNzI4MDcyOTAz
      WhcNMjQwNzI3MDcyOTAzWjB8MQswCQYDVQQGEwJYWDEMMAoGA1UECAwDTi9BMQww
      CgYDVQQHDANOL0ExIDAeBgNVBAoMF1NlbGYtc2lnbmVkIGNlcnRpZmljYXRlMS8w
      LQYDVQQDDCYxOTIuMTY4LjEwMC4yOiBTZWxmLXNpZ25lZCBjZXJ0aWZpY2F0ZTCB
      nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAniESM4TXYpLuqkkkXe6wdAlVo/In
      iaPVIV6WH64dab8s5idpkl6ThvkpuON6czF8oQtEC5OCWvHUmPf8wu29kC7s2Gop
      8yeWlu8BG0fD28qDxhURbDoxqlrbEVQN3r+ekYKlEm83yxM4Zay+r1+s1fzYkf5q
      /O0n8WV74Sf4/tkCAwEAAaMTMBEwDwYDVR0RBAgwBocEwKhkAjANBgkqhkiG9w0B
      AQUFAAOBgQCJ5618apVWYG2+mizc3HgDgOrY88wUdXOnpejj5r6YrhaQp/vUHGmY
      Tv5E3G+lYtNJDzqfjMNgZXGzK6A7D66tU+MuO7yHX7a370JyBF/5rc0YQM+ygIlr
      2WQ58cXzY9INB2l+JTbzDXA+gL7EvGzu/8CWoUd9RabSTRRz6hd2OQ==
      -----END CERTIFICATE-----" >> /kaniko/ssl/certs/additional-ca-cert-bundle.crt
  stage: deploy
  extends:
    - .multi
    - .use-kaniko
    - .branches
  script:
    - echo "Deploying $COMPONENT_NAME"
    - echo {\"auths\":{\"192.168.100.2:5000/v2/\":{\"username\":\"5g-iana\",\"password\":\"5g-iana\"}}} > /kaniko/.docker/config.json
    - /kaniko/executor
      --skip-tls-verify
      --context "${CI_PROJECT_DIR}"
      --dockerfile "${DOCKERFILE_PATH}"
      --destination "${CI_REGISTRY}/${DOCKER_TAG_PREFIX}/${COMPONENT_NAME}:${CI_COMMIT_TAG}"
2374  5g-uulm-network-monitoring/Cargo.lock  generated  Normal file
File diff suppressed because it is too large
30  5g-uulm-network-monitoring/Cargo.toml  Normal file
@@ -0,0 +1,30 @@
[package]
name = "videoprobe"
version = "0.2.0"
edition = "2021"

[profile.dev]

[profile.release]
lto = true
strip = true
panic = "abort"

[dependencies]
byte-unit = "4.0.18"
chrono = "0.4.24"
clap = { version = "4.1.6", features = ["derive", "string", "unicode"] }
gpsd_proto = "0.7.0"
rocket = { version = "0.5.0-rc.1", features = ["json"] }
serde_json = "1.0.94"
tokio = { version = "1.26.0", features = ["full"] }
tracing = "0.1.37"
tracing-appender = "0.2.2"
tracing-subscriber = "0.3.16"
serde = { version = "1.0", features = ["derive"] }
reqwest = "0.11.22"
local-ip-address = "0.5.6"

[features]
rtt = []
throughput = []
22  5g-uulm-network-monitoring/Dockerfile  Normal file
@@ -0,0 +1,22 @@
# Build Stage
FROM rust:1.74 as builder
WORKDIR /usr/src/5G_VideoProbe
COPY src src
COPY Cargo.* .
RUN cargo install -F rtt --path .

# Runtime Stage
FROM debian:stable-slim as runtime
RUN apt-get update && apt-get install -y \
    tshark \
    gpsd \
    iputils-ping \
    ffmpeg \
    tcpdump \
    && rm -rf /var/lib/apt/lists/*
COPY Rocket.toml /etc/videoprobe/Rocket.toml
COPY run.sh /run.sh

COPY --from=builder /usr/local/cargo/bin/videoprobe /usr/local/bin/videoprobe

CMD [ "/run.sh" ]
105  5g-uulm-network-monitoring/README.md  Normal file
@@ -0,0 +1,105 @@
# 5G-IANA: UULM Network Monitoring

This repository contains the CI/CD and Dockerfiles necessary to build the UULM Network Monitoring Tool.

This tool is used to deploy an RTMP stream behind a reverse proxy and a network monitoring client which has a consumer for the stream.
The monitoring tool outputs the timestamp, lat, lon, bytes per second and rtt.

## Feature Flags

There are currently two feature flags for this tool, which let us record and output data to the endpoint in multiple formats.

### RTT

With only the `rtt` flag enabled the tool records and emits the `rtt` towards the `ping_ip`. One output would look like this:

```csv
# lat, lon, rtt
0.00000000,0.00000000,6480000 ns
```

```sh
curl -X GET -H "Content-Type: application/json" -d "{ \"endpoint_ip\": [\"http://172.17.0.1:41002/upload\"], \"ping_ip\": \"1.1\" }" http://172.17.0.1:8000/demo/start
```

```sh
cargo build -F rtt
```

### RTT/Throughput

```csv
# unix timestamp, lat, lon, bytes per second, rtt
1716480819,0.00000000,0.00000000,1.86 KB,6960000 ns
```

```sh
curl -X GET -H "Content-Type: application/json" -d "{ \"endpoint_ip\": [\"http://172.17.0.1:41002/upload\"], \"ping_ip\": \"1.1\" , \"stream_url\": \"rtmp://132.252.100.137:31000/live/test\" }" http://172.17.0.1:8000/demo/start
```

```sh
cargo build -F rtt -F throughput
```

### Throughput (not yet tested)

```csv
# lat, lon, throughput per second
0.00000000,0.00000000,1.86 KB
```

```sh
curl -X GET -H "Content-Type: application/json" -d "{ \"endpoint_ip\": [\"http://172.17.0.1:41002/upload\"], \"stream_url\": \"rtmp://132.252.100.137:31000/live/test\" }" http://172.17.0.1:8000/demo/start
```

```sh
cargo build -F throughput
```

## Local Deployment

### Example

```sh
# Server
docker build -f nginx.Dockerfile -t ffmpeg-nginx .; docker run -p 1935:1935 ffmpeg-nginx:latest

# Client
# Add features as needed
cargo run -F rtt -F throughput -- -p "/pcap/receiver.pcap" -o "/output/videoprobe_$(date '+%s').log"

# Configure Client
curl -X GET -H "Content-Type: application/json" -d "{ \"endpoint_ip\": [\"http://172.17.0.1:41002/upload\"], \"ping_ip\": \"1.1\" , \"stream_url\": \"rtmp://localhost:1935/live/test\" }" http://172.17.0.1:8000/demo/start
```

## Open internal Ports

- **1935**: RTMP of `web` providing the `sender`-stream
- **8000**: Endpoint of `videoprobe`

## Configurations/Environment Variables

- STREAM_URL: The URL of an RTMP-based video stream. In this environment it is to be `web`.
- RUST_LOG: The logging level of the network monitoring tool itself.
- ROCKET_CONFIG: Might as well be constant, but specifies the path for the configuration of the API endpoint of `videoprobe`.
- VP_TARGET: The API endpoint to upload the collected data to with a `POST` request. This variable should not be used during the demonstration.
- CMD: Needed as an alternative to using the `command:` keyword, which is usually used to overwrite a container's entrypoint.
- GNSS_ENABLED: Chooses whether the videoprobe should run with "dry GPS". Dry GPS means the tool runs without GPS capabilities, for cases where the user is sure that no GNSS device is present or satellite connectivity cannot be ensured.
- GNSS_DEV: The path of the mounted GNSS device. Needed to start gpsd inside the container. Changes to it should also be applied to the corresponding [local-docker-compose.yml](local-docker-compose.yml) and [docker-compose.yml](docker-compose.yml).
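For orientation, a minimal local sketch that sets these variables explicitly; the image name, port and device mappings mirror docker-compose.yml, so adjust them to your setup:

```sh
# Minimal sketch: values mirror docker-compose.yml; setting GNSS_ENABLED=false skips gpsd ("dry GPS").
docker run --rm \
  -e RUST_LOG=info \
  -e ROCKET_CONFIG=/etc/videoprobe/Rocket.toml \
  -e GNSS_ENABLED=true \
  -e GNSS_DEV=/dev/ttyACM0 \
  --device /dev/ttyACM0:/dev/ttyACM0 \
  -p 8000:8000 \
  192.168.100.2:5000/uulm/passive_network_monitoring:latest
```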
107  5g-uulm-network-monitoring/README.md.old  Normal file
@@ -0,0 +1,107 @@
# 5G-IANA: UULM Network Monitoring

This repository contains the CI/CD and Dockerfiles necessary to build the UULM Network Monitoring Tool.

For demonstration purposes we need to send a command to `videoprobe` before it starts running, so we can deploy it beforehand. To do this simply run the following command:

``` bash
curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"<obu-node endpoint>\",\"<pqos endpoint>\"], \"stream_ip\": \"<ping target>\", \"stream_url\": \"<stream url>"}" http://<videoprobe ip/port>/demo/start
```

- node_ip: A list of API endpoints `videoprobe` should send the collected data to, e.g. [http://192.168.0.149:8001/upload, http://192.168.0.149:8002/upload].
- stream_ip: The IP `videoprobe` measures the latency to. Usually this is the same IP as in the `stream_url`, e.g. 192.168.0.149.
- stream_url: The full path to the nginx-proxy that's hosting an RTMP stream, e.g. rtmp://192.168.0.149/live/test.

## Testing Locally

When testing locally we may host the videostream provider and the consumer on the same device. This is not the case for the deployment on the 5G-IANA platform, where we put them on different clusters (see [maestro-compose.yml](maestro-compose.yml)). All files regarding local testing can be found in [local/](local/).

1. Make sure to have the GNSS dongle connected as a device at `/dev/ttyACM0`. If it has another name, change the entry in [local-docker-compose.yml](local-docker-compose.yml) accordingly.
2. Run `docker compose -f local-docker-compose.yml up --build` to build/run all of the `*Dockerfiles`.
3. For the current version, which is built for the demonstration, we need to run the `curl` command to provide `videoprobe` with the endpoint to which it'll send the data.

Usually that would be the `obu-node` container. For local testing we are using [app.py](app.py). Adjust the port accordingly in the curl command so it looks roughly like this:

``` bash
# Another example: curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"https://webhook.site/30ffd7cd-0fa5-4391-8725-c05a1bf48a75/upload/\"], \"stream_ip\": \"192.168.30.248\", \"stream_url\": \"rtmp://192.168.30.248:32731/live/test\"}" http://192.168.30.248:31234/demo/start
curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"http://192.168.0.149:8001/upload\",\"http://192.168.0.149:8002/upload\"], \"stream_ip\": \"192.168.0.149\", \"stream_url\": \"rtmp://192.168.0.149/live/test\"}" http://192.168.0.149:8000/demo/start
```

Given your device's IP is `192.168.100.2`:

4. Once running you can do the following:
   1. Simulate the DMLO's `get_data_stats` by running the following command:
      `curl -X GET -H "Content-Type: application/json" -d "{\"id\": 1}" http://<IP of videoprobe>:8000/data_collection/get_data_stats`

## Running on 5G-IANA

When testing locally we are hosting the videostream provider and the consumer on the same device. This is not the case for the deployment on the 5G-IANA platform, where we put them on different clusters (see [maestro-compose.yml](maestro-compose.yml)).

1. Make sure the OBUs are connected by running the following command on the MEC: `kubectl get nodes # UULM-OBU1 and UULM-OBU2 should be present`
2. Make sure the OBUs each have a GNSS receiver connected to them. If there are no devices called `/dev/ttyACM0` on each OBU, change the entries in [docker-compose.yml](docker-compose.yml)/[maestro-compose.yml](maestro-compose.yml) according to the actual name of the GNSS receivers and redeploy the images. A possibly easier alternative would be to unplug the GNSS receiver, reboot the machine and plug it back in, if possible.
3. Find out the IPs of the OBUs and run `curl -X GET -H "Content-Type: application/json" -d "{\"ip\": http://192.168.100.2:32123/upload}" http://192.168.100.2:8000/demo/start` on each of them. `192.168.100.2` is a placeholder for their respective IPs, 32123 a placeholder for the port the `obu-node` container is listening on for data uploads, and 8000 a placeholder for the port videoprobe is listening on for the start command.

## Open internal Ports

- **1935**: RTMP of `web` providing the `sender`-stream
- **8000**: Endpoint of `videoprobe`

## Configurations/Environment Variables

- STREAM_URL: The URL of an RTMP-based video stream. In this environment it is to be `web`.
- RUST_LOG: The logging level of the network monitoring tool itself.
- ROCKET_CONFIG: Might as well be constant, but specifies the path for the configuration of the API endpoint of `videoprobe`.
- VP_TARGET: The API endpoint to upload the collected data to with a `POST` request. This variable should not be used during the demonstration.
- CMD: Needed as an alternative to using the `command:` keyword, which is usually used to overwrite a container's entrypoint.
- GNSS_ENABLED: Chooses whether the videoprobe should run with "dry GPS". Dry GPS means the tool runs without GPS capabilities, for cases where the user is sure that no GNSS device is present or satellite connectivity cannot be ensured.
- GNSS_DEV: The path of the mounted GNSS device. Needed to start gpsd inside the container. Changes to it should also be applied to the corresponding [local-docker-compose.yml](local-docker-compose.yml) and [docker-compose.yml](docker-compose.yml).
57  5g-uulm-network-monitoring/README.org  Normal file
@@ -0,0 +1,57 @@
* 5G-IANA: UULM Network Monitoring

This repository contains the CI/CD and Dockerfiles necessary to build the UULM Network Monitoring Tool.

For demonstration purposes we need to send a command to =videoprobe= before it starts running, so we can deploy it beforehand.
To do this simply run the following command:
#+begin_src sh
curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"<obu-node endpoint>\",\"<pqos endpoint>\"], \"stream_ip\": \"<ping target>\", \"stream_url\": \"<stream url>"}" http://<videoprobe ip/port>/demo/start
#+end_src
- node_ip: A list of API endpoints =videoprobe= should send the collected data to, e.g. _[http://192.168.0.149:8001/upload, http://192.168.0.149:8002/upload]_.
- stream_ip: The IP =videoprobe= measures the latency to. Usually this is the same IP as in the ~stream_url~, e.g. _192.168.0.149_.
- stream_url: The full path to the nginx-proxy that's hosting an RTMP stream, e.g. _rtmp://192.168.0.149/live/test_.

** Testing Locally
When testing locally we may host the videostream provider and the consumer on the same device.
This is not the case for the deployment on the 5G-IANA platform, where we put them on different clusters (see [[file:maestro-compose.yml]]).
All files regarding local testing can be found in [[file:local/]].
1. Make sure to have the GNSS dongle connected as a device at ~/dev/ttyACM0~.
   If it has another name, change the entry in [[file:local-docker-compose.yml][local-docker-compose.yml]] accordingly.
2. Run ~docker compose -f local-docker-compose.yml up --build~ to build/run all of the =*Dockerfiles=.
3. For the current version, which is built for the demonstration, we need to run the ~curl~ command to provide =videoprobe= with the endpoint to which it'll send the data.
   Usually that would be the =obu-node= container.
   For local testing we are using [[file:app.py]].
   Adjust the port accordingly in the curl command so it looks roughly like this:
   #+BEGIN_SRC sh
   # Another example: curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"https://webhook.site/30ffd7cd-0fa5-4391-8725-c05a1bf48a75/upload/\"], \"stream_ip\": \"192.168.30.248\", \"stream_url\": \"rtmp://192.168.30.248:32731/live/test\"}" http://192.168.30.248:31234/demo/start
   curl -X GET -H "Content-Type: application/json" -d "{\"node_ip\": [\"http://192.168.0.149:8001/upload\",\"http://192.168.0.149:8002/upload\"], \"stream_ip\": \"192.168.0.149\", \"stream_url\": \"rtmp://192.168.0.149/live/test\"}" http://192.168.0.149:8000/demo/start
   #+END_SRC
   Given your device's IP is =192.168.100.2=.
4. Once running you can do the following:
   1. Simulate the DMLO's ~get_data_stats~ by running the following command:
      ~curl -X GET -H "Content-Type: application/json" -d "{\"id\": 1}" http://<IP of videoprobe>:8000/data_collection/get_data_stats~

** Running on 5G-IANA
When testing locally we are hosting the videostream provider and the consumer on the same device.
This is not the case for the deployment on the 5G-IANA platform, where we put them on different clusters (see [[file:maestro-compose.yml]]).
1. Make sure the OBUs are connected by running the following command on the MEC:
   ~kubectl get nodes # UULM-OBU1 and UULM-OBU2 should be present~
2. Make sure the OBUs each have a GNSS receiver connected to them.
   If there are no devices called ~/dev/ttyACM0~ on each OBU, change the entries in the [[file:docker-compose.yml][docker-compose.yml]]/[[file:maestro-compose.yml][maestro-compose.yml]] according to the actual name of the GNSS receivers and redeploy the images.
   A possibly easier alternative would be to unplug the GNSS receiver, reboot the machine and plug it back in, if possible.
3. Find out the IPs of the OBUs and run ~curl -X GET -H "Content-Type: application/json" -d "{\"ip\": http://192.168.100.2:32123/upload}" http://192.168.100.2:8000/demo/start~ on each of them. ~192.168.100.2~ is a placeholder for their respective IPs, 32123 a placeholder for the port the =obu-node= container is listening on for data uploads, and 8000 a placeholder for the port videoprobe is listening on for the start command.

** Open internal Ports
- *1935*: RTMP of =web= providing the =sender=-stream
- *8000*: Endpoint of =videoprobe=

** Configurations/Environment Variables
- STREAM_URL: The URL of an RTMP-based video stream. In this environment it is to be =web=.
- RUST_LOG: The logging level of the network monitoring tool itself.
- ROCKET_CONFIG: Might as well be constant, but specifies the path for the configuration of the API endpoint of =videoprobe=.
- VP_TARGET: The API endpoint to upload the collected data to with a ~POST~ request. This variable should not be used during the demonstration.
- CMD: Needed as an alternative to using the ~command:~ keyword, which is usually used to overwrite a container's entrypoint.
- GNSS_ENABLED: Chooses whether the videoprobe should run with "dry GPS". Dry GPS means the tool runs without GPS capabilities, for cases where the user is sure that no GNSS device is present or satellite connectivity can't be ensured.
- GNSS_DEV: The path of the mounted GNSS device. Needed to start gpsd inside the container. Changes to it should also be applied to the corresponding [[file:local-docker-compose.yml]] and [[file:docker-compose.yml]].
3  5g-uulm-network-monitoring/Rocket.toml  Normal file
@@ -0,0 +1,3 @@
[default]
address = "0.0.0.0"
limits = { form = "64 kB", json = "1 MiB" }
7  5g-uulm-network-monitoring/buildx/buildkitd.toml  Normal file
@@ -0,0 +1,7 @@
[registry."192.168.100.2:5000"]
  http = true
  insecure = true
  ca = ["certs/192.168.100.2:5000/ca.crt"]
  [[registry."192.168.100.2:5000".keypair]]
    key = "certs/192.168.100.2:5000/client.key"
    cert = "certs/192.168.100.2:5000/client.cert"
3  5g-uulm-network-monitoring/buildx/create_builder.sh  Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

docker buildx create --name iana --platform linux/amd64,linux/arm64 --bootstrap --config ./buildkitd.toml --use
18  5g-uulm-network-monitoring/buildx/setup.sh  Executable file
@@ -0,0 +1,18 @@
#!/bin/bash

# Nokia
#IANA_REGISTRY=192.168.100.2:5000
# TS
IANA_REGISTRY=192.168.100.2:5000

mkdir -p certs/"$IANA_REGISTRY"

(
    cd certs/"$IANA_REGISTRY" || exit 1

    openssl s_client -showcerts -connect "$IANA_REGISTRY" </dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' >ca.crt

    openssl genrsa -out client.key 4096
    openssl req -new -x509 -text -key client.key -out client.cert \
        -subj "/C=DE/ST=Northrhine Westphalia/L=Essen/O=University Duisburg-Essen/emailAddress=tuan-dat.tran@stud.uni-due.de"
)
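Taken together, a plausible sequence for preparing the buildx builder is sketched below; the final `docker buildx ls` check is just a sanity step, not part of the repository's scripts:

```sh
cd buildx
./setup.sh            # fetch the registry CA and generate a client keypair into certs/
./create_builder.sh   # create and select the "iana" builder configured by buildkitd.toml
docker buildx ls      # optional: verify the builder is listed
```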
39  5g-uulm-network-monitoring/docker-compose.yml  Normal file
@@ -0,0 +1,39 @@
version: "3.9"
name: uulm_network_monitoring
services:
  videoprobe:
    image: 192.168.100.2:5000/uulm/passive_network_monitoring:latest
    container_name: netmon_receiver_videoprobe
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M
    healthcheck:
      test: curl http://localhost:8000
      interval: 10s
    environment:
      - RUST_LOG=info
      - ROCKET_CONFIG=/etc/videoprobe/Rocket.toml
      - GNSS_DEV=/dev/ttyACM0
      - GNSS_ENABLED=true # default
    depends_on:
      - web
    ports:
      - 8000:8000
    devices:
      - /dev/ttyACM0:/dev/ttyACM0

  web:
    image: 192.168.100.2:5000/uulm/nginx:latest
    container_name: netmon_sender_web
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M
    healthcheck:
      test: curl http://localhost:1935
      interval: 10s
    ports:
      - 1935:1935
11  5g-uulm-network-monitoring/docker-push-rtt.sh  Executable file
@@ -0,0 +1,11 @@
#!/bin/sh

# docker tag SOURCE_IMAGE[:TAG] 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>
# docker push 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>

PNM_VERSION=v1.3.0
LOCAL_CL_IMAGE=videoprobe-rtt
REMOTE_CL_IMAGE=uc6nmclirtt
docker build -f ./docker/nmcli_rtt.Dockerfile -t $LOCAL_CL_IMAGE .
docker tag $LOCAL_CL_IMAGE:latest 192.168.100.2:5000/uulm/$REMOTE_CL_IMAGE:$PNM_VERSION
docker push 192.168.100.2:5000/uulm/$REMOTE_CL_IMAGE:$PNM_VERSION
11  5g-uulm-network-monitoring/docker-push-throughput.sh  Executable file
@@ -0,0 +1,11 @@
#!/bin/sh

# docker tag SOURCE_IMAGE[:TAG] 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>
# docker push 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>

PNM_VERSION=v1.0.0
LOCAL_CL_IMAGE=videoprobe-throughput
REMOTE_CL_IMAGE=uc6nmclithroughput
docker build -f ./docker/nmcli_throughput.Dockerfile -t $LOCAL_CL_IMAGE .
docker tag $LOCAL_CL_IMAGE:latest 192.168.100.2:5000/uulm/$REMOTE_CL_IMAGE:$PNM_VERSION
docker push 192.168.100.2:5000/uulm/$REMOTE_CL_IMAGE:$PNM_VERSION
24  5g-uulm-network-monitoring/docker-push.sh  Executable file
@@ -0,0 +1,24 @@
#!/bin/sh

# docker tag SOURCE_IMAGE[:TAG] 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>
# docker push 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>

REGISTRY=192.168.100.2:5000/uulm

TAG=v1.3.0
DOCKERFILE=./docker/nmcli_default.Dockerfile
REMOTE_IMAGE_X86=passive_network_monitoring
REMOTE_IMAGE_ARM=passive_network_monitoring_arm

docker buildx build --platform linux/amd64 -f $DOCKERFILE -t \
    $REGISTRY/$REMOTE_IMAGE_X86:$TAG . --push

docker buildx build --platform linux/arm64 -f $DOCKERFILE -t \
    $REGISTRY/$REMOTE_IMAGE_ARM:$TAG . --push

NGINX_VERSION=v1.2.2
LOCAL_NGINX_IMAGE=nginx-stream
REMOTE_NGINX_IMAGE=nginx
docker build -f ./docker/nginx.Dockerfile -t $LOCAL_NGINX_IMAGE .
docker tag $LOCAL_NGINX_IMAGE $REGISTRY/$REMOTE_NGINX_IMAGE:$NGINX_VERSION
docker push $REGISTRY/$REMOTE_NGINX_IMAGE:$NGINX_VERSION
7  5g-uulm-network-monitoring/docker/nginx.Dockerfile  Normal file
@@ -0,0 +1,7 @@
FROM tiangolo/nginx-rtmp:latest-2024-01-15
# Install dependencies
RUN apt-get update && \
    apt-get install -y --no-install-recommends ffmpeg && \
    rm -rf /var/lib/apt/lists/*

ENTRYPOINT ["sh", "-c", "nginx && ffmpeg -f lavfi -i testsrc -vf scale=1920x1080 -r 30 -c:v libx264 -pix_fmt yuv420p -b:v 20M -f flv rtmp://localhost/live/test"]
22  5g-uulm-network-monitoring/docker/nmcli_default.Dockerfile  Normal file
@@ -0,0 +1,22 @@
# Build Stage
FROM rust:1.74 AS builder
WORKDIR /usr/src/5G_VideoProbe
COPY ../src src
COPY ../Cargo.* .
RUN cargo install -F rtt -F throughput --path .

# Runtime Stage
FROM debian:stable-slim AS runtime
RUN apt-get update && apt-get install -y \
    tshark \
    gpsd \
    iputils-ping \
    ffmpeg \
    tcpdump \
    && rm -rf /var/lib/apt/lists/*
COPY ../Rocket.toml /etc/videoprobe/Rocket.toml
COPY ../run.sh /run.sh

COPY --from=builder /usr/local/cargo/bin/videoprobe /usr/local/bin/videoprobe

CMD [ "/run.sh" ]
22  5g-uulm-network-monitoring/docker/nmcli_rtt.Dockerfile  Normal file
@@ -0,0 +1,22 @@
# Build Stage
FROM rust:1.74 AS builder
WORKDIR /usr/src/5G_VideoProbe
COPY ../src src
COPY ../Cargo.* .
RUN cargo install -F rtt --path .

# Runtime Stage
FROM debian:stable-slim AS runtime
RUN apt-get update && apt-get install -y \
    tshark \
    gpsd \
    iputils-ping \
    ffmpeg \
    tcpdump \
    && rm -rf /var/lib/apt/lists/*
COPY ../Rocket.toml /etc/videoprobe/Rocket.toml
COPY ../run.sh /run.sh

COPY --from=builder /usr/local/cargo/bin/videoprobe /usr/local/bin/videoprobe

CMD [ "/run.sh" ]
22  5g-uulm-network-monitoring/docker/nmcli_throughput.Dockerfile  Normal file
@@ -0,0 +1,22 @@
# Build Stage
FROM rust:1.74 as builder
WORKDIR /usr/src/5G_VideoProbe
COPY ../src src
COPY ../Cargo.* .
RUN cargo install -F throughput --path .

# Runtime Stage
FROM debian:stable-slim AS runtime
RUN apt-get update && apt-get install -y \
    tshark \
    gpsd \
    iputils-ping \
    ffmpeg \
    tcpdump \
    && rm -rf /var/lib/apt/lists/*
COPY ../Rocket.toml /etc/videoprobe/Rocket.toml
COPY ../run.sh /run.sh

COPY --from=builder /usr/local/cargo/bin/videoprobe /usr/local/bin/videoprobe

CMD [ "/run.sh" ]
95  5g-uulm-network-monitoring/docs/measurement_setup.org  Normal file
@@ -0,0 +1,95 @@
* Measurement Setup
During our testing in Ulm we had two machines.
The *5G-MEC* was used as the sender of a video stream.
The receiver of the stream was a laptop with a GNSS module and the 5G antenna.

The 5G-MEC was a VM running Ubuntu.
To access the sender from the receiver, the receiver had to be on a specific mobile network.
From there the receiver had access to an OpenVPN server, which granted us a connection to the 5G-MEC.
To limit traffic between the sender and receiver to only the 5G connection, Wi-Fi was turned off.

** Sender
The sender had a video file that was used by a [[https://hub.docker.com/r/jrottenberg/ffmpeg][ffmpeg]] container, which then provided an RTMP stream.
The resulting RTMP stream was forwarded to [[https://hub.docker.com/r/tiangolo/nginx-rtmp/][nginx-rtmp]] so it is accessible from the outside.

*** Video File
The video file the sender used for streaming had the following format:
- Bitrate: 23780 kb/s
- Encoding: h264
- Color model: yuv420p
- Resolution: 1920x1080
- Frame Rate: 23.98 fps

*** Video Stream
To stream the video the following flags were used (a full example invocation is sketched below the list):
#+begin_src shell
-i /video/video.mkv -c:v libx264 -b:v 40M -movflags frag_keyframe+empty_moov -f flv ${STREAM_URL}
#+end_src
- ~-i /video/video.mkv~: This specifies the path to the video file.
- ~-c:v libx264~: Specifies the video codec that should be used; H264, the original codec, in this case.
- ~-b:v 40M~: Specifies the target video bitrate, which is 40 Mb/s.
- ~-movflags frag_keyframe+empty_moov~: These were used to make the file compatible with the FLV format.
- ~-f flv~: Specifies the target file format. FLV is necessary for the RTMP video stream.
- ~${STREAM_URL}~: The URL where the stream will be served.
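Assembled into a full command, the sender invocation would look roughly like the sketch below; the concrete ~${STREAM_URL}~ value is only an example, not taken from this repository.
#+begin_src shell
# Sketch: the same flags as above, wrapped in a complete ffmpeg call.
STREAM_URL=rtmp://localhost/live/test   # example value
ffmpeg -i /video/video.mkv -c:v libx264 -b:v 40M \
       -movflags frag_keyframe+empty_moov -f flv "${STREAM_URL}"
#+end_src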
** Receiver
The receiver had a GNSS module to record GPS data and a 5G mobile connection.
It was running the videoprobe tool in Docker along with [[https://hub.docker.com/r/jrottenberg/ffmpeg][ffmpeg]] and [[https://hub.docker.com/r/nicolaka/netshoot][netshoot]].
ffmpeg was configured to use netshoot as its network gateway.
netshoot ran tshark on its passing traffic and created a pcap file.
That pcap file was written to a Docker volume, which was also attached to our videoprobe tool.
The videoprobe tool used the pcap file to gauge the throughput of the video stream.

Along with videoprobe we also ran ~signal.sh~ to gather mobile network signal information from the antenna, such as "Reference Signal Received Quality" (RSRQ) and "Reference Signal Received Power" (RSRP).

The logfiles of both have to be manually joined on their timestamps at a later time.
*** Receiver
The receiver ran with the following flags:
#+begin_src shell
-i ${STREAM_URL} -c copy -f null -
#+end_src
- ~-i ${STREAM_URL}~: The source of the video stream that should be read in.
- ~-c copy~: Makes the receiver take in the input stream as is.
- ~-f null -~: Discard the streamed video.
*** Netshoot
Netshoot is a network diagnostic tool for Docker.
We use it to get the traffic to/from the ffmpeg container.
*** VideoProbe
VideoProbe uses the following metrics to create a logfile:
- Location data (from the GNSS module)
- Throughput (from the netshoot pcap file)
- Latency (ping)

Each entry of the logfile has the fields (timestamp, latitude, longitude, throughput, latency (in ns)).
The resolution of the output was 1 entry/s.
*** signal.sh
We used ~qmicli~ to log signal information of the antenna.

An entry of the logfile has the fields (timestamp, rsrq, rsrp).
The resolution of the output was 1 entry/s.
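~signal.sh~ itself is not part of this repository; a minimal sketch of such a logging loop, assuming a QMI modem exposed at /dev/cdc-wdm0, could look like this:
#+begin_src shell
#!/bin/sh
# Sketch only: device path, output file and the exact qmicli output parsing are assumptions.
DEV=/dev/cdc-wdm0
OUT="signal_$(date '+%s').log"
while true; do
    ts=$(date '+%s')
    # --nas-get-signal-info prints lines such as "RSRQ: '-9 dB'" and "RSRP: '-89 dBm'"
    vals=$(qmicli -d "$DEV" --nas-get-signal-info | grep -E 'RSRQ|RSRP' | awk -F"'" '{print $2}' | paste -sd, -)
    echo "$ts,$vals" >> "$OUT"
    sleep 1
done
#+end_src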
** Diagram
#+begin_src
Sender (5G-MEC), Docker:
  Videofile (h264, 1920x1080, 23.98 fps, 23780 kb/s, mkv)
    --> ffmpeg (h264, 40M, flv)
    --> nginx-server
    --(5G)--> Client (Laptop), Docker:
                netshoot --> ffmpeg (-c copy)
                netshoot --> pcap <-- videoprobe
#+end_src
18  5g-uulm-network-monitoring/local-docker-push.sh  Executable file
@@ -0,0 +1,18 @@
#!/bin/sh

# docker tag SOURCE_IMAGE[:TAG] 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>
# docker push 192.168.100.2:5000/uulm/<COMPONENT_NAME>:<VERSION>

REGISTRY="mos4"

CL_TAG=v1.3.0
REMOTE_CL_IMAGE=passive_network_monitoring
docker buildx build --platform linux/amd64,linux/arm64 -f ./docker/nmcli_default.Dockerfile -t $REGISTRY/$REMOTE_CL_IMAGE:$CL_TAG . --push

NGINX_VERSION=v1.2.2
LOCAL_NGINX_IMAGE=nginx-stream
REMOTE_NGINX_IMAGE=nginx
# docker buildx build --platform linux/amd64 -f ./docker/nginx.Dockerfile -t $REGISTRY/$REMOTE_NGINX_IMAGE:$NGINX_VERSION --push .
docker build -f ./docker/nginx.Dockerfile -t $LOCAL_NGINX_IMAGE .
docker tag $LOCAL_NGINX_IMAGE $REGISTRY/$REMOTE_NGINX_IMAGE:$NGINX_VERSION
docker push $REGISTRY/$REMOTE_NGINX_IMAGE:$NGINX_VERSION
16  5g-uulm-network-monitoring/local/app.py  Normal file
@@ -0,0 +1,16 @@
from flask import Flask, request
import sys

app = Flask(__name__)

@app.route('/upload', methods=['POST'])
def handle_post_request():
    # Print the received POST request data
    print(request)
    print(request.data)

    # Respond with a 200 status code
    return '', 200

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=int(sys.argv[1]))
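As referenced in the READMEs, this helper stands in for the `obu-node` upload endpoint during local testing; it takes the listening port as its only argument. A typical invocation (the port number is only an example) would be:

```sh
pip install flask           # dependency assumed; the repository does not pin it
python3 local/app.py 8001   # then point the curl command's endpoint at http://<your ip>:8001/upload
```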
@@ -0,0 +1,24 @@
version: "3.9"
name: uulm_network_monitoring
services:
  videoprobe:
    build:
      context: ../
      dockerfile: Dockerfile
    container_name: netmon_receiver_videoprobe
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M
    healthcheck:
      test: curl http://localhost:8000
      interval: 10s
    environment:
      - RUST_LOG=info
      - ROCKET_CONFIG=/etc/videoprobe/Rocket.toml
      - GNSS_DEV=/dev/ttyACM0
    ports:
      - 8000:8000
    devices:
      - /dev/ttyACM0:/dev/ttyACM0
@@ -0,0 +1,38 @@
version: "3.9"
name: uulm_network_monitoring
services:
  web:
    build:
      context: .
      dockerfile: nginx.Dockerfile
    container_name: netmon_sender_web
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 512M
    healthcheck:
      test: curl http://localhost:1935
      interval: 10s
    ports:
      - 1935:1935

  sender:
    build:
      context: .
      dockerfile: ffmpeg.Dockerfile
    container_name: netmon_sender_ffmpeg
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 2048M
    healthcheck:
      test: curl http://web:1935
      interval: 10s
    environment:
      - CMD=/usr/local/bin/ffmpeg -f lavfi -i testsrc -vf scale=3840x2160 -r 60 -c:v libx264 -pix_fmt yuv420p -b:v 40M -f flv rtmp://web/live/test
    depends_on:
      - web
    devices:
      - /dev/dri:/dev/dri
57  5g-uulm-network-monitoring/maestro-compose.yml  Normal file
@@ -0,0 +1,57 @@
application_name: uulm_network_monitoring
infrastructure_manager: kubernetes # kubernetes (default) or openstack

default_image_registry:
  username: my_username_for_image_repository
  password: "R3p0$1t0rY_P@$$W0rD!"

components:
  videoprobe:
    artifacts:
      image: 192.168.100.2:5000/uulm/passive_network_monitoring:latest
      registry: default
      aarch: amd64
    replicas: 1
    compute:
      cpus: 1.0
      ram: 512.0
      storage: 512.0
    location:
      cluster: cluster-1
      node: node-1
    depends_on:
      - component_name: web
        component_port: 1935
    healthcheck:
      http: http://localhost:8000
      interval: 10s
    environment:
      - RUST_LOG=info
      - ROCKET_CONFIG=/etc/videoprobe/Rocket.toml
      - GNSS_DEV=/dev/ttyACM0
      - GNSS_ENABLED=true
    container_interfaces:
      - tcp: 8000
        user_facing: true
    devices:
      - /dev/ttyACM0:/dev/ttyACM0

  web:
    artifacts:
      image: 192.168.100.2:5000/uulm/nginx:latest
      registry: default
      aarch: amd64
    replicas: 1
    compute:
      cpus: 1.0
      ram: 512.0
      storage: 1024.0
    location:
      cluster: cluster-2
      node: node-1
    healthcheck:
      cmd: curl rtmp://localhost:1935/live/test
      interval: 10s
    container_interfaces:
      - tcp: 1935
        user_facing: true
28  5g-uulm-network-monitoring/run.sh  Executable file
@@ -0,0 +1,28 @@
#!/bin/sh

#
# This script is needed for the Dockerfile since running gpsd with a RUN command doesn't seem to work
#
RED='\033[0;31m'
NC='\033[0m' # No Color
echo "Log Level: $RUST_LOG"
echo "Rocket Config: $ROCKET_CONFIG"
echo "GNSS ENABLED: $GNSS_ENABLED"
if [ "$GNSS_ENABLED" = true ]; then
    echo "GNSS Device Path: $GNSS_DEV"
    gpsd -n -G -S 2947 -F /var/run/gpsd.sock $GNSS_DEV
else
    echo "${RED}GNSS is DISABLED${NC}"
fi

mkdir /pcap/
touch /pcap/receiver.pcap
tcpdump -i eth0 -w /pcap/receiver.pcap &

sleep 5
mkdir /output/
if [ "$GNSS_ENABLED" = true ]; then
    videoprobe -p "/pcap/receiver.pcap" -o "/output/videoprobe_$(date '+%s').log"
else
    videoprobe -p "/pcap/receiver.pcap" -o "/output/videoprobe_$(date '+%s').log" -d
fi
1  5g-uulm-network-monitoring/rustfmt.toml  Normal file
@@ -0,0 +1 @@
edition = "2018"
139  5g-uulm-network-monitoring/src/bps.rs  Normal file
@@ -0,0 +1,139 @@
use std::{io::Error, path::Path, process::Stdio, time::Duration};

use byte_unit::{AdjustedByte, Byte};
use tokio::{
    io::{AsyncBufReadExt, BufReader},
    process::Command,
    sync::mpsc::UnboundedSender,
};
use tracing::{debug, trace};

struct Bandwidth {
    time: Duration,
    data: Byte,
}

#[derive(Default, Debug)]
struct Bps {
    /// Last recorded time
    last_time: Duration,
    /// The current total time
    total_time: Duration,
    /// The current total length
    total_len: Byte,
}

impl Bps {
    fn new() -> Self {
        Self::default()
    }

    fn update(&mut self, data: &str) -> Option<BpsData> {
        let x: Vec<&str> = data.trim().split('\t').collect();
        let epoch = Duration::from_secs_f64(x[0].parse::<f64>().unwrap_or_default());
        let _src = x[1];
        let _dst = x[2];
        let len = Byte::from_bytes((x[3].parse::<usize>().unwrap_or_default() as u64).into());
        let mut res = None;

        if self.total_time > Duration::from_secs(1) {
            // One second elapsed
            let window_size = self.total_len; // Total amount of bytes
            let window_time = epoch; // Duration of the window

            let bandwidth = Bandwidth {
                time: window_time,
                data: window_size,
            };

            self.reset(epoch);

            res = Some(BpsData {
                timestamp: bandwidth.time.as_secs(),
                data: bandwidth.data.get_appropriate_unit(false),
            });
        } else {
            // We're still in the window
            // One second hasn't elapsed yet
            // Difference between current time and last time a pkt got recorded
            let delta = if epoch > self.last_time {
                epoch - self.last_time
            } else {
                self.last_time - epoch
            };

            self.last_time = epoch;
            self.total_time += delta;
            self.total_len = Byte::from_bytes(self.total_len.get_bytes() + len.get_bytes());
        }
        trace!("Bps: {:?}", self);

        res
    }

    fn reset(&mut self, last_time: Duration) {
        self.last_time = last_time;
        self.total_time = Duration::default();
        self.total_len = Byte::default();
    }
}

#[derive(Debug)]
pub struct BpsData {
    pub timestamp: u64,
    pub data: AdjustedByte,
}

pub async fn run_bandwidth_eval(
    pcap_file: &Path,
    sender: UnboundedSender<Option<BpsData>>,
) -> Result<(), Error> {
    debug!("Running tail...");
    let mut tail_child = Command::new("tail")
        .args(["-f", "-c", "+0", pcap_file.as_os_str().to_str().unwrap()])
        .stdin(Stdio::null())
        .stdout(Stdio::piped())
        .spawn()
        .unwrap();

    let tail_stdout: Stdio = tail_child.stdout.take().unwrap().try_into().unwrap();

    debug!("Running tshark...");
    let mut tshark_child = Command::new("tshark")
        .args([
            "-T",
            "fields",
            "-e",
            "frame.time_epoch",
            "-e",
            "ip.src_host",
            "-e",
            "ip.dst_host",
            "-e",
            "frame.len",
            "-i",
            "-",
        ])
        .stdin(tail_stdout)
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .spawn()?;

    let mut bps = Bps::new();
    let tshark_stdout = tshark_child.stdout.take().unwrap();

    let tshark_handler = tokio::spawn(async move {
        let mut reader = BufReader::new(tshark_stdout).lines();
        let mut counter = 0;
        while let Some(line) = reader.next_line().await.unwrap() {
            trace!("Pkt {}: {}", counter, &line);
            counter += 1;
            let data = bps.update(&line);
            sender.send(data).expect("Couldn't send BpsData");
        }
    });

    tshark_handler.await.unwrap();

    Ok(())
}
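For reference, the shell equivalent of the pipeline this module spawns (tail keeps the growing pcap open, tshark emits one tab-separated line per packet, which `Bps::update` then windows into bytes per second) is roughly:

```sh
tail -f -c +0 /pcap/receiver.pcap | \
  tshark -T fields -e frame.time_epoch -e ip.src_host -e ip.dst_host -e frame.len -i -
```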
30  5g-uulm-network-monitoring/src/cli.rs  Normal file
@@ -0,0 +1,30 @@
use clap::Parser;
use std::path::PathBuf;

#[derive(Parser, Debug)]
#[command(author, version, about=None, long_about=None)]
pub struct Cli {
    /// Path of the pcap file
    #[arg(short, long)]
    pub pcap: PathBuf,

    /// Output file as csv
    #[arg(short, long)]
    pub out: PathBuf,

    /// Endpoint to send data to
    #[arg(short, long)]
    pub endpoint: Option<String>,

    /// Target for ping
    #[arg(short, long)]
    pub target: Option<String>,

    /// Option purely for testing.
    #[arg(short, long)]
    pub dry_gps: bool,

    /// STREAM_URL for ffmpeg
    #[arg(short, long)]
    pub stream_url: Option<String>,
}
107  5g-uulm-network-monitoring/src/endpoints.rs  Normal file
@@ -0,0 +1,107 @@
use local_ip_address::local_ip;
use rocket::{get, serde::json::Json};
use serde::{Deserialize, Serialize};
use std::{
    ops::Deref,
    time::{SystemTime, UNIX_EPOCH},
};

use crate::SharedCounter;

#[cfg(all(feature = "throughput", feature = "rtt"))]
#[derive(Debug, Deserialize)]
pub struct StartDemoRequest {
    endpoint_ip: Vec<String>,
    ping_ip: String,
    stream_url: String,
}

#[cfg(all(feature = "throughput", not(feature = "rtt")))]
#[derive(Debug, Deserialize)]
pub struct StartDemoRequest {
    endpoint_ip: Vec<String>,
    stream_url: String,
}

#[cfg(all(not(feature = "throughput"), feature = "rtt"))]
#[derive(Debug, Deserialize)]
pub struct StartDemoRequest {
    endpoint_ip: Vec<String>,
    ping_ip: String,
}

#[derive(Debug, Deserialize)]
pub struct DataCollectionStatsRequest {
    id: i32,
}

#[derive(Debug, Serialize)]
pub struct DataNode {
    id: i32,
    ip: String,
    dataset_size: f32,
    time_since_last_record: u64,
}

#[get("/demo/start", format = "json", data = "<data>")]
pub fn start_demo(
    state: &rocket::State<SharedCounter>,
    data: Json<StartDemoRequest>,
) -> &'static str {
    {
        let (local_state, cvar) = state.inner().deref();
        let mut local_state = local_state.lock().unwrap();
        local_state.started = true;
        local_state.endpoint_ip = Some(data.endpoint_ip.clone());

        #[cfg(feature = "rtt")]
        {
            local_state.ping_ip = Some(data.ping_ip.clone());
        }

        #[cfg(feature = "throughput")]
        {
            local_state.stream_url = Some(data.stream_url.clone());
        }

        cvar.notify_one();
    }
    "Ok"
}

#[get("/data_collection/get_data_stats", format = "json", data = "<data>")]
pub fn get_counter(
    state: &rocket::State<SharedCounter>,
    data: Json<DataCollectionStatsRequest>,
) -> Json<DataNode> {
    // Get counter value
    let (counter_val, last_visited): (f32, u64) = {
        let (local_state, _) = state.inner().deref();
        let local_state = local_state.lock().unwrap();
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();

        (local_state.counter as f32, now - local_state.last_check)
    };

    // Reset counter now that it has been seen
    {
        let (local_state, _) = state.inner().deref();
        let mut local_state = local_state.lock().unwrap();
        local_state.counter = 0;
        local_state.last_check = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
    }

    // Response
    Json(DataNode {
        id: data.id,
        ip: local_ip().unwrap().to_string(),
        dataset_size: counter_val,
        time_since_last_record: last_visited,
    })
}
17  5g-uulm-network-monitoring/src/ffmpeg.rs  Normal file
@@ -0,0 +1,17 @@
use std::error::Error;

use tokio::process::Command;

pub async fn run_ffmpeg(stream_url: String) -> Result<(), Box<dyn Error>> {
    let _ffmpeg_child = Command::new("ffmpeg")
        .arg("-i")
        .arg(stream_url)
        .arg("-c")
        .arg("copy")
        .arg("-f")
        .arg("null")
        .arg("-")
        // .stdout(Stdio::piped())
        .spawn()?;
    Ok(())
}
96  5g-uulm-network-monitoring/src/gps.rs  Normal file
@@ -0,0 +1,96 @@
use std::net::SocketAddr;
use std::time::UNIX_EPOCH;
use std::{error::Error, time::SystemTime};

use chrono::DateTime;
use gpsd_proto::{Tpv, UnifiedResponse};
use tokio::{
    io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
    net::TcpStream,
    sync::mpsc::UnboundedSender,
};
use tracing::{debug, info, trace};

#[derive(Debug)]
pub struct GpsData {
    pub timestamp: u64,
    pub lat: Option<f64>,
    pub lon: Option<f64>,
}

pub async fn run_gpsd_eval(
    sender: UnboundedSender<Option<GpsData>>,
    testing: bool,
) -> Result<(), Box<dyn Error>> {
    if !testing {
        let addr: SocketAddr = "127.0.0.1:2947".parse().unwrap();

        let mut stream = TcpStream::connect(addr).await?;
        debug!("Connected to server: {}", stream.peer_addr()?);

        stream
            .write_all(gpsd_proto::ENABLE_WATCH_CMD.as_bytes())
            .await?;

        let mut stream_reader = BufReader::new(stream).lines();

        while let Some(line) = stream_reader.next_line().await.unwrap() {
            if let Ok(rd) = serde_json::from_str(&line) {
                match rd {
                    UnifiedResponse::Version(v) => {
                        if v.proto_major < gpsd_proto::PROTO_MAJOR_MIN {
                            panic!("Gpsd major version mismatch");
                        }
                        info!("Gpsd version {} connected", v.rev);
                    }
                    UnifiedResponse::Device(d) => debug!("Device {:?}", d),
                    UnifiedResponse::Tpv(t) => {
                        let data = parse_tpv(t);
                        if let Some(data) = data {
                            sender
                                .send(Some(data))
                                .expect("Couldn't send GpsData to main thread")
                        }
                    }
                    _ => {}
                }
            };
        }
        Ok(())
    } else {
        loop {
            sender
                .send(Some(GpsData {
                    timestamp: SystemTime::now()
                        .duration_since(UNIX_EPOCH)
                        .expect("Time went backwards.")
                        .as_secs(),
                    lat: Some(0_f64),
                    lon: Some(0_f64),
                }))
                .expect("Couldn't send GpsData to main thread");
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }
        #[allow(unreachable_code)]
        Ok(())
    }
}

fn parse_tpv(t: Tpv) -> Option<GpsData> {
    if let Some(time) = t.time {
        let timestamp = DateTime::parse_from_rfc3339(&time)
            .unwrap()
            .timestamp()
            .unsigned_abs();
        let lat = t.lat;
        let lon = t.lon;
        trace!("TPV: t: {}, lat: {:?}, lon: {:?}", timestamp, lat, lon);
        Some(GpsData {
            timestamp,
            lat,
            lon,
        })
    } else {
        None
    }
}
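As a debugging aside (not something the tool itself uses), the raw gpsd stream this module parses can be inspected with `gpspipe` from the gpsd-clients package, which prints the same watcher JSON on port 2947:

```sh
gpspipe -w | grep '"class":"TPV"'   # show only the TPV (position/time) messages
```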
793
5g-uulm-network-monitoring/src/main.rs
Normal file
793
5g-uulm-network-monitoring/src/main.rs
Normal file
@@ -0,0 +1,793 @@
|
||||
#[cfg(feature = "throughput")]
|
||||
use byte_unit::AdjustedByte;
|
||||
#[cfg(feature = "rtt")]
|
||||
use chrono::Duration;
|
||||
use clap::Parser;
|
||||
use rocket::routes;
|
||||
use std::{
|
||||
collections::BTreeMap,
|
||||
io::Error,
|
||||
sync::{Arc, Condvar, Mutex},
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
use tokio::{fs::File, io::AsyncWriteExt, sync::mpsc};
|
||||
use tracing::{debug, error, info, trace};
|
||||
|
||||
use crate::cli::Cli;
|
||||
use crate::endpoints::{get_counter, start_demo};
|
||||
|
||||
#[cfg(feature = "throughput")]
|
||||
mod bps;
|
||||
mod cli;
|
||||
mod endpoints;
|
||||
#[cfg(feature = "throughput")]
|
||||
mod ffmpeg;
|
||||
mod gps;
|
||||
#[cfg(feature = "rtt")]
|
||||
mod rttps;
|
||||
|
||||
/// The maximum length of a entry/line in the csv file
|
||||
const MAX_CSV_ENTRY_LENGTH: usize = 55; // 55 is the realistic upper bound 100 to be safe
|
||||
|
||||
/// The buffer that stores the data entries before they are sent out to the http endpoint
|
||||
const ENTRIES_BUFFER_LENGTH: usize = 100;
|
||||
|
||||
type CsvEntries = [CsvEntry; ENTRIES_BUFFER_LENGTH];
|
||||
type CsvEntry = [char; MAX_CSV_ENTRY_LENGTH];
|
||||
|
||||
pub type SharedCounter = Arc<(Mutex<State>, Condvar)>;
|
||||
|
||||
/// The state of the app, specifically used for the API endpoint
|
||||
#[cfg(all(feature = "throughput", feature = "rtt"))]
|
||||
#[derive(Debug)]
|
||||
pub struct State {
|
||||
// Whether program should be started
|
||||
started: bool,
|
||||
// To configure IP of the endpoint that should receive the collected data
|
||||
endpoint_ip: Option<Vec<String>>,
|
||||
// To configure IP of the ping-target after starting
|
||||
ping_ip: Option<String>,
|
||||
// To configure IP of the stream url for ffmpeg
|
||||
stream_url: Option<String>,
|
||||
// Amount of counted data packages
|
||||
counter: usize,
|
||||
// Time of last check on endpoint
|
||||
last_check: u64,
|
||||
// Push Data
|
||||
entries: CsvEntries,
|
||||
// Amount of counted data packages
|
||||
entries_counter: usize,
|
||||
}
|
||||
|
||||
/// The state of the app, specifically used for the API endpoint
|
||||
#[cfg(all(not(feature = "throughput"), feature = "rtt"))]
|
||||
#[derive(Debug)]
|
||||
pub struct State {
|
||||
// Whether program should be started
|
||||
started: bool,
|
||||
// To configure IP of the endpoint that should receive the collected data
|
||||
endpoint_ip: Option<Vec<String>>,
|
||||
// To configure IP of the ping-target after starting
|
||||
ping_ip: Option<String>,
|
||||
// Amount of counted data packages
|
||||
counter: usize,
|
||||
// Time of last check on endpoint
|
||||
last_check: u64,
|
||||
// Push Data
|
||||
entries: CsvEntries,
|
||||
// Amount of counted data packages
|
||||
entries_counter: usize,
|
||||
}
|
||||
|
||||
/// The state of the app, specifically used for the API endpoint
|
||||
#[cfg(all(feature = "throughput", not(feature = "rtt")))]
|
||||
#[derive(Debug)]
|
||||
pub struct State {
|
||||
// Whether program should be started
|
||||
started: bool,
|
||||
// To configure IP of the endpoint that should receive the collected data
|
||||
endpoint_ip: Option<Vec<String>>,
|
||||
// To configure IP of the stream url for ffmpeg
|
||||
stream_url: Option<String>,
|
||||
// Amount of counted data packages
|
||||
counter: usize,
|
||||
// Time of last check on endpoint
|
||||
last_check: u64,
|
||||
// Push Data
|
||||
entries: CsvEntries,
|
||||
// Amount of counted data packages
|
||||
entries_counter: usize,
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "throughput", feature = "rtt"))]
impl Default for State {
    fn default() -> Self {
        State {
            started: false,
            endpoint_ip: None,
            ping_ip: None,
            stream_url: None,
            counter: 0,
            last_check: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            entries: [[' '; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH],
            entries_counter: 0,
        }
    }
}

#[cfg(all(not(feature = "throughput"), feature = "rtt"))]
impl Default for State {
    fn default() -> Self {
        State {
            started: false,
            endpoint_ip: None,
            ping_ip: None,
            counter: 0,
            last_check: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            entries: [[' '; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH],
            entries_counter: 0,
        }
    }
}

#[cfg(all(feature = "throughput", not(feature = "rtt")))]
impl Default for State {
    fn default() -> Self {
        State {
            started: false,
            endpoint_ip: None,
            stream_url: None,
            counter: 0,
            last_check: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            entries: [[' '; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH],
            entries_counter: 0,
        }
    }
}

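// A `DataMsg` pairs a Unix timestamp (in seconds) with the measurements collected for
// that second. Like `State`, `DataEntry` exists in one variant per feature combination.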
#[derive(Debug)]
struct DataMsg {
    timestamp: u64,
    entry: DataEntry,
}

#[derive(Debug)]
#[cfg(all(feature = "throughput", feature = "rtt"))]
struct DataEntry {
    lat: Option<f64>,
    lon: Option<f64>,
    gps_count: u64,
    byte: Option<AdjustedByte>,
    rtt: Option<Duration>,
}

#[derive(Debug)]
#[cfg(all(not(feature = "throughput"), feature = "rtt"))]
struct DataEntry {
    lat: Option<f64>,
    lon: Option<f64>,
    gps_count: u64,
    rtt: Option<Duration>,
}

#[derive(Debug)]
#[cfg(all(feature = "throughput", not(feature = "rtt")))]
struct DataEntry {
    lat: Option<f64>,
    lon: Option<f64>,
    gps_count: u64,
    byte: Option<AdjustedByte>,
}

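// `combine` merges two entries that share a timestamp: latitudes/longitudes are summed
// (and later divided by `gps_count` to get an average position), the throughput value
// keeps whichever side is present, and RTTs are averaged.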
impl DataEntry {
    fn combine(&self, other: &DataEntry) -> DataEntry {
        // trace!("Compare: Self: {:?}, Other: {:?}", self, other);

        let lat = match (self.lat, other.lat) {
            (Some(lat1), Some(lat2)) => Some(lat1 + lat2),
            (None, Some(lat2)) => Some(lat2),
            (Some(lat1), None) => Some(lat1),
            (None, None) => None,
        };

        let lon = match (self.lon, other.lon) {
            (Some(lon1), Some(lon2)) => Some(lon1 + lon2),
            (None, Some(lon2)) => Some(lon2),
            (Some(lon1), None) => Some(lon1),
            (None, None) => None,
        };

        let gps_count = self.gps_count + other.gps_count;

        #[cfg(all(feature = "throughput", feature = "rtt"))]
        {
            let byte = self.byte.or(other.byte);

            let rtt = match (self.rtt, other.rtt) {
                (Some(rtt1), Some(rtt2)) => Some((rtt1 + rtt2) / 2),
                (Some(rtt1), _) => Some(rtt1),
                (None, Some(rtt2)) => Some(rtt2),
                (None, None) => None,
            };

            DataEntry {
                lat,
                lon,
                gps_count,
                byte,
                rtt,
            }
        }

        #[cfg(all(not(feature = "throughput"), feature = "rtt"))]
        {
            let rtt = match (self.rtt, other.rtt) {
                (Some(rtt1), Some(rtt2)) => Some((rtt1 + rtt2) / 2),
                (Some(rtt1), _) => Some(rtt1),
                (None, Some(rtt2)) => Some(rtt2),
                (None, None) => None,
            };

            DataEntry {
                lat,
                lon,
                gps_count,
                rtt,
            }
        }

        #[cfg(all(feature = "throughput", not(feature = "rtt")))]
        {
            let byte = self.byte.or(other.byte);

            DataEntry {
                lat,
                lon,
                gps_count,
                byte,
            }
        }
    }
}

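// main: start the Rocket API, block until a GET to /demo/start flips `started`, then
// spawn the measurement tasks and fan their results into a single channel that the
// CSV task consumes.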
#[tokio::main]
async fn main() -> Result<(), Error> {
    tracing_subscriber::fmt::init();

    let args = Cli::parse();

    #[cfg(feature = "throughput")]
    let pcap_file = &args.pcap;

    let state: SharedCounter = Arc::new((Mutex::new(State::default()), Condvar::new()));

    debug!("Starting API...");
    let state_api = state.clone();
    let api_handler = rocket::build()
        .mount("/", routes![get_counter])
        .mount("/", routes![start_demo])
        .manage(state_api)
        .launch();

    let _api_join_handle = tokio::spawn(api_handler);
    {
        info!("Waiting for GET to /demo/start...");
        let state_started = state.clone();
        let (lock, cvar) = &*state_started;
        let mut started = lock.lock().unwrap();
        while !started.started {
            started = cvar.wait(started).unwrap();
        }
    }

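    // Upload endpoints come from the API-provided state if set, otherwise from
    // `args.endpoint`; the unwrap panics if neither is configured.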
    let endpoint_ip: Vec<String> = {
        let state_endpoint = state.clone();
        let (lock, _) = &*state_endpoint;
        let local_state = lock.lock().unwrap();
        let e = if let Some(endpoint) = args.endpoint {
            local_state.endpoint_ip.clone().unwrap_or(vec![endpoint])
        } else {
            local_state.endpoint_ip.clone().unwrap()
        };
        info!("Endpoint to upload data to is: {:?}", e);
        e
    };

    debug!("Creating mpscs...");
    let (gps_sender, mut gps_receiver) = mpsc::unbounded_channel();

    #[cfg(feature = "rtt")]
    let (rttps_sender, mut rttps_receiver) = mpsc::unbounded_channel();

    #[cfg(feature = "rtt")]
    let ping_ip: String = {
        let state_endpoint = state.clone();
        let (lock, _) = &*state_endpoint;
        let local_state = lock.lock().unwrap();
        let p = local_state.ping_ip.clone().or(args.target).unwrap();
        info!("Endpoint to measure latency at: {}", p);
        p
    };

    #[cfg(feature = "throughput")]
    let (bps_sender, mut bps_receiver) = mpsc::unbounded_channel();

    #[cfg(feature = "throughput")]
    let stream_url: String = {
        let state_endpoint = state.clone();
        let (lock, _) = &*state_endpoint;
        let local_state = lock.lock().unwrap();
        let s = local_state.stream_url.clone().or(args.stream_url).unwrap();
        info!("Endpoint to stream video from: {}", s);
        s
    };

    #[cfg(feature = "throughput")]
    debug!("Running ffmpeg...");
    #[cfg(feature = "throughput")]
    let ffmpeg_handler = ffmpeg::run_ffmpeg(stream_url.clone());
    #[cfg(feature = "throughput")]
    debug!("Running bps...");
    #[cfg(feature = "throughput")]
    let bps_handler = bps::run_bandwidth_eval(pcap_file, bps_sender);

    // Everything below only runs after the /demo/start request has arrived.
    debug!("Running gps...");
    let gps_handler = gps::run_gpsd_eval(gps_sender, args.dry_gps);
    #[cfg(feature = "rtt")]
    debug!("Running rttps...");
    #[cfg(feature = "rtt")]
    let rttps_handler = rttps::run_rtt_eval(rttps_sender, ping_ip);

    let (tx, mut rx) = mpsc::unbounded_channel();

    let gps_tx = tx.clone();

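    // One forwarding task per measurement source: each repacks its source-specific
    // message into a `DataMsg` and sends it into the common channel. The cfg'd variants
    // below only differ in which `DataEntry` fields exist.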
    #[cfg(all(feature = "throughput", feature = "rtt"))]
    let gps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = gps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("GpsData: {:?}", data);
                gps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: data.lat,
                            lon: data.lon,
                            gps_count: 1,
                            byte: None,
                            rtt: None,
                        },
                    })
                    .map_err(|err| error!("Failed to send data via GPS channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(all(not(feature = "throughput"), feature = "rtt"))]
    let gps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = gps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("GpsData: {:?}", data);
                gps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: data.lat,
                            lon: data.lon,
                            gps_count: 1,
                            rtt: None,
                        },
                    })
                    .map_err(|err| error!("Failed to send data via GPS channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(all(feature = "throughput", not(feature = "rtt")))]
    let gps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = gps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("GpsData: {:?}", data);
                gps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: data.lat,
                            lon: data.lon,
                            gps_count: 1,
                            byte: None,
                        },
                    })
                    .map_err(|err| error!("Failed to send data via GPS channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(feature = "throughput")]
    let bps_tx = tx.clone();

    #[cfg(all(feature = "throughput", feature = "rtt"))]
    let bps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = bps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("BPSData: {:?}", data);
                bps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: None,
                            lon: None,
                            gps_count: 0,
                            byte: Some(data.data),
                            rtt: None,
                        },
                    })
                    .map_err(|err| error!("Failed to send data via BPS channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(all(feature = "throughput", not(feature = "rtt")))]
    let bps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = bps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("BPSData: {:?}", data);
                bps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: None,
                            lon: None,
                            gps_count: 0,
                            byte: Some(data.data),
                        },
                    })
                    .map_err(|err| error!("Failed to send data via BPS channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(feature = "rtt")]
    let rttps_tx = tx.clone();

    #[cfg(all(feature = "throughput", feature = "rtt"))]
    let rttps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = rttps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("RTTps: {:?}", data);
                rttps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: None,
                            lon: None,
                            gps_count: 0,
                            byte: None,
                            rtt: Some(data.rtt),
                        },
                    })
                    .map_err(|err| error!("Failed to send data via RTTps channel: {}", err))
                    .ok();
            }
        }
    });

    #[cfg(all(not(feature = "throughput"), feature = "rtt"))]
    let rttps_channel_handler = tokio::spawn(async move {
        while let Some(msg) = rttps_receiver.recv().await {
            if let Some(data) = msg {
                debug!("RTTps: {:?}", data);
                rttps_tx
                    .send(DataMsg {
                        timestamp: data.timestamp,
                        entry: DataEntry {
                            lat: None,
                            lon: None,
                            gps_count: 0,
                            rtt: Some(data.rtt),
                        },
                    })
                    .map_err(|err| error!("Failed to send data via RTTps channel: {}", err))
                    .ok();
            }
        }
    });

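    // The CSV task aggregates per-second measurements in a BTreeMap, appends completed
    // rows to the output file, buffers them in the shared state, and POSTs the buffered
    // batch to every configured endpoint once the buffer is full.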
    let mut output_file = File::create(&args.out).await?;
    let mut entries: BTreeMap<u64, DataEntry> = BTreeMap::new();
    let client = reqwest::Client::new();

    let state_csv = state.clone();

    let csv_handler = tokio::spawn(async move {
        while let Some(msg) = rx.recv().await {
            let key = msg.timestamp;
            if entries.contains_key(&key) {
                let former_entry = entries.get(&key).unwrap();
                let new_entry = &msg.entry;
                let combined_entry = former_entry.combine(new_entry);
                entries.insert(key, combined_entry);
            } else {
                entries.insert(key, msg.entry);
            }
            // Write the entry to the CSV file once it is complete
            let entry = entries.get(&key).unwrap();

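            // Three near-identical flush paths follow, one per feature combination;
            // they differ only in which columns are written to the CSV row.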
            #[cfg(all(feature = "throughput", feature = "rtt"))]
            if let (Some(lat), Some(lon), Some(byte), Some(rtt)) =
                (entry.lat, entry.lon, entry.byte, entry.rtt)
            {
                let rtt_ns = rtt.num_nanoseconds().unwrap();
                let rtt_string = format!("{rtt_ns} ns");
                let csv_entry = format!(
                    "{},{:2.8},{:2.8},{},{}\n",
                    &key,
                    lat / (entry.gps_count as f64),
                    lon / (entry.gps_count as f64),
                    byte,
                    rtt_string
                );

                info!("Writing data: {}", &csv_entry.trim());
                output_file.write_all(csv_entry.as_bytes()).await.unwrap();
                let mut char_array: [char; MAX_CSV_ENTRY_LENGTH] = [' '; MAX_CSV_ENTRY_LENGTH];
                // Convert the String into a Vec<char>
                let char_vec: Vec<char> = csv_entry.chars().collect();
                let len = char_vec.len().min(MAX_CSV_ENTRY_LENGTH);
                {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    let counter = local_state.entries_counter;

                    if counter < ENTRIES_BUFFER_LENGTH {
                        char_array[..len].copy_from_slice(&char_vec[..len]);
                        local_state.entries[counter] = char_array;
                        local_state.counter += 1;
                        local_state.entries_counter += 1;
                    }
                }
                let request_body: Option<String> = {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    if local_state.entries_counter >= ENTRIES_BUFFER_LENGTH {
                        let body = local_state
                            .entries
                            .iter()
                            .map(|r| r.iter().collect::<String>().trim().to_string())
                            .filter(|l| !l.is_empty())
                            .collect::<Vec<String>>()
                            .join("\n");
                        {
                            local_state.entries_counter = 0;
                            local_state.entries =
                                [[' '; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH];
                        }
                        info!("Sending {} to {:?}", body.clone(), endpoint_ip);
                        Some(body)
                    } else {
                        info!("counter: {}", local_state.entries_counter);
                        None
                    }
                };
                if let Some(rb) = request_body {
                    info!("Trying to send data...");
                    for e in endpoint_ip.clone().iter() {
                        if let Ok(response) = client.post(e.clone()).body(rb.clone()).send().await {
                            info!(
                                "Successfully sent data to {}. Response: {:?}",
                                e.clone(),
                                response
                            );
                        } else {
                            error!("Couldn't send data to {}", e.clone());
                        }
                    }
                }
            } else {
                trace!(
                    "Building data: {{{}: {:?}}} (unfinished)",
                    &key,
                    entries.get(&key)
                );
            }

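            // Same flow as above, but without the throughput column. Note that this
            // variant rotates the buffer by one slot after sending instead of clearing it.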
            #[cfg(all(not(feature = "throughput"), feature = "rtt"))]
            if let (Some(lat), Some(lon), Some(rtt)) = (entry.lat, entry.lon, entry.rtt) {
                let rtt_ns = rtt.num_nanoseconds().unwrap();
                let rtt_string = format!("{rtt_ns} ns");
                let csv_entry = format!(
                    "{},{:2.8},{:2.8},{}\n",
                    &key,
                    lat / (entry.gps_count as f64),
                    lon / (entry.gps_count as f64),
                    rtt_string
                );

                info!("Writing data: {}", &csv_entry.trim());
                output_file.write_all(csv_entry.as_bytes()).await.unwrap();
                let mut char_array: [char; MAX_CSV_ENTRY_LENGTH] = [' '; MAX_CSV_ENTRY_LENGTH];
                // Convert the String into a Vec<char>
                let char_vec: Vec<char> = csv_entry.chars().collect();
                let len = char_vec.len().min(MAX_CSV_ENTRY_LENGTH);
                {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    let counter = local_state.entries_counter;

                    if counter < ENTRIES_BUFFER_LENGTH {
                        char_array[..len].copy_from_slice(&char_vec[..len]);
                        local_state.entries[counter] = char_array;
                        local_state.counter += 1;
                        local_state.entries_counter += 1;
                    }
                }
                let request_body: Option<String> = {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    if local_state.entries_counter >= ENTRIES_BUFFER_LENGTH {
                        let body = local_state
                            .entries
                            .iter()
                            .map(|r| r.iter().collect::<String>().trim().to_string())
                            .filter(|l| !l.is_empty())
                            .collect::<Vec<String>>()
                            .join("\n");
                        let mut new_entries: [[char; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH] =
                            local_state.entries;
                        new_entries.copy_within(1.., 0);
                        new_entries[ENTRIES_BUFFER_LENGTH - 1] = [' '; MAX_CSV_ENTRY_LENGTH];
                        {
                            local_state.entries_counter = ENTRIES_BUFFER_LENGTH - 1;
                            local_state.entries = new_entries;
                        }
                        info!("Sending {} to {:?}", body.clone(), endpoint_ip);
                        Some(body)
                    } else {
                        info!("counter: {}", local_state.entries_counter);
                        None
                    }
                };
                if let Some(rb) = request_body {
                    info!("Trying to send data...");
                    for e in endpoint_ip.clone().iter() {
                        if let Ok(response) = client.post(e.clone()).body(rb.clone()).send().await {
                            info!(
                                "Successfully sent data to {}. Response: {:?}",
                                e.clone(),
                                response
                            );
                        } else {
                            error!("Couldn't send data to {}", e.clone());
                        }
                    }
                }
            } else {
                trace!(
                    "Building data: {{{}: {:?}}} (unfinished)",
                    &key,
                    entries.get(&key)
                );
            }

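            // Throughput-only flush path; this variant writes neither the timestamp
            // nor an RTT column.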
            #[cfg(all(feature = "throughput", not(feature = "rtt")))]
            if let (Some(lat), Some(lon), Some(byte)) = (entry.lat, entry.lon, entry.byte) {
                let csv_entry = format!(
                    "{:2.8},{:2.8},{}\n",
                    lat / (entry.gps_count as f64),
                    lon / (entry.gps_count as f64),
                    byte,
                );

                info!("Writing data: {}", &csv_entry.trim());
                output_file.write_all(csv_entry.as_bytes()).await.unwrap();
                let mut char_array: [char; MAX_CSV_ENTRY_LENGTH] = [' '; MAX_CSV_ENTRY_LENGTH];
                // Convert the String into a Vec<char>
                let char_vec: Vec<char> = csv_entry.chars().collect();
                let len = char_vec.len().min(MAX_CSV_ENTRY_LENGTH);
                {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    let counter = local_state.entries_counter;

                    if counter < ENTRIES_BUFFER_LENGTH {
                        char_array[..len].copy_from_slice(&char_vec[..len]);
                        local_state.entries[counter] = char_array;
                        local_state.counter += 1;
                        local_state.entries_counter += 1;
                    }
                }
                let request_body: Option<String> = {
                    let (local_state, _) = &*state_csv;
                    let mut local_state = local_state.lock().unwrap();
                    if local_state.entries_counter >= ENTRIES_BUFFER_LENGTH {
                        let body = local_state
                            .entries
                            .iter()
                            .map(|r| r.iter().collect::<String>().trim().to_string())
                            .filter(|l| !l.is_empty())
                            .collect::<Vec<String>>()
                            .join("\n");
                        {
                            local_state.entries_counter = 0;
                            local_state.entries =
                                [[' '; MAX_CSV_ENTRY_LENGTH]; ENTRIES_BUFFER_LENGTH];
                        }
                        info!("Sending {} to {:?}", body.clone(), endpoint_ip);
                        Some(body)
                    } else {
                        info!("counter: {}", local_state.entries_counter);
                        None
                    }
                };
                if let Some(rb) = request_body {
                    info!("Trying to send data...");
                    for e in endpoint_ip.clone().iter() {
                        if let Ok(response) = client.post(e.clone()).body(rb.clone()).send().await {
                            info!(
                                "Successfully sent data to {}. Response: {:?}",
                                e.clone(),
                                response
                            );
                        } else {
                            error!("Couldn't send data to {}", e.clone());
                        }
                    }
                }
            } else {
                trace!(
                    "Building data: {{{}: {:?}}} (unfinished)",
                    &key,
                    entries.get(&key)
                );
            }
        }
    });

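    // Wait for every spawned task; which handles exist depends on the enabled features.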
    #[cfg(all(feature = "throughput", feature = "rtt"))]
    let _handler = tokio::join!(
        ffmpeg_handler,
        bps_handler,
        gps_handler,
        rttps_handler,
        gps_channel_handler,
        bps_channel_handler,
        rttps_channel_handler,
        csv_handler,
    );

    #[cfg(all(feature = "throughput", not(feature = "rtt")))]
    let _handler = tokio::join!(
        ffmpeg_handler,
        bps_handler,
        gps_handler,
        gps_channel_handler,
        bps_channel_handler,
        csv_handler,
    );

    #[cfg(all(not(feature = "throughput"), feature = "rtt"))]
    let _handler = tokio::join!(
        gps_handler,
        rttps_handler,
        gps_channel_handler,
        rttps_channel_handler,
        csv_handler,
    );

    Ok(())
}
80
5g-uulm-network-monitoring/src/rttps.rs
Normal file
@@ -0,0 +1,80 @@
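//! Round-trip-time measurement: spawns the system `ping` and parses its timestamped
//! output into `RTTData` samples.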
use std::{error::Error, process::Stdio};

use chrono::Duration;
use tokio::{
    io::{AsyncBufReadExt, BufReader},
    process::Command,
    sync::mpsc::UnboundedSender,
};
use tracing::{debug, trace};

#[derive(Debug)]
pub struct RTTps {
    pub timestamp: u64,
    pub rtt: Duration,
}

impl RTTps {
    fn new() -> Self {
        RTTps {
            timestamp: 0,
            rtt: Duration::min_value(),
        }
    }

    fn parse(&mut self, line: &str) -> Option<RTTData> {
        debug!(?line);
        if line.contains("time=") {
            let start = line.find('[')?;
            let end = line.find(']')?;
            let timestamp_str = &line[start + 1..end];

            let start = line.find("time=")?;
            let end = line.find(" ms")?;
            let rtt_str = &line[start + 5..end];

            let timestamp = timestamp_str.split('.').next()?.parse::<u64>().ok()?;
            let rtt_mus = rtt_str.parse::<f64>().ok()? * 1000f64;
            let rtt = Duration::microseconds(rtt_mus.round() as i64);

            Some(RTTData { timestamp, rtt })
        } else {
            None
        }
    }
}

#[derive(Debug)]
pub struct RTTData {
    pub timestamp: u64,
    pub rtt: Duration,
}

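// Runs `ping -D -i .2 <target>` and forwards each parsed sample to `sender`. With `-D`,
// ping prefixes every reply with a Unix timestamp, e.g. (roughly)
// `[1690531200.123456] 64 bytes from 10.0.0.1: icmp_seq=1 ttl=57 time=12.3 ms`,
// from which `parse` extracts the timestamp and the `time=` value.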
pub async fn run_rtt_eval(
    sender: UnboundedSender<Option<RTTData>>,
    ping_target: String,
) -> Result<(), Box<dyn Error>> {
    let mut ping_child = Command::new("ping")
        .arg("-D")
        .arg("-i")
        .arg(".2")
        .arg(ping_target.trim())
        .stdout(Stdio::piped())
        .spawn()?;

    let mut rttps = RTTps::new();
    let ping_stdout = ping_child.stdout.take().unwrap();

    let ping_handler = tokio::spawn(async move {
        let mut reader = BufReader::new(ping_stdout).lines();
        while let Some(line) = reader.next_line().await.unwrap() {
            let data = rttps.parse(&line);
            trace!("{:?}", data);
            sender.send(data).expect("Couldn't send RTTData");
        }
    });

    ping_handler.await.unwrap();

    Ok(())
}