commit
e3397c3a79
63 changed files with 2440 additions and 1230 deletions
64
.github/workflows/docker_build_push.yml
vendored
64
.github/workflows/docker_build_push.yml
vendored
|
|
@ -6,11 +6,12 @@ on:
|
|||
- main
|
||||
- develop
|
||||
|
||||
env:
|
||||
REGISTRY_IMAGE: jqtype/rpxy
|
||||
|
||||
jobs:
|
||||
build_and_push:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
IMAGE_NAME: rpxy
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
|
|
@ -21,6 +22,15 @@ jobs:
|
|||
- name: GitHub Environment
|
||||
run: echo "BRANCH=${GITHUB_REF##*/}" >> $GITHUB_ENV
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: ${{ env.REGISTRY_IMAGE }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
|
|
@ -30,32 +40,64 @@ jobs:
|
|||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Release build and push x86_64
|
||||
- name: Release build and push
|
||||
if: ${{ env.BRANCH == 'main' }}
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:latest
|
||||
file: ./docker/Dockerfile.amd64
|
||||
${{ env.REGISTRY_IMAGE }}:latest
|
||||
file: ./docker/Dockerfile
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
||||
- name: Release build and push x86_64-slim
|
||||
- name: Release build and push slim
|
||||
if: ${{ env.BRANCH == 'main' }}
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:slim, ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:latest-slim
|
||||
file: ./docker/Dockerfile.amd64-slim
|
||||
${{ env.REGISTRY_IMAGE }}:slim, ${{ env.REGISTRY_IMAGE }}:latest-slim
|
||||
build-contexts: |
|
||||
messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
|
||||
messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
|
||||
file: ./docker/Dockerfile.slim
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
||||
- name: Nightly build and push x86_64
|
||||
- name: Nightly build and push
|
||||
if: ${{ env.BRANCH == 'develop' }}
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:nightly
|
||||
file: ./docker/Dockerfile.amd64
|
||||
${{ env.REGISTRY_IMAGE }}:nightly
|
||||
file: ./docker/Dockerfile
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
||||
- name: Nightly build and push slim
|
||||
if: ${{ env.BRANCH == 'develop' }}
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.REGISTRY_IMAGE }}:nightly-slim
|
||||
build-contexts: |
|
||||
messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
|
||||
messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
|
||||
file: ./docker/Dockerfile.slim
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
|
|
|||
12
CHANGELOG.md
12
CHANGELOG.md
|
|
@ -2,6 +2,18 @@
|
|||
|
||||
## 0.4.0 (unreleased)
|
||||
|
||||
### Improvement
|
||||
|
||||
- Feat: Continuous watching on a specified config file and hot-reloading the file when updated
|
||||
- Feat: Enabled to specify TCP listen backlog in the config file
|
||||
- Feat: Add a GitHub action to build `arm64` docker image.
|
||||
- Bench: Add benchmark result on `amd64` architecture.
|
||||
- Refactor: Split `rpxy` into `rpxy-lib` and `rpxy-bin`
|
||||
- Refactor: lots of minor improvements
|
||||
|
||||
### Bugfix
|
||||
|
||||
- Fix bug to apply default backend application
|
||||
|
||||
## 0.3.0
|
||||
|
||||
|
|
|
|||
86
Cargo.toml
86
Cargo.toml
|
|
@ -1,87 +1,7 @@
|
|||
[package]
|
||||
name = "rpxy"
|
||||
version = "0.3.0"
|
||||
authors = ["Jun Kurihara"]
|
||||
homepage = "https://github.com/junkurihara/rust-rpxy"
|
||||
repository = "https://github.com/junkurihara/rust-rpxy"
|
||||
license = "MIT"
|
||||
readme = "README.md"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = ["http3", "sticky-cookie"]
|
||||
http3 = ["quinn", "h3", "h3-quinn"]
|
||||
sticky-cookie = ["base64", "sha2", "chrono"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.71"
|
||||
clap = { version = "4.3.4", features = ["std", "cargo", "wrap_help"] }
|
||||
rand = "0.8.5"
|
||||
toml = { version = "0.7.4", default-features = false, features = ["parse"] }
|
||||
rustc-hash = "1.1.0"
|
||||
serde = { version = "1.0.164", default-features = false, features = ["derive"] }
|
||||
bytes = "1.4.0"
|
||||
thiserror = "1.0.40"
|
||||
x509-parser = "0.15.0"
|
||||
derive_builder = "0.12.0"
|
||||
futures = { version = "0.3.28", features = ["alloc", "async-await"] }
|
||||
tokio = { version = "1.28.2", default-features = false, features = [
|
||||
"net",
|
||||
"rt-multi-thread",
|
||||
"parking_lot",
|
||||
"time",
|
||||
"sync",
|
||||
"macros",
|
||||
] }
|
||||
|
||||
# http and tls
|
||||
hyper = { version = "0.14.26", default-features = false, features = [
|
||||
"server",
|
||||
"http1",
|
||||
"http2",
|
||||
"stream",
|
||||
] }
|
||||
hyper-rustls = { version = "0.24.0", default-features = false, features = [
|
||||
"tokio-runtime",
|
||||
"webpki-tokio",
|
||||
"http1",
|
||||
"http2",
|
||||
] }
|
||||
tokio-rustls = { version = "0.24.1", features = ["early-data"] }
|
||||
rustls-pemfile = "1.0.2"
|
||||
rustls = { version = "0.21.2", default-features = false }
|
||||
webpki = "0.22.0"
|
||||
|
||||
# logging
|
||||
tracing = { version = "0.1.37" }
|
||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||
|
||||
# http/3
|
||||
# quinn = { version = "0.9.3", optional = true }
|
||||
quinn = { path = "./quinn/quinn", optional = true } # Tentative to support rustls-0.21
|
||||
h3 = { path = "./h3/h3/", optional = true }
|
||||
# h3-quinn = { path = "./h3/h3-quinn/", optional = true }
|
||||
h3-quinn = { path = "./h3-quinn/", optional = true } # Tentative to support rustls-0.21
|
||||
|
||||
# cookie handling for sticky cookie
|
||||
chrono = { version = "0.4.26", default-features = false, features = [
|
||||
"unstable-locales",
|
||||
"alloc",
|
||||
"clock",
|
||||
], optional = true }
|
||||
base64 = { version = "0.21.2", optional = true }
|
||||
sha2 = { version = "0.10.7", default-features = false, optional = true }
|
||||
|
||||
|
||||
[target.'cfg(not(target_env = "msvc"))'.dependencies]
|
||||
tikv-jemallocator = "0.5.0"
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
[workspace]
|
||||
|
||||
members = ["rpxy-bin", "rpxy-lib"]
|
||||
exclude = ["quinn", "h3-quinn", "h3"]
|
||||
|
||||
[profile.release]
|
||||
codegen-units = 1
|
||||
|
|
|
|||
19
README.md
19
README.md
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
[](LICENSE)
|
||||

|
||||

|
||||

|
||||

|
||||
[](https://hub.docker.com/r/jqtype/rpxy)
|
||||
|
||||
|
|
@ -48,6 +48,20 @@ You can run `rpxy` with a configuration file like
|
|||
% ./target/release/rpxy --config config.toml
|
||||
```
|
||||
|
||||
If you specify `-w` option along with the config file path, `rpxy` tracks the change of `config.toml` in the real-time manner and apply the change immediately without restarting the process.
|
||||
|
||||
The full help messages are given follows.
|
||||
|
||||
```bash:
|
||||
usage: rpxy [OPTIONS] --config <FILE>
|
||||
|
||||
Options:
|
||||
-c, --config <FILE> Configuration file path like ./config.toml
|
||||
-w, --watch Activate dynamic reloading of the config file via continuous monitoring
|
||||
-h, --help Print help
|
||||
-V, --version Print version
|
||||
```
|
||||
|
||||
That's all!
|
||||
|
||||
## Basic Configuration
|
||||
|
|
@ -217,13 +231,14 @@ Since it is currently a work-in-progress project, we are frequently adding new o
|
|||
|
||||
## Using Docker Image
|
||||
|
||||
You can also use [docker image](https://hub.docker.com/r/jqtype/rpxy) instead of directly executing the binary. There are only two docker-specific environment variables.
|
||||
You can also use [docker image](https://hub.docker.com/r/jqtype/rpxy) instead of directly executing the binary. There are only several docker-specific environment variables.
|
||||
|
||||
- `HOST_USER` (default: `user`): User name executing `rpxy` inside the container.
|
||||
- `HOST_UID` (default: `900`): `UID` of `HOST_USER`.
|
||||
- `HOST_GID` (default: `900`): `GID` of `HOST_USER`
|
||||
- `LOG_LEVEL=debug|info|warn|error`: Log level
|
||||
- `LOG_TO_FILE=true|false`: Enable logging to the log file `/rpxy/log/rpxy.log` using `logrotate`. You should mount `/rpxy/log` via docker volume option if enabled. The log dir and file will be owned by the `HOST_USER` with `HOST_UID:HOST_GID` on the host machine. Hence, `HOST_USER`, `HOST_UID` and `HOST_GID` should be the same as ones of the user who executes the `rpxy` docker container on the host.
|
||||
- `WATCH=true|false` (default: `false`): Activate continuous watching of the config file if true.
|
||||
|
||||
Other than them, all you need is to mount your `config.toml` as `/etc/rpxy.toml` and certificates/private keys as you like through the docker volume option. See [`docker/docker-compose.yml`](./docker/docker-compose.yml) for the detailed configuration. Note that the file path of keys and certificates must be ones in your docker container.
|
||||
|
||||
|
|
|
|||
10
TODO.md
10
TODO.md
|
|
@ -4,12 +4,20 @@
|
|||
- More flexible option for rewriting path
|
||||
- Refactoring
|
||||
|
||||
Split `backend` module into three parts
|
||||
- Split `backend` module into three parts
|
||||
|
||||
- backend(s): struct containing info, defined for each served domain with multiple paths
|
||||
- upstream/upstream group: information on targeted destinations for each set of (a domain + a path)
|
||||
- load-balance: load balancing mod for a domain + path
|
||||
|
||||
- Done in v0.4.0:
|
||||
~~Split `rpxy` source codes into `rpxy-lib` and `rpxy-bin` to make the core part (reverse proxy) isolated from the misc part like toml file loader. This is in order to make the configuration-related part more flexible (related to [#33](https://github.com/junkurihara/rust-rpxy/issues/33))~~
|
||||
|
||||
- Cache option for the response with `Cache-Control: public` header directive ([#55](https://github.com/junkurihara/rust-rpxy/issues/55))
|
||||
- Consideration on migrating from `quinn` and `h3-quinn` to other QUIC implementations ([#57](https://github.com/junkurihara/rust-rpxy/issues/57))
|
||||
- Done in v0.4.0:
|
||||
~~Benchmark with other reverse proxy implementations like Sozu ([#58](https://github.com/junkurihara/rust-rpxy/issues/58)) Currently, Sozu can work only on `amd64` format due to its HTTP message parser limitation... Since the main developer have only `arm64` (Apple M1) laptops, so we should do that on VPS?~~
|
||||
|
||||
- Unit tests
|
||||
- Options to serve custom http_error page.
|
||||
- Prometheus metrics
|
||||
|
|
|
|||
246
bench/README.md
246
bench/README.md
|
|
@ -1,11 +1,26 @@
|
|||
# Sample Benchmark Result
|
||||
# Sample Benchmark Results
|
||||
|
||||
Using `rewrk` and Docker on a Macbook Pro 14 to simply measure the performance of several reverse proxy through HTTP1.1.
|
||||
This test simply measures the performance of several reverse proxy through HTTP/1.1 by the following command using [`rewrk`](https://github.com/lnx-search/rewrk).
|
||||
|
||||
```
|
||||
```sh:
|
||||
$ rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
|
||||
```
|
||||
|
||||
## Tests on `linux/arm64/v8`
|
||||
|
||||
Done at Jul. 15, 2023
|
||||
|
||||
### Environment
|
||||
|
||||
- `rpxy` commit id: `1da7e5bfb77d1ce4ee8d6cfc59b1c725556fc192`
|
||||
- Docker Desktop 4.21.1 (114176)
|
||||
- ReWrk 0.3.2
|
||||
- Macbook Pro '14 (2021, M1 Max, 64GB RAM)
|
||||
|
||||
The docker images of `nginx` and `caddy` for `linux/arm64/v8` are pulled from the official registry.
|
||||
|
||||
### Result for `rpxy`, `nginx` and `caddy`
|
||||
|
||||
```
|
||||
----------------------------
|
||||
Benchmark on rpxy
|
||||
|
|
@ -13,23 +28,23 @@ Beginning round 1...
|
|||
Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
26.81ms 11.96ms 2.96ms 226.04ms
|
||||
19.64ms 8.85ms 0.67ms 113.22ms
|
||||
Requests:
|
||||
Total: 285390 Req/Sec: 19032.01
|
||||
Total: 390078 Req/Sec: 26011.25
|
||||
Transfer:
|
||||
Total: 222.85 MB Transfer Rate: 14.86 MB/Sec
|
||||
Total: 304.85 MB Transfer Rate: 20.33 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 145.89ms |
|
||||
| 99% | 81.33ms |
|
||||
| 95% | 59.08ms |
|
||||
| 90% | 51.67ms |
|
||||
| 75% | 42.45ms |
|
||||
| 50% | 35.39ms |
|
||||
| 99.9% | 79.24ms |
|
||||
| 99% | 54.28ms |
|
||||
| 95% | 42.50ms |
|
||||
| 90% | 37.82ms |
|
||||
| 75% | 31.54ms |
|
||||
| 50% | 26.37ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
767 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
721 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
|
|
@ -38,23 +53,23 @@ Beginning round 1...
|
|||
Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
38.39ms 21.06ms 2.91ms 248.32ms
|
||||
33.26ms 15.18ms 1.40ms 118.94ms
|
||||
Requests:
|
||||
Total: 199210 Req/Sec: 13288.91
|
||||
Total: 230268 Req/Sec: 15356.08
|
||||
Transfer:
|
||||
Total: 161.46 MB Transfer Rate: 10.77 MB/Sec
|
||||
Total: 186.77 MB Transfer Rate: 12.46 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 164.33ms |
|
||||
| 99% | 121.55ms |
|
||||
| 95% | 96.43ms |
|
||||
| 90% | 85.05ms |
|
||||
| 75% | 67.80ms |
|
||||
| 50% | 53.85ms |
|
||||
| 99.9% | 99.91ms |
|
||||
| 99% | 83.74ms |
|
||||
| 95% | 70.67ms |
|
||||
| 90% | 64.03ms |
|
||||
| 75% | 54.32ms |
|
||||
| 50% | 45.19ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
736 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
677 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
|
|
@ -63,21 +78,186 @@ Beginning round 1...
|
|||
Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
83.17ms 73.71ms 1.24ms 734.67ms
|
||||
48.51ms 50.74ms 0.34ms 554.58ms
|
||||
Requests:
|
||||
Total: 91685 Req/Sec: 6114.05
|
||||
Total: 157239 Req/Sec: 10485.98
|
||||
Transfer:
|
||||
Total: 73.20 MB Transfer Rate: 4.88 MB/Sec
|
||||
Total: 125.99 MB Transfer Rate: 8.40 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 642.29ms |
|
||||
| 99% | 507.21ms |
|
||||
| 95% | 324.34ms |
|
||||
| 90% | 249.55ms |
|
||||
| 75% | 174.62ms |
|
||||
| 50% | 128.85ms |
|
||||
| 99.9% | 473.82ms |
|
||||
| 99% | 307.16ms |
|
||||
| 95% | 212.28ms |
|
||||
| 90% | 169.05ms |
|
||||
| 75% | 115.92ms |
|
||||
| 50% | 80.24ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
740 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
708 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
```
|
||||
|
||||
## Results on `linux/amd64`
|
||||
|
||||
Done at Jul. 24, 2023
|
||||
|
||||
### Environment
|
||||
|
||||
- `rpxy` commit id: `7c0945a5124418aa9a1024568c1989bb77cf312f`
|
||||
- Docker Desktop 4.21.1 (114176)
|
||||
- ReWrk 0.3.2 and Wrk 0.4.2
|
||||
- iMac '27 (2020, 10-Core Intel Core i9, 128GB RAM)
|
||||
|
||||
The docker images of `nginx` and `caddy` for `linux/amd64` were pulled from the official registry. For `Sozu`, the official docker image from its developers was still version 0.11.0 (currently the latest version is 0.15.2). So we built it by ourselves locally using the `Sozu`'s official [`Dockerfile`](https://github.com/sozu-proxy/sozu/blob/main/Dockerfile).
|
||||
|
||||
Also, when `Sozu` is configured as an HTTP reverse proxy, it cannot handle HTTP request messages emit from `ReWrk` due to hostname parsing errors though it can correctly handle messages dispatched from `curl` and browsers. So, we additionally test using [`Wrk`](https://github.com/wg/wrk) to examine `Sozu` with the following command.
|
||||
|
||||
```sh:
|
||||
$ wrk -c 512 -t 4 -d 15s http://localhost:8110
|
||||
```
|
||||
|
||||
<!-- ```
|
||||
ERROR Error connecting to backend: Could not get cluster id from request: Host not found: http://localhost:8110/: Hostname parsing failed for host http://localhost:8110/: Parsing Error: Error { input: [58, 47, 47, 108, 111, 99, 97, 108, 104, 111, 115, 116, 58, 56, 49, 49, 48, 47], code: Eof }
|
||||
``` -->
|
||||
|
||||
### Result
|
||||
|
||||
#### With ReWrk for `rpxy`, `nginx` and `caddy`
|
||||
|
||||
```
|
||||
----------------------------
|
||||
Benchmark [x86_64] with ReWrk
|
||||
----------------------------
|
||||
Benchmark on rpxy
|
||||
Beginning round 1...
|
||||
Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
20.37ms 8.95ms 1.63ms 160.27ms
|
||||
Requests:
|
||||
Total: 376345 Req/Sec: 25095.19
|
||||
Transfer:
|
||||
Total: 295.61 MB Transfer Rate: 19.71 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 112.50ms |
|
||||
| 99% | 61.33ms |
|
||||
| 95% | 44.26ms |
|
||||
| 90% | 38.74ms |
|
||||
| 75% | 32.00ms |
|
||||
| 50% | 26.82ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
626 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
Benchmark on nginx
|
||||
Beginning round 1...
|
||||
Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
23.45ms 12.42ms 1.18ms 154.44ms
|
||||
Requests:
|
||||
Total: 326685 Req/Sec: 21784.73
|
||||
Transfer:
|
||||
Total: 265.22 MB Transfer Rate: 17.69 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 96.85ms |
|
||||
| 99% | 73.93ms |
|
||||
| 95% | 57.57ms |
|
||||
| 90% | 50.36ms |
|
||||
| 75% | 40.57ms |
|
||||
| 50% | 32.70ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
657 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
Benchmark on caddy
|
||||
Beginning round 1...
|
||||
Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
|
||||
Latencies:
|
||||
Avg Stdev Min Max
|
||||
45.71ms 50.47ms 0.88ms 908.49ms
|
||||
Requests:
|
||||
Total: 166917 Req/Sec: 11129.80
|
||||
Transfer:
|
||||
Total: 133.77 MB Transfer Rate: 8.92 MB/Sec
|
||||
+ --------------- + --------------- +
|
||||
| Percentile | Avg Latency |
|
||||
+ --------------- + --------------- +
|
||||
| 99.9% | 608.92ms |
|
||||
| 99% | 351.18ms |
|
||||
| 95% | 210.56ms |
|
||||
| 90% | 162.68ms |
|
||||
| 75% | 106.97ms |
|
||||
| 50% | 73.90ms |
|
||||
+ --------------- + --------------- +
|
||||
|
||||
646 Errors: error shutting down connection: Socket is not connected (os error 57)
|
||||
|
||||
sleep 3 secs
|
||||
```
|
||||
|
||||
#### With Wrk for `rpxy`, `nginx`, `caddy` and `sozu`
|
||||
|
||||
```
|
||||
----------------------------
|
||||
Benchmark [x86_64] with Wrk
|
||||
----------------------------
|
||||
Benchmark on rpxy
|
||||
Running 15s test @ http://localhost:8080
|
||||
4 threads and 512 connections
|
||||
Thread Stats Avg Stdev Max +/- Stdev
|
||||
Latency 18.68ms 8.09ms 122.64ms 74.03%
|
||||
Req/Sec 6.95k 815.23 8.45k 83.83%
|
||||
414819 requests in 15.01s, 326.37MB read
|
||||
Socket errors: connect 0, read 608, write 0, timeout 0
|
||||
Requests/sec: 27627.79
|
||||
Transfer/sec: 21.74MB
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
Benchmark on nginx
|
||||
Running 15s test @ http://localhost:8090
|
||||
4 threads and 512 connections
|
||||
Thread Stats Avg Stdev Max +/- Stdev
|
||||
Latency 23.34ms 13.80ms 126.06ms 74.66%
|
||||
Req/Sec 5.71k 607.41 7.07k 73.17%
|
||||
341127 requests in 15.03s, 277.50MB read
|
||||
Socket errors: connect 0, read 641, write 0, timeout 0
|
||||
Requests/sec: 22701.54
|
||||
Transfer/sec: 18.47MB
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
Benchmark on caddy
|
||||
Running 15s test @ http://localhost:8100
|
||||
4 threads and 512 connections
|
||||
Thread Stats Avg Stdev Max +/- Stdev
|
||||
Latency 54.19ms 55.63ms 674.53ms 88.55%
|
||||
Req/Sec 2.92k 1.40k 5.57k 56.17%
|
||||
174748 requests in 15.03s, 140.61MB read
|
||||
Socket errors: connect 0, read 660, write 0, timeout 0
|
||||
Non-2xx or 3xx responses: 70
|
||||
Requests/sec: 11624.63
|
||||
Transfer/sec: 9.35MB
|
||||
|
||||
sleep 3 secs
|
||||
----------------------------
|
||||
Benchmark on sozu
|
||||
Running 15s test @ http://localhost:8110
|
||||
4 threads and 512 connections
|
||||
Thread Stats Avg Stdev Max +/- Stdev
|
||||
Latency 19.78ms 4.89ms 98.09ms 76.88%
|
||||
Req/Sec 6.49k 824.75 8.11k 76.17%
|
||||
387744 requests in 15.02s, 329.11MB read
|
||||
Socket errors: connect 0, read 647, write 0, timeout 0
|
||||
Requests/sec: 25821.93
|
||||
Transfer/sec: 21.92MB
|
||||
```
|
||||
|
|
|
|||
53
bench/bench.amd64.sh
Normal file
53
bench/bench.amd64.sh
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
#!/bin/sh
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark [x86_64] with ReWrk"
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on rpxy"
|
||||
rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on nginx"
|
||||
rewrk -c 512 -t 4 -d 15s -h http://localhost:8090 --pct
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on caddy"
|
||||
rewrk -c 512 -t 4 -d 15s -h http://localhost:8100 --pct
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark [x86_64] with Wrk"
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on rpxy"
|
||||
wrk -c 512 -t 4 -d 15s http://localhost:8080
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on nginx"
|
||||
wrk -c 512 -t 4 -d 15s http://localhost:8090
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on caddy"
|
||||
wrk -c 512 -t 4 -d 15s http://localhost:8100
|
||||
|
||||
echo "sleep 3 secs"
|
||||
sleep 3
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on sozu"
|
||||
wrk -c 512 -t 4 -d 15s http://localhost:8110
|
||||
|
|
@ -12,6 +12,8 @@
|
|||
# echo "Benchmark on caddy"
|
||||
# ab -c 100 -n 10000 http://127.0.0.1:8100/index.html
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark [Any Arch]"
|
||||
|
||||
echo "----------------------------"
|
||||
echo "Benchmark on rpxy"
|
||||
|
|
|
|||
96
bench/docker-compose.amd64.yml
Normal file
96
bench/docker-compose.amd64.yml
Normal file
|
|
@ -0,0 +1,96 @@
|
|||
version: "3"
|
||||
services:
|
||||
nginx:
|
||||
image: nginx:alpine
|
||||
container_name: backend-nginx
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- VIRTUAL_HOST=localhost
|
||||
- VIRTUAL_PORT=80
|
||||
expose:
|
||||
- 80
|
||||
# ports:
|
||||
# - 127.0.0.1:8888:80
|
||||
logging:
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
networks:
|
||||
bench-nw:
|
||||
ipv4_address: 192.168.100.100
|
||||
|
||||
rpxy-rp:
|
||||
image: jqtype/rpxy
|
||||
container_name: proxy-rpxy
|
||||
pull_policy: never
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: docker/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- LOG_LEVEL=info
|
||||
- LOG_TO_FILE=false
|
||||
ports:
|
||||
- 127.0.0.1:8080:8080
|
||||
tty: false
|
||||
volumes:
|
||||
- ./rpxy.toml:/etc/rpxy.toml:ro
|
||||
networks:
|
||||
bench-nw:
|
||||
|
||||
nginx-rp:
|
||||
image: nginx:alpine
|
||||
container_name: proxy-nginx
|
||||
ports:
|
||||
- 127.0.0.1:8090:80
|
||||
restart: unless-stopped
|
||||
tty: false
|
||||
privileged: true
|
||||
volumes:
|
||||
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
|
||||
- /var/run/docker.sock:/tmp/docker.sock:ro
|
||||
logging:
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
networks:
|
||||
bench-nw:
|
||||
|
||||
caddy-rp:
|
||||
image: caddy:2
|
||||
container_name: proxy-caddy
|
||||
ports:
|
||||
- 127.0.0.1:8100:80
|
||||
restart: unless-stopped
|
||||
tty: false
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||||
networks:
|
||||
bench-nw:
|
||||
|
||||
# Sozu wokrs only in X86_64 (amd64) environment
|
||||
# Official image from sozu developers is still version 0.11.0.
|
||||
# So we built it by ourselves locally.
|
||||
sozu-rp:
|
||||
image: jqtype/sozu
|
||||
container_name: proxy-sozu
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 127.0.0.1:8110:80
|
||||
logging:
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
volumes:
|
||||
- ./sozu-config.toml:/etc/sozu/config.toml
|
||||
networks:
|
||||
bench-nw:
|
||||
|
||||
networks:
|
||||
bench-nw:
|
||||
name: bench-nw
|
||||
driver: bridge
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 192.168.100.0/24
|
||||
|
|
@ -25,6 +25,7 @@ services:
|
|||
pull_policy: never
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: docker/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- LOG_LEVEL=info
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@
|
|||
# return 503;
|
||||
# }
|
||||
# localhost
|
||||
upstream localhost {
|
||||
upstream backend {
|
||||
## Can be connected with "bench-nw" network
|
||||
# backend-nginx
|
||||
server 192.168.100.100:80;
|
||||
|
|
@ -69,6 +69,6 @@ server {
|
|||
listen 80 ;
|
||||
# access_log /var/log/nginx/access.log vhost;
|
||||
location / {
|
||||
proxy_pass http://localhost;
|
||||
proxy_pass http://backend;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
16
bench/sozu-config.toml
Normal file
16
bench/sozu-config.toml
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
log_level = "info"
|
||||
log_target = "stdout"
|
||||
max_connections = 512
|
||||
activate_listeners = true
|
||||
|
||||
[[listeners]]
|
||||
protocol = "http"
|
||||
# listening address
|
||||
address = "0.0.0.0:80"
|
||||
|
||||
[clusters]
|
||||
|
||||
[clusters.backend]
|
||||
protocol = "http"
|
||||
frontends = [{ address = "0.0.0.0:80", hostname = "localhost" }]
|
||||
backends = [{ address = "192.168.100.100:80" }]
|
||||
|
|
@ -10,6 +10,9 @@
|
|||
listen_port = 8080
|
||||
listen_port_tls = 8443
|
||||
|
||||
# Optional for h2 and http1.1
|
||||
tcp_listen_backlog = 1024
|
||||
|
||||
# Optional for h2 and http1.1
|
||||
max_concurrent_streams = 100
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
FROM ubuntu:22.04 AS base
|
||||
LABEL maintainer="Jun Kurihara"
|
||||
|
||||
|
|
@ -6,7 +5,7 @@ SHELL ["/bin/sh", "-x", "-c"]
|
|||
ENV SERIAL 2
|
||||
|
||||
########################################
|
||||
FROM base as builder
|
||||
FROM --platform=$BUILDPLATFORM base AS builder
|
||||
|
||||
ENV CFLAGS=-Ofast
|
||||
ENV BUILD_DEPS curl make ca-certificates build-essential
|
||||
|
|
@ -15,6 +14,17 @@ WORKDIR /tmp
|
|||
|
||||
COPY . /tmp/
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
RUN if [ $TARGETARCH = "amd64" ]; then \
|
||||
echo "x86_64" > /arch; \
|
||||
elif [ $TARGETARCH = "arm64" ]; then \
|
||||
echo "aarch64" > /arch; \
|
||||
else \
|
||||
echo "Unsupported platform: $TARGETARCH"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
ENV RUSTFLAGS "-C link-arg=-s"
|
||||
|
||||
RUN update-ca-certificates 2> /dev/null || true
|
||||
|
|
@ -22,14 +32,16 @@ RUN update-ca-certificates 2> /dev/null || true
|
|||
RUN apt-get update && apt-get install -qy --no-install-recommends $BUILD_DEPS && \
|
||||
curl -sSf https://sh.rustup.rs | bash -s -- -y --default-toolchain stable && \
|
||||
export PATH="$HOME/.cargo/bin:$PATH" && \
|
||||
echo "Install toolchain" && \
|
||||
rustup target add $(cat /arch)-unknown-linux-gnu &&\
|
||||
echo "Building rpxy from source" && \
|
||||
cargo build --release && \
|
||||
strip --strip-all /tmp/target/release/rpxy
|
||||
cargo build --release --target=$(cat /arch)-unknown-linux-gnu && \
|
||||
strip --strip-all /tmp/target/$(cat /arch)-unknown-linux-gnu/release/rpxy &&\
|
||||
cp /tmp/target/$(cat /arch)-unknown-linux-gnu/release/rpxy /tmp/target/release/rpxy
|
||||
|
||||
########################################
|
||||
FROM base AS runner
|
||||
FROM --platform=$TARGETPLATFORM base AS runner
|
||||
|
||||
ENV TAG_NAME=amd64
|
||||
ENV RUNTIME_DEPS logrotate ca-certificates gosu
|
||||
|
||||
RUN apt-get update && \
|
||||
|
|
@ -1,7 +1,19 @@
|
|||
########################################
|
||||
FROM messense/rust-musl-cross:x86_64-musl as builder
|
||||
FROM --platform=$BUILDPLATFORM messense/rust-musl-cross:${TARGETARCH}-musl AS builder
|
||||
|
||||
LABEL maintainer="Jun Kurihara"
|
||||
|
||||
ARG TARGETARCH
|
||||
|
||||
RUN if [ $TARGETARCH = "amd64" ]; then \
|
||||
echo "x86_64" > /arch; \
|
||||
elif [ $TARGETARCH = "arm64" ]; then \
|
||||
echo "aarch64" > /arch; \
|
||||
else \
|
||||
echo "Unsupported platform: $TARGETARCH"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
ENV TARGET_DIR=x86_64-unknown-linux-musl
|
||||
ENV CFLAGS=-Ofast
|
||||
|
||||
WORKDIR /tmp
|
||||
|
|
@ -11,15 +23,14 @@ COPY . /tmp/
|
|||
ENV RUSTFLAGS "-C link-arg=-s"
|
||||
|
||||
RUN echo "Building rpxy from source" && \
|
||||
cargo build --release && \
|
||||
musl-strip --strip-all /tmp/target/${TARGET_DIR}/release/rpxy
|
||||
cargo build --release --target $(cat /arch)-unknown-linux-musl && \
|
||||
musl-strip --strip-all /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy && \
|
||||
cp /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy /tmp/target/release/rpxy
|
||||
|
||||
########################################
|
||||
FROM alpine:latest as runner
|
||||
FROM --platform=$TARGETPLATFORM alpine:latest AS runner
|
||||
LABEL maintainer="Jun Kurihara"
|
||||
|
||||
ENV TAG_NAME=amd64-slim
|
||||
ENV TARGET_DIR=x86_64-unknown-linux-musl
|
||||
ENV RUNTIME_DEPS logrotate ca-certificates su-exec
|
||||
|
||||
RUN apk add --no-cache ${RUNTIME_DEPS} && \
|
||||
|
|
@ -29,7 +40,7 @@ RUN apk add --no-cache ${RUNTIME_DEPS} && \
|
|||
mkdir -p /rpxy/bin &&\
|
||||
mkdir -p /rpxy/log
|
||||
|
||||
COPY --from=builder /tmp/target/${TARGET_DIR}/release/rpxy /rpxy/bin/rpxy
|
||||
COPY --from=builder /tmp/target/release/rpxy /rpxy/bin/rpxy
|
||||
COPY ./docker/run.sh /rpxy
|
||||
COPY ./docker/entrypoint.sh /rpxy
|
||||
|
||||
|
|
@ -10,13 +10,17 @@ services:
|
|||
- 127.0.0.1:8443:8443
|
||||
build:
|
||||
context: ../
|
||||
dockerfile: ./docker/Dockerfile.amd64
|
||||
dockerfile: ./docker/Dockerfile
|
||||
platforms: # Choose your platforms
|
||||
- "linux/amd64"
|
||||
# - "linux/arm64"
|
||||
environment:
|
||||
- LOG_LEVEL=debug
|
||||
- LOG_TO_FILE=true
|
||||
- HOST_USER=jun
|
||||
- HOST_UID=501
|
||||
- HOST_GID=501
|
||||
# - WATCH=true
|
||||
tty: false
|
||||
privileged: true
|
||||
volumes:
|
||||
|
|
|
|||
|
|
@ -7,4 +7,19 @@ if [ -z $LOG_LEVEL ]; then
|
|||
fi
|
||||
echo "rpxy: Logging with level ${LOG_LEVEL}"
|
||||
|
||||
RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE}
|
||||
# continuously watch and reload the config file
|
||||
if [ -z $WATCH ]; then
|
||||
WATCH=false
|
||||
else
|
||||
if [ "$WATCH" = "true" ]; then
|
||||
WATCH=true
|
||||
else
|
||||
WATCH=false
|
||||
fi
|
||||
fi
|
||||
|
||||
if $WATCH ; then
|
||||
RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE} -w
|
||||
else
|
||||
RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE}
|
||||
fi
|
||||
|
|
|
|||
2
h3
2
h3
|
|
@ -1 +1 @@
|
|||
Subproject commit 3ef7c1a37b635e8446322d8f8d3a68580a208ad8
|
||||
Subproject commit a57ed224ac5d17a635eb71eb6f83c1196f581a51
|
||||
2
quinn
2
quinn
|
|
@ -1 +1 @@
|
|||
Subproject commit 7914468e27621633a8399c8d02fbf3f557d54df2
|
||||
Subproject commit 532ba7d80405ad083fd05546fa71becbe5eff1a4
|
||||
49
rpxy-bin/Cargo.toml
Normal file
49
rpxy-bin/Cargo.toml
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
[package]
|
||||
name = "rpxy"
|
||||
version = "0.4.0"
|
||||
authors = ["Jun Kurihara"]
|
||||
homepage = "https://github.com/junkurihara/rust-rpxy"
|
||||
repository = "https://github.com/junkurihara/rust-rpxy"
|
||||
license = "MIT"
|
||||
readme = "../README.md"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = ["http3"]
|
||||
http3 = []
|
||||
|
||||
[dependencies]
|
||||
rpxy-lib = { path = "../rpxy-lib/", features = ["http3", "sticky-cookie"] }
|
||||
|
||||
anyhow = "1.0.72"
|
||||
rustc-hash = "1.1.0"
|
||||
serde = { version = "1.0.178", default-features = false, features = ["derive"] }
|
||||
derive_builder = "0.12.0"
|
||||
tokio = { version = "1.29.1", default-features = false, features = [
|
||||
"net",
|
||||
"rt-multi-thread",
|
||||
"time",
|
||||
"sync",
|
||||
"macros",
|
||||
] }
|
||||
async-trait = "0.1.72"
|
||||
rustls-pemfile = "1.0.3"
|
||||
|
||||
# config
|
||||
clap = { version = "4.3.19", features = ["std", "cargo", "wrap_help"] }
|
||||
toml = { version = "0.7.6", default-features = false, features = ["parse"] }
|
||||
hot_reload = "0.1.4"
|
||||
|
||||
# logging
|
||||
tracing = { version = "0.1.37" }
|
||||
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
|
||||
|
||||
|
||||
[target.'cfg(not(target_env = "msvc"))'.dependencies]
|
||||
tikv-jemallocator = "0.5.4"
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
185
rpxy-bin/src/cert_file_reader.rs
Normal file
185
rpxy-bin/src/cert_file_reader.rs
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
use crate::log::*;
|
||||
use async_trait::async_trait;
|
||||
use derive_builder::Builder;
|
||||
use rpxy_lib::{
|
||||
reexports::{Certificate, PrivateKey},
|
||||
CertsAndKeys, CryptoSource,
|
||||
};
|
||||
use std::{
|
||||
fs::File,
|
||||
io::{self, BufReader, Cursor, Read},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
#[derive(Builder, Debug, Clone)]
|
||||
/// Crypto-related file reader implementing certs::CryptoRead trait
|
||||
pub struct CryptoFileSource {
|
||||
#[builder(setter(custom))]
|
||||
/// Always exist
|
||||
pub tls_cert_path: PathBuf,
|
||||
|
||||
#[builder(setter(custom))]
|
||||
/// Always exist
|
||||
pub tls_cert_key_path: PathBuf,
|
||||
|
||||
#[builder(setter(custom), default)]
|
||||
/// This may not exist
|
||||
pub client_ca_cert_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl CryptoFileSourceBuilder {
|
||||
pub fn tls_cert_path(&mut self, v: &str) -> &mut Self {
|
||||
self.tls_cert_path = Some(PathBuf::from(v));
|
||||
self
|
||||
}
|
||||
pub fn tls_cert_key_path(&mut self, v: &str) -> &mut Self {
|
||||
self.tls_cert_key_path = Some(PathBuf::from(v));
|
||||
self
|
||||
}
|
||||
pub fn client_ca_cert_path(&mut self, v: &Option<String>) -> &mut Self {
|
||||
self.client_ca_cert_path = Some(v.to_owned().as_ref().map(PathBuf::from));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl CryptoSource for CryptoFileSource {
|
||||
type Error = io::Error;
|
||||
/// read crypto materials from source
|
||||
async fn read(&self) -> Result<CertsAndKeys, Self::Error> {
|
||||
read_certs_and_keys(
|
||||
&self.tls_cert_path,
|
||||
&self.tls_cert_key_path,
|
||||
self.client_ca_cert_path.as_ref(),
|
||||
)
|
||||
}
|
||||
/// Returns true when mutual tls is enabled
|
||||
fn is_mutual_tls(&self) -> bool {
|
||||
self.client_ca_cert_path.is_some()
|
||||
}
|
||||
}
|
||||
|
||||
/// Read certificates and private keys from file
|
||||
fn read_certs_and_keys(
|
||||
cert_path: &PathBuf,
|
||||
cert_key_path: &PathBuf,
|
||||
client_ca_cert_path: Option<&PathBuf>,
|
||||
) -> Result<CertsAndKeys, io::Error> {
|
||||
debug!("Read TLS server certificates and private key");
|
||||
|
||||
let certs: Vec<_> = {
|
||||
let certs_path_str = cert_path.display().to_string();
|
||||
let mut reader = BufReader::new(File::open(cert_path).map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the certificates [{certs_path_str}]: {e}"),
|
||||
)
|
||||
})?);
|
||||
rustls_pemfile::certs(&mut reader)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the certificates"))?
|
||||
}
|
||||
.drain(..)
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
|
||||
let cert_keys: Vec<_> = {
|
||||
let cert_key_path_str = cert_key_path.display().to_string();
|
||||
let encoded_keys = {
|
||||
let mut encoded_keys = vec![];
|
||||
File::open(cert_key_path)
|
||||
.map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the certificate keys [{cert_key_path_str}]: {e}"),
|
||||
)
|
||||
})?
|
||||
.read_to_end(&mut encoded_keys)?;
|
||||
encoded_keys
|
||||
};
|
||||
let mut reader = Cursor::new(encoded_keys);
|
||||
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Unable to parse the certificates private keys (PKCS8)",
|
||||
)
|
||||
})?;
|
||||
reader.set_position(0);
|
||||
let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader)?;
|
||||
let mut keys = pkcs8_keys;
|
||||
keys.append(&mut rsa_keys);
|
||||
if keys.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"No private keys found - Make sure that they are in PKCS#8/PEM format",
|
||||
));
|
||||
}
|
||||
keys.drain(..).map(PrivateKey).collect()
|
||||
};
|
||||
|
||||
let client_ca_certs = if let Some(path) = client_ca_cert_path {
|
||||
debug!("Read CA certificates for client authentication");
|
||||
// Reads client certificate and returns client
|
||||
let certs: Vec<_> = {
|
||||
let certs_path_str = path.display().to_string();
|
||||
let mut reader = BufReader::new(File::open(path).map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the client certificates [{certs_path_str}]: {e}"),
|
||||
)
|
||||
})?);
|
||||
rustls_pemfile::certs(&mut reader)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the client certificates"))?
|
||||
}
|
||||
.drain(..)
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
Some(certs)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(CertsAndKeys {
|
||||
certs,
|
||||
cert_keys,
|
||||
client_ca_certs,
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
#[tokio::test]
|
||||
async fn read_server_crt_key_files() {
|
||||
let tls_cert_path = "../example-certs/server.crt";
|
||||
let tls_cert_key_path = "../example-certs/server.key";
|
||||
let crypto_file_source = CryptoFileSourceBuilder::default()
|
||||
.tls_cert_key_path(tls_cert_key_path)
|
||||
.tls_cert_path(tls_cert_path)
|
||||
.build();
|
||||
assert!(crypto_file_source.is_ok());
|
||||
|
||||
let crypto_file_source = crypto_file_source.unwrap();
|
||||
let crypto_elem = crypto_file_source.read().await;
|
||||
assert!(crypto_elem.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_server_crt_key_files_with_client_ca_crt() {
|
||||
let tls_cert_path = "../example-certs/server.crt";
|
||||
let tls_cert_key_path = "../example-certs/server.key";
|
||||
let client_ca_cert_path = Some("../example-certs/client.ca.crt".to_string());
|
||||
let crypto_file_source = CryptoFileSourceBuilder::default()
|
||||
.tls_cert_key_path(tls_cert_key_path)
|
||||
.tls_cert_path(tls_cert_path)
|
||||
.client_ca_cert_path(&client_ca_cert_path)
|
||||
.build();
|
||||
assert!(crypto_file_source.is_ok());
|
||||
|
||||
let crypto_file_source = crypto_file_source.unwrap();
|
||||
let crypto_elem = crypto_file_source.read().await;
|
||||
assert!(crypto_elem.is_ok());
|
||||
|
||||
let crypto_elem = crypto_elem.unwrap();
|
||||
assert!(crypto_elem.client_ca_certs.is_some());
|
||||
}
|
||||
}
|
||||
9
rpxy-bin/src/config/mod.rs
Normal file
9
rpxy-bin/src/config/mod.rs
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
mod parse;
|
||||
mod service;
|
||||
mod toml;
|
||||
|
||||
pub use {
|
||||
self::toml::ConfigToml,
|
||||
parse::{build_settings, parse_opts},
|
||||
service::ConfigTomlReloader,
|
||||
};
|
||||
97
rpxy-bin/src/config/parse.rs
Normal file
97
rpxy-bin/src/config/parse.rs
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
use super::toml::ConfigToml;
|
||||
use crate::{
|
||||
cert_file_reader::CryptoFileSource,
|
||||
error::{anyhow, ensure},
|
||||
};
|
||||
use clap::{Arg, ArgAction};
|
||||
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig};
|
||||
|
||||
/// Parsed options
|
||||
pub struct Opts {
|
||||
pub config_file_path: String,
|
||||
pub watch: bool,
|
||||
}
|
||||
|
||||
/// Parse arg values passed from cli
|
||||
pub fn parse_opts() -> Result<Opts, anyhow::Error> {
|
||||
let _ = include_str!("../../Cargo.toml");
|
||||
let options = clap::command!()
|
||||
.arg(
|
||||
Arg::new("config_file")
|
||||
.long("config")
|
||||
.short('c')
|
||||
.value_name("FILE")
|
||||
.required(true)
|
||||
.help("Configuration file path like ./config.toml"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("watch")
|
||||
.long("watch")
|
||||
.short('w')
|
||||
.action(ArgAction::SetTrue)
|
||||
.help("Activate dynamic reloading of the config file via continuous monitoring"),
|
||||
);
|
||||
let matches = options.get_matches();
|
||||
|
||||
///////////////////////////////////
|
||||
let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
|
||||
let watch = matches.get_one::<bool>("watch").unwrap().to_owned();
|
||||
|
||||
Ok(Opts {
|
||||
config_file_path,
|
||||
watch,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn build_settings(
|
||||
config: &ConfigToml,
|
||||
) -> std::result::Result<(ProxyConfig, AppConfigList<CryptoFileSource>), anyhow::Error> {
|
||||
///////////////////////////////////
|
||||
// build proxy config
|
||||
let proxy_config: ProxyConfig = config.try_into()?;
|
||||
|
||||
///////////////////////////////////
|
||||
// backend_apps
|
||||
let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;
|
||||
|
||||
// assertions for all backend apps
|
||||
ensure!(!apps.0.is_empty(), "Wrong application spec.");
|
||||
// if only https_port is specified, tls must be configured for all apps
|
||||
if proxy_config.http_port.is_none() {
|
||||
ensure!(
|
||||
apps.0.iter().all(|(_, app)| app.tls.is_some()),
|
||||
"Some apps serves only plaintext HTTP"
|
||||
);
|
||||
}
|
||||
// https redirection can be configured if both ports are active
|
||||
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
|
||||
ensure!(
|
||||
apps.0.iter().all(|(_, app)| {
|
||||
if let Some(tls) = app.tls.as_ref() {
|
||||
tls.https_redirection.is_none()
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}),
|
||||
"https_redirection can be specified only when both http_port and https_port are specified"
|
||||
);
|
||||
}
|
||||
|
||||
// build applications
|
||||
let mut app_config_list_inner = Vec::<AppConfig<CryptoFileSource>>::new();
|
||||
|
||||
// let mut backends = Backends::new();
|
||||
for (app_name, app) in apps.0.iter() {
|
||||
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
|
||||
let registered_app_name = app_name.to_ascii_lowercase();
|
||||
let app_config = app.build_app_config(®istered_app_name)?;
|
||||
app_config_list_inner.push(app_config);
|
||||
}
|
||||
|
||||
let app_config_list = AppConfigList {
|
||||
inner: app_config_list_inner,
|
||||
default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
|
||||
};
|
||||
|
||||
Ok((proxy_config, app_config_list))
|
||||
}
|
||||
24
rpxy-bin/src/config/service.rs
Normal file
24
rpxy-bin/src/config/service.rs
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
use super::toml::ConfigToml;
|
||||
use async_trait::async_trait;
|
||||
use hot_reload::{Reload, ReloaderError};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ConfigTomlReloader {
|
||||
pub config_path: String,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Reload<ConfigToml> for ConfigTomlReloader {
|
||||
type Source = String;
|
||||
async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml>> {
|
||||
Ok(Self {
|
||||
config_path: source.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml>> {
|
||||
let conf = ConfigToml::new(&self.config_path)
|
||||
.map_err(|_e| ReloaderError::<ConfigToml>::Reload("Failed to reload config toml"))?;
|
||||
Ok(Some(conf))
|
||||
}
|
||||
}
|
||||
259
rpxy-bin/src/config/toml.rs
Normal file
259
rpxy-bin/src/config/toml.rs
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
use crate::{
|
||||
cert_file_reader::{CryptoFileSource, CryptoFileSourceBuilder},
|
||||
constants::*,
|
||||
error::{anyhow, ensure},
|
||||
};
|
||||
use rpxy_lib::{reexports::Uri, AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use serde::Deserialize;
|
||||
use std::{fs, net::SocketAddr};
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct ConfigToml {
|
||||
pub listen_port: Option<u16>,
|
||||
pub listen_port_tls: Option<u16>,
|
||||
pub listen_ipv6: Option<bool>,
|
||||
pub tcp_listen_backlog: Option<u32>,
|
||||
pub max_concurrent_streams: Option<u32>,
|
||||
pub max_clients: Option<u32>,
|
||||
pub apps: Option<Apps>,
|
||||
pub default_app: Option<String>,
|
||||
pub experimental: Option<Experimental>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "http3")]
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct Http3Option {
|
||||
pub alt_svc_max_age: Option<u32>,
|
||||
pub request_max_body_size: Option<usize>,
|
||||
pub max_concurrent_connections: Option<u32>,
|
||||
pub max_concurrent_bidistream: Option<u32>,
|
||||
pub max_concurrent_unistream: Option<u32>,
|
||||
pub max_idle_timeout: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct Experimental {
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3: Option<Http3Option>,
|
||||
pub ignore_sni_consistency: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct Apps(pub HashMap<String, Application>);
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct Application {
|
||||
pub server_name: Option<String>,
|
||||
pub reverse_proxy: Option<Vec<ReverseProxyOption>>,
|
||||
pub tls: Option<TlsOption>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct TlsOption {
|
||||
pub tls_cert_path: Option<String>,
|
||||
pub tls_cert_key_path: Option<String>,
|
||||
pub https_redirection: Option<bool>,
|
||||
pub client_ca_cert_path: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct ReverseProxyOption {
|
||||
pub path: Option<String>,
|
||||
pub replace_path: Option<String>,
|
||||
pub upstream: Vec<UpstreamParams>,
|
||||
pub upstream_options: Option<Vec<String>>,
|
||||
pub load_balance: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
|
||||
pub struct UpstreamParams {
|
||||
pub location: String,
|
||||
pub tls: Option<bool>,
|
||||
}
|
||||
|
||||
impl TryInto<ProxyConfig> for &ConfigToml {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_into(self) -> std::result::Result<ProxyConfig, Self::Error> {
|
||||
let mut proxy_config = ProxyConfig {
|
||||
// listen port and socket
|
||||
http_port: self.listen_port,
|
||||
https_port: self.listen_port_tls,
|
||||
..Default::default()
|
||||
};
|
||||
ensure!(
|
||||
proxy_config.http_port.is_some() || proxy_config.https_port.is_some(),
|
||||
anyhow!("Either/Both of http_port or https_port must be specified")
|
||||
);
|
||||
if proxy_config.http_port.is_some() && proxy_config.https_port.is_some() {
|
||||
ensure!(
|
||||
proxy_config.http_port.unwrap() != proxy_config.https_port.unwrap(),
|
||||
anyhow!("http_port and https_port must be different")
|
||||
);
|
||||
}
|
||||
|
||||
// NOTE: when [::]:xx is bound, both v4 and v6 listeners are enabled.
|
||||
let listen_addresses: Vec<&str> = if let Some(true) = self.listen_ipv6 {
|
||||
LISTEN_ADDRESSES_V6.to_vec()
|
||||
} else {
|
||||
LISTEN_ADDRESSES_V4.to_vec()
|
||||
};
|
||||
proxy_config.listen_sockets = listen_addresses
|
||||
.iter()
|
||||
.flat_map(|addr| {
|
||||
let mut v: Vec<SocketAddr> = vec![];
|
||||
if let Some(port) = proxy_config.http_port {
|
||||
v.push(format!("{addr}:{port}").parse().unwrap());
|
||||
}
|
||||
if let Some(port) = proxy_config.https_port {
|
||||
v.push(format!("{addr}:{port}").parse().unwrap());
|
||||
}
|
||||
v
|
||||
})
|
||||
.collect();
|
||||
|
||||
// tcp backlog
|
||||
if let Some(backlog) = self.tcp_listen_backlog {
|
||||
proxy_config.tcp_listen_backlog = backlog;
|
||||
}
|
||||
|
||||
// max values
|
||||
if let Some(c) = self.max_clients {
|
||||
proxy_config.max_clients = c as usize;
|
||||
}
|
||||
if let Some(c) = self.max_concurrent_streams {
|
||||
proxy_config.max_concurrent_streams = c;
|
||||
}
|
||||
|
||||
// experimental
|
||||
if let Some(exp) = &self.experimental {
|
||||
#[cfg(feature = "http3")]
|
||||
{
|
||||
if let Some(h3option) = &exp.h3 {
|
||||
proxy_config.http3 = true;
|
||||
if let Some(x) = h3option.alt_svc_max_age {
|
||||
proxy_config.h3_alt_svc_max_age = x;
|
||||
}
|
||||
if let Some(x) = h3option.request_max_body_size {
|
||||
proxy_config.h3_request_max_body_size = x;
|
||||
}
|
||||
if let Some(x) = h3option.max_concurrent_connections {
|
||||
proxy_config.h3_max_concurrent_connections = x;
|
||||
}
|
||||
if let Some(x) = h3option.max_concurrent_bidistream {
|
||||
proxy_config.h3_max_concurrent_bidistream = x.into();
|
||||
}
|
||||
if let Some(x) = h3option.max_concurrent_unistream {
|
||||
proxy_config.h3_max_concurrent_unistream = x.into();
|
||||
}
|
||||
if let Some(x) = h3option.max_idle_timeout {
|
||||
if x == 0u64 {
|
||||
proxy_config.h3_max_idle_timeout = None;
|
||||
} else {
|
||||
proxy_config.h3_max_idle_timeout = Some(tokio::time::Duration::from_secs(x))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ignore) = exp.ignore_sni_consistency {
|
||||
proxy_config.sni_consistency = !ignore;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(proxy_config)
|
||||
}
|
||||
}
|
||||
|
||||
impl ConfigToml {
|
||||
pub fn new(config_file: &str) -> std::result::Result<Self, anyhow::Error> {
|
||||
let config_str = fs::read_to_string(config_file)?;
|
||||
|
||||
toml::from_str(&config_str).map_err(|e| anyhow!(e))
|
||||
}
|
||||
}
|
||||
|
||||
impl Application {
|
||||
pub fn build_app_config(&self, app_name: &str) -> std::result::Result<AppConfig<CryptoFileSource>, anyhow::Error> {
|
||||
let server_name_string = self.server_name.as_ref().ok_or(anyhow!("Missing server_name"))?;
|
||||
|
||||
// reverse proxy settings
|
||||
let reverse_proxy_config: Vec<ReverseProxyConfig> = self.try_into()?;
|
||||
|
||||
// tls settings
|
||||
let tls_config = if self.tls.is_some() {
|
||||
let tls = self.tls.as_ref().unwrap();
|
||||
ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
|
||||
let inner = CryptoFileSourceBuilder::default()
|
||||
.tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
|
||||
.tls_cert_key_path(tls.tls_cert_key_path.as_ref().unwrap())
|
||||
.client_ca_cert_path(&tls.client_ca_cert_path)
|
||||
.build()?;
|
||||
|
||||
let https_redirection = if tls.https_redirection.is_none() {
|
||||
true // Default true
|
||||
} else {
|
||||
tls.https_redirection.unwrap()
|
||||
};
|
||||
|
||||
Some(TlsConfig {
|
||||
inner,
|
||||
https_redirection,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(AppConfig {
|
||||
app_name: app_name.to_owned(),
|
||||
server_name: server_name_string.to_owned(),
|
||||
reverse_proxy: reverse_proxy_config,
|
||||
tls: tls_config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<Vec<ReverseProxyConfig>> for &Application {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_into(self) -> std::result::Result<Vec<ReverseProxyConfig>, Self::Error> {
|
||||
let _server_name_string = self.server_name.as_ref().ok_or(anyhow!("Missing server_name"))?;
|
||||
let rp_settings = self.reverse_proxy.as_ref().ok_or(anyhow!("Missing reverse_proxy"))?;
|
||||
|
||||
let mut reverse_proxies: Vec<ReverseProxyConfig> = Vec::new();
|
||||
|
||||
for rpo in rp_settings.iter() {
|
||||
let upstream_res: Vec<Option<UpstreamUri>> = rpo.upstream.iter().map(|v| v.try_into().ok()).collect();
|
||||
if !upstream_res.iter().all(|v| v.is_some()) {
|
||||
return Err(anyhow!("[{}] Upstream uri is invalid", &_server_name_string));
|
||||
}
|
||||
let upstream = upstream_res.into_iter().map(|v| v.unwrap()).collect();
|
||||
|
||||
reverse_proxies.push(ReverseProxyConfig {
|
||||
path: rpo.path.clone(),
|
||||
replace_path: rpo.replace_path.clone(),
|
||||
upstream,
|
||||
upstream_options: rpo.upstream_options.clone(),
|
||||
load_balance: rpo.load_balance.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
Ok(reverse_proxies)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<UpstreamUri> for &UpstreamParams {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_into(self) -> std::result::Result<UpstreamUri, Self::Error> {
|
||||
let scheme = match self.tls {
|
||||
Some(true) => "https",
|
||||
_ => "http",
|
||||
};
|
||||
let location = format!("{}://{}", scheme, self.location);
|
||||
Ok(UpstreamUri {
|
||||
inner: location.parse::<Uri>().map_err(|e| anyhow!("{}", e))?,
|
||||
})
|
||||
}
|
||||
}
|
||||
3
rpxy-bin/src/constants.rs
Normal file
3
rpxy-bin/src/constants.rs
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
|
||||
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
|
||||
pub const CONFIG_WATCH_DELAY_SECS: u32 = 20;
|
||||
1
rpxy-bin/src/error.rs
Normal file
1
rpxy-bin/src/error.rs
Normal file
|
|
@ -0,0 +1 @@
|
|||
pub use anyhow::{anyhow, bail, ensure, Context};
|
||||
24
rpxy-bin/src/log.rs
Normal file
24
rpxy-bin/src/log.rs
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
pub use tracing::{debug, error, info, warn};
|
||||
|
||||
pub fn init_logger() {
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
let format_layer = fmt::layer()
|
||||
.with_line_number(false)
|
||||
.with_thread_ids(false)
|
||||
.with_target(false)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_level(true)
|
||||
.compact();
|
||||
|
||||
// This limits the logger to emits only rpxy crate
|
||||
let level_string = std::env::var(EnvFilter::DEFAULT_ENV).unwrap_or_else(|_| "info".to_string());
|
||||
let filter_layer = EnvFilter::new(format!("{}={}", env!("CARGO_PKG_NAME"), level_string));
|
||||
// let filter_layer = EnvFilter::from_default_env();
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(format_layer)
|
||||
.with(filter_layer)
|
||||
.init();
|
||||
}
|
||||
134
rpxy-bin/src/main.rs
Normal file
134
rpxy-bin/src/main.rs
Normal file
|
|
@ -0,0 +1,134 @@
|
|||
#[cfg(not(target_env = "msvc"))]
|
||||
use tikv_jemallocator::Jemalloc;
|
||||
|
||||
#[cfg(not(target_env = "msvc"))]
|
||||
#[global_allocator]
|
||||
static GLOBAL: Jemalloc = Jemalloc;
|
||||
|
||||
mod cert_file_reader;
|
||||
mod config;
|
||||
mod constants;
|
||||
mod error;
|
||||
mod log;
|
||||
|
||||
use crate::{
|
||||
config::{build_settings, parse_opts, ConfigToml, ConfigTomlReloader},
|
||||
constants::CONFIG_WATCH_DELAY_SECS,
|
||||
log::*,
|
||||
};
|
||||
use hot_reload::{ReloaderReceiver, ReloaderService};
|
||||
use rpxy_lib::entrypoint;
|
||||
|
||||
fn main() {
|
||||
init_logger();
|
||||
|
||||
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
|
||||
runtime_builder.enable_all();
|
||||
runtime_builder.thread_name("rpxy");
|
||||
let runtime = runtime_builder.build().unwrap();
|
||||
|
||||
runtime.block_on(async {
|
||||
// Initially load options
|
||||
let Ok(parsed_opts) = parse_opts() else {
|
||||
error!("Invalid toml file");
|
||||
std::process::exit(1);
|
||||
};
|
||||
|
||||
if !parsed_opts.watch {
|
||||
if let Err(e) = rpxy_service_without_watcher(&parsed_opts.config_file_path, runtime.handle().clone()).await {
|
||||
error!("rpxy service existed: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else {
|
||||
let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml>::new(
|
||||
&parsed_opts.config_file_path,
|
||||
CONFIG_WATCH_DELAY_SECS,
|
||||
false,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::select! {
|
||||
Err(e) = config_service.start() => {
|
||||
error!("config reloader service exited: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
Err(e) = rpxy_service_with_watcher(config_rx, runtime.handle().clone()) => {
|
||||
error!("rpxy service existed: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async fn rpxy_service_without_watcher(
|
||||
config_file_path: &str,
|
||||
runtime_handle: tokio::runtime::Handle,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
info!("Start rpxy service");
|
||||
let config_toml = match ConfigToml::new(config_file_path) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("Invalid toml file: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let (proxy_conf, app_conf) = match build_settings(&config_toml) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("Invalid configuration: {e}");
|
||||
return Err(anyhow::anyhow!(e));
|
||||
}
|
||||
};
|
||||
entrypoint(&proxy_conf, &app_conf, &runtime_handle)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!(e))
|
||||
}
|
||||
|
||||
async fn rpxy_service_with_watcher(
|
||||
mut config_rx: ReloaderReceiver<ConfigToml>,
|
||||
runtime_handle: tokio::runtime::Handle,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
info!("Start rpxy service with dynamic config reloader");
|
||||
// Initial loading
|
||||
config_rx.changed().await?;
|
||||
let config_toml = config_rx.borrow().clone().unwrap();
|
||||
let (mut proxy_conf, mut app_conf) = match build_settings(&config_toml) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("Invalid configuration: {e}");
|
||||
return Err(anyhow::anyhow!(e));
|
||||
}
|
||||
};
|
||||
|
||||
// Continuous monitoring
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = entrypoint(&proxy_conf, &app_conf, &runtime_handle) => {
|
||||
error!("rpxy entrypoint exited");
|
||||
break;
|
||||
}
|
||||
_ = config_rx.changed() => {
|
||||
if config_rx.borrow().is_none() {
|
||||
error!("Something wrong in config reloader receiver");
|
||||
break;
|
||||
}
|
||||
let config_toml = config_rx.borrow().clone().unwrap();
|
||||
match build_settings(&config_toml) {
|
||||
Ok((p, a)) => {
|
||||
(proxy_conf, app_conf) = (p, a)
|
||||
},
|
||||
Err(e) => {
|
||||
error!("Invalid configuration. Configuration does not updated: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
info!("Configuration updated. Force to re-bind TCP/UDP sockets");
|
||||
}
|
||||
else => break
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow::anyhow!("rpxy or continuous monitoring service exited"))
|
||||
}
|
||||
79
rpxy-lib/Cargo.toml
Normal file
79
rpxy-lib/Cargo.toml
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
[package]
|
||||
name = "rpxy-lib"
|
||||
version = "0.4.0"
|
||||
authors = ["Jun Kurihara"]
|
||||
homepage = "https://github.com/junkurihara/rust-rpxy"
|
||||
repository = "https://github.com/junkurihara/rust-rpxy"
|
||||
license = "MIT"
|
||||
readme = "../README.md"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[features]
|
||||
default = ["http3", "sticky-cookie"]
|
||||
http3 = ["quinn", "h3", "h3-quinn"]
|
||||
sticky-cookie = ["base64", "sha2", "chrono"]
|
||||
|
||||
[dependencies]
|
||||
rand = "0.8.5"
|
||||
rustc-hash = "1.1.0"
|
||||
bytes = "1.4.0"
|
||||
derive_builder = "0.12.0"
|
||||
futures = { version = "0.3.28", features = ["alloc", "async-await"] }
|
||||
tokio = { version = "1.29.1", default-features = false, features = [
|
||||
"net",
|
||||
"rt-multi-thread",
|
||||
"time",
|
||||
"sync",
|
||||
"macros",
|
||||
] }
|
||||
async-trait = "0.1.72"
|
||||
hot_reload = "0.1.4" # reloading certs
|
||||
|
||||
# Error handling
|
||||
anyhow = "1.0.72"
|
||||
thiserror = "1.0.44"
|
||||
|
||||
# http and tls
|
||||
hyper = { version = "0.14.27", default-features = false, features = [
|
||||
"server",
|
||||
"http1",
|
||||
"http2",
|
||||
"stream",
|
||||
] }
|
||||
hyper-rustls = { version = "0.24.1", default-features = false, features = [
|
||||
"tokio-runtime",
|
||||
"webpki-tokio",
|
||||
"http1",
|
||||
"http2",
|
||||
] }
|
||||
tokio-rustls = { version = "0.24.1", features = ["early-data"] }
|
||||
rustls = { version = "0.21.5", default-features = false }
|
||||
webpki = "0.22.0"
|
||||
x509-parser = "0.15.0"
|
||||
|
||||
# logging
|
||||
tracing = { version = "0.1.37" }
|
||||
|
||||
# http/3
|
||||
# quinn = { version = "0.9.3", optional = true }
|
||||
quinn = { path = "../quinn/quinn", optional = true } # Tentative to support rustls-0.21
|
||||
h3 = { path = "../h3/h3/", optional = true }
|
||||
# h3-quinn = { path = "./h3/h3-quinn/", optional = true }
|
||||
h3-quinn = { path = "../h3-quinn/", optional = true } # Tentative to support rustls-0.21
|
||||
# for UDP socket wit SO_REUSEADDR
|
||||
socket2 = { version = "0.5.3", features = ["all"] }
|
||||
|
||||
# cookie handling for sticky cookie
|
||||
chrono = { version = "0.4.26", default-features = false, features = [
|
||||
"unstable-locales",
|
||||
"alloc",
|
||||
"clock",
|
||||
], optional = true }
|
||||
base64 = { version = "0.21.2", optional = true }
|
||||
sha2 = { version = "0.10.7", default-features = false, optional = true }
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
77
rpxy-lib/src/backend/mod.rs
Normal file
77
rpxy-lib/src/backend/mod.rs
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
mod load_balance;
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
mod load_balance_sticky;
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
mod sticky_cookie;
|
||||
mod upstream;
|
||||
mod upstream_opts;
|
||||
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
pub use self::sticky_cookie::{StickyCookie, StickyCookieValue};
|
||||
pub use self::{
|
||||
load_balance::{LbContext, LoadBalance},
|
||||
upstream::{ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder},
|
||||
upstream_opts::UpstreamOption,
|
||||
};
|
||||
use crate::{
|
||||
certs::CryptoSource,
|
||||
utils::{BytesName, PathNameBytesExp, ServerNameBytesExp},
|
||||
};
|
||||
use derive_builder::Builder;
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use std::borrow::Cow;
|
||||
|
||||
/// Struct serving information to route incoming connections, like server name to be handled and tls certs/keys settings.
|
||||
#[derive(Builder)]
|
||||
pub struct Backend<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
#[builder(setter(into))]
|
||||
/// backend application name, e.g., app1
|
||||
pub app_name: String,
|
||||
#[builder(setter(custom))]
|
||||
/// server name, e.g., example.com, in String ascii lower case
|
||||
pub server_name: String,
|
||||
/// struct of reverse proxy serving incoming request
|
||||
pub reverse_proxy: ReverseProxy,
|
||||
|
||||
/// tls settings: https redirection with 30x
|
||||
#[builder(default)]
|
||||
pub https_redirection: Option<bool>,
|
||||
|
||||
/// TLS settings: source meta for server cert, key, client ca cert
|
||||
#[builder(default)]
|
||||
pub crypto_source: Option<T>,
|
||||
}
|
||||
impl<'a, T> BackendBuilder<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
pub fn server_name(&mut self, server_name: impl Into<Cow<'a, str>>) -> &mut Self {
|
||||
self.server_name = Some(server_name.into().to_ascii_lowercase());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// HashMap and some meta information for multiple Backend structs.
|
||||
pub struct Backends<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
pub apps: HashMap<ServerNameBytesExp, Backend<T>>, // hyper::uriで抜いたhostで引っ掛ける
|
||||
pub default_server_name_bytes: Option<ServerNameBytesExp>, // for plaintext http
|
||||
}
|
||||
|
||||
impl<T> Backends<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Self {
|
||||
Backends {
|
||||
apps: HashMap::<ServerNameBytesExp, Backend<T>>::default(),
|
||||
default_server_name_bytes: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
91
rpxy-lib/src/certs.rs
Normal file
91
rpxy-lib/src/certs.rs
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
use async_trait::async_trait;
|
||||
use rustc_hash::FxHashSet as HashSet;
|
||||
use rustls::{
|
||||
sign::{any_supported_type, CertifiedKey},
|
||||
Certificate, OwnedTrustAnchor, PrivateKey,
|
||||
};
|
||||
use std::io;
|
||||
use x509_parser::prelude::*;
|
||||
|
||||
#[async_trait]
|
||||
// Trait to read certs and keys anywhere from KVS, file, sqlite, etc.
|
||||
pub trait CryptoSource {
|
||||
type Error;
|
||||
|
||||
/// read crypto materials from source
|
||||
async fn read(&self) -> Result<CertsAndKeys, Self::Error>;
|
||||
|
||||
/// Returns true when mutual tls is enabled
|
||||
fn is_mutual_tls(&self) -> bool;
|
||||
}
|
||||
|
||||
/// Certificates and private keys in rustls loaded from files
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub struct CertsAndKeys {
|
||||
pub certs: Vec<Certificate>,
|
||||
pub cert_keys: Vec<PrivateKey>,
|
||||
pub client_ca_certs: Option<Vec<Certificate>>,
|
||||
}
|
||||
|
||||
impl CertsAndKeys {
|
||||
pub fn parse_server_certs_and_keys(&self) -> Result<CertifiedKey, anyhow::Error> {
|
||||
// for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() {
|
||||
let signing_key = self
|
||||
.cert_keys
|
||||
.iter()
|
||||
.find_map(|k| {
|
||||
if let Ok(sk) = any_supported_type(k) {
|
||||
Some(sk)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Unable to find a valid certificate and key",
|
||||
)
|
||||
})?;
|
||||
Ok(CertifiedKey::new(self.certs.clone(), signing_key))
|
||||
}
|
||||
|
||||
pub fn parse_client_ca_certs(&self) -> Result<(Vec<OwnedTrustAnchor>, HashSet<Vec<u8>>), anyhow::Error> {
|
||||
let certs = self.client_ca_certs.as_ref().ok_or(anyhow::anyhow!("No client cert"))?;
|
||||
|
||||
let owned_trust_anchors: Vec<_> = certs
|
||||
.iter()
|
||||
.map(|v| {
|
||||
// let trust_anchor = tokio_rustls::webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
|
||||
let trust_anchor = webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
|
||||
rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
|
||||
trust_anchor.subject,
|
||||
trust_anchor.spki,
|
||||
trust_anchor.name_constraints,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// TODO: SKID is not used currently
|
||||
let subject_key_identifiers: HashSet<_> = certs
|
||||
.iter()
|
||||
.filter_map(|v| {
|
||||
// retrieve ca key id (subject key id)
|
||||
let cert = parse_x509_certificate(&v.0).unwrap().1;
|
||||
let subject_key_ids = cert
|
||||
.iter_extensions()
|
||||
.filter_map(|ext| match ext.parsed_extension() {
|
||||
ParsedExtension::SubjectKeyIdentifier(skid) => Some(skid),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if !subject_key_ids.is_empty() {
|
||||
Some(subject_key_ids[0].0.to_owned())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok((owned_trust_anchors, subject_key_identifiers))
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
|
||||
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
|
||||
// pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
|
||||
// pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
|
||||
pub const TCP_LISTEN_BACKLOG: u32 = 1024;
|
||||
// pub const HTTP_LISTEN_PORT: u16 = 8080;
|
||||
// pub const HTTPS_LISTEN_PORT: u16 = 8443;
|
||||
pub const PROXY_TIMEOUT_SEC: u64 = 60;
|
||||
|
|
@ -8,6 +9,7 @@ pub const TLS_HANDSHAKE_TIMEOUT_SEC: u64 = 15; // default as with firefox browse
|
|||
pub const MAX_CLIENTS: usize = 512;
|
||||
pub const MAX_CONCURRENT_STREAMS: u32 = 64;
|
||||
pub const CERTS_WATCH_DELAY_SECS: u32 = 60;
|
||||
pub const LOAD_CERTS_ONLY_WHEN_UPDATED: bool = true;
|
||||
|
||||
// #[cfg(feature = "http3")]
|
||||
// pub const H3_RESPONSE_BUF_SIZE: usize = 65_536; // 64KB
|
||||
|
|
@ -10,9 +10,15 @@ pub enum RpxyError {
|
|||
#[error("Proxy build error")]
|
||||
ProxyBuild(#[from] crate::proxy::ProxyBuilderError),
|
||||
|
||||
#[error("Backend build error")]
|
||||
BackendBuild(#[from] crate::backend::BackendBuilderError),
|
||||
|
||||
#[error("MessageHandler build error")]
|
||||
HandlerBuild(#[from] crate::handler::HttpMessageHandlerBuilderError),
|
||||
|
||||
#[error("Config builder error: {0}")]
|
||||
ConfigBuild(&'static str),
|
||||
|
||||
#[error("Http Message Handler Error: {0}")]
|
||||
Handler(&'static str),
|
||||
|
||||
|
|
@ -29,6 +35,8 @@ pub enum RpxyError {
|
|||
#[error("I/O Error")]
|
||||
Io(#[from] io::Error),
|
||||
|
||||
// #[error("Toml Deserialization Error")]
|
||||
// TomlDe(#[from] toml::de::Error),
|
||||
#[cfg(feature = "http3")]
|
||||
#[error("Quic Connection Error")]
|
||||
QuicConn(#[from] quinn::ConnectionError),
|
||||
298
rpxy-lib/src/globals.rs
Normal file
298
rpxy-lib/src/globals.rs
Normal file
|
|
@ -0,0 +1,298 @@
|
|||
use crate::{
|
||||
backend::{
|
||||
Backend, BackendBuilder, Backends, ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder, UpstreamOption,
|
||||
},
|
||||
certs::CryptoSource,
|
||||
constants::*,
|
||||
error::RpxyError,
|
||||
log::*,
|
||||
utils::{BytesName, PathNameBytesExp},
|
||||
};
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use tokio::time::Duration;
|
||||
|
||||
/// Global object containing proxy configurations and shared object like counters.
|
||||
/// But note that in Globals, we do not have Mutex and RwLock. It is indeed, the context shared among async tasks.
|
||||
pub struct Globals<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
/// Configuration parameters for proxy transport and request handlers
|
||||
pub proxy_config: ProxyConfig, // TODO: proxy configはarcに包んでこいつだけ使いまわせばいいように変えていく。backendsも?
|
||||
|
||||
/// Backend application objects to which http request handler forward incoming requests
|
||||
pub backends: Backends<T>,
|
||||
|
||||
/// Shared context - Counter for serving requests
|
||||
pub request_count: RequestCount,
|
||||
|
||||
/// Shared context - Async task runtime handler
|
||||
pub runtime_handle: tokio::runtime::Handle,
|
||||
}
|
||||
|
||||
/// Configuration parameters for proxy transport and request handlers
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct ProxyConfig {
|
||||
pub listen_sockets: Vec<SocketAddr>, // when instantiate server
|
||||
pub http_port: Option<u16>, // when instantiate server
|
||||
pub https_port: Option<u16>, // when instantiate server
|
||||
pub tcp_listen_backlog: u32, // when instantiate server
|
||||
|
||||
pub proxy_timeout: Duration, // when serving requests at Proxy
|
||||
pub upstream_timeout: Duration, // when serving requests at Handler
|
||||
|
||||
pub max_clients: usize, // when serving requests
|
||||
pub max_concurrent_streams: u32, // when instantiate server
|
||||
pub keepalive: bool, // when instantiate server
|
||||
|
||||
// experimentals
|
||||
pub sni_consistency: bool, // Handler
|
||||
// All need to make packet acceptor
|
||||
#[cfg(feature = "http3")]
|
||||
pub http3: bool,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_alt_svc_max_age: u32,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_request_max_body_size: usize,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_max_concurrent_bidistream: quinn::VarInt,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_max_concurrent_unistream: quinn::VarInt,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_max_concurrent_connections: u32,
|
||||
#[cfg(feature = "http3")]
|
||||
pub h3_max_idle_timeout: Option<Duration>,
|
||||
}
|
||||
|
||||
impl Default for ProxyConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
listen_sockets: Vec::new(),
|
||||
http_port: None,
|
||||
https_port: None,
|
||||
tcp_listen_backlog: TCP_LISTEN_BACKLOG,
|
||||
|
||||
// TODO: Reconsider each timeout values
|
||||
proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
|
||||
upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),
|
||||
|
||||
max_clients: MAX_CLIENTS,
|
||||
max_concurrent_streams: MAX_CONCURRENT_STREAMS,
|
||||
keepalive: true,
|
||||
|
||||
sni_consistency: true,
|
||||
|
||||
#[cfg(feature = "http3")]
|
||||
http3: false,
|
||||
#[cfg(feature = "http3")]
|
||||
h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
|
||||
#[cfg(feature = "http3")]
|
||||
h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
|
||||
#[cfg(feature = "http3")]
|
||||
h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
|
||||
#[cfg(feature = "http3")]
|
||||
h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM.into(),
|
||||
#[cfg(feature = "http3")]
|
||||
h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM.into(),
|
||||
#[cfg(feature = "http3")]
|
||||
h3_max_idle_timeout: Some(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration parameters for backend applications
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct AppConfigList<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
pub inner: Vec<AppConfig<T>>,
|
||||
pub default_app: Option<String>,
|
||||
}
|
||||
impl<T> TryInto<Backends<T>> for AppConfigList<T>
|
||||
where
|
||||
T: CryptoSource + Clone,
|
||||
{
|
||||
type Error = RpxyError;
|
||||
|
||||
fn try_into(self) -> Result<Backends<T>, Self::Error> {
|
||||
let mut backends = Backends::new();
|
||||
for app_config in self.inner.iter() {
|
||||
let backend = app_config.try_into()?;
|
||||
backends
|
||||
.apps
|
||||
.insert(app_config.server_name.clone().to_server_name_vec(), backend);
|
||||
info!(
|
||||
"Registering application {} ({})",
|
||||
&app_config.server_name, &app_config.app_name
|
||||
);
|
||||
}
|
||||
|
||||
// default backend application for plaintext http requests
|
||||
if let Some(d) = self.default_app {
|
||||
let d_sn: Vec<&str> = backends
|
||||
.apps
|
||||
.iter()
|
||||
.filter(|(_k, v)| v.app_name == d)
|
||||
.map(|(_, v)| v.server_name.as_ref())
|
||||
.collect();
|
||||
if !d_sn.is_empty() {
|
||||
info!(
|
||||
"Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
|
||||
d, d_sn[0]
|
||||
);
|
||||
backends.default_server_name_bytes = Some(d_sn[0].to_server_name_vec());
|
||||
}
|
||||
}
|
||||
Ok(backends)
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration parameters for single backend application
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct AppConfig<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
pub app_name: String,
|
||||
pub server_name: String,
|
||||
pub reverse_proxy: Vec<ReverseProxyConfig>,
|
||||
pub tls: Option<TlsConfig<T>>,
|
||||
}
|
||||
impl<T> TryInto<Backend<T>> for &AppConfig<T>
|
||||
where
|
||||
T: CryptoSource + Clone,
|
||||
{
|
||||
type Error = RpxyError;
|
||||
|
||||
fn try_into(self) -> Result<Backend<T>, Self::Error> {
|
||||
// backend builder
|
||||
let mut backend_builder = BackendBuilder::default();
|
||||
// reverse proxy settings
|
||||
let reverse_proxy = self.try_into()?;
|
||||
|
||||
backend_builder
|
||||
.app_name(self.app_name.clone())
|
||||
.server_name(self.server_name.clone())
|
||||
.reverse_proxy(reverse_proxy);
|
||||
|
||||
// TLS settings and build backend instance
|
||||
let backend = if self.tls.is_none() {
|
||||
backend_builder.build().map_err(RpxyError::BackendBuild)?
|
||||
} else {
|
||||
let tls = self.tls.as_ref().unwrap();
|
||||
|
||||
backend_builder
|
||||
.https_redirection(Some(tls.https_redirection))
|
||||
.crypto_source(Some(tls.inner.clone()))
|
||||
.build()?
|
||||
};
|
||||
Ok(backend)
|
||||
}
|
||||
}
|
||||
impl<T> TryInto<ReverseProxy> for &AppConfig<T>
|
||||
where
|
||||
T: CryptoSource + Clone,
|
||||
{
|
||||
type Error = RpxyError;
|
||||
|
||||
fn try_into(self) -> Result<ReverseProxy, Self::Error> {
|
||||
let mut upstream: HashMap<PathNameBytesExp, UpstreamGroup> = HashMap::default();
|
||||
|
||||
self.reverse_proxy.iter().for_each(|rpo| {
|
||||
let upstream_vec: Vec<Upstream> = rpo.upstream.iter().map(|x| x.try_into().unwrap()).collect();
|
||||
// let upstream_iter = rpo.upstream.iter().map(|x| x.to_upstream().unwrap());
|
||||
// let lb_upstream_num = vec_upstream.len();
|
||||
let elem = UpstreamGroupBuilder::default()
|
||||
.upstream(&upstream_vec)
|
||||
.path(&rpo.path)
|
||||
.replace_path(&rpo.replace_path)
|
||||
.lb(&rpo.load_balance, &upstream_vec, &self.server_name, &rpo.path)
|
||||
.opts(&rpo.upstream_options)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
upstream.insert(elem.path.clone(), elem);
|
||||
});
|
||||
if self.reverse_proxy.iter().filter(|rpo| rpo.path.is_none()).count() >= 2 {
|
||||
error!("Multiple default reverse proxy setting");
|
||||
return Err(RpxyError::ConfigBuild("Invalid reverse proxy setting"));
|
||||
}
|
||||
|
||||
if !(upstream.iter().all(|(_, elem)| {
|
||||
!(elem.opts.contains(&UpstreamOption::ConvertHttpsTo11) && elem.opts.contains(&UpstreamOption::ConvertHttpsTo2))
|
||||
})) {
|
||||
error!("Either one of force_http11 or force_http2 can be enabled");
|
||||
return Err(RpxyError::ConfigBuild("Invalid upstream option setting"));
|
||||
}
|
||||
|
||||
Ok(ReverseProxy { upstream })
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration parameters for single reverse proxy corresponding to the path
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct ReverseProxyConfig {
|
||||
pub path: Option<String>,
|
||||
pub replace_path: Option<String>,
|
||||
pub upstream: Vec<UpstreamUri>,
|
||||
pub upstream_options: Option<Vec<String>>,
|
||||
pub load_balance: Option<String>,
|
||||
}
|
||||
|
||||
/// Configuration parameters for single upstream destination from a reverse proxy
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct UpstreamUri {
|
||||
pub inner: hyper::Uri,
|
||||
}
|
||||
impl TryInto<Upstream> for &UpstreamUri {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_into(self) -> std::result::Result<Upstream, Self::Error> {
|
||||
Ok(Upstream {
|
||||
uri: self.inner.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration parameters on TLS for a single backend application
|
||||
#[derive(PartialEq, Eq, Clone)]
|
||||
pub struct TlsConfig<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
pub inner: T,
|
||||
pub https_redirection: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
/// Counter for serving requests
|
||||
pub struct RequestCount(Arc<AtomicUsize>);
|
||||
|
||||
impl RequestCount {
|
||||
pub fn current(&self) -> usize {
|
||||
self.0.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn increment(&self) -> usize {
|
||||
self.0.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn decrement(&self) -> usize {
|
||||
let mut count;
|
||||
while {
|
||||
count = self.0.load(Ordering::Relaxed);
|
||||
count > 0
|
||||
&& self
|
||||
.0
|
||||
.compare_exchange(count, count - 1, Ordering::Relaxed, Ordering::Relaxed)
|
||||
!= Ok(count)
|
||||
} {}
|
||||
count
|
||||
}
|
||||
}
|
||||
|
|
@ -2,6 +2,7 @@
|
|||
use super::{utils_headers::*, utils_request::*, utils_synth_response::*, HandlerContext};
|
||||
use crate::{
|
||||
backend::{Backend, UpstreamGroup},
|
||||
certs::CryptoSource,
|
||||
error::*,
|
||||
globals::Globals,
|
||||
log::*,
|
||||
|
|
@ -18,23 +19,29 @@ use std::{env, net::SocketAddr, sync::Arc};
|
|||
use tokio::{io::copy_bidirectional, time::timeout};
|
||||
|
||||
#[derive(Clone, Builder)]
|
||||
pub struct HttpMessageHandler<T>
|
||||
/// HTTP message handler for requests from clients and responses from backend applications,
|
||||
/// responsible to manipulate and forward messages to upstream backends and downstream clients.
|
||||
pub struct HttpMessageHandler<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone,
|
||||
{
|
||||
forwarder: Arc<Client<T>>,
|
||||
globals: Arc<Globals>,
|
||||
globals: Arc<Globals<U>>,
|
||||
}
|
||||
|
||||
impl<T> HttpMessageHandler<T>
|
||||
impl<T, U> HttpMessageHandler<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone,
|
||||
{
|
||||
/// Return with an arbitrary status code of error and log message
|
||||
fn return_with_error_log(&self, status_code: StatusCode, log_data: &mut MessageLog) -> Result<Response<Body>> {
|
||||
log_data.status_code(&status_code).output();
|
||||
http_error(status_code)
|
||||
}
|
||||
|
||||
/// Handle incoming request message from a client
|
||||
pub async fn handle_request(
|
||||
self,
|
||||
mut req: Request<Body>,
|
||||
|
|
@ -56,34 +63,35 @@ where
|
|||
};
|
||||
// check consistency of between TLS SNI and HOST/Request URI Line.
|
||||
#[allow(clippy::collapsible_if)]
|
||||
if tls_enabled && self.globals.sni_consistency {
|
||||
if tls_enabled && self.globals.proxy_config.sni_consistency {
|
||||
if server_name != tls_server_name.unwrap_or_default() {
|
||||
return self.return_with_error_log(StatusCode::MISDIRECTED_REQUEST, &mut log_data);
|
||||
}
|
||||
}
|
||||
// Find backend application for given server_name, and drop if incoming request is invalid as request.
|
||||
let backend = if let Some(be) = self.globals.backends.apps.get(&server_name) {
|
||||
be
|
||||
} else if let Some(default_server_name) = &self.globals.backends.default_server_name_bytes {
|
||||
let backend = match self.globals.backends.apps.get(&server_name) {
|
||||
Some(be) => be,
|
||||
None => {
|
||||
let Some(default_server_name) = &self.globals.backends.default_server_name_bytes else {
|
||||
return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data);
|
||||
};
|
||||
debug!("Serving by default app");
|
||||
self.globals.backends.apps.get(default_server_name).unwrap()
|
||||
} else {
|
||||
return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data);
|
||||
}
|
||||
};
|
||||
|
||||
// Redirect to https if !tls_enabled and redirect_to_https is true
|
||||
if !tls_enabled && backend.https_redirection.unwrap_or(false) {
|
||||
debug!("Redirect to secure connection: {}", &backend.server_name);
|
||||
log_data.status_code(&StatusCode::PERMANENT_REDIRECT).output();
|
||||
return secure_redirection(&backend.server_name, self.globals.https_port, &req);
|
||||
return secure_redirection(&backend.server_name, self.globals.proxy_config.https_port, &req);
|
||||
}
|
||||
|
||||
// Find reverse proxy for given path and choose one of upstream host
|
||||
// Longest prefix match
|
||||
let path = req.uri().path();
|
||||
let upstream_group = match backend.reverse_proxy.get(path) {
|
||||
Some(ug) => ug,
|
||||
None => return self.return_with_error_log(StatusCode::NOT_FOUND, &mut log_data),
|
||||
let Some(upstream_group) = backend.reverse_proxy.get(path) else {
|
||||
return self.return_with_error_log(StatusCode::NOT_FOUND, &mut log_data)
|
||||
};
|
||||
|
||||
// Upgrade in request header
|
||||
|
|
@ -110,19 +118,17 @@ where
|
|||
log_data.upstream(req.uri());
|
||||
//////
|
||||
|
||||
// Forward request to
|
||||
// Forward request to a chosen backend
|
||||
let mut res_backend = {
|
||||
match timeout(self.globals.upstream_timeout, self.forwarder.request(req)).await {
|
||||
Err(_) => {
|
||||
let Ok(result) = timeout(self.globals.proxy_config.upstream_timeout, self.forwarder.request(req)).await else {
|
||||
return self.return_with_error_log(StatusCode::GATEWAY_TIMEOUT, &mut log_data);
|
||||
}
|
||||
Ok(x) => match x {
|
||||
};
|
||||
match result {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
error!("Failed to get response from backend: {}", e);
|
||||
return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data);
|
||||
}
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -138,22 +144,32 @@ where
|
|||
|
||||
if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS {
|
||||
// Generate response to client
|
||||
if self.generate_response_forwarded(&mut res_backend, backend).is_ok() {
|
||||
log_data.status_code(&res_backend.status()).output();
|
||||
return Ok(res_backend);
|
||||
} else {
|
||||
if self.generate_response_forwarded(&mut res_backend, backend).is_err() {
|
||||
return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data);
|
||||
}
|
||||
log_data.status_code(&res_backend.status()).output();
|
||||
return Ok(res_backend);
|
||||
}
|
||||
|
||||
// Handle StatusCode::SWITCHING_PROTOCOLS in response
|
||||
let upgrade_in_response = extract_upgrade(res_backend.headers());
|
||||
if if let (Some(u_req), Some(u_res)) = (upgrade_in_request.as_ref(), upgrade_in_response.as_ref()) {
|
||||
let should_upgrade = if let (Some(u_req), Some(u_res)) = (upgrade_in_request.as_ref(), upgrade_in_response.as_ref())
|
||||
{
|
||||
u_req.to_ascii_lowercase() == u_res.to_ascii_lowercase()
|
||||
} else {
|
||||
false
|
||||
} {
|
||||
if let Some(request_upgraded) = request_upgraded {
|
||||
};
|
||||
if !should_upgrade {
|
||||
error!(
|
||||
"Backend tried to switch to protocol {:?} when {:?} was requested",
|
||||
upgrade_in_response, upgrade_in_request
|
||||
);
|
||||
return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data);
|
||||
}
|
||||
let Some(request_upgraded) = request_upgraded else {
|
||||
error!("Request does not have an upgrade extension");
|
||||
return self.return_with_error_log(StatusCode::BAD_REQUEST, &mut log_data);
|
||||
};
|
||||
let Some(onupgrade) = res_backend.extensions_mut().remove::<hyper::upgrade::OnUpgrade>() else {
|
||||
error!("Response does not have an upgrade extension");
|
||||
return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data);
|
||||
|
|
@ -178,27 +194,17 @@ where
|
|||
});
|
||||
log_data.status_code(&res_backend.status()).output();
|
||||
Ok(res_backend)
|
||||
} else {
|
||||
error!("Request does not have an upgrade extension");
|
||||
self.return_with_error_log(StatusCode::BAD_REQUEST, &mut log_data)
|
||||
}
|
||||
} else {
|
||||
error!(
|
||||
"Backend tried to switch to protocol {:?} when {:?} was requested",
|
||||
upgrade_in_response, upgrade_in_request
|
||||
);
|
||||
self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data)
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////
|
||||
// Functions to generate messages
|
||||
////////////////////////////////////////////////////
|
||||
|
||||
fn generate_response_forwarded<B: core::fmt::Debug>(
|
||||
&self,
|
||||
response: &mut Response<B>,
|
||||
chosen_backend: &Backend,
|
||||
) -> Result<()> {
|
||||
/// Manipulate a response message sent from a backend application to forward downstream to a client.
|
||||
fn generate_response_forwarded<B>(&self, response: &mut Response<B>, chosen_backend: &Backend<U>) -> Result<()>
|
||||
where
|
||||
B: core::fmt::Debug,
|
||||
{
|
||||
let headers = response.headers_mut();
|
||||
remove_connection_header(headers);
|
||||
remove_hop_header(headers);
|
||||
|
|
@ -206,15 +212,21 @@ where
|
|||
|
||||
#[cfg(feature = "http3")]
|
||||
{
|
||||
// TODO: Workaround for avoid h3 for client authentication
|
||||
if self.globals.http3 && chosen_backend.client_ca_cert_path.is_none() {
|
||||
if let Some(port) = self.globals.https_port {
|
||||
// Manipulate ALT_SVC allowing h3 in response message only when mutual TLS is not enabled
|
||||
// TODO: This is a workaround for avoiding a client authentication in HTTP/3
|
||||
if self.globals.proxy_config.http3
|
||||
&& chosen_backend
|
||||
.crypto_source
|
||||
.as_ref()
|
||||
.is_some_and(|v| !v.is_mutual_tls())
|
||||
{
|
||||
if let Some(port) = self.globals.proxy_config.https_port {
|
||||
add_header_entry_overwrite_if_exist(
|
||||
headers,
|
||||
header::ALT_SVC.as_str(),
|
||||
format!(
|
||||
"h3=\":{}\"; ma={}, h3-29=\":{}\"; ma={}",
|
||||
port, self.globals.h3_alt_svc_max_age, port, self.globals.h3_alt_svc_max_age
|
||||
port, self.globals.proxy_config.h3_alt_svc_max_age, port, self.globals.proxy_config.h3_alt_svc_max_age
|
||||
),
|
||||
)?;
|
||||
}
|
||||
|
|
@ -225,7 +237,7 @@ where
|
|||
}
|
||||
#[cfg(not(feature = "http3"))]
|
||||
{
|
||||
if let Some(port) = self.globals.https_port {
|
||||
if let Some(port) = self.globals.proxy_config.https_port {
|
||||
headers.remove(header::ALT_SVC.as_str());
|
||||
}
|
||||
}
|
||||
|
|
@ -234,6 +246,7 @@ where
|
|||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
/// Manipulate a request message sent from a client to forward upstream to a backend application
|
||||
fn generate_request_forwarded<B>(
|
||||
&self,
|
||||
client_addr: &SocketAddr,
|
||||
|
|
@ -9,6 +9,7 @@ pub use handler_main::{HttpMessageHandler, HttpMessageHandlerBuilder, HttpMessag
|
|||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug)]
|
||||
/// Context object to handle sticky cookies at HTTP message handler
|
||||
struct HandlerContext {
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
context_lb: Option<LbContext>,
|
||||
|
|
@ -8,7 +8,7 @@ use hyper::{
|
|||
header::{self, HeaderMap, HeaderName, HeaderValue},
|
||||
Uri,
|
||||
};
|
||||
use std::net::SocketAddr;
|
||||
use std::{borrow::Cow, net::SocketAddr};
|
||||
|
||||
////////////////////////////////////////////////////
|
||||
// Functions to manipulate headers
|
||||
|
|
@ -83,6 +83,7 @@ pub(super) fn set_sticky_cookie_lb_context(headers: &mut HeaderMap, context_from
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply options to request header, which are specified in the configuration
|
||||
pub(super) fn apply_upstream_options_to_header(
|
||||
headers: &mut HeaderMap,
|
||||
_client_addr: &SocketAddr,
|
||||
|
|
@ -113,7 +114,7 @@ pub(super) fn apply_upstream_options_to_header(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
// https://datatracker.ietf.org/doc/html/rfc9110
|
||||
/// Append header entry with comma according to [RFC9110](https://datatracker.ietf.org/doc/html/rfc9110)
|
||||
pub(super) fn append_header_entry_with_comma(headers: &mut HeaderMap, key: &str, value: &str) -> Result<()> {
|
||||
match headers.entry(HeaderName::from_bytes(key.as_bytes())?) {
|
||||
header::Entry::Vacant(entry) => {
|
||||
|
|
@ -132,10 +133,11 @@ pub(super) fn append_header_entry_with_comma(headers: &mut HeaderMap, key: &str,
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Add header entry if not exist
|
||||
pub(super) fn add_header_entry_if_not_exist(
|
||||
headers: &mut HeaderMap,
|
||||
key: impl Into<std::borrow::Cow<'static, str>>,
|
||||
value: impl Into<std::borrow::Cow<'static, str>>,
|
||||
key: impl Into<Cow<'static, str>>,
|
||||
value: impl Into<Cow<'static, str>>,
|
||||
) -> Result<()> {
|
||||
match headers.entry(HeaderName::from_bytes(key.into().as_bytes())?) {
|
||||
header::Entry::Vacant(entry) => {
|
||||
|
|
@ -147,10 +149,11 @@ pub(super) fn add_header_entry_if_not_exist(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Overwrite header entry if exist
|
||||
pub(super) fn add_header_entry_overwrite_if_exist(
|
||||
headers: &mut HeaderMap,
|
||||
key: impl Into<std::borrow::Cow<'static, str>>,
|
||||
value: impl Into<std::borrow::Cow<'static, str>>,
|
||||
key: impl Into<Cow<'static, str>>,
|
||||
value: impl Into<Cow<'static, str>>,
|
||||
) -> Result<()> {
|
||||
match headers.entry(HeaderName::from_bytes(key.into().as_bytes())?) {
|
||||
header::Entry::Vacant(entry) => {
|
||||
|
|
@ -164,11 +167,10 @@ pub(super) fn add_header_entry_overwrite_if_exist(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Align cookie values in single line
|
||||
/// Sometimes violates [RFC6265](https://www.rfc-editor.org/rfc/rfc6265#section-5.4) (for http/1.1).
|
||||
/// This is allowed in RFC7540 (for http/2) as mentioned [here](https://stackoverflow.com/questions/4843556/in-http-specification-what-is-the-string-that-separates-cookies).
|
||||
pub(super) fn make_cookie_single_line(headers: &mut HeaderMap) -> Result<()> {
|
||||
// Sometimes violates RFC6265 (for http/1.1).
|
||||
// https://www.rfc-editor.org/rfc/rfc6265#section-5.4
|
||||
// This is allowed in RFC7540 (for http/2).
|
||||
// https://stackoverflow.com/questions/4843556/in-http-specification-what-is-the-string-that-separates-cookies
|
||||
let cookies = headers
|
||||
.iter()
|
||||
.filter(|(k, _)| **k == hyper::header::COOKIE)
|
||||
|
|
@ -182,6 +184,7 @@ pub(super) fn make_cookie_single_line(headers: &mut HeaderMap) -> Result<()> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Add forwarding headers like `x-forwarded-for`.
|
||||
pub(super) fn add_forwarding_header(
|
||||
headers: &mut HeaderMap,
|
||||
client_addr: &SocketAddr,
|
||||
|
|
@ -219,6 +222,7 @@ pub(super) fn add_forwarding_header(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove connection header
|
||||
pub(super) fn remove_connection_header(headers: &mut HeaderMap) {
|
||||
if let Some(values) = headers.get(header::CONNECTION) {
|
||||
if let Ok(v) = values.clone().to_str() {
|
||||
|
|
@ -231,6 +235,7 @@ pub(super) fn remove_connection_header(headers: &mut HeaderMap) {
|
|||
}
|
||||
}
|
||||
|
||||
/// Hop header values which are removed at proxy
|
||||
const HOP_HEADERS: &[&str] = &[
|
||||
"connection",
|
||||
"te",
|
||||
|
|
@ -243,12 +248,14 @@ const HOP_HEADERS: &[&str] = &[
|
|||
"upgrade",
|
||||
];
|
||||
|
||||
/// Remove hop headers
|
||||
pub(super) fn remove_hop_header(headers: &mut HeaderMap) {
|
||||
HOP_HEADERS.iter().for_each(|key| {
|
||||
headers.remove(*key);
|
||||
});
|
||||
}
|
||||
|
||||
/// Extract upgrade header value if exist
|
||||
pub(super) fn extract_upgrade(headers: &HeaderMap) -> Option<String> {
|
||||
if let Some(c) = headers.get(header::CONNECTION) {
|
||||
if c
|
||||
|
|
@ -7,6 +7,7 @@ use hyper::{header, Request};
|
|||
////////////////////////////////////////////////////
|
||||
// Functions to manipulate request line
|
||||
|
||||
/// Apply upstream options in request line, specified in the configuration
|
||||
pub(super) fn apply_upstream_options_to_request_line<B>(req: &mut Request<B>, upstream: &UpstreamGroup) -> Result<()> {
|
||||
for opt in upstream.opts.iter() {
|
||||
match opt {
|
||||
|
|
@ -19,10 +20,12 @@ pub(super) fn apply_upstream_options_to_request_line<B>(req: &mut Request<B>, up
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Trait defining parser of hostname
|
||||
pub trait ParseHost {
|
||||
fn parse_host(&self) -> Result<&[u8]>;
|
||||
}
|
||||
impl<B> ParseHost for Request<B> {
|
||||
/// Extract hostname from either the request HOST header or request line
|
||||
fn parse_host(&self) -> Result<&[u8]> {
|
||||
let headers_host = self.headers().get(header::HOST);
|
||||
let uri_host = self.uri().host();
|
||||
|
|
@ -5,11 +5,13 @@ use hyper::{Body, Request, Response, StatusCode, Uri};
|
|||
////////////////////////////////////////////////////
|
||||
// Functions to create response (error or redirect)
|
||||
|
||||
/// Generate a synthetic response message of a certain error status code
|
||||
pub(super) fn http_error(status_code: StatusCode) -> Result<Response<Body>> {
|
||||
let response = Response::builder().status(status_code).body(Body::empty())?;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// Generate synthetic response message of a redirection to https host with 301
|
||||
pub(super) fn secure_redirection<B>(
|
||||
server_name: &str,
|
||||
tls_port: Option<u16>,
|
||||
98
rpxy-lib/src/lib.rs
Normal file
98
rpxy-lib/src/lib.rs
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
mod backend;
|
||||
mod certs;
|
||||
mod constants;
|
||||
mod error;
|
||||
mod globals;
|
||||
mod handler;
|
||||
mod log;
|
||||
mod proxy;
|
||||
mod utils;
|
||||
|
||||
use crate::{error::*, globals::Globals, handler::HttpMessageHandlerBuilder, log::*, proxy::ProxyBuilder};
|
||||
use futures::future::select_all;
|
||||
use hyper::Client;
|
||||
// use hyper_trust_dns::TrustDnsResolver;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use crate::{
|
||||
certs::{CertsAndKeys, CryptoSource},
|
||||
globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri},
|
||||
};
|
||||
pub mod reexports {
|
||||
pub use hyper::Uri;
|
||||
pub use rustls::{Certificate, PrivateKey};
|
||||
}
|
||||
|
||||
/// Entrypoint that creates and spawns tasks of reverse proxy services
|
||||
pub async fn entrypoint<T>(
|
||||
proxy_config: &ProxyConfig,
|
||||
app_config_list: &AppConfigList<T>,
|
||||
runtime_handle: &tokio::runtime::Handle,
|
||||
) -> Result<()>
|
||||
where
|
||||
T: CryptoSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
// For initial message logging
|
||||
if proxy_config.listen_sockets.iter().any(|addr| addr.is_ipv6()) {
|
||||
info!("Listen both IPv4 and IPv6")
|
||||
} else {
|
||||
info!("Listen IPv4")
|
||||
}
|
||||
if proxy_config.http_port.is_some() {
|
||||
info!("Listen port: {}", proxy_config.http_port.unwrap());
|
||||
}
|
||||
if proxy_config.https_port.is_some() {
|
||||
info!("Listen port: {} (for TLS)", proxy_config.https_port.unwrap());
|
||||
}
|
||||
if proxy_config.http3 {
|
||||
info!("Experimental HTTP/3.0 is enabled. Note it is still very unstable.");
|
||||
}
|
||||
if !proxy_config.sni_consistency {
|
||||
info!("Ignore consistency between TLS SNI and Host header (or Request line). Note it violates RFC.");
|
||||
}
|
||||
|
||||
// build global
|
||||
let globals = Arc::new(Globals {
|
||||
proxy_config: proxy_config.clone(),
|
||||
backends: app_config_list.clone().try_into()?,
|
||||
request_count: Default::default(),
|
||||
runtime_handle: runtime_handle.clone(),
|
||||
});
|
||||
// let connector = TrustDnsResolver::default().into_rustls_webpki_https_connector();
|
||||
let connector = hyper_rustls::HttpsConnectorBuilder::new()
|
||||
.with_webpki_roots()
|
||||
.https_or_http()
|
||||
.enable_http1()
|
||||
.enable_http2()
|
||||
.build();
|
||||
|
||||
let msg_handler = HttpMessageHandlerBuilder::default()
|
||||
.forwarder(Arc::new(Client::builder().build::<_, hyper::Body>(connector)))
|
||||
.globals(globals.clone())
|
||||
.build()?;
|
||||
|
||||
let addresses = globals.proxy_config.listen_sockets.clone();
|
||||
let futures = select_all(addresses.into_iter().map(|addr| {
|
||||
let mut tls_enabled = false;
|
||||
if let Some(https_port) = globals.proxy_config.https_port {
|
||||
tls_enabled = https_port == addr.port()
|
||||
}
|
||||
|
||||
let proxy = ProxyBuilder::default()
|
||||
.globals(globals.clone())
|
||||
.listening_on(addr)
|
||||
.tls_enabled(tls_enabled)
|
||||
.msg_handler(msg_handler.clone())
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
globals.runtime_handle.spawn(proxy.start())
|
||||
}));
|
||||
|
||||
// wait for all future
|
||||
if let (Ok(Err(e)), _, _) = futures.await {
|
||||
error!("Some proxy services are down: {:?}", e);
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -95,26 +95,3 @@ impl MessageLog {
|
|||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_logger() {
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
let format_layer = fmt::layer()
|
||||
.with_line_number(false)
|
||||
.with_thread_ids(false)
|
||||
.with_target(false)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_level(true)
|
||||
.compact();
|
||||
|
||||
// This limits the logger to emits only rpxy crate
|
||||
let level_string = std::env::var(EnvFilter::DEFAULT_ENV).unwrap_or_else(|_| "info".to_string());
|
||||
let filter_layer = EnvFilter::new(format!("{}={}", env!("CARGO_PKG_NAME"), level_string));
|
||||
// let filter_layer = EnvFilter::from_default_env();
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(format_layer)
|
||||
.with(filter_layer)
|
||||
.init();
|
||||
}
|
||||
181
rpxy-lib/src/proxy/crypto_service.rs
Normal file
181
rpxy-lib/src/proxy/crypto_service.rs
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
use crate::{
|
||||
certs::{CertsAndKeys, CryptoSource},
|
||||
globals::Globals,
|
||||
log::*,
|
||||
utils::ServerNameBytesExp,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use hot_reload::*;
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use rustls::{server::ResolvesServerCertUsingSni, sign::CertifiedKey, RootCertStore, ServerConfig};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Reloader service for certificates and keys for TLS
|
||||
pub struct CryptoReloader<T>
|
||||
where
|
||||
T: CryptoSource,
|
||||
{
|
||||
globals: Arc<Globals<T>>,
|
||||
}
|
||||
|
||||
pub type SniServerCryptoMap = HashMap<ServerNameBytesExp, Arc<ServerConfig>>;
|
||||
pub struct ServerCrypto {
|
||||
// For Quic/HTTP3, only servers with no client authentication
|
||||
pub inner_global_no_client_auth: Arc<ServerConfig>,
|
||||
// For TLS over TCP/HTTP2 and 1.1, map of SNI to server_crypto for all given servers
|
||||
pub inner_local_map: Arc<SniServerCryptoMap>,
|
||||
}
|
||||
|
||||
/// Reloader target for the certificate reloader service
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Default)]
|
||||
pub struct ServerCryptoBase {
|
||||
inner: HashMap<ServerNameBytesExp, CertsAndKeys>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T> Reload<ServerCryptoBase> for CryptoReloader<T>
|
||||
where
|
||||
T: CryptoSource + Sync + Send,
|
||||
{
|
||||
type Source = Arc<Globals<T>>;
|
||||
async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ServerCryptoBase>> {
|
||||
Ok(Self {
|
||||
globals: source.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn reload(&self) -> Result<Option<ServerCryptoBase>, ReloaderError<ServerCryptoBase>> {
|
||||
let mut certs_and_keys_map = ServerCryptoBase::default();
|
||||
|
||||
for (server_name_bytes_exp, backend) in self.globals.backends.apps.iter() {
|
||||
if let Some(crypto_source) = &backend.crypto_source {
|
||||
let certs_and_keys = crypto_source
|
||||
.read()
|
||||
.await
|
||||
.map_err(|_e| ReloaderError::<ServerCryptoBase>::Reload("Failed to reload cert, key or ca cert"))?;
|
||||
certs_and_keys_map
|
||||
.inner
|
||||
.insert(server_name_bytes_exp.to_owned(), certs_and_keys);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(certs_and_keys_map))
|
||||
}
|
||||
}
|
||||
|
||||
impl TryInto<Arc<ServerCrypto>> for &ServerCryptoBase {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_into(self) -> Result<Arc<ServerCrypto>, Self::Error> {
|
||||
let mut resolver_global = ResolvesServerCertUsingSni::new();
|
||||
let mut server_crypto_local_map: SniServerCryptoMap = HashMap::default();
|
||||
|
||||
for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() {
|
||||
let server_name: String = server_name_bytes_exp.try_into()?;
|
||||
|
||||
// Parse server certificates and private keys
|
||||
let Ok(certified_key): Result<CertifiedKey, _> = certs_and_keys.parse_server_certs_and_keys() else {
|
||||
warn!("Failed to add certificate for {}", server_name);
|
||||
continue;
|
||||
};
|
||||
|
||||
let mut resolver_local = ResolvesServerCertUsingSni::new();
|
||||
let mut client_ca_roots_local = RootCertStore::empty();
|
||||
|
||||
// add server certificate and key
|
||||
if let Err(e) = resolver_local.add(server_name.as_str(), certified_key.to_owned()) {
|
||||
error!(
|
||||
"{}: Failed to read some certificates and keys {}",
|
||||
server_name.as_str(),
|
||||
e
|
||||
)
|
||||
}
|
||||
|
||||
// add client certificate if specified
|
||||
if certs_and_keys.client_ca_certs.is_none() {
|
||||
// aggregated server config for no client auth server for http3
|
||||
if let Err(e) = resolver_global.add(server_name.as_str(), certified_key) {
|
||||
error!(
|
||||
"{}: Failed to read some certificates and keys {}",
|
||||
server_name.as_str(),
|
||||
e
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// add client certificate if specified
|
||||
match certs_and_keys.parse_client_ca_certs() {
|
||||
Ok((owned_trust_anchors, _subject_key_ids)) => {
|
||||
client_ca_roots_local.add_server_trust_anchors(owned_trust_anchors.into_iter());
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to add client CA certificate for {}: {}",
|
||||
server_name.as_str(),
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut server_config_local = if client_ca_roots_local.is_empty() {
|
||||
// with no client auth, enable http1.1 -- 3
|
||||
#[cfg(not(feature = "http3"))]
|
||||
{
|
||||
ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_cert_resolver(Arc::new(resolver_local))
|
||||
}
|
||||
#[cfg(feature = "http3")]
|
||||
{
|
||||
let mut sc = ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_cert_resolver(Arc::new(resolver_local));
|
||||
sc.alpn_protocols = vec![b"h3".to_vec(), b"hq-29".to_vec()]; // TODO: remove hq-29 later?
|
||||
sc
|
||||
}
|
||||
} else {
|
||||
// with client auth, enable only http1.1 and 2
|
||||
// let client_certs_verifier = rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(client_ca_roots);
|
||||
let client_certs_verifier = rustls::server::AllowAnyAuthenticatedClient::new(client_ca_roots_local);
|
||||
ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_client_cert_verifier(Arc::new(client_certs_verifier))
|
||||
.with_cert_resolver(Arc::new(resolver_local))
|
||||
};
|
||||
server_config_local.alpn_protocols.push(b"h2".to_vec());
|
||||
server_config_local.alpn_protocols.push(b"http/1.1".to_vec());
|
||||
|
||||
server_crypto_local_map.insert(server_name_bytes_exp.to_owned(), Arc::new(server_config_local));
|
||||
}
|
||||
|
||||
//////////////
|
||||
let mut server_crypto_global = ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_cert_resolver(Arc::new(resolver_global));
|
||||
|
||||
//////////////////////////////
|
||||
|
||||
#[cfg(feature = "http3")]
|
||||
{
|
||||
server_crypto_global.alpn_protocols = vec![
|
||||
b"h3".to_vec(),
|
||||
b"hq-29".to_vec(), // TODO: remove later?
|
||||
b"h2".to_vec(),
|
||||
b"http/1.1".to_vec(),
|
||||
];
|
||||
}
|
||||
#[cfg(not(feature = "http3"))]
|
||||
{
|
||||
server_crypto_global.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
|
||||
}
|
||||
|
||||
Ok(Arc::new(ServerCrypto {
|
||||
inner_global_no_client_auth: Arc::new(server_crypto_global),
|
||||
inner_local_map: Arc::new(server_crypto_local_map),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
|
@ -1,7 +1,9 @@
|
|||
mod crypto_service;
|
||||
mod proxy_client_cert;
|
||||
#[cfg(feature = "http3")]
|
||||
mod proxy_h3;
|
||||
mod proxy_main;
|
||||
mod proxy_tls;
|
||||
mod socket;
|
||||
|
||||
pub use proxy_main::{Proxy, ProxyBuilder, ProxyBuilderError};
|
||||
|
|
@ -10,26 +10,18 @@ pub(super) fn check_client_authentication(
|
|||
client_certs: Option<&[Certificate]>,
|
||||
client_ca_keyids_set_for_sni: Option<&HashSet<Vec<u8>>>,
|
||||
) -> std::result::Result<(), ClientCertsError> {
|
||||
let client_ca_keyids_set = match client_ca_keyids_set_for_sni {
|
||||
Some(c) => c,
|
||||
None => {
|
||||
let Some(client_ca_keyids_set) = client_ca_keyids_set_for_sni else {
|
||||
// No client cert settings for given server name
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let client_certs = match client_certs {
|
||||
Some(c) => {
|
||||
debug!("Incoming TLS client is (temporarily) authenticated via client cert");
|
||||
c
|
||||
}
|
||||
None => {
|
||||
let Some(client_certs) = client_certs else {
|
||||
error!("Client certificate is needed for given server name");
|
||||
return Err(ClientCertsError::ClientCertRequired(
|
||||
"Client certificate is needed for given server name".to_string(),
|
||||
));
|
||||
}
|
||||
};
|
||||
debug!("Incoming TLS client is (temporarily) authenticated via client cert");
|
||||
|
||||
// Check client certificate key ids
|
||||
let mut client_certs_parsed_iter = client_certs.iter().filter_map(|d| parse_x509_certificate(&d.0).ok());
|
||||
|
|
@ -1,14 +1,15 @@
|
|||
use super::Proxy;
|
||||
use crate::{error::*, log::*, utils::ServerNameBytesExp};
|
||||
use crate::{certs::CryptoSource, error::*, log::*, utils::ServerNameBytesExp};
|
||||
use bytes::{Buf, Bytes};
|
||||
use h3::{quic::BidiStream, server::RequestStream};
|
||||
use hyper::{client::connect::Connect, Body, Request, Response};
|
||||
use std::net::SocketAddr;
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
impl<T> Proxy<T>
|
||||
impl<T, U> Proxy<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone + Sync + Send + 'static,
|
||||
{
|
||||
pub(super) async fn connection_serve_h3(
|
||||
self,
|
||||
|
|
@ -43,7 +44,7 @@ where
|
|||
// We consider the connection count separately from the stream count.
|
||||
// Max clients for h1/h2 = max 'stream' for h3.
|
||||
let request_count = self.globals.request_count.clone();
|
||||
if request_count.increment() > self.globals.max_clients {
|
||||
if request_count.increment() > self.globals.proxy_config.max_clients {
|
||||
request_count.decrement();
|
||||
h3_conn.shutdown(0).await?;
|
||||
break;
|
||||
|
|
@ -54,7 +55,7 @@ where
|
|||
let tls_server_name_inner = tls_server_name.clone();
|
||||
self.globals.runtime_handle.spawn(async move {
|
||||
if let Err(e) = timeout(
|
||||
self_inner.globals.proxy_timeout + Duration::from_secs(1), // timeout per stream are considered as same as one in http2
|
||||
self_inner.globals.proxy_config.proxy_timeout + Duration::from_secs(1), // timeout per stream are considered as same as one in http2
|
||||
self_inner.stream_serve_h3(req, stream, client_addr, tls_server_name_inner),
|
||||
)
|
||||
.await
|
||||
|
|
@ -97,7 +98,7 @@ where
|
|||
|
||||
// Buffering and sending body through channel for protocol conversion like h3 -> h2/http1.1
|
||||
// The underling buffering, i.e., buffer given by the API recv_data.await?, is handled by quinn.
|
||||
let max_body_size = self.globals.h3_request_max_body_size;
|
||||
let max_body_size = self.globals.proxy_config.h3_request_max_body_size;
|
||||
self.globals.runtime_handle.spawn(async move {
|
||||
let mut sender = body_sender;
|
||||
let mut size = 0usize;
|
||||
|
|
@ -1,11 +1,12 @@
|
|||
// use super::proxy_handler::handle_request;
|
||||
use crate::{error::*, globals::Globals, handler::HttpMessageHandler, log::*, utils::ServerNameBytesExp};
|
||||
use super::socket::bind_tcp_socket;
|
||||
use crate::{
|
||||
certs::CryptoSource, error::*, globals::Globals, handler::HttpMessageHandler, log::*, utils::ServerNameBytesExp,
|
||||
};
|
||||
use derive_builder::{self, Builder};
|
||||
use hyper::{client::connect::Connect, server::conn::Http, service::service_fn, Body, Request};
|
||||
use std::{net::SocketAddr, sync::Arc};
|
||||
use tokio::{
|
||||
io::{AsyncRead, AsyncWrite},
|
||||
net::TcpListener,
|
||||
runtime::Handle,
|
||||
time::{timeout, Duration},
|
||||
};
|
||||
|
|
@ -32,19 +33,21 @@ where
|
|||
}
|
||||
|
||||
#[derive(Clone, Builder)]
|
||||
pub struct Proxy<T>
|
||||
pub struct Proxy<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone + Sync + Send + 'static,
|
||||
{
|
||||
pub listening_on: SocketAddr,
|
||||
pub tls_enabled: bool, // TCP待受がTLSかどうか
|
||||
pub msg_handler: HttpMessageHandler<T>,
|
||||
pub globals: Arc<Globals>,
|
||||
pub msg_handler: HttpMessageHandler<T, U>,
|
||||
pub globals: Arc<Globals<U>>,
|
||||
}
|
||||
|
||||
impl<T> Proxy<T>
|
||||
impl<T, U> Proxy<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone + Sync + Send,
|
||||
{
|
||||
pub(super) fn client_serve<I>(
|
||||
self,
|
||||
|
|
@ -56,7 +59,7 @@ where
|
|||
I: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
let request_count = self.globals.request_count.clone();
|
||||
if request_count.increment() > self.globals.max_clients {
|
||||
if request_count.increment() > self.globals.proxy_config.max_clients {
|
||||
request_count.decrement();
|
||||
return;
|
||||
}
|
||||
|
|
@ -64,7 +67,7 @@ where
|
|||
|
||||
self.globals.runtime_handle.clone().spawn(async move {
|
||||
timeout(
|
||||
self.globals.proxy_timeout + Duration::from_secs(1),
|
||||
self.globals.proxy_config.proxy_timeout + Duration::from_secs(1),
|
||||
server
|
||||
.serve_connection(
|
||||
stream,
|
||||
|
|
@ -90,7 +93,9 @@ where
|
|||
|
||||
async fn start_without_tls(self, server: Http<LocalExecutor>) -> Result<()> {
|
||||
let listener_service = async {
|
||||
let tcp_listener = TcpListener::bind(&self.listening_on).await?;
|
||||
let tcp_socket = bind_tcp_socket(&self.listening_on)?;
|
||||
let tcp_listener = tcp_socket.listen(self.globals.proxy_config.tcp_listen_backlog)?;
|
||||
// let tcp_listener = TcpListener::bind(&self.listening_on).await?;
|
||||
info!("Start TCP proxy serving with HTTP request for configured host names");
|
||||
while let Ok((stream, _client_addr)) = tcp_listener.accept().await {
|
||||
self.clone().client_serve(stream, server.clone(), _client_addr, None);
|
||||
|
|
@ -103,8 +108,8 @@ where
|
|||
|
||||
pub async fn start(self) -> Result<()> {
|
||||
let mut server = Http::new();
|
||||
server.http1_keep_alive(self.globals.keepalive);
|
||||
server.http2_max_concurrent_streams(self.globals.max_concurrent_streams);
|
||||
server.http1_keep_alive(self.globals.proxy_config.keepalive);
|
||||
server.http2_max_concurrent_streams(self.globals.proxy_config.max_concurrent_streams);
|
||||
server.pipeline_flush(true);
|
||||
let executor = LocalExecutor::new(self.globals.runtime_handle.clone());
|
||||
let server = server.with_executor(executor);
|
||||
|
|
@ -1,49 +1,33 @@
|
|||
use super::proxy_main::{LocalExecutor, Proxy};
|
||||
use crate::{
|
||||
backend::{ServerCrypto, SniServerCryptoMap},
|
||||
constants::*,
|
||||
error::*,
|
||||
log::*,
|
||||
utils::BytesName,
|
||||
#[cfg(feature = "http3")]
|
||||
use super::socket::bind_udp_socket;
|
||||
use super::{
|
||||
crypto_service::{CryptoReloader, ServerCrypto, ServerCryptoBase, SniServerCryptoMap},
|
||||
proxy_main::{LocalExecutor, Proxy},
|
||||
socket::bind_tcp_socket,
|
||||
};
|
||||
use crate::{certs::CryptoSource, constants::*, error::*, log::*, utils::BytesName};
|
||||
use hot_reload::{ReloaderReceiver, ReloaderService};
|
||||
use hyper::{client::connect::Connect, server::conn::Http};
|
||||
#[cfg(feature = "http3")]
|
||||
use quinn::{crypto::rustls::HandshakeData, Endpoint, ServerConfig as QuicServerConfig, TransportConfig};
|
||||
#[cfg(feature = "http3")]
|
||||
use rustls::ServerConfig;
|
||||
use std::sync::Arc;
|
||||
use tokio::{
|
||||
net::TcpListener,
|
||||
sync::watch,
|
||||
time::{sleep, timeout, Duration},
|
||||
};
|
||||
use tokio::time::{timeout, Duration};
|
||||
|
||||
impl<T> Proxy<T>
|
||||
impl<T, U> Proxy<T, U>
|
||||
where
|
||||
T: Connect + Clone + Sync + Send + 'static,
|
||||
U: CryptoSource + Clone + Sync + Send + 'static,
|
||||
{
|
||||
async fn cert_service(&self, server_crypto_tx: watch::Sender<Option<Arc<ServerCrypto>>>) {
|
||||
info!("Start cert watch service");
|
||||
loop {
|
||||
if let Ok(server_crypto) = self.globals.backends.generate_server_crypto().await {
|
||||
if let Err(_e) = server_crypto_tx.send(Some(Arc::new(server_crypto))) {
|
||||
error!("Failed to populate server crypto");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
error!("Failed to update certs");
|
||||
}
|
||||
sleep(Duration::from_secs(CERTS_WATCH_DELAY_SECS.into())).await;
|
||||
}
|
||||
}
|
||||
|
||||
// TCP Listener Service, i.e., http/2 and http/1.1
|
||||
async fn listener_service(
|
||||
&self,
|
||||
server: Http<LocalExecutor>,
|
||||
mut server_crypto_rx: watch::Receiver<Option<Arc<ServerCrypto>>>,
|
||||
mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>,
|
||||
) -> Result<()> {
|
||||
let tcp_listener = TcpListener::bind(&self.listening_on).await?;
|
||||
let tcp_socket = bind_tcp_socket(&self.listening_on)?;
|
||||
let tcp_listener = tcp_socket.listen(self.globals.proxy_config.tcp_listen_backlog)?;
|
||||
info!("Start TCP proxy serving with HTTPS request for configured host names");
|
||||
|
||||
let mut server_crypto_map: Option<Arc<SniServerCryptoMap>> = None;
|
||||
|
|
@ -105,9 +89,14 @@ where
|
|||
}
|
||||
_ = server_crypto_rx.changed() => {
|
||||
if server_crypto_rx.borrow().is_none() {
|
||||
error!("Reloader is broken");
|
||||
break;
|
||||
}
|
||||
let server_crypto = server_crypto_rx.borrow().clone().unwrap();
|
||||
let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
|
||||
let Some(server_crypto): Option<Arc<ServerCrypto>> = (&cert_keys_map).try_into().ok() else {
|
||||
error!("Failed to update server crypto");
|
||||
break;
|
||||
};
|
||||
server_crypto_map = Some(server_crypto.inner_local_map.clone());
|
||||
}
|
||||
else => break
|
||||
|
|
@ -117,7 +106,7 @@ where
|
|||
}
|
||||
|
||||
#[cfg(feature = "http3")]
|
||||
async fn listener_service_h3(&self, mut server_crypto_rx: watch::Receiver<Option<Arc<ServerCrypto>>>) -> Result<()> {
|
||||
async fn listener_service_h3(&self, mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>) -> Result<()> {
|
||||
info!("Start UDP proxy serving with HTTP/3 request for configured host names");
|
||||
// first set as null config server
|
||||
let rustls_server_config = ServerConfig::builder()
|
||||
|
|
@ -129,14 +118,30 @@ where
|
|||
|
||||
let mut transport_config_quic = TransportConfig::default();
|
||||
transport_config_quic
|
||||
.max_concurrent_bidi_streams(self.globals.h3_max_concurrent_bidistream)
|
||||
.max_concurrent_uni_streams(self.globals.h3_max_concurrent_unistream)
|
||||
.max_idle_timeout(self.globals.h3_max_idle_timeout);
|
||||
.max_concurrent_bidi_streams(self.globals.proxy_config.h3_max_concurrent_bidistream)
|
||||
.max_concurrent_uni_streams(self.globals.proxy_config.h3_max_concurrent_unistream)
|
||||
.max_idle_timeout(
|
||||
self
|
||||
.globals
|
||||
.proxy_config
|
||||
.h3_max_idle_timeout
|
||||
.map(|v| quinn::IdleTimeout::try_from(v).unwrap()),
|
||||
);
|
||||
|
||||
let mut server_config_h3 = QuicServerConfig::with_crypto(Arc::new(rustls_server_config));
|
||||
server_config_h3.transport = Arc::new(transport_config_quic);
|
||||
server_config_h3.concurrent_connections(self.globals.h3_max_concurrent_connections);
|
||||
let endpoint = Endpoint::server(server_config_h3, self.listening_on)?;
|
||||
server_config_h3.concurrent_connections(self.globals.proxy_config.h3_max_concurrent_connections);
|
||||
|
||||
// To reuse address
|
||||
let udp_socket = bind_udp_socket(&self.listening_on)?;
|
||||
let runtime = quinn::default_runtime()
|
||||
.ok_or_else(|| std::io::Error::new(std::io::ErrorKind::Other, "No async runtime found"))?;
|
||||
let endpoint = Endpoint::new(
|
||||
quinn::EndpointConfig::default(),
|
||||
Some(server_config_h3),
|
||||
udp_socket,
|
||||
runtime,
|
||||
)?;
|
||||
|
||||
let mut server_crypto: Option<Arc<ServerCrypto>> = None;
|
||||
loop {
|
||||
|
|
@ -146,29 +151,24 @@ where
|
|||
continue;
|
||||
}
|
||||
let mut conn: quinn::Connecting = new_conn.unwrap();
|
||||
let hsd = match conn.handshake_data().await {
|
||||
Ok(h) => h,
|
||||
Err(_) => continue
|
||||
let Ok(hsd) = conn.handshake_data().await else {
|
||||
continue
|
||||
};
|
||||
|
||||
let hsd_downcast = match hsd.downcast::<HandshakeData>() {
|
||||
Ok(d) => d,
|
||||
Err(_) => continue
|
||||
let Ok(hsd_downcast) = hsd.downcast::<HandshakeData>() else {
|
||||
continue
|
||||
};
|
||||
let new_server_name = match hsd_downcast.server_name {
|
||||
Some(sn) => sn.to_server_name_vec(),
|
||||
None => {
|
||||
let Some(new_server_name) = hsd_downcast.server_name else {
|
||||
warn!("HTTP/3 no SNI is given");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
debug!(
|
||||
"HTTP/3 connection incoming (SNI {:?})",
|
||||
new_server_name.0
|
||||
new_server_name
|
||||
);
|
||||
// TODO: server_nameをここで出してどんどん深く投げていくのは効率が悪い。connecting -> connectionsの後でいいのでは?
|
||||
// TODO: 通常のTLSと同じenumか何かにまとめたい
|
||||
let fut = self.clone().connection_serve_h3(conn, new_server_name);
|
||||
let fut = self.clone().connection_serve_h3(conn, new_server_name.to_server_name_vec());
|
||||
self.globals.runtime_handle.spawn(async move {
|
||||
// Timeout is based on underlying quic
|
||||
if let Err(e) = fut.await {
|
||||
|
|
@ -178,12 +178,18 @@ where
|
|||
}
|
||||
_ = server_crypto_rx.changed() => {
|
||||
if server_crypto_rx.borrow().is_none() {
|
||||
error!("Reloader is broken");
|
||||
break;
|
||||
}
|
||||
server_crypto = server_crypto_rx.borrow().clone();
|
||||
if server_crypto.is_some(){
|
||||
endpoint.set_server_config(Some(QuicServerConfig::with_crypto(server_crypto.clone().unwrap().inner_global_no_client_auth.clone())));
|
||||
}
|
||||
let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
|
||||
|
||||
server_crypto = (&cert_keys_map).try_into().ok();
|
||||
let Some(inner) = server_crypto.clone() else {
|
||||
error!("Failed to update server crypto for h3");
|
||||
break;
|
||||
};
|
||||
endpoint.set_server_config(Some(QuicServerConfig::with_crypto(inner.clone().inner_global_no_client_auth.clone())));
|
||||
|
||||
}
|
||||
else => break
|
||||
}
|
||||
|
|
@ -193,14 +199,21 @@ where
|
|||
}
|
||||
|
||||
pub async fn start_with_tls(self, server: Http<LocalExecutor>) -> Result<()> {
|
||||
let (tx, rx) = watch::channel::<Option<Arc<ServerCrypto>>>(None);
|
||||
let (cert_reloader_service, cert_reloader_rx) = ReloaderService::<CryptoReloader<U>, ServerCryptoBase>::new(
|
||||
&self.globals.clone(),
|
||||
CERTS_WATCH_DELAY_SECS,
|
||||
!LOAD_CERTS_ONLY_WHEN_UPDATED,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!(e))?;
|
||||
|
||||
#[cfg(not(feature = "http3"))]
|
||||
{
|
||||
tokio::select! {
|
||||
_= self.cert_service(tx) => {
|
||||
_= cert_reloader_service.start() => {
|
||||
error!("Cert service for TLS exited");
|
||||
},
|
||||
_ = self.listener_service(server, rx) => {
|
||||
_ = self.listener_service(server, cert_reloader_rx) => {
|
||||
error!("TCP proxy service for TLS exited");
|
||||
},
|
||||
else => {
|
||||
|
|
@ -212,15 +225,15 @@ where
|
|||
}
|
||||
#[cfg(feature = "http3")]
|
||||
{
|
||||
if self.globals.http3 {
|
||||
if self.globals.proxy_config.http3 {
|
||||
tokio::select! {
|
||||
_= self.cert_service(tx) => {
|
||||
_= cert_reloader_service.start() => {
|
||||
error!("Cert service for TLS exited");
|
||||
},
|
||||
_ = self.listener_service(server, rx.clone()) => {
|
||||
_ = self.listener_service(server, cert_reloader_rx.clone()) => {
|
||||
error!("TCP proxy service for TLS exited");
|
||||
},
|
||||
_= self.listener_service_h3(rx) => {
|
||||
_= self.listener_service_h3(cert_reloader_rx) => {
|
||||
error!("UDP proxy service for QUIC exited");
|
||||
},
|
||||
else => {
|
||||
|
|
@ -231,10 +244,10 @@ where
|
|||
Ok(())
|
||||
} else {
|
||||
tokio::select! {
|
||||
_= self.cert_service(tx) => {
|
||||
_= cert_reloader_service.start() => {
|
||||
error!("Cert service for TLS exited");
|
||||
},
|
||||
_ = self.listener_service(server, rx) => {
|
||||
_ = self.listener_service(server, cert_reloader_rx) => {
|
||||
error!("TCP proxy service for TLS exited");
|
||||
},
|
||||
else => {
|
||||
45
rpxy-lib/src/proxy/socket.rs
Normal file
45
rpxy-lib/src/proxy/socket.rs
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
use crate::{error::*, log::*};
|
||||
#[cfg(feature = "http3")]
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(feature = "http3")]
|
||||
use std::net::UdpSocket;
|
||||
use tokio::net::TcpSocket;
|
||||
|
||||
/// Bind TCP socket to the given `SocketAddr`, and returns the TCP socket with `SO_REUSEADDR` and `SO_REUSEPORT` options.
|
||||
/// This option is required to re-bind the socket address when the proxy instance is reconstructed.
|
||||
pub(super) fn bind_tcp_socket(listening_on: &SocketAddr) -> Result<TcpSocket> {
|
||||
let tcp_socket = if listening_on.is_ipv6() {
|
||||
TcpSocket::new_v6()
|
||||
} else {
|
||||
TcpSocket::new_v4()
|
||||
}?;
|
||||
tcp_socket.set_reuseaddr(true)?;
|
||||
tcp_socket.set_reuseport(true)?;
|
||||
if let Err(e) = tcp_socket.bind(*listening_on) {
|
||||
error!("Failed to bind TCP socket: {}", e);
|
||||
return Err(RpxyError::Io(e));
|
||||
};
|
||||
Ok(tcp_socket)
|
||||
}
|
||||
|
||||
#[cfg(feature = "http3")]
|
||||
/// Bind UDP socket to the given `SocketAddr`, and returns the UDP socket with `SO_REUSEADDR` and `SO_REUSEPORT` options.
|
||||
/// This option is required to re-bind the socket address when the proxy instance is reconstructed.
|
||||
pub(super) fn bind_udp_socket(listening_on: &SocketAddr) -> Result<UdpSocket> {
|
||||
let socket = if listening_on.is_ipv6() {
|
||||
Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))
|
||||
} else {
|
||||
Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))
|
||||
}?;
|
||||
// socket.set_reuse_address(true)?; // This isn't necessary
|
||||
socket.set_reuse_port(true)?;
|
||||
|
||||
if let Err(e) = socket.bind(&(*listening_on).into()) {
|
||||
error!("Failed to bind UDP socket: {}", e);
|
||||
return Err(RpxyError::Io(e));
|
||||
};
|
||||
let udp_socket: UdpSocket = socket.into();
|
||||
|
||||
Ok(udp_socket)
|
||||
}
|
||||
|
|
@ -7,6 +7,13 @@ impl From<&[u8]> for ServerNameBytesExp {
|
|||
Self(b.to_ascii_lowercase())
|
||||
}
|
||||
}
|
||||
impl TryInto<String> for &ServerNameBytesExp {
|
||||
type Error = anyhow::Error;
|
||||
fn try_into(self) -> Result<String, Self::Error> {
|
||||
let s = std::str::from_utf8(&self.0)?;
|
||||
Ok(s.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
/// Path name, like "/path/ok", represented in bytes-based struct
|
||||
/// for searching hashmap or key list by exact or longest-prefix matching
|
||||
|
|
@ -16,6 +23,9 @@ impl PathNameBytesExp {
|
|||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.len() == 0
|
||||
}
|
||||
pub fn get<I>(&self, index: I) -> Option<&I::Output>
|
||||
where
|
||||
I: std::slice::SliceIndex<[u8]>,
|
||||
|
|
@ -1,342 +0,0 @@
|
|||
mod load_balance;
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
mod load_balance_sticky;
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
mod sticky_cookie;
|
||||
mod upstream;
|
||||
mod upstream_opts;
|
||||
|
||||
#[cfg(feature = "sticky-cookie")]
|
||||
pub use self::sticky_cookie::{StickyCookie, StickyCookieValue};
|
||||
pub use self::{
|
||||
load_balance::{LbContext, LoadBalance},
|
||||
upstream::{ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder},
|
||||
upstream_opts::UpstreamOption,
|
||||
};
|
||||
use crate::{
|
||||
log::*,
|
||||
utils::{BytesName, PathNameBytesExp, ServerNameBytesExp},
|
||||
};
|
||||
use derive_builder::Builder;
|
||||
use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
|
||||
use rustls::{OwnedTrustAnchor, RootCertStore};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
fs::File,
|
||||
io::{self, BufReader, Cursor, Read},
|
||||
path::PathBuf,
|
||||
sync::Arc,
|
||||
};
|
||||
use tokio_rustls::rustls::{
|
||||
server::ResolvesServerCertUsingSni,
|
||||
sign::{any_supported_type, CertifiedKey},
|
||||
Certificate, PrivateKey, ServerConfig,
|
||||
};
|
||||
use x509_parser::prelude::*;
|
||||
|
||||
/// Struct serving information to route incoming connections, like server name to be handled and tls certs/keys settings.
#[derive(Builder)]
pub struct Backend {
  #[builder(setter(into))]
  /// backend application name, e.g., app1
  pub app_name: String,
  #[builder(setter(custom))]
  /// server name, e.g., example.com, in String ascii lower case
  pub server_name: String,
  /// struct of reverse proxy serving incoming request
  pub reverse_proxy: ReverseProxy,

  /// tls settings: path to the server certificate chain (PEM)
  #[builder(setter(custom), default)]
  pub tls_cert_path: Option<PathBuf>,
  /// path to the server private key (PEM, PKCS#8 or RSA)
  #[builder(setter(custom), default)]
  pub tls_cert_key_path: Option<PathBuf>,
  /// redirect plaintext http requests to https when true (defaulted in config parsing)
  #[builder(default)]
  pub https_redirection: Option<bool>,
  /// path to a CA certificate for client (mutual-TLS) authentication, if enabled
  #[builder(setter(custom), default)]
  pub client_ca_cert_path: Option<PathBuf>,
}
|
||||
impl<'a> BackendBuilder {
|
||||
pub fn server_name(&mut self, server_name: impl Into<Cow<'a, str>>) -> &mut Self {
|
||||
self.server_name = Some(server_name.into().to_ascii_lowercase());
|
||||
self
|
||||
}
|
||||
pub fn tls_cert_path(&mut self, v: &Option<String>) -> &mut Self {
|
||||
self.tls_cert_path = Some(opt_string_to_opt_pathbuf(v));
|
||||
self
|
||||
}
|
||||
pub fn tls_cert_key_path(&mut self, v: &Option<String>) -> &mut Self {
|
||||
self.tls_cert_key_path = Some(opt_string_to_opt_pathbuf(v));
|
||||
self
|
||||
}
|
||||
pub fn client_ca_cert_path(&mut self, v: &Option<String>) -> &mut Self {
|
||||
self.client_ca_cert_path = Some(opt_string_to_opt_pathbuf(v));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert an optional string from the config into an optional `PathBuf`.
/// `as_deref` borrows the inner `String` as `&str`, avoiding the full
/// `Option<String>` clone the previous `to_owned()` version paid.
fn opt_string_to_opt_pathbuf(input: &Option<String>) -> Option<PathBuf> {
  input.as_deref().map(PathBuf::from)
}
|
||||
|
||||
impl Backend {
|
||||
pub fn read_certs_and_key(&self) -> io::Result<CertifiedKey> {
|
||||
debug!("Read TLS server certificates and private key");
|
||||
let (Some(certs_path), Some(certs_keys_path)) = (self.tls_cert_path.as_ref(), self.tls_cert_key_path.as_ref()) else {
|
||||
return Err(io::Error::new(io::ErrorKind::Other, "Invalid certs and keys paths"));
|
||||
};
|
||||
let certs: Vec<_> = {
|
||||
let certs_path_str = certs_path.display().to_string();
|
||||
let mut reader = BufReader::new(File::open(certs_path).map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the certificates [{certs_path_str}]: {e}"),
|
||||
)
|
||||
})?);
|
||||
rustls_pemfile::certs(&mut reader)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the certificates"))?
|
||||
}
|
||||
.drain(..)
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
let certs_keys: Vec<_> = {
|
||||
let certs_keys_path_str = certs_keys_path.display().to_string();
|
||||
let encoded_keys = {
|
||||
let mut encoded_keys = vec![];
|
||||
File::open(certs_keys_path)
|
||||
.map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the certificate keys [{certs_keys_path_str}]: {e}"),
|
||||
)
|
||||
})?
|
||||
.read_to_end(&mut encoded_keys)?;
|
||||
encoded_keys
|
||||
};
|
||||
let mut reader = Cursor::new(encoded_keys);
|
||||
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader).map_err(|_| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Unable to parse the certificates private keys (PKCS8)",
|
||||
)
|
||||
})?;
|
||||
reader.set_position(0);
|
||||
let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader)?;
|
||||
let mut keys = pkcs8_keys;
|
||||
keys.append(&mut rsa_keys);
|
||||
if keys.is_empty() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"No private keys found - Make sure that they are in PKCS#8/PEM format",
|
||||
));
|
||||
}
|
||||
keys.drain(..).map(PrivateKey).collect()
|
||||
};
|
||||
let signing_key = certs_keys
|
||||
.iter()
|
||||
.find_map(|k| {
|
||||
if let Ok(sk) = any_supported_type(k) {
|
||||
Some(sk)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.ok_or_else(|| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Unable to find a valid certificate and key",
|
||||
)
|
||||
})?;
|
||||
Ok(CertifiedKey::new(certs, signing_key))
|
||||
}
|
||||
|
||||
fn read_client_ca_certs(&self) -> io::Result<(Vec<OwnedTrustAnchor>, HashSet<Vec<u8>>)> {
|
||||
debug!("Read CA certificates for client authentication");
|
||||
// Reads client certificate and returns client
|
||||
let client_ca_cert_path = {
|
||||
let Some(c) = self.client_ca_cert_path.as_ref() else {
|
||||
return Err(io::Error::new(io::ErrorKind::Other, "Invalid certs and keys paths"));
|
||||
};
|
||||
c
|
||||
};
|
||||
let certs: Vec<_> = {
|
||||
let certs_path_str = client_ca_cert_path.display().to_string();
|
||||
let mut reader = BufReader::new(File::open(client_ca_cert_path).map_err(|e| {
|
||||
io::Error::new(
|
||||
e.kind(),
|
||||
format!("Unable to load the client certificates [{certs_path_str}]: {e}"),
|
||||
)
|
||||
})?);
|
||||
rustls_pemfile::certs(&mut reader)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the client certificates"))?
|
||||
}
|
||||
.drain(..)
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
|
||||
let owned_trust_anchors: Vec<_> = certs
|
||||
.iter()
|
||||
.map(|v| {
|
||||
// let trust_anchor = tokio_rustls::webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
|
||||
let trust_anchor = webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
|
||||
rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
|
||||
trust_anchor.subject,
|
||||
trust_anchor.spki,
|
||||
trust_anchor.name_constraints,
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// TODO: SKID is not used currently
|
||||
let subject_key_identifiers: HashSet<_> = certs
|
||||
.iter()
|
||||
.filter_map(|v| {
|
||||
// retrieve ca key id (subject key id)
|
||||
let cert = parse_x509_certificate(&v.0).unwrap().1;
|
||||
let subject_key_ids = cert
|
||||
.iter_extensions()
|
||||
.filter_map(|ext| match ext.parsed_extension() {
|
||||
ParsedExtension::SubjectKeyIdentifier(skid) => Some(skid),
|
||||
_ => None,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if !subject_key_ids.is_empty() {
|
||||
Some(subject_key_ids[0].0.to_owned())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok((owned_trust_anchors, subject_key_identifiers))
|
||||
}
|
||||
}
|
||||
|
||||
/// HashMap and some meta information for multiple Backend structs.
pub struct Backends {
  pub apps: HashMap<ServerNameBytesExp, Backend>, // looked up by the host name extracted from hyper::Uri
  pub default_server_name_bytes: Option<ServerNameBytesExp>, // for plaintext http
}

/// Map from server name (SNI bytes) to the per-server rustls `ServerConfig`.
pub type SniServerCryptoMap = HashMap<ServerNameBytesExp, Arc<ServerConfig>>;
/// Aggregated TLS server configurations built from all configured backends.
pub struct ServerCrypto {
  // For Quic/HTTP3, only servers with no client authentication
  pub inner_global_no_client_auth: Arc<ServerConfig>,
  // For TLS over TCP/HTTP2 and 1.1, map of SNI to server_crypto for all given servers
  pub inner_local_map: Arc<SniServerCryptoMap>,
}
|
||||
|
||||
impl Backends {
  /// Build TLS server configurations from every backend that has certs configured.
  ///
  /// Produces:
  /// - a per-SNI map of `ServerConfig`s (ALPN h2 + http/1.1, with client auth when a
  ///   client CA is configured for that backend), and
  /// - one aggregated no-client-auth config used globally (and for QUIC/HTTP3 when
  ///   the `http3` feature is on).
  /// Backends whose certs fail to load are skipped with a warning rather than
  /// failing the whole reload.
  pub async fn generate_server_crypto(&self) -> Result<ServerCrypto, anyhow::Error> {
    let mut resolver_global = ResolvesServerCertUsingSni::new();
    let mut server_crypto_local_map: SniServerCryptoMap = HashMap::default();

    for (server_name_bytes_exp, backend) in self.apps.iter() {
      if backend.tls_cert_key_path.is_some() && backend.tls_cert_path.is_some() {
        match backend.read_certs_and_key() {
          Ok(certified_key) => {
            let mut resolver_local = ResolvesServerCertUsingSni::new();
            let mut client_ca_roots_local = RootCertStore::empty();

            // add server certificate and key
            if let Err(e) = resolver_local.add(backend.server_name.as_str(), certified_key.to_owned()) {
              error!(
                "{}: Failed to read some certificates and keys {}",
                backend.server_name.as_str(),
                e
              )
            }

            if backend.client_ca_cert_path.is_none() {
              // aggregated server config for no client auth server for http3
              if let Err(e) = resolver_global.add(backend.server_name.as_str(), certified_key) {
                error!(
                  "{}: Failed to read some certificates and keys {}",
                  backend.server_name.as_str(),
                  e
                )
              }
            } else {
              // add client certificate if specified
              match backend.read_client_ca_certs() {
                Ok((owned_trust_anchors, _subject_key_ids)) => {
                  client_ca_roots_local.add_server_trust_anchors(owned_trust_anchors.into_iter());
                }
                Err(e) => {
                  warn!(
                    "Failed to add client CA certificate for {}: {}",
                    backend.server_name.as_str(),
                    e
                  );
                }
              }
            }

            let mut server_config_local = if client_ca_roots_local.is_empty() {
              // with no client auth, enable http1.1 -- 3
              #[cfg(not(feature = "http3"))]
              {
                ServerConfig::builder()
                  .with_safe_defaults()
                  .with_no_client_auth()
                  .with_cert_resolver(Arc::new(resolver_local))
              }
              #[cfg(feature = "http3")]
              {
                let mut sc = ServerConfig::builder()
                  .with_safe_defaults()
                  .with_no_client_auth()
                  .with_cert_resolver(Arc::new(resolver_local));
                sc.alpn_protocols = vec![b"h3".to_vec(), b"hq-29".to_vec()]; // TODO: remove hq-29 later?
                sc
              }
            } else {
              // with client auth, enable only http1.1 and 2
              // let client_certs_verifier = rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(client_ca_roots);
              let client_certs_verifier = rustls::server::AllowAnyAuthenticatedClient::new(client_ca_roots_local);
              ServerConfig::builder()
                .with_safe_defaults()
                .with_client_cert_verifier(Arc::new(client_certs_verifier))
                .with_cert_resolver(Arc::new(resolver_local))
            };
            // h2 and http/1.1 are always offered; h3 was prepended above when applicable.
            server_config_local.alpn_protocols.push(b"h2".to_vec());
            server_config_local.alpn_protocols.push(b"http/1.1".to_vec());

            server_crypto_local_map.insert(server_name_bytes_exp.to_owned(), Arc::new(server_config_local));
          }
          Err(e) => {
            warn!("Failed to add certificate for {}: {}", backend.server_name.as_str(), e);
          }
        }
      }
    }

    // Global (no-client-auth) config aggregating every eligible backend's certs by SNI.
    let mut server_crypto_global = ServerConfig::builder()
      .with_safe_defaults()
      .with_no_client_auth()
      .with_cert_resolver(Arc::new(resolver_global));

    // ALPN list for the global config depends on whether HTTP/3 support is compiled in.
    #[cfg(feature = "http3")]
    {
      server_crypto_global.alpn_protocols = vec![
        b"h3".to_vec(),
        b"hq-29".to_vec(), // TODO: remove later?
        b"h2".to_vec(),
        b"http/1.1".to_vec(),
      ];
    }
    #[cfg(not(feature = "http3"))]
    {
      server_crypto_global.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    }

    Ok(ServerCrypto {
      inner_global_no_client_auth: Arc::new(server_crypto_global),
      inner_local_map: Arc::new(server_crypto_local_map),
    })
  }
}
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
mod parse;
|
||||
mod toml;
|
||||
|
||||
pub use parse::parse_opts;
|
||||
|
|
@ -1,235 +0,0 @@
|
|||
use super::toml::{ConfigToml, ReverseProxyOption};
|
||||
use crate::{
|
||||
backend::{BackendBuilder, ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder, UpstreamOption},
|
||||
constants::*,
|
||||
error::*,
|
||||
globals::*,
|
||||
log::*,
|
||||
utils::{BytesName, PathNameBytesExp},
|
||||
};
|
||||
use clap::Arg;
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
/// Parse CLI arguments and the TOML config file, then populate `globals` in place.
/// Returns an error on an inconsistent configuration (bad ports, missing apps,
/// incomplete TLS settings, ...).
pub fn parse_opts(globals: &mut Globals) -> std::result::Result<(), anyhow::Error> {
  // Forces recompilation when Cargo.toml changes (clap::command! reads package metadata).
  let _ = include_str!("../../Cargo.toml");
  let options = clap::command!().arg(
    Arg::new("config_file")
      .long("config")
      .short('c')
      .value_name("FILE")
      .help("Configuration file path like \"./config.toml\""),
  );
  let matches = options.get_matches();

  let config = if let Some(config_file_path) = matches.get_one::<String>("config_file") {
    ConfigToml::new(config_file_path)?
  } else {
    // Default config Toml
    ConfigToml::default()
  };

  // listen port and socket: at least one of http/https must be set, and they must differ.
  globals.http_port = config.listen_port;
  globals.https_port = config.listen_port_tls;
  ensure!(
    { globals.http_port.is_some() || globals.https_port.is_some() } && {
      if let (Some(p), Some(t)) = (globals.http_port, globals.https_port) {
        p != t
      } else {
        true
      }
    },
    anyhow!("Wrong port spec.")
  );
  // NOTE: when [::]:xx is bound, both v4 and v6 listeners are enabled.
  let listen_addresses: Vec<&str> = match config.listen_ipv6 {
    Some(true) => {
      info!("Listen both IPv4 and IPv6");
      LISTEN_ADDRESSES_V6.to_vec()
    }
    Some(false) | None => {
      info!("Listen IPv4");
      LISTEN_ADDRESSES_V4.to_vec()
    }
  };
  // Cartesian product of listen addresses and configured ports.
  globals.listen_sockets = listen_addresses
    .iter()
    .flat_map(|x| {
      let mut v: Vec<SocketAddr> = vec![];
      if let Some(p) = globals.http_port {
        v.push(format!("{x}:{p}").parse().unwrap());
      }
      if let Some(p) = globals.https_port {
        v.push(format!("{x}:{p}").parse().unwrap());
      }
      v
    })
    .collect();
  if globals.http_port.is_some() {
    info!("Listen port: {}", globals.http_port.unwrap());
  }
  if globals.https_port.is_some() {
    info!("Listen port: {} (for TLS)", globals.https_port.unwrap());
  }

  // max values
  if let Some(c) = config.max_clients {
    globals.max_clients = c as usize;
  }
  if let Some(c) = config.max_concurrent_streams {
    globals.max_concurrent_streams = c;
  }

  // backend apps
  ensure!(config.apps.is_some(), "Missing application spec.");
  let apps = config.apps.unwrap();
  ensure!(!apps.0.is_empty(), "Wrong application spec.");

  // each app: validate, build a Backend, and register it by (lower-cased) server name.
  for (app_name, app) in apps.0.iter() {
    ensure!(app.server_name.is_some(), "Missing server_name");
    let server_name_string = app.server_name.as_ref().unwrap();
    if globals.http_port.is_none() {
      // if only https_port is specified, tls must be configured
      ensure!(app.tls.is_some())
    }

    // backend builder
    let mut backend_builder = BackendBuilder::default();
    // reverse proxy settings
    ensure!(app.reverse_proxy.is_some(), "Missing reverse_proxy");
    let reverse_proxy = get_reverse_proxy(server_name_string, app.reverse_proxy.as_ref().unwrap())?;

    backend_builder
      .app_name(server_name_string)
      .server_name(server_name_string)
      .reverse_proxy(reverse_proxy);

    // TLS settings and build backend instance
    let backend = if app.tls.is_none() {
      ensure!(globals.http_port.is_some(), "Required HTTP port");
      backend_builder.build()?
    } else {
      let tls = app.tls.as_ref().unwrap();
      ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());

      let https_redirection = if tls.https_redirection.is_none() {
        Some(true) // Default true
      } else {
        ensure!(globals.https_port.is_some()); // only when both https ports are configured.
        tls.https_redirection
      };

      backend_builder
        .tls_cert_path(&tls.tls_cert_path)
        .tls_cert_key_path(&tls.tls_cert_key_path)
        .https_redirection(https_redirection)
        .client_ca_cert_path(&tls.client_ca_cert_path)
        .build()?
    };

    globals
      .backends
      .apps
      .insert(server_name_string.to_server_name_vec(), backend);
    info!("Registering application: {} ({})", app_name, server_name_string);
  }

  // default backend application for plaintext http requests
  if let Some(d) = config.default_app {
    let d_sn: Vec<&str> = globals
      .backends
      .apps
      .iter()
      .filter(|(_k, v)| v.app_name == d)
      .map(|(_, v)| v.server_name.as_ref())
      .collect();
    if !d_sn.is_empty() {
      info!(
        "Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
        d, d_sn[0]
      );
      globals.backends.default_server_name_bytes = Some(d_sn[0].to_server_name_vec());
    }
  }

  // experimental
  if let Some(exp) = config.experimental {
    #[cfg(feature = "http3")]
    {
      if let Some(h3option) = exp.h3 {
        globals.http3 = true;
        info!("Experimental HTTP/3.0 is enabled. Note it is still very unstable.");
        if let Some(x) = h3option.alt_svc_max_age {
          globals.h3_alt_svc_max_age = x;
        }
        if let Some(x) = h3option.request_max_body_size {
          globals.h3_request_max_body_size = x;
        }
        if let Some(x) = h3option.max_concurrent_connections {
          globals.h3_max_concurrent_connections = x;
        }
        if let Some(x) = h3option.max_concurrent_bidistream {
          globals.h3_max_concurrent_bidistream = x.into();
        }
        if let Some(x) = h3option.max_concurrent_unistream {
          globals.h3_max_concurrent_unistream = x.into();
        }
        if let Some(x) = h3option.max_idle_timeout {
          // 0 explicitly disables the QUIC idle timeout.
          if x == 0u64 {
            globals.h3_max_idle_timeout = None;
          } else {
            globals.h3_max_idle_timeout =
              Some(quinn::IdleTimeout::try_from(tokio::time::Duration::from_secs(x)).unwrap())
          }
        }
      }
    }

    if let Some(b) = exp.ignore_sni_consistency {
      globals.sni_consistency = !b;
      if b {
        info!("Ignore consistency between TLS SNI and Host header (or Request line). Note it violates RFC.");
      }
    }
  }

  Ok(())
}
|
||||
|
||||
/// Assemble a `ReverseProxy` (path -> upstream group map) from the raw TOML options
/// of a single application.
///
/// Validations: at most one rule may omit `path` (the default/catch-all rule), and
/// `force_http11`/`force_http2` upstream options are mutually exclusive per group.
/// NOTE(review): the `unwrap()`s inside the closure panic on a malformed upstream
/// location or builder failure instead of returning Err — confirm this is intended.
fn get_reverse_proxy(
  server_name_string: &str,
  rp_settings: &[ReverseProxyOption],
) -> std::result::Result<ReverseProxy, anyhow::Error> {
  let mut upstream: HashMap<PathNameBytesExp, UpstreamGroup> = HashMap::default();

  rp_settings.iter().for_each(|rpo| {
    let upstream_vec: Vec<Upstream> = rpo.upstream.iter().map(|x| x.to_upstream().unwrap()).collect();
    let elem = UpstreamGroupBuilder::default()
      .upstream(&upstream_vec)
      .path(&rpo.path)
      .replace_path(&rpo.replace_path)
      .lb(&rpo.load_balance, &upstream_vec, server_name_string, &rpo.path)
      .opts(&rpo.upstream_options)
      .build()
      .unwrap();

    upstream.insert(elem.path.clone(), elem);
  });
  ensure!(
    rp_settings.iter().filter(|rpo| rpo.path.is_none()).count() < 2,
    "Multiple default reverse proxy setting"
  );
  ensure!(
    upstream
      .iter()
      .all(|(_, elem)| !(elem.opts.contains(&UpstreamOption::ConvertHttpsTo11)
        && elem.opts.contains(&UpstreamOption::ConvertHttpsTo2))),
    "either one of force_http11 or force_http2 can be enabled"
  );

  Ok(ReverseProxy { upstream })
}
|
||||
|
|
@ -1,89 +0,0 @@
|
|||
use crate::{backend::Upstream, error::*};
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use serde::Deserialize;
|
||||
use std::fs;
|
||||
|
||||
/// Top-level TOML configuration (`config.toml`).
#[derive(Deserialize, Debug, Default)]
pub struct ConfigToml {
  pub listen_port: Option<u16>,     // plaintext HTTP port
  pub listen_port_tls: Option<u16>, // TLS port
  pub listen_ipv6: Option<bool>,    // when true, bind v6 (and v4) listen addresses
  pub max_concurrent_streams: Option<u32>,
  pub max_clients: Option<u32>,
  pub apps: Option<Apps>,           // backend applications keyed by name
  pub default_app: Option<String>,  // app serving plaintext requests for unmatched server names
  pub experimental: Option<Experimental>,
}

/// HTTP/3 (QUIC) tuning knobs, only compiled with the `http3` feature.
#[cfg(feature = "http3")]
#[derive(Deserialize, Debug, Default)]
pub struct Http3Option {
  pub alt_svc_max_age: Option<u32>,
  pub request_max_body_size: Option<usize>,
  pub max_concurrent_connections: Option<u32>,
  pub max_concurrent_bidistream: Option<u32>,
  pub max_concurrent_unistream: Option<u32>,
  pub max_idle_timeout: Option<u64>, // seconds; 0 disables the idle timeout (see parse_opts)
}

/// Experimental feature switches.
#[derive(Deserialize, Debug, Default)]
pub struct Experimental {
  #[cfg(feature = "http3")]
  pub h3: Option<Http3Option>,
  pub ignore_sni_consistency: Option<bool>,
}

/// Map of application name -> application settings.
#[derive(Deserialize, Debug, Default)]
pub struct Apps(pub HashMap<String, Application>);

/// Settings of a single backend application.
#[derive(Deserialize, Debug, Default)]
pub struct Application {
  pub server_name: Option<String>,
  pub reverse_proxy: Option<Vec<ReverseProxyOption>>,
  pub tls: Option<TlsOption>,
}

/// TLS settings of a single application.
#[derive(Deserialize, Debug, Default)]
pub struct TlsOption {
  pub tls_cert_path: Option<String>,
  pub tls_cert_key_path: Option<String>,
  pub https_redirection: Option<bool>,
  pub client_ca_cert_path: Option<String>, // enables client (mutual-TLS) authentication
}

/// One reverse-proxy routing rule (path -> set of upstreams).
#[derive(Deserialize, Debug, Default)]
pub struct ReverseProxyOption {
  pub path: Option<String>, // None means the default (catch-all) rule
  pub replace_path: Option<String>,
  pub upstream: Vec<UpstreamParams>,
  pub upstream_options: Option<Vec<String>>,
  pub load_balance: Option<String>,
}

/// A single upstream destination as written in the TOML file.
#[derive(Deserialize, Debug, Default)]
pub struct UpstreamParams {
  pub location: String,  // authority (and optional path) without scheme; scheme is derived from `tls`
  pub tls: Option<bool>, // true -> https scheme
}
|
||||
impl UpstreamParams {
|
||||
pub fn to_upstream(&self) -> Result<Upstream> {
|
||||
let mut scheme = "http";
|
||||
if let Some(t) = self.tls {
|
||||
if t {
|
||||
scheme = "https";
|
||||
}
|
||||
}
|
||||
let location = format!("{}://{}", scheme, self.location);
|
||||
Ok(Upstream {
|
||||
uri: location.parse::<hyper::Uri>().map_err(|e| anyhow!("{}", e))?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl ConfigToml {
  /// Read the file at `config_file` and deserialize it into a `ConfigToml`,
  /// attaching context for read vs. parse failures.
  pub fn new(config_file: &str) -> std::result::Result<Self, anyhow::Error> {
    let config_str = fs::read_to_string(config_file).context("Failed to read config file")?;

    toml::from_str(&config_str).context("Failed to parse toml config")
  }
}
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
use crate::backend::Backends;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use tokio::time::Duration;
|
||||
|
||||
/// Runtime-wide settings and shared state of the proxy, populated by `parse_opts`.
pub struct Globals {
  pub listen_sockets: Vec<SocketAddr>, // every socket address the proxy binds (http and/or https ports)
  pub http_port: Option<u16>,
  pub https_port: Option<u16>,

  pub proxy_timeout: Duration,    // timeout for serving the downstream side
  pub upstream_timeout: Duration, // timeout for requests toward upstreams

  pub max_clients: usize,          // NOTE(review): presumably an admission ceiling checked against `request_count` — confirm at call sites
  pub request_count: RequestCount, // shared atomic counter of in-flight requests
  pub max_concurrent_streams: u32,
  pub keepalive: bool,

  pub runtime_handle: tokio::runtime::Handle, // used to spawn proxy tasks
  pub backends: Backends,

  // experimentals
  pub sni_consistency: bool, // when false, mismatches between TLS SNI and Host header are ignored (violates RFC)

  // HTTP/3 (QUIC) parameters; present only with the `http3` feature
  #[cfg(feature = "http3")]
  pub http3: bool,
  #[cfg(feature = "http3")]
  pub h3_alt_svc_max_age: u32,
  #[cfg(feature = "http3")]
  pub h3_request_max_body_size: usize,
  #[cfg(feature = "http3")]
  pub h3_max_concurrent_bidistream: quinn::VarInt,
  #[cfg(feature = "http3")]
  pub h3_max_concurrent_unistream: quinn::VarInt,
  #[cfg(feature = "http3")]
  pub h3_max_concurrent_connections: u32,
  #[cfg(feature = "http3")]
  pub h3_max_idle_timeout: Option<quinn::IdleTimeout>, // None disables the idle timeout
}
|
||||
|
||||
/// Shared atomic counter of in-flight requests. Cloning shares the same counter.
#[derive(Debug, Clone, Default)]
pub struct RequestCount(Arc<AtomicUsize>);

impl RequestCount {
  /// Current number of tracked requests.
  pub fn current(&self) -> usize {
    self.0.load(Ordering::Relaxed)
  }

  /// Increment the counter, returning the previous value.
  pub fn increment(&self) -> usize {
    self.0.fetch_add(1, Ordering::Relaxed)
  }

  /// Decrement the counter without going below zero, returning the value observed
  /// before the decrement (0 when the counter was already 0).
  pub fn decrement(&self) -> usize {
    // `fetch_update` runs the CAS retry loop for us; `checked_sub` declines the
    // update (returns None -> Err) when the counter is already 0, which saturates
    // at zero exactly like the previous hand-rolled loop.
    self
      .0
      .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |c| c.checked_sub(1))
      .unwrap_or_else(|c| c)
  }
}
|
||||
132
src/main.rs
132
src/main.rs
|
|
@ -1,132 +0,0 @@
|
|||
#[cfg(not(target_env = "msvc"))]
|
||||
use tikv_jemallocator::Jemalloc;
|
||||
|
||||
#[cfg(not(target_env = "msvc"))]
|
||||
#[global_allocator]
|
||||
static GLOBAL: Jemalloc = Jemalloc;
|
||||
|
||||
mod backend;
|
||||
mod config;
|
||||
mod constants;
|
||||
mod error;
|
||||
mod globals;
|
||||
mod handler;
|
||||
mod log;
|
||||
mod proxy;
|
||||
mod utils;
|
||||
|
||||
use crate::{
|
||||
backend::{Backend, Backends},
|
||||
config::parse_opts,
|
||||
constants::*,
|
||||
error::*,
|
||||
globals::*,
|
||||
handler::HttpMessageHandlerBuilder,
|
||||
log::*,
|
||||
proxy::ProxyBuilder,
|
||||
utils::ServerNameBytesExp,
|
||||
};
|
||||
use futures::future::select_all;
|
||||
use hyper::Client;
|
||||
// use hyper_trust_dns::TrustDnsResolver;
|
||||
use rustc_hash::FxHashMap as HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::time::Duration;
|
||||
|
||||
/// Process entry point: set up logging and a multi-threaded tokio runtime, build the
/// default `Globals`, apply CLI/TOML configuration, and run the proxy until it exits.
fn main() {
  init_logger();

  let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
  runtime_builder.enable_all();
  runtime_builder.thread_name("rpxy");
  let runtime = runtime_builder.build().unwrap();

  runtime.block_on(async {
    // Defaults below are overridden by parse_opts from CLI/TOML where configured.
    let mut globals = Globals {
      listen_sockets: Vec::new(),
      http_port: None,
      https_port: None,

      // TODO: Reconsider each timeout values
      proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
      upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),

      max_clients: MAX_CLIENTS,
      request_count: Default::default(),
      max_concurrent_streams: MAX_CONCURRENT_STREAMS,
      keepalive: true,

      runtime_handle: runtime.handle().clone(),
      backends: Backends {
        default_server_name_bytes: None,
        apps: HashMap::<ServerNameBytesExp, Backend>::default(),
      },

      // SNI/Host consistency is enforced by default (RFC-compliant).
      sni_consistency: true,

      #[cfg(feature = "http3")]
      http3: false,
      #[cfg(feature = "http3")]
      h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
      #[cfg(feature = "http3")]
      h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
      #[cfg(feature = "http3")]
      h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
      #[cfg(feature = "http3")]
      h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM.into(),
      #[cfg(feature = "http3")]
      h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM.into(),
      #[cfg(feature = "http3")]
      h3_max_idle_timeout: Some(quinn::IdleTimeout::try_from(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)).unwrap()),
    };

    // A bad configuration is fatal: log and exit with a non-zero status.
    if let Err(e) = parse_opts(&mut globals) {
      error!("Invalid configuration: {}", e);
      std::process::exit(1);
    };

    entrypoint(Arc::new(globals)).await.unwrap()
  });
  warn!("Exit the program");
}
|
||||
|
||||
/// Create the shared HTTP(S) forwarder and message handler, spawn one proxy task per
/// listen socket, and wait until the first task finishes (i.e., a listener went down).
async fn entrypoint(globals: Arc<Globals>) -> Result<()> {
  // let connector = TrustDnsResolver::default().into_rustls_webpki_https_connector();
  // Upstream connector: rustls with webpki roots, speaking http or https, HTTP/1 and HTTP/2.
  let connector = hyper_rustls::HttpsConnectorBuilder::new()
    .with_webpki_roots()
    .https_or_http()
    .enable_http1()
    .enable_http2()
    .build();

  let msg_handler = HttpMessageHandlerBuilder::default()
    .forwarder(Arc::new(Client::builder().build::<_, hyper::Body>(connector)))
    .globals(globals.clone())
    .build()?;

  let addresses = globals.listen_sockets.clone();
  let futures = select_all(addresses.into_iter().map(|addr| {
    // A socket is TLS-enabled iff its port equals the configured https port.
    let mut tls_enabled = false;
    if let Some(https_port) = globals.https_port {
      tls_enabled = https_port == addr.port()
    }

    let proxy = ProxyBuilder::default()
      .globals(globals.clone())
      .listening_on(addr)
      .tls_enabled(tls_enabled)
      .msg_handler(msg_handler.clone())
      .build()
      .unwrap();

    globals.runtime_handle.spawn(proxy.start())
  }));

  // select_all resolves when the FIRST proxy task finishes; any completion — even a
  // clean Ok — means a listener stopped serving, hence the error log.
  if let (Ok(_), _, _) = futures.await {
    error!("Some proxy services are down");
  };

  Ok(())
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue