Merge branch 'tmp/sticky-cookie' into feat/sticky-cookie-feature

Jun Kurihara, 2025-06-03 14:50:00 +09:00, committed by GitHub
commit d8cadf06af
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
80 changed files with 4870 additions and 867 deletions

.build/Jenkinsfile vendored (3 changes)

@@ -37,9 +37,6 @@ pipeline {
         dir('rust-rpxy') {
           sh """
-            # Update submodule URLs to HTTPS (allows cloning without SSH keys)
-            sed -i 's|git@github.com:|https://github.com/|g' .gitmodules
             # Initialize and update submodules
             git submodule update --init
           """


@@ -1,6 +1,3 @@
-# Basic dependabot.yml file with
-# minimum configuration for two package managers
 version: 2
 updates:
   # Enable version updates for cargo
@@ -9,16 +6,6 @@ updates:
     schedule:
       interval: "daily"
-  - package-ecosystem: "cargo"
-    directory: "/rpxy-bin"
-    schedule:
-      interval: "daily"
-  - package-ecosystem: "cargo"
-    directory: "/rpxy-lib"
-    schedule:
-      interval: "daily"
   # Enable version updates for Docker
   - package-ecosystem: "docker"
     directory: "/docker"


@@ -5,6 +5,9 @@ on:
   pull_request:
     types: [synchronize, opened]
 
+permissions:
+  contents: read
+
 env:
   CARGO_TERM_COLOR: always


@@ -14,6 +14,10 @@ on:
 jobs:
   on-success:
+    permissions:
+      contents: read
+      packages: read
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success' }} || ${{ github.event_name == 'repositry_dispatch' }}
     strategy:
@@ -34,16 +38,6 @@ jobs:
             platform: linux/arm64
             tags-suffix: "-slim"
-          - target: "musl"
-            build-feature: "-slim-pq"
-            platform: linux/amd64
-            tags-suffix: "-slim-pq"
-          - target: "musl"
-            build-feature: "-slim-pq"
-            platform: linux/arm64
-            tags-suffix: "-slim-pq"
           - target: "gnu"
             build-feature: "-s2n"
             platform: linux/amd64
@@ -54,26 +48,6 @@ jobs:
             platform: linux/arm64
             tags-suffix: "-s2n"
-          - target: "gnu"
-            build-feature: "-pq"
-            platform: linux/amd64
-            tags-suffix: "-pq"
-          - target: "gnu"
-            build-feature: "-pq"
-            platform: linux/arm64
-            tags-suffix: "-pq"
-          - target: "gnu"
-            build-feature: "-s2n-pq"
-            platform: linux/amd64
-            tags-suffix: "-s2n-pq"
-          - target: "gnu"
-            build-feature: "-s2n-pq"
-            platform: linux/arm64
-            tags-suffix: "-s2n-pq"
           - target: "gnu"
             build-feature: "-webpki-roots"
             platform: linux/amd64
@@ -128,12 +102,18 @@ jobs:
           path: "/tmp/${{ steps.set-env.outputs.target_name }}"
 
   on-failure:
+    permissions:
+      contents: read
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'failure' }}
     steps:
       - run: echo 'The release triggering workflows failed'
 
   release:
+    permissions:
+      contents: write
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'repository_dispatch' }}
     needs: on-success


@@ -16,7 +16,11 @@ env:
 jobs:
   build_and_push:
-    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+    runs-on: ubuntu-22.04
     if: ${{ github.event_name == 'push' }} || ${{ github.event_name == 'pull_request' && github.event.pull_request.merged == true }}
     strategy:
       fail-fast: false
@@ -30,17 +34,6 @@ jobs:
               jqtype/rpxy:latest
               ghcr.io/junkurihara/rust-rpxy:latest
-          - target: "default-pq"
-            dockerfile: ./docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
-            build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,rustls-backend,acme,post-quantum"
-            tags-suffix: "-pq"
-            # Aliases must be used only for release builds
-            aliases: |
-              jqtype/rpxy:pq
-              ghcr.io/junkurihara/rust-rpxy:pq
           - target: "default-slim"
             dockerfile: ./docker/Dockerfile-slim
             build-contexts: |
@@ -53,24 +46,10 @@ jobs:
               jqtype/rpxy:slim
               ghcr.io/junkurihara/rust-rpxy:slim
-          - target: "default-slim-pq"
-            dockerfile: ./docker/Dockerfile-slim
-            build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,rustls-backend,acme,post-quantum"
-            build-contexts: |
-              messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
-              messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
-            platforms: linux/amd64,linux/arm64
-            tags-suffix: "-slim-pq"
-            # Aliases must be used only for release builds
-            aliases: |
-              jqtype/rpxy:slim-pq
-              ghcr.io/junkurihara/rust-rpxy:slim-pq
           - target: "s2n"
             dockerfile: ./docker/Dockerfile
             build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme"
+              "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme,post-quantum"
               "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
             platforms: linux/amd64,linux/arm64
             tags-suffix: "-s2n"
@@ -79,23 +58,11 @@ jobs:
               jqtype/rpxy:s2n
               ghcr.io/junkurihara/rust-rpxy:s2n
-          - target: "s2n-pq"
-            dockerfile: ./docker/Dockerfile
-            build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme,post-quantum"
-              "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
-            platforms: linux/amd64,linux/arm64
-            tags-suffix: "-s2n-pq"
-            # Aliases must be used only for release builds
-            aliases: |
-              jqtype/rpxy:s2n-pq
-              ghcr.io/junkurihara/rust-rpxy:s2n-pq
           - target: "webpki-roots"
             dockerfile: ./docker/Dockerfile
             platforms: linux/amd64,linux/arm64
             build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme"
+              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme,post-quantum"
             tags-suffix: "-webpki-roots"
             # Aliases must be used only for release builds
             aliases: |
@@ -105,7 +72,7 @@ jobs:
           - target: "slim-webpki-roots"
             dockerfile: ./docker/Dockerfile-slim
             build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme"
+              "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme,post-quantum"
             build-contexts: |
               messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
               messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
@@ -119,7 +86,7 @@ jobs:
           - target: "s2n-webpki-roots"
             dockerfile: ./docker/Dockerfile
             build-args: |
-              "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,webpki-roots,acme"
+              "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,webpki-roots,acme,post-quantum"
               "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
             platforms: linux/amd64,linux/arm64
             tags-suffix: "-s2n-webpki-roots"
@@ -207,6 +174,14 @@ jobs:
           platforms: ${{ matrix.platforms }}
           labels: ${{ steps.meta.outputs.labels }}
+      - name: check pull_request title
+        if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
+        uses: kaisugi/action-regex-match@v1.0.1
+        id: regex-match
+        with:
+          text: ${{ github.event.pull_request.title }}
+          regex: "^(\\d+\\.\\d+\\.\\d+)$"
       - name: Release build and push from main branch
         if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
         uses: docker/build-push-action@v6
@@ -218,6 +193,8 @@ jobs:
             ${{ env.GHCR }}/${{ env.GHCR_IMAGE_NAME }}:latest${{ matrix.tags-suffix }}
             ${{ env.DH_REGISTRY_NAME }}:latest${{ matrix.tags-suffix }}
             ${{ matrix.aliases }}
+            ${{ env.GHCR }}/${{ env.GHCR_IMAGE_NAME }}:${{ github.event.pull_request.title }}${{ matrix.tags-suffix }}
+            ${{ env.DH_REGISTRY_NAME }}:${{ github.event.pull_request.title }}${{ matrix.tags-suffix }}
           build-contexts: ${{ matrix.build-contexts }}
           file: ${{ matrix.dockerfile }}
           cache-from: type=gha,scope=rpxy-latest-${{ matrix.target }}
@@ -226,6 +203,10 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
 
   dispatch_release_event:
+    permissions:
+      contents: write
+      actions: write
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
     needs: build_and_push


@@ -7,6 +7,8 @@ on:
 jobs:
   Scan-Build:
+    permissions:
+      contents: read
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4

.gitignore vendored (5 changes)

@@ -3,14 +3,11 @@
 docker/log
 docker/cache
 docker/config
+docker/acme_registry
 
 # Generated by Cargo
 # will have compiled files and executables
 /target/
 
-# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
-# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
-Cargo.lock
-
 # These are backup files generated by rustfmt
 **/*.rs.bk

.gitmodules vendored (6 changes)

@@ -1,10 +1,10 @@
 [submodule "submodules/rusty-http-cache-semantics"]
   path = submodules/rusty-http-cache-semantics
-  url = git@github.com:junkurihara/rusty-http-cache-semantics.git
+  url = https://github.com/junkurihara/rusty-http-cache-semantics.git
 [submodule "submodules/rustls-acme"]
   path = submodules/rustls-acme
-  url = git@github.com:junkurihara/rustls-acme.git
+  url = https://github.com/junkurihara/rustls-acme.git
 [submodule "submodules/s2n-quic"]
   path = submodules/s2n-quic
-  url = git@github.com:junkurihara/s2n-quic.git
+  url = https://github.com/junkurihara/s2n-quic.git
   branch = rustls-pq


@@ -1,6 +1,54 @@
 # CHANGELOG
 
-## 0.9.4 or 0.10.0 (Unreleased)
+## 0.10.1 or 0.11.0 (Unreleased)
+
+## 0.10.0
+
+### Important Changes
+
+- [Breaking] We removed the non-`watch` execution option and enabled dynamic reloading of the config file by default.
+- We newly added a `log-dir` execution option to specify the directory for `access.log`, `error.log` and `rpxy.log`. This is optional; if not specified, the logs are written to the standard output by default.
+
+### Improvement
+
+- Refactor: lots of minor improvements
+- Deps
+
+## 0.9.7
+
+### Improvement
+
+- Feat: add a version tag for docker images via github actions
+- Feat: support gRPC: This makes rpxy serve gRPC requests on the same ports as HTTP and HTTPS, i.e., listen_port and listen_port_tls. This means that by using different subdomains for HTTP(S) and gRPC, we can multiplex them on the same ports without opening another port dedicated to gRPC. To this end, this update made the forwarder force HTTP/2 for gRPC requests towards the backend (gRPC) app.
+- Deps and refactor
+
+### Bugfix
+
+- Fixed a bug for the upstream option "force_http2_upstream"
+
+### Other
+
+- Tentative downgrade of github actions `runs-on` from ubuntu-latest to ubuntu-22.04.
+
+## 0.9.6
+
+### Improvement
+
+- Feat: Change the default hashing algorithm for internal hashmaps and hashsets from FxHash to aHash. This change is to improve the security against HashDoS attacks for colliding domain names and paths, and to improve the speed of hash operations for string keys (c.f., [the performance comparison](https://github.com/tkaitchuck/aHash/blob/master/compare/readme.md)).
+- Deps and refactor
+
+## 0.9.5
+
+### Bugfix
+
+- Fix docker image build options with the `post-quantum` feature.
+
+## 0.9.4
+
+### Improvement
+
+- Feat: Enable the hybrid post-quantum key exchange for TLS and QUIC with `X25519MLKEM768` by default.
+- Deps and refactor
+
 ## 0.9.3
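As a concrete companion to the 0.9.6 hashing entry above: `ahash::HashMap` is a drop-in alias for `std::collections::HashMap` with aHash's `RandomState`, so call sites keep the standard map API. A minimal sketch; the domain-to-upstream keys and values here are illustrative, not taken from rpxy:

```rust
// ahash::HashMap = std::collections::HashMap<K, V, ahash::RandomState>,
// i.e., the standard map driven by a faster, HashDoS-resistant hasher.
use ahash::HashMap;

fn main() {
    // Illustrative: domain-name keys like the ones rpxy routes on.
    let mut upstreams: HashMap<String, Vec<String>> = HashMap::default();
    upstreams
        .entry("app.example.com".to_string())
        .or_default()
        .push("192.0.2.10:8080".to_string());
    assert!(upstreams.contains_key("app.example.com"));
}
```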

Cargo.lock generated, new file (3799 changes)

File diff suppressed because it is too large.


@@ -1,11 +1,11 @@
 [workspace.package]
-version = "0.9.4"
+version = "0.10.0"
 authors = ["Jun Kurihara"]
 homepage = "https://github.com/junkurihara/rust-rpxy"
 repository = "https://github.com/junkurihara/rust-rpxy"
 license = "MIT"
 readme = "./README.md"
-edition = "2021"
+edition = "2024"
 publish = false
 
 [workspace]


@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2024 Jun Kurihara
+Copyright (c) 2025 Jun Kurihara
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -8,21 +8,36 @@
 > **WIP Project**
 
+> [!NOTE]
+> This project is an HTTP, i.e., Layer 7, reverse proxy. If you are looking for a TCP/UDP, i.e., Layer 4, reverse proxy, please check another project of mine, [`rpxy-l4`](https://github.com/junkurihara/rust-rpxy-l4).
+
 ## Introduction
 
 `rpxy` [ahr-pik-see] is an implementation of simple and lightweight reverse-proxy with some additional features. The implementation is based on [`hyper`](https://github.com/hyperium/hyper), [`rustls`](https://github.com/rustls/rustls) and [`tokio`](https://github.com/tokio-rs/tokio), i.e., written in Rust [^pure_rust]. Our `rpxy` routes multiple host names to appropriate backend application servers while serving TLS connections.
 
 [^pure_rust]: Doubtfully can be claimed to be written in pure Rust since current `rpxy` is based on `aws-lc-rs` for cryptographic operations.
 
-By default, `rpxy` provides the *TLS connection sanitization* by correctly binding a certificate used to establish a secure channel with the backend application. Specifically, it always keeps the consistency between the given SNI (server name indication) in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in Request line) [^1]. Additionally, as a somewhat unstable feature, our `rpxy` can handle the brand-new HTTP/3 connection thanks to [`quinn`](https://github.com/quinn-rs/quinn), [`s2n-quic`](https://github.com/aws/s2n-quic) and [`hyperium/h3`](https://github.com/hyperium/h3).[^h3lib] Furthermore, `rpxy` supports the automatic issuance and renewal of certificates via [TLS-ALPN-01 (RFC8737)](https://www.rfc-editor.org/rfc/rfc8737) of [ACME protocol (RFC8555)](https://www.rfc-editor.org/rfc/rfc8555) thanks to [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme), and the hybridized post-quantum key exchange [`X25519Kyber768Draft00`](https://datatracker.ietf.org/doc/draft-tls-westerbaan-xyber768d00/)[^kyber] for TLS incoming and outgoing initiation thanks to [`rustls-post-quantum`](https://docs.rs/rustls-post-quantum/latest/rustls_post_quantum/).
-
-[^h3lib]: HTTP/3 libraries are mutually exclusive. You need to explicitly specify `s2n-quic` with `--no-default-features` flag. Also note that if you build `rpxy` with `s2n-quic`, then it requires `openssl` just for building the package.
-
-[^kyber]: This is not yet a default feature. You need to specify `--features post-quantum` when building `rpxy`. Also note that `X25519Kyber768Draft00` is a draft version yet this is widely used on the Internet. We will update the feature when the newest version (`X25519MLKEM768` in [`ECDHE-MLKEM`](https://www.ietf.org/archive/id/draft-kwiatkowski-tls-ecdhe-mlkem-02.html)) is available.
-
-This project is still *work-in-progress*. But it is already working in some production environments and serves a number of domain names. Furthermore it *significantly outperforms* NGINX and Caddy, e.g., *1.5x faster than NGINX*, in the setting of a very simple HTTP reverse-proxy scenario (See [`bench`](./bench/) directory).
-
-[^1]: We should note that NGINX doesn't guarantee such a consistency by default. To this end, you have to add `if` statement in the configuration file in NGINX.
+Supported features are summarized as follows:
+
+- Supported HTTP(S) protocols: HTTP/1.1, HTTP/2 and brand-new HTTP/3 [^h3lib]
+- gRPC is also supported
+- Serving multiple domain names with TLS termination
+- Mutual TLS authentication with client certificates
+- Automated certificate issuance and renewal via the TLS-ALPN-01 ACME protocol [^acme]
+- Post-quantum key exchange for TLS/QUIC [^kyber]
+- TLS connection sanitization to avoid domain fronting [^sanitization]
+- Load balancing with round-robin, random, and sticky sessions
+- and more...
+
+[^h3lib]: HTTP/3 is enabled thanks to [`quinn`](https://github.com/quinn-rs/quinn), [`s2n-quic`](https://github.com/aws/s2n-quic) and [`hyperium/h3`](https://github.com/hyperium/h3). The HTTP/3 libraries are mutually exclusive. You need to explicitly specify `s2n-quic` with the `--no-default-features` flag. Also note that if you build `rpxy` with `s2n-quic`, it requires `openssl` just for building the package.
+
+[^acme]: `rpxy` supports the automatic issuance and renewal of certificates via [TLS-ALPN-01 (RFC8737)](https://www.rfc-editor.org/rfc/rfc8737) of the [ACME protocol (RFC8555)](https://www.rfc-editor.org/rfc/rfc8555) thanks to [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme).
+
+[^kyber]: `rpxy` supports the hybridized post-quantum key exchange [`X25519MLKEM768`](https://www.ietf.org/archive/id/draft-kwiatkowski-tls-ecdhe-mlkem-02.html) for TLS/QUIC incoming and outgoing initiation thanks to [`rustls-post-quantum`](https://docs.rs/rustls-post-quantum/latest/rustls_post_quantum/). This is already a default feature. Also note that `X25519MLKEM768` is still a draft version, yet it is already widely used on the Internet.
+
+[^sanitization]: By default, `rpxy` provides *TLS connection sanitization* by correctly binding a certificate used to establish a secure channel with the backend application. Specifically, it always keeps the consistency between the given SNI (server name indication) in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in the Request line). We should note that NGINX doesn't guarantee such consistency by default. To this end, you have to add an `if` statement in the NGINX configuration file.
+
+This project is still *work-in-progress*. But it is already working in some production environments and serves a number of domain names. Furthermore, it *significantly outperforms* NGINX and Caddy, e.g., *30% ~ 60% or more faster than NGINX*, in the setting of a very simple HTTP reverse-proxy scenario (see the [`bench`](./bench/) directory).
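To make the sanitization footnote above concrete, here is a minimal, hypothetical sketch of the SNI-vs-Host consistency rule; it is not rpxy's actual implementation, and the helper name and port handling are assumptions for illustration:

```rust
/// Hypothetical sketch: the TLS SNI and the HTTP Host header (port stripped)
/// must name the same server, otherwise the request is rejected, which is
/// what prevents domain fronting across a reused TLS connection.
fn sni_matches_host(sni: &str, host_header: &str) -> bool {
    // A Host header may carry a port ("example.com:443"); an SNI never does.
    let host = match host_header.rsplit_once(':') {
        Some((h, p)) if p.chars().all(|c| c.is_ascii_digit()) => h,
        _ => host_header,
    };
    sni.eq_ignore_ascii_case(host.trim_end_matches('.'))
}

fn main() {
    assert!(sni_matches_host("example.com", "EXAMPLE.COM:443"));
    assert!(!sni_matches_host("example.com", "evil.example.org"));
}
```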
 ## Installing/Building an Executable Binary of `rpxy`
 
@@ -65,7 +80,7 @@ You can run `rpxy` with a configuration file like
 % ./target/release/rpxy --config config.toml
 ```
 
-If you specify `-w` option along with the config file path, `rpxy` tracks the change of `config.toml` in the real-time manner and apply the change immediately without restarting the process.
+`rpxy` tracks the change of `config.toml` in real time and applies the change immediately without restarting the process.
 
 The full help messages are given as follows.
@@ -74,11 +89,17 @@ usage: rpxy [OPTIONS] --config <FILE>
 Options:
   -c, --config <FILE>      Configuration file path like ./config.toml
-  -w, --watch              Activate dynamic reloading of the config file via continuous monitoring
+  -l, --log-dir <LOG_DIR>  Directory for log files. If not specified, logs are printed to stdout.
   -h, --help               Print help
   -V, --version            Print version
 ```
 
+If you set `--log-dir=<log_dir>`, the log files are created in the specified directory. Otherwise, the log is printed to stdout.
+
+- `${log_dir}/access.log` for the access log
+<!-- - `${log_dir}/error.log` for error log -->
+- `${log_dir}/rpxy.log` for the system and error log
+
 That's all!
 
 ## Basic Configuration
@@ -422,6 +443,17 @@ Check a third party project [`Gamerboy59/rpxy-webui`](https://github.com/Gamerbo
 todo!
 
+## Credits
+
+`rpxy` cannot be built without the following projects and inspirations:
+
+- [`hyper`](https://github.com/hyperium/hyper) and [`hyperium/h3`](https://github.com/hyperium/h3)
+- [`rustls`](https://github.com/rustls/rustls)
+- [`tokio`](https://github.com/tokio-rs/tokio)
+- [`quinn`](https://github.com/quinn-rs/quinn)
+- [`s2n-quic`](https://github.com/aws/s2n-quic)
+- [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme)
+
 ## License
 
 `rpxy` is free, open-source software licensed under MIT License.


@@ -2,9 +2,13 @@
   auto_https off
 }
 
 :80 {
   # Proxy everything else to Rocket
   reverse_proxy backend-nginx
+
+  log {
+    level ERROR
+  }
 }


@@ -3,49 +3,46 @@
 This test simply measures the performance of several reverse proxies through HTTP/1.1 by the following command using [`rewrk`](https://github.com/lnx-search/rewrk).
 
 ```sh:
-$ rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
+rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
 ```
 
 ## Tests on `linux/arm64/v8`
 
-Done at Jul. 15, 2023
+Done at May 17, 2025
 
 ### Environment
 
-- `rpxy` commit id: `1da7e5bfb77d1ce4ee8d6cfc59b1c725556fc192`
+- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
-- Docker Desktop 4.21.1 (114176)
+- Docker Desktop 4.41.2 (191736)
 - ReWrk 0.3.2
-- Macbook Pro '14 (2021, M1 Max, 64GB RAM)
+- Mac mini (2024, M4 Pro, 64GB RAM)
 
 The docker images of `nginx` and `caddy` for `linux/arm64/v8` are pulled from the official registry.
 ### Result for `rpxy`, `nginx` and `caddy`
 
-```
+```bash
+----------------------------
 Benchmark on rpxy
 Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    19.64ms  8.85ms    0.67ms   113.22ms
+    6.90ms   3.42ms    0.78ms   80.26ms
 Requests:
-    Total: 390078   Req/Sec: 26011.25
+    Total: 1107885  Req/Sec: 73866.03
 Transfer:
-    Total: 304.85 MB  Transfer Rate: 20.33 MB/Sec
+    Total: 867.44 MB  Transfer Rate: 57.83 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 79.24ms         |
+| 99.9%           | 49.76ms         |
-| 99%             | 54.28ms         |
+| 99%             | 29.57ms         |
-| 95%             | 42.50ms         |
+| 95%             | 15.78ms         |
-| 90%             | 37.82ms         |
+| 90%             | 13.05ms         |
-| 75%             | 31.54ms         |
+| 75%             | 10.41ms         |
-| 50%             | 26.37ms         |
+| 50%             | 8.72ms          |
 + --------------- + --------------- +
-721 Errors: error shutting down connection: Socket is not connected (os error 57)
 sleep 3 secs
 ----------------------------
 Benchmark on nginx
@@ -53,23 +50,23 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    33.26ms  15.18ms   1.40ms   118.94ms
+    11.65ms  14.04ms   0.40ms   205.93ms
 Requests:
-    Total: 230268   Req/Sec: 15356.08
+    Total: 654978   Req/Sec: 43666.56
 Transfer:
-    Total: 186.77 MB  Transfer Rate: 12.46 MB/Sec
+    Total: 532.81 MB  Transfer Rate: 35.52 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 99.91ms         |
+| 99.9%           | 151.00ms        |
-| 99%             | 83.74ms         |
+| 99%             | 102.80ms        |
-| 95%             | 70.67ms         |
+| 95%             | 62.44ms         |
-| 90%             | 64.03ms         |
+| 90%             | 42.98ms         |
-| 75%             | 54.32ms         |
+| 75%             | 26.44ms         |
-| 50%             | 45.19ms         |
+| 50%             | 18.25ms         |
 + --------------- + --------------- +
-677 Errors: error shutting down connection: Socket is not connected (os error 57)
+512 Errors: connection closed
 sleep 3 secs
 ----------------------------
@@ -78,33 +75,31 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    48.51ms  50.74ms   0.34ms   554.58ms
+    77.54ms  368.11ms  0.37ms   6770.73ms
 Requests:
-    Total: 157239   Req/Sec: 10485.98
+    Total: 86963    Req/Sec: 5798.35
 Transfer:
-    Total: 125.99 MB  Transfer Rate: 8.40 MB/Sec
+    Total: 70.00 MB   Transfer Rate: 4.67 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 473.82ms        |
+| 99.9%           | 5789.65ms       |
-| 99%             | 307.16ms        |
+| 99%             | 3407.02ms       |
-| 95%             | 212.28ms        |
+| 95%             | 1022.31ms       |
-| 90%             | 169.05ms        |
+| 90%             | 608.17ms        |
-| 75%             | 115.92ms        |
+| 75%             | 281.95ms        |
-| 50%             | 80.24ms         |
+| 50%             | 149.29ms        |
 + --------------- + --------------- +
-708 Errors: error shutting down connection: Socket is not connected (os error 57)
 ```
 ## Results on `linux/amd64`
 
-Done at Jul. 24, 2023
+Done at May 20, 2025
 
 ### Environment
 
-- `rpxy` commit id: `7c0945a5124418aa9a1024568c1989bb77cf312f`
+- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
-- Docker Desktop 4.21.1 (114176)
+- Docker Desktop 4.41.2 (192736)
 - ReWrk 0.3.2 and Wrk 0.4.2
 - iMac '27 (2020, 10-Core Intel Core i9, 128GB RAM)
 
@@ -112,8 +107,8 @@ The docker images of `nginx` and `caddy` for `linux/amd64` were pulled from the
 Also, when `Sozu` is configured as an HTTP reverse proxy, it cannot handle HTTP request messages emitted from `ReWrk` due to hostname parsing errors, though it can correctly handle messages dispatched from `curl` and browsers. So, we additionally test using [`Wrk`](https://github.com/wg/wrk) to examine `Sozu` with the following command.
 
-```sh:
+```bash
-$ wrk -c 512 -t 4 -d 15s http://localhost:8110
+wrk -c 512 -t 4 -d 15s http://localhost:8110
 ```
 
 <!-- ```
@@ -124,7 +119,7 @@ ERROR Error connecting to backend: Could not get cluster id from request: Host
 
 #### With ReWrk for `rpxy`, `nginx` and `caddy`
 
-```
+```bash
 ----------------------------
 Benchmark [x86_64] with ReWrk
 ----------------------------
@@ -133,24 +128,22 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    20.37ms  8.95ms    1.63ms   160.27ms
+    15.75ms  6.75ms    1.75ms   124.25ms
 Requests:
-    Total: 376345   Req/Sec: 25095.19
+    Total: 486635   Req/Sec: 32445.33
 Transfer:
-    Total: 295.61 MB  Transfer Rate: 19.71 MB/Sec
+    Total: 381.02 MB  Transfer Rate: 25.40 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 112.50ms        |
+| 99.9%           | 91.91ms         |
-| 99%             | 61.33ms         |
+| 99%             | 55.53ms         |
-| 95%             | 44.26ms         |
+| 95%             | 34.87ms         |
-| 90%             | 38.74ms         |
+| 90%             | 29.55ms         |
-| 75%             | 32.00ms         |
+| 75%             | 23.99ms         |
-| 50%             | 26.82ms         |
+| 50%             | 20.17ms         |
 + --------------- + --------------- +
-626 Errors: error shutting down connection: Socket is not connected (os error 57)
 sleep 3 secs
 ----------------------------
 Benchmark on nginx
@@ -158,24 +151,22 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    23.45ms  12.42ms   1.18ms   154.44ms
+    24.02ms  15.84ms   1.31ms   207.97ms
 Requests:
-    Total: 326685   Req/Sec: 21784.73
+    Total: 318516   Req/Sec: 21236.67
 Transfer:
-    Total: 265.22 MB  Transfer Rate: 17.69 MB/Sec
+    Total: 259.11 MB  Transfer Rate: 17.28 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 96.85ms         |
+| 99.9%           | 135.56ms        |
-| 99%             | 73.93ms         |
+| 99%             | 92.59ms         |
-| 95%             | 57.57ms         |
+| 95%             | 68.54ms         |
-| 90%             | 50.36ms         |
+| 90%             | 58.75ms         |
-| 75%             | 40.57ms         |
+| 75%             | 45.88ms         |
-| 50%             | 32.70ms         |
+| 50%             | 35.64ms         |
 + --------------- + --------------- +
-657 Errors: error shutting down connection: Socket is not connected (os error 57)
 sleep 3 secs
 ----------------------------
 Benchmark on caddy
@@ -183,30 +174,26 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
 Latencies:
     Avg      Stdev     Min      Max
-    45.71ms  50.47ms   0.88ms   908.49ms
+    74.60ms  181.26ms  0.94ms   2723.20ms
 Requests:
-    Total: 166917   Req/Sec: 11129.80
+    Total: 101893   Req/Sec: 6792.16
 Transfer:
-    Total: 133.77 MB  Transfer Rate: 8.92 MB/Sec
+    Total: 82.03 MB   Transfer Rate: 5.47 MB/Sec
 + --------------- + --------------- +
 | Percentile      | Avg Latency     |
 + --------------- + --------------- +
-| 99.9%           | 608.92ms        |
+| 99.9%           | 2232.12ms       |
-| 99%             | 351.18ms        |
+| 99%             | 1517.73ms       |
-| 95%             | 210.56ms        |
+| 95%             | 624.63ms        |
-| 90%             | 162.68ms        |
+| 90%             | 406.69ms        |
-| 75%             | 106.97ms        |
+| 75%             | 222.42ms        |
-| 50%             | 73.90ms         |
+| 50%             | 133.46ms        |
 + --------------- + --------------- +
-646 Errors: error shutting down connection: Socket is not connected (os error 57)
-sleep 3 secs
 ```
 
 #### With Wrk for `rpxy`, `nginx`, `caddy` and `sozu`
 
-```
+```bash
 ----------------------------
 Benchmark [x86_64] with Wrk
 ----------------------------
@@ -214,12 +201,11 @@ Benchmark on rpxy
 Running 15s test @ http://localhost:8080
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    18.68ms    8.09ms 122.64ms   74.03%
+    Latency    15.65ms    6.94ms 104.73ms   81.28%
-    Req/Sec     6.95k   815.23     8.45k    83.83%
+    Req/Sec     8.36k     0.90k    9.90k    77.83%
-  414819 requests in 15.01s, 326.37MB read
+  499550 requests in 15.02s, 391.14MB read
-  Socket errors: connect 0, read 608, write 0, timeout 0
-Requests/sec:  27627.79
+Requests/sec:  33267.61
-Transfer/sec:     21.74MB
+Transfer/sec:     26.05MB
 sleep 3 secs
 ----------------------------
@@ -227,12 +213,11 @@ Benchmark on nginx
 Running 15s test @ http://localhost:8090
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    23.34ms   13.80ms 126.06ms   74.66%
+    Latency    24.26ms   15.29ms 167.43ms   73.34%
-    Req/Sec     5.71k   607.41     7.07k    73.17%
+    Req/Sec     5.53k   493.14     6.91k    69.67%
-  341127 requests in 15.03s, 277.50MB read
+  330569 requests in 15.02s, 268.91MB read
-  Socket errors: connect 0, read 641, write 0, timeout 0
-Requests/sec:  22701.54
+Requests/sec:  22014.96
-Transfer/sec:     18.47MB
+Transfer/sec:     17.91MB
 sleep 3 secs
 ----------------------------
@@ -240,13 +225,13 @@ Benchmark on caddy
 Running 15s test @ http://localhost:8100
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    54.19ms   55.63ms 674.53ms   88.55%
+    Latency   212.89ms  300.23ms   1.99s    86.56%
-    Req/Sec     2.92k     1.40k    5.57k    56.17%
+    Req/Sec     1.31k     1.64k    5.72k    78.79%
-  174748 requests in 15.03s, 140.61MB read
+  67749 requests in 15.04s, 51.97MB read
-  Socket errors: connect 0, read 660, write 0, timeout 0
+  Socket errors: connect 0, read 0, write 0, timeout 222
-  Non-2xx or 3xx responses: 70
+  Non-2xx or 3xx responses: 3686
-Requests/sec:  11624.63
+Requests/sec:   4505.12
-Transfer/sec:      9.35MB
+Transfer/sec:      3.46MB
 sleep 3 secs
 ----------------------------
@@ -254,10 +239,9 @@ Benchmark on sozu
 Running 15s test @ http://localhost:8110
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    19.78ms    4.89ms  98.09ms   76.88%
+    Latency    34.68ms    6.30ms  90.21ms   72.49%
-    Req/Sec     6.49k   824.75     8.11k    76.17%
+    Req/Sec     3.69k   397.85     5.08k    73.00%
-  387744 requests in 15.02s, 329.11MB read
+  220655 requests in 15.01s, 187.29MB read
-  Socket errors: connect 0, read 647, write 0, timeout 0
-Requests/sec:  25821.93
+Requests/sec:  14699.17
-Transfer/sec:     21.92MB
+Transfer/sec:     12.48MB
 ```


@@ -1,4 +1,3 @@
-version: "3"
 services:
   nginx:
     image: nginx:alpine
@@ -28,7 +27,7 @@ services:
       dockerfile: docker/Dockerfile
     restart: unless-stopped
     environment:
-      - LOG_LEVEL=info
+      - LOG_LEVEL=error # almost nolog
      - LOG_TO_FILE=false
    ports:
      - 127.0.0.1:8080:8080
@@ -47,7 +46,7 @@ services:
    tty: false
    privileged: true
    volumes:
-      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
+      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
      - /var/run/docker.sock:/tmp/docker.sock:ro
    logging:
      options:
@@ -64,7 +63,7 @@ services:
    restart: unless-stopped
    tty: false
    volumes:
-      - ./Caddyfile:/etc/caddy/Caddyfile:ro
+      - ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
    networks:
      bench-nw:
@@ -82,7 +81,7 @@ services:
        max-size: "10m"
        max-file: "3"
    volumes:
-      - ./sozu-config.toml:/etc/sozu/config.toml
+      - ./sozu-config.toml:/etc/sozu/config.toml # set as almost nolog
    networks:
      bench-nw:


@@ -1,4 +1,3 @@
-version: "3"
 services:
   nginx:
     image: nginx:alpine
@@ -28,7 +27,7 @@ services:
       dockerfile: docker/Dockerfile
     restart: unless-stopped
     environment:
-      - LOG_LEVEL=info
+      - LOG_LEVEL=error # almost nolog
      - LOG_TO_FILE=false
    ports:
      - 127.0.0.1:8080:8080
@@ -47,7 +46,7 @@ services:
    tty: false
    privileged: true
    volumes:
-      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
+      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
      - /var/run/docker.sock:/tmp/docker.sock:ro
    logging:
      options:
@@ -64,7 +63,7 @@ services:
    restart: unless-stopped
    tty: false
    volumes:
-      - ./Caddyfile:/etc/caddy/Caddyfile:ro
+      - ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
    networks:
      bench-nw:


@@ -31,11 +31,14 @@
   # '"$request" $status $body_bytes_sent '
   # '"$http_referer" "$http_user_agent" '
   # '"$upstream_addr"';
-  # access_log off;
+  access_log off;
 
   # ssl_protocols TLSv1.2 TLSv1.3;
   # ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
   # ssl_prefer_server_ciphers off;
 
   # error_log /dev/stderr;
+  error_log /dev/null crit;
 
   # resolver 127.0.0.11;
   # # HTTP 1.1 support
   # proxy_http_version 1.1;


@@ -1,4 +1,4 @@
-log_level = "info"
+log_level = "error"
 log_target = "stdout"
 max_connections = 512
 activate_listeners = true


@@ -28,10 +28,10 @@ max_clients = 512
 listen_ipv6 = false
 
 # Optional: App that serves all plaintext http request by referring to HOSTS or request header
-# execpt for configured application.
+# except for configured application.
 # Note that this is only for http.
 # Note that nothing is served for requests via https since secure channel cannot be
-# established for unconfigured server_name, and they are always rejected by checking SNI.
+# established for non-configured server_name, and they are always rejected by checking SNI.
 default_app = 'another_localhost'
 
 ###################################
@@ -106,7 +106,7 @@ tls = { https_redirection = true, acme = true }
 # Experimantal settings #
 ###################################
 [experimental]
-# Higly recommend not to be true. If true, you ignore RFC. if not specified, it is always false.
+# Highly recommend not to be true. If true, you ignore RFC. if not specified, it is always false.
 # This might be required to be true when a certificate is used by multiple backend hosts, especially in case where a TLS connection is re-used.
 # We should note that this strongly depends on the client implementation.
 ignore_sni_consistency = false


@@ -2,13 +2,13 @@ FROM ubuntu:24.04 AS base
 LABEL maintainer="Jun Kurihara"
 
 SHELL ["/bin/sh", "-x", "-c"]
 
-ENV SERIAL 2
+ENV SERIAL=2
 
 ########################################
 FROM --platform=$BUILDPLATFORM base AS builder
 
 ENV CFLAGS=-Ofast
-ENV BUILD_DEPS curl make ca-certificates build-essential
+ENV BUILD_DEPS="curl make ca-certificates build-essential"
 ENV TARGET_SUFFIX=unknown-linux-gnu
 
 WORKDIR /tmp
@@ -17,9 +17,9 @@ COPY . /tmp/
 
 ARG TARGETARCH
 ARG CARGO_FEATURES
-ENV CARGO_FEATURES ${CARGO_FEATURES}
+ENV CARGO_FEATURES="${CARGO_FEATURES}"
 ARG ADDITIONAL_DEPS
-ENV ADDITIONAL_DEPS ${ADDITIONAL_DEPS}
+ENV ADDITIONAL_DEPS="${ADDITIONAL_DEPS}"
 
 RUN if [ $TARGETARCH = "amd64" ]; then \
       echo "x86_64" > /arch; \
@@ -30,7 +30,7 @@ RUN if [ $TARGETARCH = "amd64" ]; then \
       exit 1; \
     fi
 
-ENV RUSTFLAGS "-C link-arg=-s"
+ENV RUSTFLAGS="-C link-arg=-s"
 
 RUN update-ca-certificates 2> /dev/null || true
 
@@ -40,6 +40,7 @@ RUN apt-get update && apt-get install -qy --no-install-recommends $BUILD_DEPS ${
   echo "Install toolchain" && \
   rustup target add $(cat /arch)-${TARGET_SUFFIX} && \
   echo "Building rpxy from source" && \
+  cargo update &&\
   cargo build --release --target=$(cat /arch)-${TARGET_SUFFIX} ${CARGO_FEATURES} && \
   strip --strip-all /tmp/target/$(cat /arch)-${TARGET_SUFFIX}/release/rpxy &&\
   cp /tmp/target/$(cat /arch)-${TARGET_SUFFIX}/release/rpxy /tmp/target/release/rpxy
@@ -47,7 +48,7 @@ RUN apt-get update && apt-get install -qy --no-install-recommends $BUILD_DEPS ${
 ########################################
 FROM --platform=$TARGETPLATFORM base AS runner
 
-ENV RUNTIME_DEPS logrotate ca-certificates gosu
+ENV RUNTIME_DEPS="logrotate ca-certificates gosu"
 
 RUN apt-get update && \
   apt-get install -qy --no-install-recommends $RUNTIME_DEPS && \

@@ -5,7 +5,7 @@ LABEL maintainer="Jun Kurihara"
 
 ARG TARGETARCH
 ARG CARGO_FEATURES
-ENV CARGO_FEATURES ${CARGO_FEATURES}
+ENV CARGO_FEATURES=${CARGO_FEATURES}
 
 RUN if [ $TARGETARCH = "amd64" ]; then \
       echo "x86_64" > /arch; \
@@ -22,9 +22,10 @@ WORKDIR /tmp
 
 COPY . /tmp/
 
-ENV RUSTFLAGS "-C link-arg=-s"
+ENV RUSTFLAGS="-C link-arg=-s"
 
 RUN echo "Building rpxy from source" && \
+  cargo update && \
   cargo build --release --target $(cat /arch)-unknown-linux-musl ${CARGO_FEATURES} && \
   musl-strip --strip-all /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy && \
   cp /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy /tmp/target/release/rpxy
@@ -33,7 +34,7 @@ RUN echo "Building rpxy from source" && \
 FROM --platform=$TARGETPLATFORM alpine:latest AS runner
 LABEL maintainer="Jun Kurihara"
 
-ENV RUNTIME_DEPS logrotate ca-certificates su-exec
+ENV RUNTIME_DEPS="logrotate ca-certificates su-exec"
 
 RUN apk add --no-cache ${RUNTIME_DEPS} && \
   update-ca-certificates && \


@@ -9,11 +9,10 @@ There are several docker-specific environment variables.
 - `HOST_USER` (default: `user`): User name executing `rpxy` inside the container.
 - `HOST_UID` (default: `900`): `UID` of `HOST_USER`.
 - `HOST_GID` (default: `900`): `GID` of `HOST_USER`
-- `LOG_LEVEL=debug|info|warn|error`: Log level
+- `LOG_LEVEL=trace|debug|info|warn|error`: Log level
-- `LOG_TO_FILE=true|false`: Enable logging to the log file `/rpxy/log/rpxy.log` using `logrotate`. You should mount `/rpxy/log` via docker volume option if enabled. The log dir and file will be owned by the `HOST_USER` with `HOST_UID:HOST_GID` on the host machine. Hence, `HOST_USER`, `HOST_UID` and `HOST_GID` should be the same as ones of the user who executes the `rpxy` docker container on the host.
+- `LOG_TO_FILE=true|false`: Enable logging to the log files using `logrotate` (locations: system/error log = `/rpxy/log/rpxy.log`, and access log = `/rpxy/log/access.log`). You should mount `/rpxy/log` via the docker volume option if enabled. The log dir and files will be owned by the `HOST_USER` with `HOST_UID:HOST_GID` on the host machine. Hence, `HOST_USER`, `HOST_UID` and `HOST_GID` should be the same as those of the user who executes the `rpxy` docker container on the host.
-- `WATCH=true|false` (default: `false`): Activate continuous watching of the config file if true.
 
-Then, all you need is to mount your `config.toml` as `/etc/rpxy.toml` and certificates/private keys as you like through the docker volume option. **If `WATCH=true`, You need to mount a directory, e.g., `./rpxy-config/`, including `rpxy.toml` on `/rpxy/config` instead of a file to correctly track file changes**. This is a docker limitation. Even if `WATCH=false`, you can mount the dir onto `/rpxy/config` rather than `/etc/rpxy.toml`. A file mounted on `/etc/rpxy` is prioritized over a dir mounted on `/rpxy/config`.
+Then, all you need is to mount your `config.toml` as `/etc/rpxy.toml` and certificates/private keys as you like through the docker volume option. **You need to mount a directory, e.g., `./rpxy-config/`, including `rpxy.toml` on `/rpxy/config` instead of a file to dynamically track file changes**. This is a docker limitation. You can mount the dir onto `/rpxy/config` rather than `/etc/rpxy.toml`. A file mounted on `/etc/rpxy` is prioritized over a dir mounted on `/rpxy/config`.
 
 See [`docker-compose.yml`](./docker-compose.yml) for the detailed configuration. Note that the file path of keys and certificates must be ones in your docker container.
 
@@ -27,19 +26,25 @@ e.g. `-v rpxy/ca-certificates:/usr/local/share/ca-certificates`
 Differences among tags are summarized as follows.
 
-### Latest Builds
+### Latest and versioned builds
 
-- `latest`: Built from the `main` branch with default features, running on Ubuntu.
-- `latest-slim`, `slim`: Built by `musl` from the `main` branch with default features, running on Alpine.
-- `latest-s2n`, `s2n`: Built from the `main` branch with the `http3-s2n` feature, running on Ubuntu.
-- `*-pq`: Built with the `post-quantum` feature. This feature supports the post-quantum key exchange using `rustls-post-quantum` crate.
+Latest builds are shipped from the `main` branch when a new version is released. For example, when version `x.y.z` is released, the following images are provided.
+
+- `latest`, `x.y.z`: Built with default features, running on Ubuntu.
+- `latest-slim`, `slim`, `x.y.z-slim`: Built by `musl` with default features, running on Alpine.
+- `latest-s2n`, `s2n`, `x.y.z-s2n`: Built with the `http3-s2n` feature, running on Ubuntu.
+
+Additionally, images built with `webpki-roots` are provided in a similar manner to the above (e.g., `latest-s2n-webpki-roots` and `s2n-webpki-roots` tagged for the same image).
 
-### Nightly Builds
+### Nightly builds
 
-- `nightly`: Built from the `develop` branch with default features, running on Ubuntu.
-- `nightly-slim`: Built by `musl` from the `develop` branch with default features, running on Alpine.
-- `nightly-s2n`: Built from the `develop` branch with the `http3-s2n` feature, running on Ubuntu.
-- `*-pq`: Built with the `post-quantum` feature. This feature supports the hybridized post-quantum key exchange using `rustls-post-quantum` crate.
+Nightly builds are shipped from the `develop` branch for every push.
+
+- `nightly`: Built with default features, running on Ubuntu.
+- `nightly-slim`: Built by `musl` with default features, running on Alpine.
+- `nightly-s2n`: Built with the `http3-s2n` feature, running on Ubuntu.
+
+Additionally, images built with `webpki-roots` are provided in a similar manner to the above (e.g., `nightly-s2n-webpki-roots`).
 
 ## Caveats


@@ -20,12 +20,11 @@ services:
       # - "linux/amd64"
       - "linux/arm64"
     environment:
-      - LOG_LEVEL=debug
+      - LOG_LEVEL=trace
       - LOG_TO_FILE=true
       - HOST_USER=jun
       - HOST_UID=501
       - HOST_GID=501
-      # - WATCH=true
     tty: false
     privileged: true
     volumes:


@@ -20,12 +20,11 @@ services:
       # - "linux/amd64"
       - "linux/arm64"
     environment:
-      - LOG_LEVEL=debug
+      - LOG_LEVEL=trace
       - LOG_TO_FILE=true
      - HOST_USER=jun
      - HOST_UID=501
      - HOST_GID=501
-      # - WATCH=true
    tty: false
    privileged: true
    volumes:


@@ -1,6 +1,7 @@
 #!/usr/bin/env sh
 LOG_DIR=/rpxy/log
-LOG_FILE=${LOG_DIR}/rpxy.log
+SYSTEM_LOG_FILE=${LOG_DIR}/rpxy.log
+ACCESS_LOG_FILE=${LOG_DIR}/access.log
 LOG_SIZE=10M
 LOG_NUM=10
@@ -43,8 +44,24 @@ include /etc/logrotate.d
 # system-specific logs may be also be configured here.
 EOF
 
-cat > /etc/logrotate.d/rpxy.conf << EOF
-${LOG_FILE} {
+cat > /etc/logrotate.d/rpxy-system.conf << EOF
+${SYSTEM_LOG_FILE} {
+  dateext
+  daily
+  missingok
+  rotate ${LOG_NUM}
+  notifempty
+  compress
+  delaycompress
+  dateformat -%Y-%m-%d-%s
+  size ${LOG_SIZE}
+  copytruncate
+  su ${USER} ${USER}
+}
+EOF
+
+cat > /etc/logrotate.d/rpxy-access.conf << EOF
+${ACCESS_LOG_FILE} {
   dateext
   daily
   missingok
@@ -157,10 +174,4 @@ fi
 # Run rpxy
 cd /rpxy
 echo "rpxy: Start with user: ${USER} (${USER_ID}:${GROUP_ID})"
-if "${LOGGING}"; then
-  echo "rpxy: Start with writing log file"
-  gosu ${USER} sh -c "/rpxy/run.sh 2>&1 | tee ${LOG_FILE}"
-else
-  echo "rpxy: Start without writing log file"
-  gosu ${USER} sh -c "/rpxy/run.sh 2>&1"
-fi
+gosu ${USER} sh -c "/rpxy/run.sh 2>&1"


@@ -1,5 +1,7 @@
 #!/usr/bin/env sh
 CONFIG_FILE=/etc/rpxy.toml
+LOG_DIR=/rpxy/log
+LOGGING=${LOG_TO_FILE:-false}
 
 # debug level logging
 if [ -z $LOG_LEVEL ]; then
@@ -7,19 +9,11 @@ if [ -z $LOG_LEVEL ]; then
 fi
 echo "rpxy: Logging with level ${LOG_LEVEL}"
 
-# continuously watch and reload the config file
-if [ -z $WATCH ]; then
-  WATCH=false
-else
-  if [ "$WATCH" = "true" ]; then
-    WATCH=true
-  else
-    WATCH=false
-  fi
-fi
-
-if $WATCH ; then
-  RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE} -w
+if "${LOGGING}"; then
+  echo "rpxy: Start with writing log files"
+  RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE} --log-dir ${LOG_DIR}
 else
+  echo "rpxy: Start without writing log files"
   RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE}
 fi


@@ -14,25 +14,28 @@ publish.workspace = true
 post-quantum = ["rustls-post-quantum"]
 
 [dependencies]
-url = { version = "2.5.2" }
+url = { version = "2.5.4" }
-rustc-hash = "2.0.0"
+ahash = "0.8.12"
-thiserror = "1.0.66"
+thiserror = "2.0.12"
-tracing = "0.1.40"
+tracing = "0.1.41"
-async-trait = "0.1.83"
+async-trait = "0.1.88"
 base64 = "0.22.1"
-aws-lc-rs = { version = "1.10.0", default-features = false, features = [
+aws-lc-rs = { version = "1.13.1", default-features = false, features = [
   "aws-lc-sys",
 ] }
 blocking = "1.6.1"
-rustls = { version = "0.23.16", default-features = false, features = [
+rustls = { version = "0.23.27", default-features = false, features = [
   "std",
   "aws_lc_rs",
 ] }
-rustls-platform-verifier = { version = "0.3.4" }
+rustls-platform-verifier = { version = "0.6.0" }
 rustls-acme = { path = "../submodules/rustls-acme/", default-features = false, features = [
   "aws-lc-rs",
 ] }
-rustls-post-quantum = { version = "0.1.0", optional = true }
+rustls-post-quantum = { version = "0.2.2", optional = true }
-tokio = { version = "1.41.0", default-features = false }
+tokio = { version = "1.45.1", default-features = false, features = [
+  "rt",
+  "macros",
+] }
-tokio-util = { version = "0.7.12", default-features = false }
+tokio-util = { version = "0.7.15", default-features = false }
-tokio-stream = { version = "0.1.16", default-features = false }
+tokio-stream = { version = "0.1.17", default-features = false }

View file

@ -12,4 +12,7 @@ pub enum RpxyAcmeError {
/// IO error /// IO error
#[error("IO error: {0}")] #[error("IO error: {0}")]
Io(#[from] std::io::Error), Io(#[from] std::io::Error),
/// TLS client configuration error
#[error("TLS client configuration error: {0}")]
TlsClientConfig(String),
} }

View file

@ -4,7 +4,7 @@ use crate::{
error::RpxyAcmeError, error::RpxyAcmeError,
log::*, log::*,
}; };
use rustc_hash::FxHashMap as HashMap; use ahash::HashMap;
use rustls::ServerConfig; use rustls::ServerConfig;
use rustls_acme::AcmeConfig; use rustls_acme::AcmeConfig;
use std::{path::PathBuf, sync::Arc}; use std::{path::PathBuf, sync::Arc};
@ -77,13 +77,9 @@ impl AcmeManager {
/// Returns a Vec<JoinHandle<()>> as task handles and a map of domain to ServerConfig for challenge. /// Returns a Vec<JoinHandle<()>> as task handles and a map of domain to ServerConfig for challenge.
pub fn spawn_manager_tasks( pub fn spawn_manager_tasks(
&self, &self,
cancel_token: Option<tokio_util::sync::CancellationToken>, cancel_token: tokio_util::sync::CancellationToken,
) -> (Vec<tokio::task::JoinHandle<()>>, HashMap<String, Arc<ServerConfig>>) { ) -> (Vec<tokio::task::JoinHandle<()>>, HashMap<String, Arc<ServerConfig>>) {
let rustls_client_config = rustls::ClientConfig::builder() let rustls_client_config = Self::create_tls_client_config().expect("Failed to create TLS client configuration for ACME");
.dangerous() // The `Verifier` we're using is actually safe
.with_custom_certificate_verifier(Arc::new(rustls_platform_verifier::Verifier::new()))
.with_no_client_auth();
let rustls_client_config = Arc::new(rustls_client_config);
let mut server_configs_for_challenge: HashMap<String, Arc<ServerConfig>> = HashMap::default(); let mut server_configs_for_challenge: HashMap<String, Arc<ServerConfig>> = HashMap::default();
let join_handles = self let join_handles = self
@ -115,14 +111,11 @@ impl AcmeManager {
} }
} }
}; };
if let Some(cancel_token) = cancel_token.as_ref() {
tokio::select! { tokio::select! {
_ = task => {}, _ = task => {},
_ = cancel_token.cancelled() => { debug!("rpxy ACME manager task for {domain} terminated") } _ = cancel_token.cancelled() => { debug!("rpxy ACME manager task for {domain} terminated") }
} }
} else {
task.await;
}
} }
}) })
}) })
@ -130,6 +123,26 @@ impl AcmeManager {
(join_handles, server_configs_for_challenge) (join_handles, server_configs_for_challenge)
} }
/// Creates a TLS client configuration with platform certificate verification.
///
/// This configuration uses the system's certificate store for verification,
/// which is appropriate for ACME certificate validation.
fn create_tls_client_config() -> Result<Arc<rustls::ClientConfig>, RpxyAcmeError> {
let crypto_provider = rustls::crypto::CryptoProvider::get_default().ok_or(RpxyAcmeError::TlsClientConfig(
"No default crypto provider available".to_string(),
))?;
let verifier = rustls_platform_verifier::Verifier::new(crypto_provider.clone())
.map_err(|e| RpxyAcmeError::TlsClientConfig(format!("Failed to create certificate verifier: {}", e)))?;
let client_config = rustls::ClientConfig::builder()
.dangerous() // Safe: using platform certificate verifier
.with_custom_certificate_verifier(Arc::new(verifier))
.with_no_client_auth();
Ok(Arc::new(client_config))
}
} }
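Note that `CryptoProvider::get_default()` only returns `Some(..)` after a process-wide default provider has been installed, so the `expect` in `spawn_manager_tasks` presupposes startup code along these lines. A minimal sketch, assuming aws-lc-rs (which this crate already depends on); rpxy's actual initialization may install the provider elsewhere:

use rustls::crypto::{CryptoProvider, aws_lc_rs};

fn install_default_crypto_provider() {
    // Install aws-lc-rs as the process-default provider; ignore the error
    // returned when another provider was already installed first.
    let _ = aws_lc_rs::default_provider().install_default();
    assert!(CryptoProvider::get_default().is_some());
}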
#[cfg(test)] #[cfg(test)]

View file

@ -13,10 +13,8 @@ publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features] [features]
# default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"] default = ["http3-quinn", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"] # default = ["http3-s2n", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie"]
http3-quinn = ["rpxy-lib/http3-quinn"] http3-quinn = ["rpxy-lib/http3-quinn"]
http3-s2n = ["rpxy-lib/http3-s2n"] http3-s2n = ["rpxy-lib/http3-s2n"]
native-tls-backend = ["rpxy-lib/native-tls-backend"] native-tls-backend = ["rpxy-lib/native-tls-backend"]
@ -30,30 +28,32 @@ sticky-cookie = ["rpxy-lib/sticky-cookie"]
[dependencies] [dependencies]
rpxy-lib = { path = "../rpxy-lib/", default-features = false } rpxy-lib = { path = "../rpxy-lib/", default-features = false }
mimalloc = { version = "*", default-features = false } # TODO: pin mimalloc due to compilation failure by musl
anyhow = "1.0.91" mimalloc = { version = "=0.1.44", default-features = false }
rustc-hash = "2.0.0" libmimalloc-sys = { version = "=0.1.40" }
serde = { version = "1.0.214", default-features = false, features = ["derive"] } anyhow = "1.0.98"
tokio = { version = "1.41.0", default-features = false, features = [ ahash = "0.8.12"
serde = { version = "1.0.219", default-features = false, features = ["derive"] }
tokio = { version = "1.45.1", default-features = false, features = [
"net", "net",
"rt-multi-thread", "rt-multi-thread",
"time", "time",
"sync", "sync",
"macros", "macros",
] } ] }
tokio-util = { version = "0.7.12", default-features = false } tokio-util = { version = "0.7.15", default-features = false }
async-trait = "0.1.83" async-trait = "0.1.88"
futures-util = { version = "0.3.31", default-features = false } futures-util = { version = "0.3.31", default-features = false }
# config # config
clap = { version = "4.5.20", features = ["std", "cargo", "wrap_help"] } clap = { version = "4.5.39", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.19", default-features = false, features = ["parse"] } toml = { version = "0.8.22", default-features = false, features = ["parse"] }
hot_reload = "0.1.6" hot_reload = "0.1.9"
serde_ignored = "0.1.10" serde_ignored = "0.1.12"
# logging # logging
tracing = { version = "0.1.40" } tracing = { version = "0.1.41" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
################################ ################################
# cert management # cert management

View file

@ -1,21 +1,29 @@
use super::toml::ConfigToml; use super::toml::{ConfigToml, ConfigTomlExt};
use crate::error::{anyhow, ensure}; use crate::error::{anyhow, ensure};
use clap::{Arg, ArgAction}; use ahash::HashMap;
use clap::Arg;
use hot_reload::{ReloaderReceiver, ReloaderService}; use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_certs::{build_cert_reloader, CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase}; use rpxy_certs::{CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase, build_cert_reloader};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig}; use rpxy_lib::{AppConfigList, ProxyConfig};
use rustc_hash::FxHashMap as HashMap;
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
use rpxy_acme::{AcmeManager, ACME_DIR_URL, ACME_REGISTRY_PATH}; use rpxy_acme::{ACME_DIR_URL, ACME_REGISTRY_PATH, AcmeManager};
/// Parsed options /// Options parsed from the CLI for configuring the application.
///
/// # Fields
/// - `config_file_path`: Path to the configuration file.
/// - `log_dir_path`: Optional path to the log directory.
pub struct Opts { pub struct Opts {
pub config_file_path: String, pub config_file_path: String,
pub watch: bool, pub log_dir_path: Option<String>,
} }
/// Parse arg values passed from cli /// Parses command-line arguments into an [`Opts`] struct.
///
/// Returns a populated [`Opts`] on success, or an error if parsing fails.
/// Expects a required `--config` argument and an optional `--log-dir` argument.
pub fn parse_opts() -> Result<Opts, anyhow::Error> { pub fn parse_opts() -> Result<Opts, anyhow::Error> {
let _ = include_str!("../../Cargo.toml"); let _ = include_str!("../../Cargo.toml");
let options = clap::command!() let options = clap::command!()
@ -28,78 +36,60 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
.help("Configuration file path like ./config.toml"), .help("Configuration file path like ./config.toml"),
) )
.arg( .arg(
Arg::new("watch") Arg::new("log_dir")
.long("watch") .long("log-dir")
.short('w') .short('l')
.action(ArgAction::SetTrue) .value_name("LOG_DIR")
.help("Activate dynamic reloading of the config file via continuous monitoring"), .help("Directory for log files. If not specified, logs are printed to stdout."),
); );
let matches = options.get_matches(); let matches = options.get_matches();
///////////////////////////////////
let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned(); let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
let watch = matches.get_one::<bool>("watch").unwrap().to_owned(); let log_dir_path = matches.get_one::<String>("log_dir").map(|v| v.to_owned());
Ok(Opts { config_file_path, watch }) Ok(Opts {
config_file_path,
log_dir_path,
})
} }
pub fn build_settings(config: &ConfigToml) -> std::result::Result<(ProxyConfig, AppConfigList), anyhow::Error> { /// Build proxy and app settings from config using ConfigTomlExt
// build proxy config pub fn build_settings(config: &ConfigToml) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
let proxy_config: ProxyConfig = config.try_into()?; config.validate_and_build_settings()
// backend_apps
let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;
// assertions for all backend apps
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// if only https_port is specified, tls must be configured for all apps
if proxy_config.http_port.is_none() {
ensure!(
apps.0.iter().all(|(_, app)| app.tls.is_some()),
"Some apps serves only plaintext HTTP"
);
}
// https redirection port must be configured only when both http_port and https_port are configured.
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
// https redirection can be configured if both ports are active
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
apps.0.iter().all(|(_, app)| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
}),
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// build applications
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
};
Ok((proxy_config, app_config_list))
} }
/* ----------------------- */ /* ----------------------- */
/// Helper to build a CryptoFileSource for an app, handling ACME if enabled
#[cfg(feature = "acme")]
fn build_tls_for_app_acme(
tls: &mut super::toml::TlsOption,
acme_option: &Option<super::toml::AcmeOption>,
server_name: &str,
acme_registry_path: &str,
acme_dir_url: &str,
) -> Result<(), anyhow::Error> {
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
let cert_path = format!("{}/{}", subdir, file_name);
tls.tls_cert_key_path = Some(cert_path.clone());
tls.tls_cert_path = Some(cert_path);
}
Ok(())
}
/// Build cert map /// Build cert map
/// Builds the certificate manager for TLS applications.
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
///
/// # Returns
/// Returns an option containing a tuple of certificate reloader service and receiver, or `None` if TLS is not enabled.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_cert_manager( pub async fn build_cert_manager(
config: &ConfigToml, config: &ConfigToml,
) -> Result< ) -> Result<
@ -136,19 +126,9 @@ pub async fn build_cert_manager(
ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some()); ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
let tls = {
let mut tls = tls.clone(); let mut tls = tls.clone();
if let Some(true) = tls.acme { #[cfg(feature = "acme")]
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none()); build_tls_for_app_acme(&mut tls, &acme_option, server_name, acme_registry_path, acme_dir_url)?;
// Both of tls_cert_key_path and tls_cert_path must be the same for ACME since it's a single file
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
tls.tls_cert_key_path = Some(format!("{}/{}", subdir, file_name));
tls.tls_cert_path = Some(format!("{}/{}", subdir, file_name));
}
tls
};
let crypto_file_source = CryptoFileSourceBuilder::default() let crypto_file_source = CryptoFileSourceBuilder::default()
.tls_cert_path(tls.tls_cert_path.as_ref().unwrap()) .tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
@ -165,24 +145,31 @@ pub async fn build_cert_manager(
/* ----------------------- */ /* ----------------------- */
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
/// Build acme manager /// Build acme manager
/// Builds the ACME manager for automatic certificate management (enabled with the `acme` feature).
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
/// * `runtime_handle` - Tokio runtime handle for async operations.
///
/// # Returns
/// Returns an option containing an [`AcmeManager`] if ACME is configured, or `None` otherwise.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_acme_manager( pub async fn build_acme_manager(
config: &ConfigToml, config: &ConfigToml,
runtime_handle: tokio::runtime::Handle, runtime_handle: tokio::runtime::Handle,
) -> Result<Option<AcmeManager>, anyhow::Error> { ) -> Result<Option<AcmeManager>, anyhow::Error> {
let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone()); let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone());
if acme_option.is_none() { let Some(acme_option) = acme_option else {
return Ok(None); return Ok(None);
} };
let acme_option = acme_option.unwrap();
let domains = config let domains: Vec<String> = config
.apps .apps
.as_ref() .as_ref()
.unwrap() .unwrap()
.0 .0
.values() .values()
.filter_map(|app| { .filter_map(|app| {
//
if let Some(tls) = app.tls.as_ref() { if let Some(tls) = app.tls.as_ref() {
if let Some(true) = tls.acme { if let Some(true) = tls.acme {
return Some(app.server_name.as_ref().unwrap().to_owned()); return Some(app.server_name.as_ref().unwrap().to_owned());
@ -190,7 +177,7 @@ pub async fn build_acme_manager(
} }
None None
}) })
.collect::<Vec<_>>(); .collect();
if domains.is_empty() { if domains.is_empty() {
return Ok(None); return Ok(None);

View file

@ -8,17 +8,16 @@ pub struct ConfigTomlReloader {
} }
#[async_trait] #[async_trait]
impl Reload<ConfigToml> for ConfigTomlReloader { impl Reload<ConfigToml, String> for ConfigTomlReloader {
type Source = String; type Source = String;
async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml>> { async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml, String>> {
Ok(Self { Ok(Self {
config_path: source.clone(), config_path: source.clone(),
}) })
} }
async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml>> { async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml, String>> {
let conf = ConfigToml::new(&self.config_path) let conf = ConfigToml::new(&self.config_path).map_err(|e| ReloaderError::<ConfigToml, String>::Reload(e.to_string()))?;
.map_err(|_e| ReloaderError::<ConfigToml>::Reload("Failed to reload config toml"))?;
Ok(Some(conf)) Ok(Some(conf))
} }
} }

View file

@ -3,13 +3,26 @@ use crate::{
error::{anyhow, ensure}, error::{anyhow, ensure},
log::warn, log::warn,
}; };
use rpxy_lib::{reexports::Uri, AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri}; use ahash::HashMap;
use rustc_hash::FxHashMap as HashMap; use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use serde::Deserialize; use serde::Deserialize;
use std::{fs, net::SocketAddr}; use std::{fs, net::SocketAddr};
use tokio::time::Duration; use tokio::time::Duration;
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)] #[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// Main configuration structure parsed from the TOML file.
///
/// # Fields
/// - `listen_port`: Optional TCP port for HTTP.
/// - `listen_port_tls`: Optional TCP port for HTTPS/TLS.
/// - `listen_ipv6`: Enable IPv6 listening.
/// - `https_redirection_port`: Optional port for HTTP to HTTPS redirection.
/// - `tcp_listen_backlog`: Optional TCP backlog size.
/// - `max_concurrent_streams`: Optional max concurrent streams.
/// - `max_clients`: Optional max client connections.
/// - `apps`: Optional application definitions.
/// - `default_app`: Optional default application name.
/// - `experimental`: Optional experimental features.
pub struct ConfigToml { pub struct ConfigToml {
pub listen_port: Option<u16>, pub listen_port: Option<u16>,
pub listen_port_tls: Option<u16>, pub listen_port_tls: Option<u16>,
@ -23,8 +36,75 @@ pub struct ConfigToml {
pub experimental: Option<Experimental>, pub experimental: Option<Experimental>,
} }
/// Extension trait for config validation and building
pub trait ConfigTomlExt {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error>;
}
impl ConfigTomlExt for ConfigToml {
fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
let proxy_config: ProxyConfig = self.try_into()?;
let apps = self.apps.as_ref().ok_or(anyhow!("Missing application spec"))?;
// Ensure at least one app is defined
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// Helper: all apps have TLS
let all_apps_have_tls = apps.0.values().all(|app| app.tls.is_some());
// Helper: all apps have https_redirection unset
let all_apps_no_https_redirection = apps.0.values().all(|app| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
});
if proxy_config.http_port.is_none() {
ensure!(all_apps_have_tls, "Some apps serve only plaintext HTTP");
}
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
all_apps_no_https_redirection,
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// Build AppConfigList
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: self.default_app.clone().map(|v| v.to_ascii_lowercase()),
};
Ok((proxy_config, app_config_list))
}
}
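For reference, the three port/TLS consistency rules enforced above reduce to the following standalone sketch; booleans stand in for the `Option` fields and per-app TLS settings, and the names are illustrative, not part of the crate:

fn check_ports(
    http_port: bool,
    https_port: bool,
    https_redirection_port: bool,
    all_apps_have_tls: bool,
    all_apps_no_https_redirection: bool,
) -> Result<(), &'static str> {
    // HTTPS-only deployments require TLS on every app.
    if !http_port && !all_apps_have_tls {
        return Err("Some apps serve only plaintext HTTP");
    }
    // A dedicated redirection port only makes sense with both listeners.
    if https_redirection_port && !(http_port && https_port) {
        return Err("https_redirection_port requires both http_port and https_port");
    }
    // Per-app redirection likewise needs both listeners.
    if !(http_port && https_port) && !all_apps_no_https_redirection {
        return Err("https_redirection requires both http_port and https_port");
    }
    Ok(())
}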
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)] #[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// HTTP/3 protocol options for server configuration.
///
/// # Fields
/// - `alt_svc_max_age`: Optional max age for Alt-Svc header.
/// - `request_max_body_size`: Optional maximum request body size.
/// - `max_concurrent_connections`: Optional maximum concurrent connections.
/// - `max_concurrent_bidistream`: Optional maximum concurrent bidirectional streams.
/// - `max_concurrent_unistream`: Optional maximum concurrent unidirectional streams.
/// - `max_idle_timeout`: Optional maximum idle timeout in milliseconds.
pub struct Http3Option { pub struct Http3Option {
pub alt_svc_max_age: Option<u32>, pub alt_svc_max_age: Option<u32>,
pub request_max_body_size: Option<usize>, pub request_max_body_size: Option<usize>,
@ -232,7 +312,7 @@ impl ConfigToml {
// Check unused fields during deserialization // Check unused fields during deserialization
let t = toml::de::Deserializer::new(&config_str); let t = toml::de::Deserializer::new(&config_str);
let mut unused = rustc_hash::FxHashSet::default(); let mut unused = ahash::HashSet::default();
let res = serde_ignored::deserialize(t, |path| { let res = serde_ignored::deserialize(t, |path| {
unused.insert(path.to_string()); unused.insert(path.to_string());

View file

@ -1,7 +1,13 @@
/// Default IPv4 listen addresses for the server.
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"]; pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
/// Default IPv6 listen addresses for the server.
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"]; pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
/// Delay in seconds before reloading the configuration after changes.
pub const CONFIG_WATCH_DELAY_SECS: u32 = 15; pub const CONFIG_WATCH_DELAY_SECS: u32 = 15;
#[cfg(feature = "cache")] #[cfg(feature = "cache")]
// Cache directory /// Directory path for cache storage (enabled with "cache" feature).
pub const CACHE_DIR: &str = "./cache"; pub const CACHE_DIR: &str = "./cache";
pub(crate) const ACCESS_LOG_FILE: &str = "access.log";
pub(crate) const SYSTEM_LOG_FILE: &str = "rpxy.log";

View file

@ -1,2 +1,2 @@
#[allow(unused)] #[allow(unused)]
pub use anyhow::{anyhow, bail, ensure, Context}; pub use anyhow::{Context, anyhow, bail, ensure};

View file

@ -1,44 +1,126 @@
use crate::constants::{ACCESS_LOG_FILE, SYSTEM_LOG_FILE};
use rpxy_lib::log_event_names;
use std::str::FromStr; use std::str::FromStr;
use tracing_subscriber::{fmt, prelude::*}; use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};
#[allow(unused)] #[allow(unused)]
pub use tracing::{debug, error, info, warn}; pub use tracing::{debug, error, info, warn};
/// Initialize the logger with the RUST_LOG environment variable. /// Initialize the logger with the RUST_LOG environment variable.
pub fn init_logger() { pub fn init_logger(log_dir_path: Option<&str>) {
let level_string = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()); let level = std::env::var("RUST_LOG")
let level = tracing::Level::from_str(level_string.as_str()).unwrap_or(tracing::Level::INFO); .ok()
.and_then(|s| tracing::Level::from_str(&s).ok())
.unwrap_or(tracing::Level::INFO);
// This limits the logger to emits only this crate with any level above RUST_LOG, for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level. match log_dir_path {
let stdio_layer = fmt::layer().with_level(true).with_thread_ids(false); None => init_stdio_logger(level),
if level <= tracing::Level::INFO { Some(path) => init_file_logger(level, path),
// in normal deployment environment }
let stdio_layer = stdio_layer }
/// file logging
fn init_file_logger(level: tracing::Level, log_dir_path: &str) {
println!("Activate logging to files: {}", log_dir_path);
let log_dir = std::path::Path::new(log_dir_path);
if !log_dir.exists() {
println!("Directory does not exist, creating: {}", log_dir.display());
std::fs::create_dir_all(log_dir).expect("Failed to create log directory");
}
let access_log_path = log_dir.join(ACCESS_LOG_FILE);
let system_log_path = log_dir.join(SYSTEM_LOG_FILE);
println!("Access log: {}", access_log_path.display());
println!("System and error log: {}", system_log_path.display());
let access_log = open_log_file(&access_log_path);
let system_log = open_log_file(&system_log_path);
let access_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false) .with_target(false)
.with_level(false)
.compact() .compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| { .with_ansi(false)
(metadata .with_writer(access_log)
.target() .with_filter(AccessLogFilter);
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level) let system_layer = fmt::layer()
|| metadata.level() <= &tracing::Level::ERROR.min(level) .with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(true)
.compact()
.with_ansi(false)
.with_writer(system_log)
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
})); }));
tracing_subscriber::registry().with(stdio_layer).init();
tracing_subscriber::registry().with(access_layer).with(system_layer).init();
}
/// stdio logging
fn init_stdio_logger(level: tracing::Level) {
// This limits the logger to emit only this crate with any level above RUST_LOG,
// for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let base_layer = fmt::layer().with_level(true).with_thread_ids(false);
let debug = level > tracing::Level::INFO;
let filter = filter_fn(move |metadata| {
if debug {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
} else { } else {
// debugging (is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
let stdio_layer = stdio_layer }
});
let stdio_layer = if debug {
base_layer
.with_line_number(true) .with_line_number(true)
.with_target(true) .with_target(true)
.with_thread_names(true) .with_thread_names(true)
.with_target(true) .with_target(true)
.compact() .compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| { .with_filter(filter)
(metadata } else {
.target() base_layer.with_target(false).compact().with_filter(filter)
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::INFO.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
}; };
tracing_subscriber::registry().with(stdio_layer).init();
}
/// Access log filter
struct AccessLogFilter;
impl<S> tracing_subscriber::layer::Filter<S> for AccessLogFilter {
fn enabled(&self, metadata: &tracing::Metadata<'_>, _: &tracing_subscriber::layer::Context<'_, S>) -> bool {
is_cargo_pkg(metadata) && metadata.name().contains(log_event_names::ACCESS_LOG) && metadata.level() <= &tracing::Level::INFO
}
}
#[inline]
/// Create a file for logging
fn open_log_file<P>(path: P) -> std::fs::File
where
P: AsRef<std::path::Path>,
{
// create a file if it does not exist
std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)
.expect("Failed to open the log file")
}
#[inline]
/// Matches cargo package name with `_` instead of `-`
fn is_cargo_pkg(metadata: &tracing::Metadata<'_>) -> bool {
let pkg_name = env!("CARGO_PKG_NAME").replace('-', "_");
metadata.target().starts_with(&pkg_name)
} }
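A plausible call site for the two modes above, assuming this module's `init_logger`; the path is hypothetical, and rpxy's actual `main` wires it from `--log-dir` as shown further below:

fn main() {
    // Level comes from RUST_LOG (defaults to INFO).
    init_logger(None);                 // stdout logging
    // init_logger(Some("/rpxy/log")); // or: access.log + rpxy.log under the dir
    tracing::info!("logger initialized");
}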

View file

@ -9,19 +9,17 @@ mod log;
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
use crate::config::build_acme_manager; use crate::config::build_acme_manager;
use crate::{ use crate::{
config::{build_cert_manager, build_settings, parse_opts, ConfigToml, ConfigTomlReloader}, config::{ConfigToml, ConfigTomlReloader, build_cert_manager, build_settings, parse_opts},
constants::CONFIG_WATCH_DELAY_SECS, constants::CONFIG_WATCH_DELAY_SECS,
error::*, error::*,
log::*, log::*,
}; };
use hot_reload::{ReloaderReceiver, ReloaderService}; use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_lib::{entrypoint, RpxyOptions, RpxyOptionsBuilder}; use rpxy_lib::{RpxyOptions, RpxyOptionsBuilder, entrypoint};
use std::sync::Arc; use std::sync::Arc;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
fn main() { fn main() {
init_logger();
let mut runtime_builder = tokio::runtime::Builder::new_multi_thread(); let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
runtime_builder.enable_all(); runtime_builder.enable_all();
runtime_builder.thread_name("rpxy"); runtime_builder.thread_name("rpxy");
@ -30,18 +28,16 @@ fn main() {
runtime.block_on(async { runtime.block_on(async {
// Initially load options // Initially load options
let Ok(parsed_opts) = parse_opts() else { let Ok(parsed_opts) = parse_opts() else {
error!("Invalid toml file");
std::process::exit(1); std::process::exit(1);
}; };
if !parsed_opts.watch { init_logger(parsed_opts.log_dir_path.as_deref());
if let Err(e) = rpxy_service_without_watcher(&parsed_opts.config_file_path, runtime.handle().clone()).await {
error!("rpxy service existed: {e}"); let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml, String>::new(
std::process::exit(1); &parsed_opts.config_file_path,
} CONFIG_WATCH_DELAY_SECS,
} else { false,
let (config_service, config_rx) = )
ReloaderService::<ConfigTomlReloader, ConfigToml>::new(&parsed_opts.config_file_path, CONFIG_WATCH_DELAY_SECS, false)
.await .await
.unwrap(); .unwrap();
@ -52,7 +48,7 @@ fn main() {
std::process::exit(1); std::process::exit(1);
} }
} }
rpxy_res = rpxy_service_with_watcher(config_rx, runtime.handle().clone()) => { rpxy_res = rpxy_service(config_rx, runtime.handle().clone()) => {
if let Err(e) = rpxy_res { if let Err(e) = rpxy_res {
error!("rpxy service existed: {e}"); error!("rpxy service existed: {e}");
std::process::exit(1); std::process::exit(1);
@ -60,7 +56,6 @@ fn main() {
} }
} }
std::process::exit(0); std::process::exit(0);
}
}); });
} }
@ -76,6 +71,7 @@ struct RpxyService {
} }
impl RpxyService { impl RpxyService {
/// Create a new RpxyService from config and runtime handle.
async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> { async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> {
let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?; let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?;
@ -85,7 +81,7 @@ impl RpxyService {
.map(|(s, r)| (Some(Arc::new(s)), Some(r))) .map(|(s, r)| (Some(Arc::new(s)), Some(r)))
.unwrap_or((None, None)); .unwrap_or((None, None));
Ok(RpxyService { Ok(Self {
runtime_handle: runtime_handle.clone(), runtime_handle: runtime_handle.clone(),
proxy_conf, proxy_conf,
app_conf, app_conf,
@ -96,7 +92,7 @@ impl RpxyService {
}) })
} }
async fn start(&self, cancel_token: Option<CancellationToken>) -> Result<(), anyhow::Error> { async fn start(&self, cancel_token: CancellationToken) -> Result<(), anyhow::Error> {
let RpxyService { let RpxyService {
runtime_handle, runtime_handle,
proxy_conf, proxy_conf,
@ -111,17 +107,19 @@ impl RpxyService {
{ {
let (acme_join_handles, server_config_acme_challenge) = acme_manager let (acme_join_handles, server_config_acme_challenge) = acme_manager
.as_ref() .as_ref()
.map(|m| m.spawn_manager_tasks(cancel_token.as_ref().map(|t| t.child_token()))) .map(|m| m.spawn_manager_tasks(cancel_token.child_token()))
.unwrap_or((vec![], Default::default())); .unwrap_or((vec![], Default::default()));
let rpxy_opts = RpxyOptionsBuilder::default() let rpxy_opts = RpxyOptionsBuilder::default()
.proxy_config(proxy_conf.clone()) .proxy_config(proxy_conf.clone())
.app_config_list(app_conf.clone()) .app_config_list(app_conf.clone())
.cert_rx(cert_rx.clone()) .cert_rx(cert_rx.clone())
.runtime_handle(runtime_handle.clone()) .runtime_handle(runtime_handle.clone())
.cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
.server_configs_acme_challenge(Arc::new(server_config_acme_challenge)) .server_configs_acme_challenge(Arc::new(server_config_acme_challenge))
.build()?; .build()?;
self.start_inner(rpxy_opts, acme_join_handles).await.map_err(|e| anyhow!(e)) self
.start_inner(rpxy_opts, cancel_token, acme_join_handles)
.await
.map_err(|e| anyhow!(e))
} }
#[cfg(not(feature = "acme"))] #[cfg(not(feature = "acme"))]
@ -131,9 +129,8 @@ impl RpxyService {
.app_config_list(app_conf.clone()) .app_config_list(app_conf.clone())
.cert_rx(cert_rx.clone()) .cert_rx(cert_rx.clone())
.runtime_handle(runtime_handle.clone()) .runtime_handle(runtime_handle.clone())
.cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
.build()?; .build()?;
self.start_inner(rpxy_opts).await.map_err(|e| anyhow!(e)) self.start_inner(rpxy_opts, cancel_token).await.map_err(|e| anyhow!(e))
} }
} }
@ -141,19 +138,19 @@ impl RpxyService {
async fn start_inner( async fn start_inner(
&self, &self,
rpxy_opts: RpxyOptions, rpxy_opts: RpxyOptions,
cancel_token: CancellationToken,
#[cfg(feature = "acme")] acme_task_handles: Vec<tokio::task::JoinHandle<()>>, #[cfg(feature = "acme")] acme_task_handles: Vec<tokio::task::JoinHandle<()>>,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
let cancel_token = rpxy_opts.cancel_token.clone(); let cancel_token = cancel_token.clone();
let runtime_handle = rpxy_opts.runtime_handle.clone(); let runtime_handle = rpxy_opts.runtime_handle.clone();
// spawn rpxy entrypoint, where cancellation token is possibly contained inside the service // spawn rpxy entrypoint, where cancellation token is possibly contained inside the service
let cancel_token_clone = cancel_token.clone(); let cancel_token_clone = cancel_token.clone();
let child_cancel_token = cancel_token.child_token();
let rpxy_handle = runtime_handle.spawn(async move { let rpxy_handle = runtime_handle.spawn(async move {
if let Err(e) = entrypoint(&rpxy_opts).await { if let Err(e) = entrypoint(&rpxy_opts, child_cancel_token).await {
error!("rpxy entrypoint exited on error: {e}"); error!("rpxy entrypoint exited on error: {e}");
if let Some(cancel_token) = cancel_token_clone { cancel_token_clone.cancel();
cancel_token.cancel();
}
return Err(anyhow!(e)); return Err(anyhow!(e));
} }
Ok(()) Ok(())
@ -166,15 +163,14 @@ impl RpxyService {
// spawn certificate reloader service, where cert service does not have cancellation token inside the service // spawn certificate reloader service, where cert service does not have cancellation token inside the service
let cert_service = self.cert_service.as_ref().unwrap().clone(); let cert_service = self.cert_service.as_ref().unwrap().clone();
let cancel_token_clone = cancel_token.clone(); let cancel_token_clone = cancel_token.clone();
let child_cancel_token = cancel_token.as_ref().map(|c| c.child_token()); let child_cancel_token = cancel_token.child_token();
let cert_handle = runtime_handle.spawn(async move { let cert_handle = runtime_handle.spawn(async move {
if let Some(child_cancel_token) = child_cancel_token {
tokio::select! { tokio::select! {
cert_res = cert_service.start() => { cert_res = cert_service.start() => {
if let Err(ref e) = cert_res { if let Err(ref e) = cert_res {
error!("cert reloader service exited on error: {e}"); error!("cert reloader service exited on error: {e}");
} }
cancel_token_clone.unwrap().cancel(); cancel_token_clone.cancel();
cert_res.map_err(|e| anyhow!(e)) cert_res.map_err(|e| anyhow!(e))
} }
_ = child_cancel_token.cancelled() => { _ = child_cancel_token.cancelled() => {
@ -182,9 +178,6 @@ impl RpxyService {
Ok(()) Ok(())
} }
} }
} else {
cert_service.start().await.map_err(|e| anyhow!(e))
}
}); });
#[cfg(not(feature = "acme"))] #[cfg(not(feature = "acme"))]
@ -218,9 +211,7 @@ impl RpxyService {
if let Err(ref e) = acme_res { if let Err(ref e) = acme_res {
error!("acme manager exited on error: {e}"); error!("acme manager exited on error: {e}");
} }
if let Some(cancel_token) = cancel_token_clone { cancel_token_clone.cancel();
cancel_token.cancel();
}
acme_res.map_err(|e| anyhow!(e)) acme_res.map_err(|e| anyhow!(e))
}); });
let (rpxy_res, cert_res, acme_res) = tokio::join!(rpxy_handle, cert_handle, acme_handle); let (rpxy_res, cert_res, acme_res) = tokio::join!(rpxy_handle, cert_handle, acme_handle);
@ -235,18 +226,8 @@ impl RpxyService {
} }
} }
async fn rpxy_service_without_watcher( async fn rpxy_service(
config_file_path: &str, mut config_rx: ReloaderReceiver<ConfigToml, String>,
runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> {
info!("Start rpxy service");
let config_toml = ConfigToml::new(config_file_path).map_err(|e| anyhow!("Invalid toml file: {e}"))?;
let service = RpxyService::new(&config_toml, runtime_handle).await?;
service.start(None).await
}
async fn rpxy_service_with_watcher(
mut config_rx: ReloaderReceiver<ConfigToml>,
runtime_handle: tokio::runtime::Handle, runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
info!("Start rpxy service with dynamic config reloader"); info!("Start rpxy service with dynamic config reloader");
@ -265,7 +246,7 @@ async fn rpxy_service_with_watcher(
tokio::select! { tokio::select! {
/* ---------- */ /* ---------- */
rpxy_res = service.start(Some(cancel_token.clone())) => { rpxy_res = service.start(cancel_token.clone()) => {
if let Err(ref e) = rpxy_res { if let Err(ref e) = rpxy_res {
error!("rpxy service exited on error: {e}"); error!("rpxy service exited on error: {e}");
} else { } else {
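The shutdown wiring above follows one pattern throughout: each subsystem races its work against a child of a shared `CancellationToken`, and cancels the parent when it exits on error so the siblings are torn down too. A minimal sketch with hypothetical names:

use tokio_util::sync::CancellationToken;

async fn run_subsystem<F>(work: F, parent: CancellationToken)
where
    F: std::future::Future<Output = Result<(), String>>,
{
    let child = parent.child_token();
    tokio::select! {
        // A failed subsystem propagates shutdown to its siblings.
        res = work => { if res.is_err() { parent.cancel(); } }
        // A sibling (or the caller) initiated shutdown.
        _ = child.cancelled() => {}
    }
}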

View file

@ -16,26 +16,26 @@ post-quantum = ["rustls-post-quantum"]
http3 = [] http3 = []
[dependencies] [dependencies]
rustc-hash = { version = "2.0.0" } ahash = { version = "0.8.12" }
tracing = { version = "0.1.40" } tracing = { version = "0.1.41" }
derive_builder = { version = "0.20.2" } derive_builder = { version = "0.20.2" }
thiserror = { version = "1.0.66" } thiserror = { version = "2.0.12" }
hot_reload = { version = "0.1.6" } hot_reload = { version = "0.1.9" }
async-trait = { version = "0.1.83" } async-trait = { version = "0.1.88" }
rustls = { version = "0.23.16", default-features = false, features = [ rustls = { version = "0.23.27", default-features = false, features = [
"std", "std",
"aws_lc_rs", "aws_lc_rs",
] } ] }
rustls-pemfile = { version = "2.2.0" } rustls-pemfile = { version = "2.2.0" }
rustls-webpki = { version = "0.102.8", default-features = false, features = [ rustls-webpki = { version = "0.103.3", default-features = false, features = [
"std", "std",
"aws_lc_rs", "aws-lc-rs",
] } ] }
rustls-post-quantum = { version = "0.1.0", optional = true } rustls-post-quantum = { version = "0.2.2", optional = true }
x509-parser = { version = "0.16.0" } x509-parser = { version = "0.17.0" }
[dev-dependencies] [dev-dependencies]
tokio = { version = "1.41.0", default-features = false, features = [ tokio = { version = "1.45.1", default-features = false, features = [
"rt-multi-thread", "rt-multi-thread",
"macros", "macros",
] } ] }

View file

@ -1,5 +1,5 @@
use crate::error::*; use crate::error::*;
use rustc_hash::FxHashMap as HashMap; use ahash::HashMap;
use rustls::{crypto::aws_lc_rs::sign::any_supported_type, pki_types, sign::CertifiedKey}; use rustls::{crypto::aws_lc_rs::sign::any_supported_type, pki_types, sign::CertifiedKey};
use std::sync::Arc; use std::sync::Arc;
use x509_parser::prelude::*; use x509_parser::prelude::*;
@ -65,7 +65,7 @@ impl SingleServerCertsKeys {
.cert_keys .cert_keys
.clone() .clone()
.iter() .iter()
.find_map(|k| if let Ok(sk) = any_supported_type(k) { Some(sk) } else { None }) .find_map(|k| any_supported_type(k).ok())
.ok_or_else(|| RpxyCertError::InvalidCertificateAndKey)?; .ok_or_else(|| RpxyCertError::InvalidCertificateAndKey)?;
let cert = self.certs.iter().map(|c| Certificate::from(c.to_vec())).collect::<Vec<_>>(); let cert = self.certs.iter().map(|c| Certificate::from(c.to_vec())).collect::<Vec<_>>();

View file

@ -10,8 +10,8 @@ mod log {
} }
use crate::{error::*, log::*, reloader_service::DynCryptoSource}; use crate::{error::*, log::*, reloader_service::DynCryptoSource};
use ahash::HashMap;
use hot_reload::{ReloaderReceiver, ReloaderService}; use hot_reload::{ReloaderReceiver, ReloaderService};
use rustc_hash::FxHashMap as HashMap;
use rustls::crypto::CryptoProvider; use rustls::crypto::CryptoProvider;
use std::sync::Arc; use std::sync::Arc;

View file

@ -4,9 +4,9 @@ use crate::{
log::*, log::*,
server_crypto::{ServerCryptoBase, ServerNameBytes}, server_crypto::{ServerCryptoBase, ServerNameBytes},
}; };
use ahash::HashMap;
use async_trait::async_trait; use async_trait::async_trait;
use hot_reload::{Reload, ReloaderError}; use hot_reload::{Reload, ReloaderError};
use rustc_hash::FxHashMap as HashMap;
use std::sync::Arc; use std::sync::Arc;
/* ------------------------------------------------ */ /* ------------------------------------------------ */

View file

@ -1,9 +1,9 @@
use crate::{certs::SingleServerCertsKeys, error::*, log::*}; use crate::{certs::SingleServerCertsKeys, error::*, log::*};
use rustc_hash::FxHashMap as HashMap; use ahash::HashMap;
use rustls::{ use rustls::{
RootCertStore, ServerConfig,
crypto::CryptoProvider, crypto::CryptoProvider,
server::{ResolvesServerCertUsingSni, WebPkiClientVerifier}, server::{ResolvesServerCertUsingSni, WebPkiClientVerifier},
RootCertStore, ServerConfig,
}; };
use std::sync::Arc; use std::sync::Arc;

View file

@ -36,12 +36,12 @@ post-quantum = [
] ]
[dependencies] [dependencies]
rand = "0.8.5" rand = "0.9.1"
rustc-hash = "2.0.0" ahash = "0.8.12"
bytes = "1.8.0" bytes = "1.10.1"
derive_builder = "0.20.2" derive_builder = "0.20.2"
futures = { version = "0.3.31", features = ["alloc", "async-await"] } futures = { version = "0.3.31", features = ["alloc", "async-await"] }
tokio = { version = "1.41.0", default-features = false, features = [ tokio = { version = "1.45.1", default-features = false, features = [
"net", "net",
"rt-multi-thread", "rt-multi-thread",
"time", "time",
@ -49,19 +49,19 @@ tokio = { version = "1.41.0", default-features = false, features = [
"macros", "macros",
"fs", "fs",
] } ] }
tokio-util = { version = "0.7.12", default-features = false } tokio-util = { version = "0.7.15", default-features = false }
pin-project-lite = "0.2.15" pin-project-lite = "0.2.16"
async-trait = "0.1.83" async-trait = "0.1.88"
# Error handling # Error handling
anyhow = "1.0.91" anyhow = "1.0.98"
thiserror = "1.0.66" thiserror = "2.0.12"
# http for both server and client # http for both server and client
http = "1.1.0" http = "1.3.1"
http-body-util = "0.1.2" http-body-util = "0.1.3"
hyper = { version = "1.5.0", default-features = false } hyper = { version = "1.6.0", default-features = false }
hyper-util = { version = "0.1.10", features = ["full"] } hyper-util = { version = "0.1.13", features = ["full"] }
futures-util = { version = "0.3.31", default-features = false } futures-util = { version = "0.3.31", default-features = false }
futures-channel = { version = "0.3.31", default-features = false } futures-channel = { version = "0.3.31", default-features = false }
@ -70,7 +70,7 @@ hyper-tls = { version = "0.6.0", features = [
"alpn", "alpn",
"vendored", "vendored",
], optional = true } ], optional = true }
hyper-rustls = { version = "0.27.3", default-features = false, features = [ hyper-rustls = { version = "0.27.6", default-features = false, features = [
"aws-lc-rs", "aws-lc-rs",
"http1", "http1",
"http2", "http2",
@ -79,40 +79,40 @@ hyper-rustls = { version = "0.27.3", default-features = false, features = [
# tls and cert management for server # tls and cert management for server
rpxy-certs = { path = "../rpxy-certs/", default-features = false } rpxy-certs = { path = "../rpxy-certs/", default-features = false }
hot_reload = "0.1.6" hot_reload = "0.1.9"
rustls = { version = "0.23.16", default-features = false } rustls = { version = "0.23.27", default-features = false }
rustls-post-quantum = { version = "0.1.0", optional = true } rustls-post-quantum = { version = "0.2.2", optional = true }
tokio-rustls = { version = "0.26.0", features = ["early-data"] } tokio-rustls = { version = "0.26.2", features = ["early-data"] }
# acme # acme
rpxy-acme = { path = "../rpxy-acme/", default-features = false, optional = true } rpxy-acme = { path = "../rpxy-acme/", default-features = false, optional = true }
# logging # logging
tracing = { version = "0.1.40" } tracing = { version = "0.1.41" }
# http/3 # http/3
quinn = { version = "0.11.5", optional = true } quinn = { version = "0.11.8", optional = true }
h3 = { version = "0.0.6", features = ["tracing"], optional = true } h3 = { version = "0.0.8", features = ["tracing"], optional = true }
h3-quinn = { version = "0.0.7", optional = true } h3-quinn = { version = "0.0.10", optional = true }
s2n-quic = { version = "1.48.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [ s2n-quic = { version = "1.59.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [
"provider-tls-rustls", "provider-tls-rustls",
], optional = true } ], optional = true }
s2n-quic-core = { version = "0.48.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true } s2n-quic-core = { version = "0.59.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true }
s2n-quic-rustls = { version = "0.48.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true } s2n-quic-rustls = { version = "0.59.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true }
s2n-quic-h3 = { path = "../submodules/s2n-quic/quic/s2n-quic-h3/", features = [ s2n-quic-h3 = { path = "../submodules/s2n-quic/quic/s2n-quic-h3/", features = [
"tracing", "tracing",
], optional = true } ], optional = true }
########## ##########
# for UDP socket with SO_REUSEADDR when h3 with quinn # for UDP socket with SO_REUSEADDR when h3 with quinn
socket2 = { version = "0.5.7", features = ["all"], optional = true } socket2 = { version = "0.5.10", features = ["all"], optional = true }
# cache # cache
http-cache-semantics = { path = "../submodules/rusty-http-cache-semantics", default-features = false, optional = true } http-cache-semantics = { path = "../submodules/rusty-http-cache-semantics", default-features = false, optional = true }
lru = { version = "0.12.5", optional = true } lru = { version = "0.14.0", optional = true }
sha2 = { version = "0.10.8", default-features = false, optional = true } sha2 = { version = "0.10.9", default-features = false, optional = true }
# cookie handling for sticky cookie # cookie handling for sticky cookie
chrono = { version = "0.4.38", default-features = false, features = [ chrono = { version = "0.4.41", default-features = false, features = [
"unstable-locales", "unstable-locales",
"alloc", "alloc",
"clock", "clock",

View file

@ -1,11 +1,11 @@
use crate::{ use crate::{
AppConfig, AppConfigList,
error::*, error::*,
log::*, log::*,
name_exp::{ByteName, ServerName}, name_exp::{ByteName, ServerName},
AppConfig, AppConfigList,
}; };
use ahash::HashMap;
use derive_builder::Builder; use derive_builder::Builder;
use rustc_hash::FxHashMap as HashMap;
use std::borrow::Cow; use std::borrow::Cow;
use super::upstream::PathManager; use super::upstream::PathManager;
@ -26,6 +26,7 @@ pub struct BackendApp {
pub https_redirection: Option<bool>, pub https_redirection: Option<bool>,
/// tls settings: mutual TLS is enabled /// tls settings: mutual TLS is enabled
#[builder(default)] #[builder(default)]
#[allow(unused)]
pub mutual_tls: Option<bool>, pub mutual_tls: Option<bool>,
} }
impl<'a> BackendAppBuilder { impl<'a> BackendAppBuilder {

View file

@ -7,8 +7,8 @@ pub use super::{
use derive_builder::Builder; use derive_builder::Builder;
use rand::Rng; use rand::Rng;
use std::sync::{ use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc, Arc,
atomic::{AtomicUsize, Ordering},
}; };
/// Constants to specify a load balance option /// Constants to specify a load balance option
@ -80,8 +80,8 @@ impl LoadBalanceRandomBuilder {
impl LoadBalanceWithPointer for LoadBalanceRandom { impl LoadBalanceWithPointer for LoadBalanceRandom {
/// Returns the random index within the range /// Returns the random index within the range
fn get_ptr(&self, _info: Option<&LoadBalanceContext>) -> PointerToUpstream { fn get_ptr(&self, _info: Option<&LoadBalanceContext>) -> PointerToUpstream {
let mut rng = rand::thread_rng(); let mut rng = rand::rng();
let ptr = rng.gen_range(0..self.num_upstreams); let ptr = rng.random_range(0..self.num_upstreams);
PointerToUpstream { ptr, context: None } PointerToUpstream { ptr, context: None }
} }
} }
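The change above tracks the rand 0.9 API renames (`thread_rng()` became `rng()`, `gen_range()` became `random_range()`); a minimal standalone illustration:

use rand::Rng;

fn pick_upstream(num_upstreams: usize) -> usize {
    let mut rng = rand::rng();           // rand 0.8: rand::thread_rng()
    rng.random_range(0..num_upstreams)   // rand 0.8: rng.gen_range(..)
}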

View file

@ -1,16 +1,16 @@
use super::{ use super::{
Upstream,
load_balance_main::{LoadBalanceContext, LoadBalanceWithPointer, PointerToUpstream}, load_balance_main::{LoadBalanceContext, LoadBalanceWithPointer, PointerToUpstream},
sticky_cookie::StickyCookieConfig, sticky_cookie::StickyCookieConfig,
Upstream,
}; };
use crate::{constants::STICKY_COOKIE_NAME, log::*}; use crate::{constants::STICKY_COOKIE_NAME, log::*};
use ahash::HashMap;
use derive_builder::Builder; use derive_builder::Builder;
use rustc_hash::FxHashMap as HashMap;
use std::{ use std::{
borrow::Cow, borrow::Cow,
sync::{ sync::{
atomic::{AtomicUsize, Ordering},
Arc, Arc,
atomic::{AtomicUsize, Ordering},
}, },
}; };
@ -112,13 +112,16 @@ impl LoadBalanceWithPointer for LoadBalanceSticky {
} }
Some(context) => { Some(context) => {
let server_id = &context.sticky_cookie.value.value; let server_id = &context.sticky_cookie.value.value;
if let Some(server_index) = self.get_server_index_from_id(server_id) { self.get_server_index_from_id(server_id).map_or_else(
debug!("Valid sticky cookie: id={}, index={}", server_id, server_index); || {
server_index
} else {
debug!("Invalid sticky cookie: id={}", server_id); debug!("Invalid sticky cookie: id={}", server_id);
self.simple_increment_ptr() self.simple_increment_ptr()
} },
|server_index| {
debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
server_index
},
)
} }
}; };
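Spelled out, the selection above amounts to: resolve the upstream index from the cookie's server id, and fall back to plain round-robin when the id is missing or unknown. A compact sketch with hypothetical types (assumes num_upstreams > 0):

use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};

struct StickySelect {
    id_to_index: HashMap<String, usize>, // server id -> upstream index
    rr_counter: AtomicUsize,
    num_upstreams: usize,
}

impl StickySelect {
    fn get_ptr(&self, cookie_server_id: Option<&str>) -> usize {
        cookie_server_id
            .and_then(|id| self.id_to_index.get(id).copied())
            // Unknown or absent cookie: simple round-robin fallback.
            .unwrap_or_else(|| self.rr_counter.fetch_add(1, Ordering::Relaxed) % self.num_upstreams)
    }
}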

View file

@ -9,7 +9,7 @@ use super::upstream::Upstream;
use thiserror::Error; use thiserror::Error;
pub use load_balance_main::{ pub use load_balance_main::{
load_balance_options, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options,
}; };
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
pub use load_balance_sticky::LoadBalanceStickyBuilder; pub use load_balance_sticky::LoadBalanceStickyBuilder;

View file

@ -91,12 +91,7 @@ impl<'a> StickyCookieBuilder {
self self
} }
/// Set the meta information of sticky cookie /// Set the meta information of sticky cookie
pub fn info( pub fn info(&mut self, domain: impl Into<Cow<'a, str>>, path: impl Into<Cow<'a, str>>, duration_secs: i64) -> &mut Self {
&mut self,
domain: impl Into<Cow<'a, str>>,
path: impl Into<Cow<'a, str>>,
duration_secs: i64,
) -> &mut Self {
let info = StickyCookieInfoBuilder::default() let info = StickyCookieInfoBuilder::default()
.domain(domain) .domain(domain)
.path(path) .path(path)

View file

@ -1,7 +1,7 @@
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
use super::load_balance::LoadBalanceStickyBuilder; use super::load_balance::LoadBalanceStickyBuilder;
use super::load_balance::{ use super::load_balance::{
load_balance_options as lb_opts, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options as lb_opts,
}; };
// use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption}; // use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption};
use super::upstream_opts::UpstreamOption; use super::upstream_opts::UpstreamOption;
@ -11,10 +11,10 @@ use crate::{
log::*, log::*,
name_exp::{ByteName, PathName}, name_exp::{ByteName, PathName},
}; };
use ahash::{HashMap, HashSet};
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
use base64::{engine::general_purpose, Engine as _}; use base64::{Engine as _, engine::general_purpose};
use derive_builder::Builder; use derive_builder::Builder;
use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
use std::borrow::Cow; use std::borrow::Cow;
@ -72,27 +72,22 @@ impl PathManager {
.inner .inner
.iter() .iter()
.filter(|(route_bytes, _)| { .filter(|(route_bytes, _)| {
match path_name.starts_with(route_bytes) { path_name.starts_with(route_bytes) && {
true => {
route_bytes.len() == 1 // route = '/', i.e., default route_bytes.len() == 1 // route = '/', i.e., default
|| match path_name.get(route_bytes.len()) { || path_name.get(route_bytes.len()).map_or(
None => true, // exact case true, // exact case
Some(p) => p == &b'/', // sub-path case |p| p == &b'/'
} ) // sub-path case
}
_ => false,
} }
}) })
.max_by_key(|(route_bytes, _)| route_bytes.len()); .max_by_key(|(route_bytes, _)| route_bytes.len());
if let Some((path, u)) = matched_upstream { matched_upstream.map(|(path, u)| {
debug!( debug!(
"Found upstream: {:?}", "Found upstream: {:?}",
path.try_into().unwrap_or_else(|_| "<none>".to_string()) path.try_into().unwrap_or_else(|_| "<none>".to_string())
); );
Some(u) u
} else { })
None
}
} }
} }
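The predicate above encodes two rules: a route matches only at a path-segment boundary (or exactly), and among the candidates the longest route wins via `max_by_key`. A self-contained sketch of the boundary rule:

fn route_matches(route: &[u8], path: &[u8]) -> bool {
    path.starts_with(route)
        && (route.len() == 1 // route "/" is the default and matches everything
            || path.get(route.len()).map_or(true, |p| *p == b'/'))
}

fn main() {
    assert!(route_matches(b"/api", b"/api"));     // exact match
    assert!(route_matches(b"/api", b"/api/v1"));  // sub-path match
    assert!(!route_matches(b"/api", b"/apiv1"));  // not a segment boundary
    assert!(route_matches(b"/", b"/apiv1"));      // falls through to default
}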
@ -211,14 +206,15 @@ impl UpstreamCandidatesBuilder {
} }
/// Set the activated upstream options defined in [[UpstreamOption]] /// Set the activated upstream options defined in [[UpstreamOption]]
pub fn options(&mut self, v: &Option<Vec<String>>) -> &mut Self { pub fn options(&mut self, v: &Option<Vec<String>>) -> &mut Self {
let opts = if let Some(opts) = v { let opts = v.as_ref().map_or_else(
|| Default::default(),
|opts| {
opts opts
.iter() .iter()
.filter_map(|str| UpstreamOption::try_from(str.as_str()).ok()) .filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
.collect::<HashSet<UpstreamOption>>() .collect::<HashSet<UpstreamOption>>()
} else { },
Default::default() );
};
self.options = Some(opts); self.options = Some(opts);
self self
} }

View file

@ -32,3 +32,9 @@ pub const MAX_CACHE_EACH_SIZE: usize = 65_535;
pub const MAX_CACHE_EACH_SIZE_ON_MEMORY: usize = 4_096; pub const MAX_CACHE_EACH_SIZE_ON_MEMORY: usize = 4_096;
// TODO: max cache size in total // TODO: max cache size in total
/// Logging event names. TODO: separate out other logs?
pub mod log_event_names {
/// access log
pub const ACCESS_LOG: &str = "rpxy::access";
}
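A sketch of the intended emit site (hypothetical; assumes `log_event_names` is in scope and a tracing version that accepts the `name:` override in `event!`), so that a name-based filter such as the `AccessLogFilter` shown earlier can route these records to access.log:

use tracing::{Level, event};

fn log_access(method: &str, path: &str, status: u16) {
    // The event name, not the message, is what the access-log layer matches on.
    event!(name: log_event_names::ACCESS_LOG, Level::INFO, %method, %path, status);
}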

View file

@ -1,6 +1,6 @@
use std::sync::{ use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc, Arc,
atomic::{AtomicUsize, Ordering},
}; };
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]

View file

@ -37,8 +37,11 @@ pub enum RpxyError {
// http/3 errors // http/3 errors
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[error("H3 error: {0}")] #[error("h3 connection error: {0}")]
H3Error(#[from] h3::Error), H3ConnectionError(#[from] h3::error::ConnectionError),
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[error("h3 connection error: {0}")]
H3StreamError(#[from] h3::error::StreamError),
// #[cfg(feature = "http3-s2n")] // #[cfg(feature = "http3-s2n")]
// #[error("H3 error: {0}")] // #[error("H3 error: {0}")]
// H3Error(#[from] s2n_quic_h3::h3::Error), // H3Error(#[from] s2n_quic_h3::h3::Error),

View file

@ -1,10 +1,10 @@
use super::cache_error::*; use super::cache_error::*;
use crate::{ use crate::{
globals::Globals, globals::Globals,
hyper_ext::body::{full, BoxBody, ResponseBody, UnboundedStreamBody}, hyper_ext::body::{BoxBody, ResponseBody, UnboundedStreamBody, full},
log::*, log::*,
}; };
use base64::{engine::general_purpose, Engine as _}; use base64::{Engine as _, engine::general_purpose};
use bytes::{Buf, Bytes, BytesMut}; use bytes::{Buf, Bytes, BytesMut};
use futures::channel::mpsc; use futures::channel::mpsc;
use http::{Request, Response, Uri}; use http::{Request, Response, Uri};
@ -16,8 +16,8 @@ use sha2::{Digest, Sha256};
use std::{ use std::{
path::{Path, PathBuf}, path::{Path, PathBuf},
sync::{ sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex, Arc, Mutex,
atomic::{AtomicUsize, Ordering},
}, },
time::SystemTime, time::SystemTime,
}; };
@ -52,23 +52,30 @@ impl RpxyCache {
if !globals.proxy_config.cache_enabled { if !globals.proxy_config.cache_enabled {
return None; return None;
} }
let cache_dir = globals.proxy_config.cache_dir.as_ref().unwrap(); let cache_dir = match globals.proxy_config.cache_dir.as_ref() {
Some(dir) => dir,
None => {
warn!("Cache directory not set in proxy config");
return None;
}
};
let file_store = FileStore::new(&globals.runtime_handle).await; let file_store = FileStore::new(&globals.runtime_handle).await;
let inner = LruCacheManager::new(globals.proxy_config.cache_max_entry); let inner = LruCacheManager::new(globals.proxy_config.cache_max_entry);
let max_each_size = globals.proxy_config.cache_max_each_size; let max_each_size = globals.proxy_config.cache_max_each_size;
let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory; let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory;
if max_each_size < max_each_size_on_memory { if max_each_size < max_each_size_on_memory {
warn!( warn!("Maximum size of on-memory cache per entry must be smaller than or equal to the maximum of each file cache");
"Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache"
);
max_each_size_on_memory = max_each_size; max_each_size_on_memory = max_each_size;
} }
if let Err(e) = fs::remove_dir_all(cache_dir).await { if let Err(e) = fs::remove_dir_all(cache_dir).await {
warn!("Failed to clean up the cache dir: {e}"); warn!("Failed to clean up the cache dir: {e}");
}; }
fs::create_dir_all(&cache_dir).await.unwrap(); if let Err(e) = fs::create_dir_all(&cache_dir).await {
error!("Failed to create cache dir: {e}");
return None;
}
Some(Self { Some(Self {
file_store, file_store,
@ -89,12 +96,7 @@ impl RpxyCache {
} }
/// Put response into the cache /// Put response into the cache
pub(crate) async fn put( pub(crate) async fn put(&self, uri: &hyper::Uri, mut body: Incoming, policy: &CachePolicy) -> CacheResult<UnboundedStreamBody> {
&self,
uri: &hyper::Uri,
mut body: Incoming,
policy: &CachePolicy,
) -> CacheResult<UnboundedStreamBody> {
let cache_manager = self.inner.clone(); let cache_manager = self.inner.clone();
let mut file_store = self.file_store.clone(); let mut file_store = self.file_store.clone();
let uri = uri.clone(); let uri = uri.clone();
@ -155,7 +157,7 @@ impl RpxyCache {
let mut hasher = Sha256::new(); let mut hasher = Sha256::new();
hasher.update(buf.as_ref()); hasher.update(buf.as_ref());
let hash_bytes = Bytes::copy_from_slice(hasher.finalize().as_ref()); let hash_bytes = Bytes::copy_from_slice(hasher.finalize().as_ref());
debug!("Cached data: {} bytes, hash = {:?}", size, hash_bytes); trace!("Cached data: {} bytes, hash = {:?}", size, hash_bytes);
// Create cache object // Create cache object
let cache_key = derive_cache_key_from_uri(&uri); let cache_key = derive_cache_key_from_uri(&uri);
@ -188,16 +190,11 @@ impl RpxyCache {
/// Get cached response /// Get cached response
pub(crate) async fn get<R>(&self, req: &Request<R>) -> Option<Response<ResponseBody>> { pub(crate) async fn get<R>(&self, req: &Request<R>) -> Option<Response<ResponseBody>> {
debug!( trace!("Current cache status: (total, on-memory, file) = {:?}", self.count().await);
"Current cache status: (total, on-memory, file) = {:?}",
self.count().await
);
let cache_key = derive_cache_key_from_uri(req.uri()); let cache_key = derive_cache_key_from_uri(req.uri());
// First check cache chance // First check cache chance
let Ok(Some(cached_object)) = self.inner.get(&cache_key) else { let cached_object = self.inner.get(&cache_key).ok()??;
return None;
};
// Secondly check the cache freshness as an HTTP message // Secondly check the cache freshness as an HTTP message
let now = SystemTime::now(); let now = SystemTime::now();
@ -268,25 +265,20 @@ impl FileStore {
let inner = self.inner.read().await; let inner = self.inner.read().await;
inner.cnt inner.cnt
} }
/// Create a temporary file cache /// Create a temporary file cache, returns error if file cannot be created or written
async fn create(&mut self, cache_object: &CacheObject, body_bytes: &Bytes) -> CacheResult<()> { async fn create(&mut self, cache_object: &CacheObject, body_bytes: &Bytes) -> CacheResult<()> {
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
inner.create(cache_object, body_bytes).await inner.create(cache_object, body_bytes).await
} }
/// Evict a temporary file cache /// Evict a temporary file cache, logs warning if removal fails
async fn evict(&self, path: impl AsRef<Path>) { async fn evict(&self, path: impl AsRef<Path>) {
// Acquire the write lock
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
if let Err(e) = inner.remove(path).await { if let Err(e) = inner.remove(path).await {
warn!("Eviction failed during file object removal: {:?}", e); warn!("Eviction failed during file object removal: {:?}", e);
};
} }
/// Read a temporary file cache }
async fn read( /// Read a temporary file cache, returns error if file cannot be opened or hash mismatches
&self, async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
path: impl AsRef<Path> + Send + Sync + 'static,
hash: &Bytes,
) -> CacheResult<UnboundedStreamBody> {
let inner = self.inner.read().await; let inner = self.inner.read().await;
inner.read(path, hash).await inner.read(path, hash).await
} }
@ -321,26 +313,22 @@ impl FileStoreInner {
return Err(CacheError::InvalidCacheTarget); return Err(CacheError::InvalidCacheTarget);
} }
}; };
let Ok(mut file) = File::create(&cache_filepath).await else { let mut file = File::create(&cache_filepath)
return Err(CacheError::FailedToCreateFileCache); .await
}; .map_err(|_| CacheError::FailedToCreateFileCache)?;
let mut bytes_clone = body_bytes.clone(); let mut bytes_clone = body_bytes.clone();
while bytes_clone.has_remaining() { while bytes_clone.has_remaining() {
if let Err(e) = file.write_buf(&mut bytes_clone).await { file.write_buf(&mut bytes_clone).await.map_err(|e| {
error!("Failed to write file cache: {e}"); error!("Failed to write file cache: {e}");
return Err(CacheError::FailedToWriteFileCache); CacheError::FailedToWriteFileCache
}; })?;
} }
self.cnt += 1; self.cnt += 1;
Ok(()) Ok(())
} }
/// Retrieve a stored temporary file cache /// Retrieve a stored temporary file cache
async fn read( async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
&self,
path: impl AsRef<Path> + Send + Sync + 'static,
hash: &Bytes,
) -> CacheResult<UnboundedStreamBody> {
let Ok(mut file) = File::open(&path).await else { let Ok(mut file) = File::open(&path).await else {
warn!("Cache file object cannot be opened"); warn!("Cache file object cannot be opened");
return Err(CacheError::FailedToOpenCacheFile); return Err(CacheError::FailedToOpenCacheFile);
@ -455,11 +443,14 @@ impl LruCacheManager {
self.cnt.load(Ordering::Relaxed) self.cnt.load(Ordering::Relaxed)
} }
/// Evict an entry /// Evict an entry from the LRU cache, logs error if mutex cannot be acquired
fn evict(&self, cache_key: &str) -> Option<(String, CacheObject)> { fn evict(&self, cache_key: &str) -> Option<(String, CacheObject)> {
let Ok(mut lock) = self.inner.lock() else { let mut lock = match self.inner.lock() {
Ok(lock) => lock,
Err(_) => {
error!("Mutex can't be locked to evict a cache entry"); error!("Mutex can't be locked to evict a cache entry");
return None; return None;
}
}; };
let res = lock.pop_entry(cache_key); let res = lock.pop_entry(cache_key);
// This may be inconsistent with the actual number of entries // This may be inconsistent with the actual number of entries
@ -467,24 +458,24 @@ impl LruCacheManager {
res res
} }
/// Push an entry /// Push an entry into the LRU cache, returns error if mutex cannot be acquired
fn push(&self, cache_key: &str, cache_object: &CacheObject) -> CacheResult<Option<(String, CacheObject)>> { fn push(&self, cache_key: &str, cache_object: &CacheObject) -> CacheResult<Option<(String, CacheObject)>> {
let Ok(mut lock) = self.inner.lock() else { let mut lock = self.inner.lock().map_err(|_| {
error!("Failed to acquire mutex lock for writing cache entry"); error!("Failed to acquire mutex lock for writing cache entry");
return Err(CacheError::FailedToAcquiredMutexLockForCache); CacheError::FailedToAcquiredMutexLockForCache
}; })?;
let res = Ok(lock.push(cache_key.to_string(), cache_object.clone())); let res = Ok(lock.push(cache_key.to_string(), cache_object.clone()));
// This may be inconsistent with the actual number of entries // This may be inconsistent with the actual number of entries
self.cnt.store(lock.len(), Ordering::Relaxed); self.cnt.store(lock.len(), Ordering::Relaxed);
res res
} }
/// Get an entry /// Get an entry from the LRU cache, returns error if mutex cannot be acquired
fn get(&self, cache_key: &str) -> CacheResult<Option<CacheObject>> { fn get(&self, cache_key: &str) -> CacheResult<Option<CacheObject>> {
let Ok(mut lock) = self.inner.lock() else { let mut lock = self.inner.lock().map_err(|_| {
error!("Mutex can't be locked for checking cache entry"); error!("Mutex can't be locked for checking cache entry");
return Err(CacheError::FailedToAcquiredMutexLockForCheck); CacheError::FailedToAcquiredMutexLockForCheck
}; })?;
let Some(cached_object) = lock.get(cache_key) else { let Some(cached_object) = lock.get(cache_key) else {
return Ok(None); return Ok(None);
}; };
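The let-else guards around the LRU mutex become map_err chains that turn a poisoned lock into a typed CacheError instead of panicking. The shape of that pattern, sketched with stand-in types:

use std::sync::Mutex;

#[derive(Debug)]
enum CacheError {
    FailedToAcquireLock,
}

struct Manager {
    /// Stand-in for the LRU cache guarded by the mutex.
    inner: Mutex<Vec<(String, u64)>>,
}

impl Manager {
    /// Convert a poisoned mutex into an error value rather than unwrapping.
    fn get(&self, key: &str) -> Result<Option<u64>, CacheError> {
        let lock = self.inner.lock().map_err(|_| CacheError::FailedToAcquireLock)?;
        Ok(lock.iter().find(|(k, _)| k.as_str() == key).map(|(_, v)| *v))
    }
}

fn main() {
    let m = Manager { inner: Mutex::new(vec![("key".to_string(), 1)]) };
    assert_eq!(m.get("key").unwrap(), Some(1));
}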

View file

@ -2,4 +2,4 @@ mod cache_error;
mod cache_main; mod cache_main;
pub use cache_error::CacheError; pub use cache_error::CacheError;
pub(crate) use cache_main::{get_policy_if_cacheable, RpxyCache}; pub(crate) use cache_main::{RpxyCache, get_policy_if_cacheable};

View file

@ -9,13 +9,13 @@ use async_trait::async_trait;
use http::{Request, Response, Version}; use http::{Request, Response, Version};
use hyper::body::{Body, Incoming}; use hyper::body::{Body, Incoming};
use hyper_util::client::legacy::{ use hyper_util::client::legacy::{
connect::{Connect, HttpConnector},
Client, Client,
connect::{Connect, HttpConnector},
}; };
use std::sync::Arc; use std::sync::Arc;
#[cfg(feature = "cache")] #[cfg(feature = "cache")]
use super::cache::{get_policy_if_cacheable, RpxyCache}; use super::cache::{RpxyCache, get_policy_if_cacheable};
#[async_trait] #[async_trait]
/// Definition of the forwarder that simply forwards requests from downstream clients to upstream app servers. /// Definition of the forwarder that simply forwards requests from downstream clients to upstream app servers.
@ -126,9 +126,9 @@ where
warn!( warn!(
" "
-------------------------------------------------------------------------------------------------- --------------------------------------------------------------------------------------------------
Request forwarder is working without TLS support!!! Request forwarder is working without TLS support!
We recommend to use this just for testing. This mode is intended for testing only.
Please enable native-tls-backend or rustls-backend feature to enable TLS support. Enable 'native-tls-backend' or 'rustls-backend' feature for TLS support.
--------------------------------------------------------------------------------------------------" --------------------------------------------------------------------------------------------------"
); );
let executor = LocalExecutor::new(_globals.runtime_handle.clone()); let executor = LocalExecutor::new(_globals.runtime_handle.clone());
@ -159,7 +159,7 @@ where
/// Build forwarder /// Build forwarder
pub async fn try_new(_globals: &Arc<Globals>) -> RpxyResult<Self> { pub async fn try_new(_globals: &Arc<Globals>) -> RpxyResult<Self> {
// build hyper client with hyper-tls // build hyper client with hyper-tls
info!("Native TLS support is enabled for the connection to backend applications"); info!("Native TLS support enabled for backend connections (native-tls)");
let executor = LocalExecutor::new(_globals.runtime_handle.clone()); let executor = LocalExecutor::new(_globals.runtime_handle.clone());
let try_build_connector = |alpns: &[&str]| { let try_build_connector = |alpns: &[&str]| {
@ -209,14 +209,14 @@ where
#[cfg(feature = "webpki-roots")] #[cfg(feature = "webpki-roots")]
let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots(); let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots();
#[cfg(feature = "webpki-roots")] #[cfg(feature = "webpki-roots")]
info!("Mozilla WebPKI root certs with rustls is used for the connection to backend applications"); info!("Rustls backend: Mozilla WebPKI root certs used for backend connections");
#[cfg(not(feature = "webpki-roots"))] #[cfg(not(feature = "webpki-roots"))]
let builder = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier(); let builder = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
#[cfg(not(feature = "webpki-roots"))] #[cfg(not(feature = "webpki-roots"))]
let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier(); let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
#[cfg(not(feature = "webpki-roots"))] #[cfg(not(feature = "webpki-roots"))]
info!("Platform verifier with rustls is used for the connection to backend applications"); info!("Rustls backend: Platform verifier used for backend connections");
let mut http = HttpConnector::new(); let mut http = HttpConnector::new();
http.enforce_http(false); http.enforce_http(false);
@ -226,7 +226,9 @@ where
let connector = builder.https_or_http().enable_all_versions().wrap_connector(http.clone()); let connector = builder.https_or_http().enable_all_versions().wrap_connector(http.clone());
let connector_h2 = builder_h2.https_or_http().enable_http2().wrap_connector(http); let connector_h2 = builder_h2.https_or_http().enable_http2().wrap_connector(http);
let inner = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone())).build::<_, B1>(connector); let inner = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone())).build::<_, B1>(connector);
let inner_h2 = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone())).build::<_, B1>(connector_h2); let inner_h2 = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone()))
.http2_only(true)
.build::<_, B1>(connector_h2);
Ok(Self { Ok(Self {
inner, inner,
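The dedicated h2 client now sets http2_only(true), so it speaks HTTP/2 with prior knowledge and matches its ALPN-restricted enable_http2() connector. A minimal sketch of that pairing, assuming hyper-util 0.1 with the client-legacy feature and hyper-rustls 0.27 with the webpki-roots and http2 features:

use http_body_util::Empty;
use hyper::body::Bytes;
use hyper_util::client::legacy::{Client, connect::HttpConnector};
use hyper_util::rt::TokioExecutor;

fn build_h2_only_client() -> Client<hyper_rustls::HttpsConnector<HttpConnector>, Empty<Bytes>> {
    let mut http = HttpConnector::new();
    http.enforce_http(false); // let https:// URIs pass through the TCP connector

    let connector = hyper_rustls::HttpsConnectorBuilder::new()
        .with_webpki_roots()
        .https_or_http()
        .enable_http2() // ALPN offers h2 only
        .wrap_connector(http);

    Client::builder(TokioExecutor::new())
        .http2_only(true) // prior-knowledge HTTP/2, even for plaintext upstreams
        .build(connector)
}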

View file

@ -2,7 +2,6 @@ use crate::{constants::*, count::RequestCount};
use hot_reload::ReloaderReceiver; use hot_reload::ReloaderReceiver;
use rpxy_certs::ServerCryptoBase; use rpxy_certs::ServerCryptoBase;
use std::{net::SocketAddr, time::Duration}; use std::{net::SocketAddr, time::Duration};
use tokio_util::sync::CancellationToken;
/// Global object containing proxy configurations and shared objects like counters. /// Global object containing proxy configurations and shared objects like counters.
/// But note that in Globals, we do not have Mutex and RwLock; it is simply the context shared among async tasks. /// But note that in Globals, we do not have Mutex and RwLock; it is simply the context shared among async tasks.
@ -13,14 +12,12 @@ pub struct Globals {
pub request_count: RequestCount, pub request_count: RequestCount,
/// Shared context - Async task runtime handler /// Shared context - Async task runtime handler
pub runtime_handle: tokio::runtime::Handle, pub runtime_handle: tokio::runtime::Handle,
/// Shared context - Notify object to stop async tasks
pub cancel_token: Option<CancellationToken>,
/// Shared context - Certificate reloader service receiver // TODO: newer one /// Shared context - Certificate reloader service receiver // TODO: newer one
pub cert_reloader_rx: Option<ReloaderReceiver<ServerCryptoBase>>, pub cert_reloader_rx: Option<ReloaderReceiver<ServerCryptoBase>>,
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
/// ServerConfig used for only ACME challenge for ACME domains /// ServerConfig used for only ACME challenge for ACME domains
pub server_configs_acme_challenge: std::sync::Arc<rustc_hash::FxHashMap<String, std::sync::Arc<rustls::ServerConfig>>>, pub server_configs_acme_challenge: std::sync::Arc<ahash::HashMap<String, std::sync::Arc<rustls::ServerConfig>>>,
} }
/// Configuration parameters for proxy transport and request handlers /// Configuration parameters for proxy transport and request handlers
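rustc_hash::FxHashMap gives way to a std HashMap keyed by ahash::RandomState here and in the proxy module below. The equivalent alias as a small sketch (assuming the ahash crate as a dependency; map contents hypothetical):

use std::collections::HashMap;

/// std HashMap driven by the keyed ahash hasher instead of the default SipHash.
type AHashMap<K, V> = HashMap<K, V, ahash::RandomState>;

fn main() {
    let mut acme_challenge_ports: AHashMap<String, u32> = AHashMap::default();
    acme_challenge_ports.insert("example.com".to_string(), 443);
    assert_eq!(acme_challenge_ports.get("example.com"), Some(&443));
}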

View file

@ -1,7 +1,7 @@
use super::watch; use super::watch;
use crate::error::*; use crate::error::*;
use futures_channel::{mpsc, oneshot}; use futures_channel::{mpsc, oneshot};
use futures_util::{stream::FusedStream, Future, Stream}; use futures_util::{Future, Stream, stream::FusedStream};
use http::HeaderMap; use http::HeaderMap;
use hyper::body::{Body, Bytes, Frame, SizeHint}; use hyper::body::{Body, Bytes, Frame, SizeHint};
use std::{ use std::{

View file

@ -1,7 +1,7 @@
use super::body::IncomingLike; use super::body::IncomingLike;
use crate::error::RpxyError; use crate::error::RpxyError;
use futures::channel::mpsc::UnboundedReceiver; use futures::channel::mpsc::UnboundedReceiver;
use http_body_util::{combinators, BodyExt, Empty, Full, StreamBody}; use http_body_util::{BodyExt, Empty, Full, StreamBody, combinators};
use hyper::body::{Body, Bytes, Frame, Incoming}; use hyper::body::{Body, Bytes, Frame, Incoming};
use std::pin::Pin; use std::pin::Pin;

View file

@ -12,5 +12,5 @@ pub(crate) mod rt {
#[allow(unused)] #[allow(unused)]
pub(crate) mod body { pub(crate) mod body {
pub(crate) use super::body_incoming_like::IncomingLike; pub(crate) use super::body_incoming_like::IncomingLike;
pub(crate) use super::body_type::{empty, full, BoxBody, RequestBody, ResponseBody, UnboundedStreamBody}; pub(crate) use super::body_type::{BoxBody, RequestBody, ResponseBody, UnboundedStreamBody, empty, full};
} }

View file

@ -7,8 +7,8 @@
use futures_util::task::AtomicWaker; use futures_util::task::AtomicWaker;
use std::sync::{ use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc, Arc,
atomic::{AtomicUsize, Ordering},
}; };
use std::task; use std::task;

View file

@ -27,6 +27,7 @@ use std::sync::Arc;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
/* ------------------------------------------------ */ /* ------------------------------------------------ */
pub use crate::constants::log_event_names;
pub use crate::globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri}; pub use crate::globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
pub mod reexports { pub mod reexports {
pub use hyper::Uri; pub use hyper::Uri;
@ -43,12 +44,10 @@ pub struct RpxyOptions {
pub cert_rx: Option<ReloaderReceiver<ServerCryptoBase>>, // TODO: pub cert_rx: Option<ReloaderReceiver<ServerCryptoBase>>, // TODO:
/// Async task runtime handler /// Async task runtime handler
pub runtime_handle: tokio::runtime::Handle, pub runtime_handle: tokio::runtime::Handle,
/// Notify object to stop async tasks
pub cancel_token: Option<CancellationToken>,
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
/// ServerConfig used for only ACME challenge for ACME domains /// ServerConfig used for only ACME challenge for ACME domains
pub server_configs_acme_challenge: Arc<rustc_hash::FxHashMap<String, Arc<rustls::ServerConfig>>>, pub server_configs_acme_challenge: Arc<ahash::HashMap<String, Arc<rustls::ServerConfig>>>,
} }
/// Entrypoint that creates and spawns tasks of reverse proxy services /// Entrypoint that creates and spawns tasks of reverse proxy services
@ -58,10 +57,10 @@ pub async fn entrypoint(
app_config_list, app_config_list,
cert_rx, // TODO: cert_rx, // TODO:
runtime_handle, runtime_handle,
cancel_token,
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
server_configs_acme_challenge, server_configs_acme_challenge,
}: &RpxyOptions, }: &RpxyOptions,
cancel_token: CancellationToken,
) -> RpxyResult<()> { ) -> RpxyResult<()> {
#[cfg(all(feature = "http3-quinn", feature = "http3-s2n"))] #[cfg(all(feature = "http3-quinn", feature = "http3-s2n"))]
warn!("Both \"http3-quinn\" and \"http3-s2n\" features are enabled. \"http3-quinn\" will be used"); warn!("Both \"http3-quinn\" and \"http3-s2n\" features are enabled. \"http3-quinn\" will be used");
@ -117,7 +116,6 @@ pub async fn entrypoint(
proxy_config: proxy_config.clone(), proxy_config: proxy_config.clone(),
request_count: Default::default(), request_count: Default::default(),
runtime_handle: runtime_handle.clone(), runtime_handle: runtime_handle.clone(),
cancel_token: cancel_token.clone(),
cert_reloader_rx: cert_rx.clone(), cert_reloader_rx: cert_rx.clone(),
#[cfg(feature = "acme")] #[cfg(feature = "acme")]
@ -153,26 +151,22 @@ pub async fn entrypoint(
message_handler: message_handler.clone(), message_handler: message_handler.clone(),
}; };
let cancel_token = globals.cancel_token.as_ref().map(|t| t.child_token()); let cancel_token = cancel_token.clone();
let parent_cancel_token_clone = globals.cancel_token.clone();
globals.runtime_handle.spawn(async move { globals.runtime_handle.spawn(async move {
info!("rpxy proxy service for {listening_on} started"); info!("rpxy proxy service for {listening_on} started");
if let Some(cancel_token) = cancel_token {
tokio::select! { tokio::select! {
_ = cancel_token.cancelled() => { _ = cancel_token.cancelled() => {
debug!("rpxy proxy service for {listening_on} terminated"); debug!("rpxy proxy service for {listening_on} terminated");
Ok(()) Ok(())
}, },
proxy_res = proxy.start() => { proxy_res = proxy.start(cancel_token.child_token()) => {
info!("rpxy proxy service for {listening_on} exited"); info!("rpxy proxy service for {listening_on} exited");
// cancel other proxy tasks // cancel other proxy tasks
parent_cancel_token_clone.unwrap().cancel(); cancel_token.cancel();
proxy_res proxy_res
} }
} }
} else {
proxy.start().await
}
}) })
}); });
@ -186,9 +180,5 @@ pub async fn entrypoint(
} }
}); });
// returns the first error as the representative error // returns the first error as the representative error
if let Some(e) = errs.next() { errs.next().map_or(Ok(()), |e| Err(e))
return Err(e);
}
Ok(())
} }
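entrypoint now receives the CancellationToken as an explicit argument rather than an Option stored in Globals, so each proxy task selects on cancellation and the first task to exit tears down its siblings. The pattern in isolation, as a sketch (run_service stands in for proxy.start):

use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn run_service(id: usize, _child: CancellationToken) {
    // Stand-in for proxy.start(cancel_token.child_token()).
    tokio::time::sleep(Duration::from_millis(10 * (id as u64 + 1))).await;
}

#[tokio::main]
async fn main() {
    let cancel_token = CancellationToken::new();
    let mut handles = Vec::new();
    for i in 0..3 {
        let cancel_token = cancel_token.clone();
        handles.push(tokio::spawn(async move {
            tokio::select! {
                _ = cancel_token.cancelled() => println!("task {i} terminated by a sibling"),
                _ = run_service(i, cancel_token.child_token()) => {
                    // The first service to exit cancels every clone and child of the token.
                    cancel_token.cancel();
                }
            }
        }));
    }
    for h in handles {
        let _ = h.await;
    }
}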

View file

@ -1 +1 @@
pub use tracing::{debug, error, info, warn}; pub use tracing::{debug, error, info, trace, warn};

View file

@ -44,10 +44,7 @@ mod tests {
} }
#[test] #[test]
fn ipv6_to_canonical() { fn ipv6_to_canonical() {
let socket = SocketAddr::new( let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)), 8080);
IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)),
8080,
);
assert_eq!(socket.to_canonical(), socket); assert_eq!(socket.to_canonical(), socket);
} }
#[test] #[test]

View file

@ -71,7 +71,7 @@ where
Ok(v) Ok(v)
} }
Err(e) => { Err(e) => {
error!("{e}"); error!("{e}: {log_data}");
let code = StatusCode::from(e); let code = StatusCode::from(e);
log_data.status_code(&code).output(); log_data.status_code(&code).output();
synthetic_error_response(code) synthetic_error_response(code)
@ -107,9 +107,11 @@ where
let backend_app = match self.app_manager.apps.get(&server_name) { let backend_app = match self.app_manager.apps.get(&server_name) {
Some(backend_app) => backend_app, Some(backend_app) => backend_app,
None => { None => {
let Some(default_server_name) = &self.app_manager.default_server_name else { let default_server_name = self
return Err(HttpError::NoMatchingBackendApp); .app_manager
}; .default_server_name
.as_ref()
.ok_or(HttpError::NoMatchingBackendApp)?;
debug!("Serving by default app"); debug!("Serving by default app");
self.app_manager.apps.get(default_server_name).unwrap() self.app_manager.apps.get(default_server_name).unwrap()
} }
@ -131,9 +133,7 @@ where
// Find reverse proxy for given path and choose one of upstream host // Find reverse proxy for given path and choose one of upstream host
// Longest prefix match // Longest prefix match
let path = req.uri().path(); let path = req.uri().path();
let Some(upstream_candidates) = backend_app.path_manager.get(path) else { let upstream_candidates = backend_app.path_manager.get(path).ok_or(HttpError::NoUpstreamCandidates)?;
return Err(HttpError::NoUpstreamCandidates);
};
// Upgrade in request header // Upgrade in request header
let upgrade_in_request = extract_upgrade(req.headers()); let upgrade_in_request = extract_upgrade(req.headers());
@ -147,19 +147,17 @@ where
let req_on_upgrade = hyper::upgrade::on(&mut req); let req_on_upgrade = hyper::upgrade::on(&mut req);
// Build request from destination information // Build request from destination information
let _context = match self.generate_request_forwarded( let _context = self
.generate_request_forwarded(
&client_addr, &client_addr,
&listen_addr, &listen_addr,
&mut req, &mut req,
&upgrade_in_request, &upgrade_in_request,
upstream_candidates, upstream_candidates,
tls_enabled, tls_enabled,
) { )
Err(e) => { .map_err(|e| HttpError::FailedToGenerateUpstreamRequest(e.to_string()))?;
return Err(HttpError::FailedToGenerateUpstreamRequest(e.to_string()));
}
Ok(v) => v,
};
debug!( debug!(
"Request to be forwarded: [uri {}, method: {}, version {:?}, headers {:?}]", "Request to be forwarded: [uri {}, method: {}, version {:?}, headers {:?}]",
req.uri(), req.uri(),
@ -173,12 +171,12 @@ where
////////////// //////////////
// Forward request to a chosen backend // Forward request to a chosen backend
let mut res_backend = match self.forwarder.request(req).await { let mut res_backend = self
Ok(v) => v, .forwarder
Err(e) => { .request(req)
return Err(HttpError::FailedToGetResponseFromBackend(e.to_string())); .await
} .map_err(|e| HttpError::FailedToGetResponseFromBackend(e.to_string()))?;
};
////////////// //////////////
// Process reverse proxy context generated during the forwarding request generation. // Process reverse proxy context generated during the forwarding request generation.
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
@ -191,16 +189,16 @@ where
if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS { if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS {
// Generate response to client // Generate response to client
if let Err(e) = self.generate_response_forwarded(&mut res_backend, backend_app) { self
return Err(HttpError::FailedToGenerateDownstreamResponse(e.to_string())); .generate_response_forwarded(&mut res_backend, backend_app)
} .map_err(|e| HttpError::FailedToGenerateDownstreamResponse(e.to_string()))?;
return Ok(res_backend); return Ok(res_backend);
} }
// Handle StatusCode::SWITCHING_PROTOCOLS in response // Handle StatusCode::SWITCHING_PROTOCOLS in response
let upgrade_in_response = extract_upgrade(res_backend.headers()); let upgrade_in_response = extract_upgrade(res_backend.headers());
let should_upgrade = match (upgrade_in_request.as_ref(), upgrade_in_response.as_ref()) { let should_upgrade = match (upgrade_in_request.as_ref(), upgrade_in_response.as_ref()) {
(Some(u_req), Some(u_res)) => u_req.to_ascii_lowercase() == u_res.to_ascii_lowercase(), (Some(u_req), Some(u_res)) => u_req.eq_ignore_ascii_case(u_res),
_ => false, _ => false,
}; };
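eq_ignore_ascii_case drops the pair of to_ascii_lowercase() allocations when comparing the Upgrade tokens of request and response. The semantics in a tiny sketch:

fn should_upgrade(upgrade_in_request: Option<&str>, upgrade_in_response: Option<&str>) -> bool {
    match (upgrade_in_request, upgrade_in_response) {
        // Case-insensitive comparison without allocating lowercase copies.
        (Some(u_req), Some(u_res)) => u_req.eq_ignore_ascii_case(u_res),
        _ => false,
    }
}

fn main() {
    assert!(should_upgrade(Some("WebSocket"), Some("websocket")));
    assert!(!should_upgrade(Some("websocket"), None));
}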

View file

@ -1,11 +1,11 @@
use super::{handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line, HttpMessageHandler}; use super::{HttpMessageHandler, handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line};
use crate::{ use crate::{
backend::{BackendApp, UpstreamCandidates}, backend::{BackendApp, UpstreamCandidates},
constants::RESPONSE_HEADER_SERVER, constants::RESPONSE_HEADER_SERVER,
log::*, log::*,
}; };
use anyhow::{anyhow, ensure, Result}; use anyhow::{Result, anyhow, ensure};
use http::{header, HeaderValue, Request, Response, Uri}; use http::{HeaderValue, Request, Response, Uri, header};
use hyper_util::client::legacy::connect::Connect; use hyper_util::client::legacy::connect::Connect;
use std::net::SocketAddr; use std::net::SocketAddr;
@ -66,17 +66,19 @@ where
upstream_candidates: &UpstreamCandidates, upstream_candidates: &UpstreamCandidates,
tls_enabled: bool, tls_enabled: bool,
) -> Result<HandlerContext> { ) -> Result<HandlerContext> {
debug!("Generate request to be forwarded"); trace!("Generate request to be forwarded");
// Add te: trailer if contained in original request // Add te: trailer if contained in original request
let contains_te_trailers = { let contains_te_trailers = {
if let Some(te) = req.headers().get(header::TE) { req
.headers()
.get(header::TE)
.map(|te| {
te.as_bytes() te.as_bytes()
.split(|v| v == &b',' || v == &b' ') .split(|v| v == &b',' || v == &b' ')
.any(|x| x == "trailers".as_bytes()) .any(|x| x == "trailers".as_bytes())
} else { })
false .unwrap_or(false)
}
}; };
let original_uri = req.uri().to_string(); let original_uri = req.uri().to_string();
@ -136,11 +138,7 @@ where
let new_uri = Uri::builder() let new_uri = Uri::builder()
.scheme(upstream_chosen.uri.scheme().unwrap().as_str()) .scheme(upstream_chosen.uri.scheme().unwrap().as_str())
.authority(upstream_chosen.uri.authority().unwrap().as_str()); .authority(upstream_chosen.uri.authority().unwrap().as_str());
let org_pq = match req.uri().path_and_query() { let org_pq = req.uri().path_and_query().map(|pq| pq.as_str()).unwrap_or("/").as_bytes();
Some(pq) => pq.to_string(),
None => "/".to_string(),
}
.into_bytes();
// replace some parts of path if opt_replace_path is enabled for chosen upstream // replace some parts of path if opt_replace_path is enabled for chosen upstream
let new_pq = match &upstream_candidates.replace_path { let new_pq = match &upstream_candidates.replace_path {
@ -155,7 +153,7 @@ where
new_pq.extend_from_slice(&org_pq[matched_path.len()..]); new_pq.extend_from_slice(&org_pq[matched_path.len()..]);
new_pq new_pq
} }
None => org_pq, None => org_pq.to_vec(),
}; };
*req.uri_mut() = new_uri.path_and_query(new_pq).build()?; *req.uri_mut() = new_uri.path_and_query(new_pq).build()?;
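With org_pq borrowed as bytes, the replace_path branch still splices the configured replacement onto the remainder of the original path-and-query. A standalone sketch of that rewrite (hypothetical helper name; assumes the prefix was already matched):

/// Rewrite "/matched/rest?q=1" into "<replacement>/rest?q=1".
fn replace_prefix(org_pq: &[u8], matched_path: &[u8], replacement: &[u8]) -> Vec<u8> {
    let mut new_pq = Vec::with_capacity(replacement.len() + org_pq.len() - matched_path.len());
    new_pq.extend_from_slice(replacement);
    new_pq.extend_from_slice(&org_pq[matched_path.len()..]);
    new_pq
}

fn main() {
    let rewritten = replace_prefix(b"/app/v1/users?q=1", b"/app", b"/internal");
    assert_eq!(rewritten, b"/internal/v1/users?q=1");
}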

View file

@ -34,11 +34,7 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
client_addr: "".to_string(), client_addr: "".to_string(),
method: req.method().to_string(), method: req.method().to_string(),
host: header_mapper(header::HOST), host: header_mapper(header::HOST),
p_and_q: req p_and_q: req.uri().path_and_query().map_or_else(|| "", |v| v.as_str()).to_string(),
.uri()
.path_and_query()
.map_or_else(|| "", |v| v.as_str())
.to_string(),
version: req.version(), version: req.version(),
uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(), uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(),
uri_host: req.uri().host().unwrap_or("").to_string(), uri_host: req.uri().host().unwrap_or("").to_string(),
@ -50,6 +46,33 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
} }
} }
impl std::fmt::Display for HttpMessageLog {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
if !self.host.is_empty() {
self.host.as_str()
} else {
self.uri_host.as_str()
},
self.client_addr,
self.method,
self.p_and_q,
self.version,
self.status,
if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
format!("{}://{}", self.uri_scheme, self.uri_host)
} else {
"".to_string()
},
self.ua,
self.xff,
self.upstream
)
}
}
impl HttpMessageLog { impl HttpMessageLog {
pub fn client_addr(&mut self, client_addr: &SocketAddr) -> &mut Self { pub fn client_addr(&mut self, client_addr: &SocketAddr) -> &mut Self {
self.client_addr = client_addr.to_canonical().to_string(); self.client_addr = client_addr.to_canonical().to_string();
@ -74,26 +97,8 @@ impl HttpMessageLog {
pub fn output(&self) { pub fn output(&self) {
info!( info!(
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"", name: crate::constants::log_event_names::ACCESS_LOG,
if !self.host.is_empty() { "{}", self
self.host.as_str()
} else {
self.uri_host.as_str()
},
self.client_addr,
self.method,
self.p_and_q,
self.version,
self.status,
if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
format!("{}://{}", self.uri_scheme, self.uri_host)
} else {
"".to_string()
},
self.ua,
self.xff,
self.upstream,
// self.tls_server_name
); );
} }
} }
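Access logs are now emitted as named tracing events rendered through the new Display impl, which lets a subscriber route them separately from ordinary logs. A sketch of consuming that name with tracing-subscriber's filter_fn, assuming a tracing version whose event macros accept the name: override used above:

use tracing_subscriber::{filter, prelude::*};

fn main() {
    // Only events named "rpxy::access" (log_event_names::ACCESS_LOG) reach this layer.
    let access_layer = tracing_subscriber::fmt::layer()
        .with_filter(filter::filter_fn(|meta| meta.name() == "rpxy::access"));
    tracing_subscriber::registry().with(access_layer).init();

    tracing::info!(name: "rpxy::access", "example.com <- 192.0.2.1 -- GET / HTTP/1.1 -- 200");
}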

View file

@ -53,6 +53,7 @@ impl From<HttpError> for StatusCode {
HttpError::FailedToAddSetCookeInResponse(_) => StatusCode::INTERNAL_SERVER_ERROR, HttpError::FailedToAddSetCookeInResponse(_) => StatusCode::INTERNAL_SERVER_ERROR,
HttpError::FailedToGenerateDownstreamResponse(_) => StatusCode::INTERNAL_SERVER_ERROR, HttpError::FailedToGenerateDownstreamResponse(_) => StatusCode::INTERNAL_SERVER_ERROR,
HttpError::FailedToUpgrade(_) => StatusCode::INTERNAL_SERVER_ERROR, HttpError::FailedToUpgrade(_) => StatusCode::INTERNAL_SERVER_ERROR,
HttpError::FailedToGetResponseFromBackend(_) => StatusCode::BAD_GATEWAY,
// HttpError::NoUpgradeExtensionInRequest => StatusCode::BAD_REQUEST, // HttpError::NoUpgradeExtensionInRequest => StatusCode::BAD_REQUEST,
// HttpError::NoUpgradeExtensionInResponse => StatusCode::BAD_GATEWAY, // HttpError::NoUpgradeExtensionInResponse => StatusCode::BAD_GATEWAY,
_ => StatusCode::INTERNAL_SERVER_ERROR, _ => StatusCode::INTERNAL_SERVER_ERROR,

View file

@ -1,7 +1,7 @@
use super::http_result::{HttpError, HttpResult}; use super::http_result::{HttpError, HttpResult};
use crate::{ use crate::{
error::*, error::*,
hyper_ext::body::{empty, ResponseBody}, hyper_ext::body::{ResponseBody, empty},
name_exp::ServerName, name_exp::ServerName,
}; };
use http::{Request, Response, StatusCode, Uri}; use http::{Request, Response, StatusCode, Uri};

View file

@ -3,9 +3,9 @@ use crate::{
backend::{UpstreamCandidates, UpstreamOption}, backend::{UpstreamCandidates, UpstreamOption},
log::*, log::*,
}; };
use anyhow::{anyhow, Result}; use anyhow::{Result, anyhow, ensure};
use bytes::BufMut; use bytes::BufMut;
use http::{header, HeaderMap, HeaderName, HeaderValue, Uri}; use http::{HeaderMap, HeaderName, HeaderValue, Uri, header};
use std::{borrow::Cow, net::SocketAddr}; use std::{borrow::Cow, net::SocketAddr};
#[cfg(feature = "sticky-cookie")] #[cfg(feature = "sticky-cookie")]
@ -238,10 +238,9 @@ pub(super) fn add_forwarding_header(
pub(super) fn remove_connection_header(headers: &mut HeaderMap) { pub(super) fn remove_connection_header(headers: &mut HeaderMap) {
if let Some(values) = headers.get(header::CONNECTION) { if let Some(values) = headers.get(header::CONNECTION) {
if let Ok(v) = values.clone().to_str() { if let Ok(v) = values.clone().to_str() {
for m in v.split(',') { let keys = v.split(',').map(|m| m.trim()).filter(|m| !m.is_empty());
if !m.is_empty() { for m in keys {
headers.remove(m.trim()); headers.remove(m);
}
} }
} }
} }
@ -274,15 +273,13 @@ pub(super) fn extract_upgrade(headers: &HeaderMap) -> Option<String> {
.to_str() .to_str()
.unwrap_or("") .unwrap_or("")
.split(',') .split(',')
.any(|w| w.trim().to_ascii_lowercase() == header::UPGRADE.as_str().to_ascii_lowercase()) .any(|w| w.trim().eq_ignore_ascii_case(header::UPGRADE.as_str()))
{ {
if let Some(u) = headers.get(header::UPGRADE) { if let Some(Ok(m)) = headers.get(header::UPGRADE).map(|u| u.to_str()) {
if let Ok(m) = u.to_str() {
debug!("Upgrade in request header: {}", m); debug!("Upgrade in request header: {}", m);
return Some(m.to_owned()); return Some(m.to_owned());
} }
} }
} }
}
None None
} }
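The Connection header cleanup now trims and filters the token list in a single iterator chain before removing each named hop-by-hop header. The core of it standalone (the Connection header itself is left in place, as in the function above):

use http::{HeaderMap, HeaderValue, header};

/// Remove the headers named in the Connection header value (RFC 9110 §7.6.1).
fn remove_connection_headers(headers: &mut HeaderMap) {
    // Copy the value out so we can mutate `headers` while iterating the names.
    let Some(Ok(v)) = headers.get(header::CONNECTION).map(|hv| hv.to_str().map(|s| s.to_string())) else {
        return;
    };
    for name in v.split(',').map(str::trim).filter(|m| !m.is_empty()) {
        headers.remove(name);
    }
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert(header::CONNECTION, HeaderValue::from_static("keep-alive, x-hop"));
    headers.insert("x-hop", HeaderValue::from_static("1"));
    remove_connection_headers(&mut headers);
    assert!(!headers.contains_key("x-hop"));
}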

View file

@ -2,8 +2,8 @@ use crate::{
backend::{Upstream, UpstreamCandidates, UpstreamOption}, backend::{Upstream, UpstreamCandidates, UpstreamOption},
log::*, log::*,
}; };
use anyhow::{anyhow, ensure, Result}; use anyhow::{Result, anyhow, ensure};
use http::{header, uri::Scheme, Request, Version}; use http::{Request, Version, header, uri::Scheme};
/// Trait defining a hostname parser /// Trait defining a hostname parser
/// Inspects and extracts the hostname from either the request HOST header or the request line /// Inspects and extracts the hostname from either the request HOST header or the request line
@ -59,6 +59,18 @@ pub(super) fn update_request_line<B>(
upstream_chosen: &Upstream, upstream_chosen: &Upstream,
upstream_candidates: &UpstreamCandidates, upstream_candidates: &UpstreamCandidates,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// If request is grpc, HTTP/2 is required
if req
.headers()
.get(header::CONTENT_TYPE)
.map(|v| v.as_bytes().starts_with(b"application/grpc"))
== Some(true)
{
debug!("Must be http/2 for gRPC request.");
*req.version_mut() = Version::HTTP_2;
return Ok(());
}
// If not specified (force_httpXX_upstream) and https, version is preserved except for http/3 // If not specified (force_httpXX_upstream) and https, version is preserved except for http/3
if upstream_chosen.uri.scheme() == Some(&Scheme::HTTP) { if upstream_chosen.uri.scheme() == Some(&Scheme::HTTP) {
// Change version to http/1.1 when destination scheme is http // Change version to http/1.1 when destination scheme is http
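The new gRPC guard pins the upstream request to HTTP/2 whenever the content type starts with application/grpc, which also covers suffixed types such as application/grpc+proto. In isolation:

use http::{Request, Version, header};

/// Force HTTP/2 for gRPC requests; the gRPC wire protocol requires h2.
fn force_h2_for_grpc<B>(req: &mut Request<B>) -> bool {
    let is_grpc = req
        .headers()
        .get(header::CONTENT_TYPE)
        .map(|v| v.as_bytes().starts_with(b"application/grpc"))
        == Some(true);
    if is_grpc {
        *req.version_mut() = Version::HTTP_2;
    }
    is_grpc
}

fn main() {
    let mut req = Request::builder()
        .uri("http://upstream.local/helloworld.Greeter/SayHello")
        .header(header::CONTENT_TYPE, "application/grpc+proto")
        .body(())
        .unwrap();
    assert!(force_h2_for_grpc(&mut req));
    assert_eq!(req.version(), Version::HTTP_2);
}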

View file

@ -14,12 +14,11 @@ use crate::{
name_exp::ServerName, name_exp::ServerName,
}; };
use hyper_util::server::{self, conn::auto::Builder as ConnectionBuilder}; use hyper_util::server::{self, conn::auto::Builder as ConnectionBuilder};
use rustc_hash::FxHashMap as HashMap;
use rustls::ServerConfig; use rustls::ServerConfig;
use std::sync::Arc; use std::sync::Arc;
/// SNI to ServerConfig map type /// SNI to ServerConfig map type
pub type SniServerCryptoMap = HashMap<ServerName, Arc<ServerConfig>>; pub type SniServerCryptoMap = std::collections::HashMap<ServerName, Arc<ServerConfig>, ahash::RandomState>;
pub(crate) use proxy_main::Proxy; pub(crate) use proxy_main::Proxy;

View file

@ -33,7 +33,7 @@ where
<<C as OpenStreams<Bytes>>::BidiStream as BidiStream<Bytes>>::SendStream: Send, <<C as OpenStreams<Bytes>>::BidiStream as BidiStream<Bytes>>::SendStream: Send,
{ {
let mut h3_conn = h3::server::Connection::<_, Bytes>::new(quic_connection).await?; let mut h3_conn = h3::server::Connection::<_, Bytes>::new(quic_connection).await?;
info!( debug!(
"QUIC/HTTP3 connection established from {:?} {}", "QUIC/HTTP3 connection established from {:?} {}",
client_addr, client_addr,
<&ServerName as TryInto<String>>::try_into(&tls_server_name).unwrap_or_default() <&ServerName as TryInto<String>>::try_into(&tls_server_name).unwrap_or_default()
@ -49,12 +49,17 @@ where
} }
Err(e) => { Err(e) => {
warn!("HTTP/3 error on accept incoming connection: {}", e); warn!("HTTP/3 error on accept incoming connection: {}", e);
match e.get_error_level() { break;
h3::error::ErrorLevel::ConnectionError => break,
h3::error::ErrorLevel::StreamError => continue,
} }
// Ok(Some((req, stream))) => {
Ok(Some(req_resolver)) => {
let (req, stream) = match req_resolver.resolve_request().await {
Ok((req, stream)) => (req, stream),
Err(e) => {
warn!("HTTP/3 error on resolve request in stream: {}", e);
continue;
} }
Ok(Some((req, stream))) => { };
// We consider the connection count separately from the stream count. // We consider the connection count separately from the stream count.
// Max clients for h1/h2 = max 'stream' for h3. // Max clients for h1/h2 = max 'stream' for h3.
let request_count = self.globals.request_count.clone(); let request_count = self.globals.request_count.clone();
@ -63,7 +68,7 @@ where
h3_conn.shutdown(0).await?; h3_conn.shutdown(0).await?;
break; break;
} }
debug!("Request incoming: current # {}", request_count.current()); trace!("Request incoming: current # {}", request_count.current());
let self_inner = self.clone(); let self_inner = self.clone();
let tls_server_name_inner = tls_server_name.clone(); let tls_server_name_inner = tls_server_name.clone();
@ -77,7 +82,7 @@ where
warn!("HTTP/3 error on serve stream: {}", e); warn!("HTTP/3 error on serve stream: {}", e);
} }
request_count.decrement(); request_count.decrement();
debug!("Request processed: current # {}", request_count.current()); trace!("Request processed: current # {}", request_count.current());
}); });
} }
} }
@ -115,7 +120,7 @@ where
let mut sender = body_sender; let mut sender = body_sender;
let mut size = 0usize; let mut size = 0usize;
while let Some(mut body) = recv_stream.recv_data().await? { while let Some(mut body) = recv_stream.recv_data().await? {
debug!("HTTP/3 incoming request body: remaining {}", body.remaining()); trace!("HTTP/3 incoming request body: remaining {}", body.remaining());
size += body.remaining(); size += body.remaining();
if size > max_body_size { if size > max_body_size {
error!( error!(
@ -129,9 +134,9 @@ where
} }
// trailers: use inner as a workaround (directly get trailers) // trailers: use inner as a workaround (directly get trailers)
let trailers = recv_stream.as_mut().recv_trailers().await?; let trailers = futures_util::future::poll_fn(|cx| recv_stream.as_mut().poll_recv_trailers(cx)).await?;
if trailers.is_some() { if trailers.is_some() {
debug!("HTTP/3 incoming request trailers"); trace!("HTTP/3 incoming request trailers");
sender.send_trailers(trailers.unwrap()).await?; sender.send_trailers(trailers.unwrap()).await?;
} }
Ok(()) as RpxyResult<()> Ok(()) as RpxyResult<()>
@ -154,13 +159,13 @@ where
match send_stream.send_response(new_res).await { match send_stream.send_response(new_res).await {
Ok(_) => { Ok(_) => {
debug!("HTTP/3 response to connection successful"); trace!("HTTP/3 response to connection successful");
// on-demand body streaming to downstream without expanding the object onto memory. // on-demand body streaming to downstream without expanding the object onto memory.
loop { loop {
let frame = match new_body.frame().await { let frame = match new_body.frame().await {
Some(frame) => frame, Some(frame) => frame,
None => { None => {
debug!("Response body finished"); trace!("Response body finished");
break; break;
} }
} }

View file

@ -11,7 +11,7 @@ use crate::{
message_handler::HttpMessageHandler, message_handler::HttpMessageHandler,
name_exp::ServerName, name_exp::ServerName,
}; };
use futures::{select, FutureExt}; use futures::{FutureExt, select};
use http::{Request, Response}; use http::{Request, Response};
use hyper::{ use hyper::{
body::Incoming, body::Incoming,
@ -22,6 +22,7 @@ use hyper_util::{client::legacy::connect::Connect, rt::TokioIo, server::conn::au
use rpxy_certs::ServerCrypto; use rpxy_certs::ServerCrypto;
use std::{net::SocketAddr, sync::Arc, time::Duration}; use std::{net::SocketAddr, sync::Arc, time::Duration};
use tokio::time::timeout; use tokio::time::timeout;
use tokio_util::sync::CancellationToken;
/// Wrapper function to handle request for HTTP/1.1 and HTTP/2 /// Wrapper function to handle request for HTTP/1.1 and HTTP/2
/// HTTP/3 is handled in proxy_h3.rs which directly calls the message handler /// HTTP/3 is handled in proxy_h3.rs which directly calls the message handler
@ -79,7 +80,7 @@ where
request_count.decrement(); request_count.decrement();
return; return;
} }
debug!("Request incoming: current # {}", request_count.current()); trace!("Request incoming: current # {}", request_count.current());
let server_clone = self.connection_builder.clone(); let server_clone = self.connection_builder.clone();
let message_handler_clone = self.message_handler.clone(); let message_handler_clone = self.message_handler.clone();
@ -109,7 +110,7 @@ where
} }
request_count.decrement(); request_count.decrement();
debug!("Request processed: current # {}", request_count.current()); trace!("Request processed: current # {}", request_count.current());
}); });
} }
@ -129,31 +130,57 @@ where
} }
/// Start with TLS (HTTPS) /// Start with TLS (HTTPS)
pub(super) async fn start_with_tls(&self) -> RpxyResult<()> { pub(super) async fn start_with_tls(&self, cancel_token: CancellationToken) -> RpxyResult<()> {
// By default, TLS listener is spawned
let join_handle_tls = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
let cancel_token = cancel_token.clone();
async move {
select! {
_ = self_clone.tls_listener_service().fuse() => {
error!("TCP proxy service for TLS exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for TLS listener");
}
}
}
});
#[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))] #[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))]
{ {
self.tls_listener_service().await?; let _ = join_handle_tls.await;
error!("TCP proxy service for TLS exited");
Ok(()) Ok(())
} }
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
{ {
if self.globals.proxy_config.http3 { // If HTTP/3 is not enabled, wait for TLS listener to finish
if !self.globals.proxy_config.http3 {
let _ = join_handle_tls.await;
return Ok(());
}
// If HTTP/3 is enabled, spawn a task to handle HTTP/3 connections
let join_handle_h3 = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
async move {
select! { select! {
_ = self.tls_listener_service().fuse() => { _ = self_clone.h3_listener_service().fuse() => {
error!("TCP proxy service for TLS exited");
},
_ = self.h3_listener_service().fuse() => {
error!("UDP proxy service for QUIC exited"); error!("UDP proxy service for QUIC exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for QUIC listener");
} }
};
Ok(())
} else {
self.tls_listener_service().await?;
error!("TCP proxy service for TLS exited");
Ok(())
} }
} }
});
let _ = futures::future::join(join_handle_tls, join_handle_h3).await;
Ok(())
}
} }
// TCP Listener Service, i.e., http/2 and http/1.1 // TCP Listener Service, i.e., http/2 and http/1.1
@ -294,7 +321,7 @@ where
let map = server_config.individual_config_map.clone().iter().map(|(k,v)| { let map = server_config.individual_config_map.clone().iter().map(|(k,v)| {
let server_name = ServerName::from(k.as_slice()); let server_name = ServerName::from(k.as_slice());
(server_name, v.clone()) (server_name, v.clone())
}).collect::<rustc_hash::FxHashMap<_,_>>(); }).collect::<std::collections::HashMap<_,_,ahash::RandomState>>();
server_crypto_map = Some(Arc::new(map)); server_crypto_map = Some(Arc::new(map));
} }
} }
@ -303,10 +330,10 @@ where
} }
/// Entrypoint for HTTP/1.1, 2 and 3 servers /// Entrypoint for HTTP/1.1, 2 and 3 servers
pub async fn start(&self) -> RpxyResult<()> { pub async fn start(&self, cancel_token: CancellationToken) -> RpxyResult<()> {
let proxy_service = async { let proxy_service = async {
if self.tls_enabled { if self.tls_enabled {
self.start_with_tls().await self.start_with_tls(cancel_token).await
} else { } else {
self.start_without_tls().await self.start_without_tls().await
} }

View file

@ -2,8 +2,8 @@ use super::{proxy_main::Proxy, socket::bind_udp_socket};
use crate::{error::*, log::*, name_exp::ByteName}; use crate::{error::*, log::*, name_exp::ByteName};
use hyper_util::client::legacy::connect::Connect; use hyper_util::client::legacy::connect::Connect;
use quinn::{ use quinn::{
crypto::rustls::{HandshakeData, QuicServerConfig},
Endpoint, TransportConfig, Endpoint, TransportConfig,
crypto::rustls::{HandshakeData, QuicServerConfig},
}; };
use rpxy_certs::ServerCrypto; use rpxy_certs::ServerCrypto;
use rustls::ServerConfig; use rustls::ServerConfig;
@ -82,7 +82,7 @@ where
let client_addr = incoming.remote_address(); let client_addr = incoming.remote_address();
let quic_connection = match incoming.await { let quic_connection = match incoming.await {
Ok(new_conn) => { Ok(new_conn) => {
info!("New connection established"); trace!("New connection established");
h3_quinn::Connection::new(new_conn) h3_quinn::Connection::new(new_conn)
}, },
Err(e) => { Err(e) => {

View file

@ -110,7 +110,7 @@ where
// quic event loop. this immediately cancels when crypto is updated by tokio::select! // quic event loop. this immediately cancels when crypto is updated by tokio::select!
while let Some(new_conn) = server.accept().await { while let Some(new_conn) = server.accept().await {
debug!("New QUIC connection established"); trace!("New QUIC connection established");
let Ok(Some(new_server_name)) = new_conn.server_name() else { let Ok(Some(new_server_name)) = new_conn.server_name() else {
warn!("HTTP/3 no SNI is given"); warn!("HTTP/3 no SNI is given");
continue; continue;

View file

@ -16,10 +16,12 @@ pub(super) fn bind_tcp_socket(listening_on: &SocketAddr) -> RpxyResult<TcpSocket
}?; }?;
tcp_socket.set_reuseaddr(true)?; tcp_socket.set_reuseaddr(true)?;
tcp_socket.set_reuseport(true)?; tcp_socket.set_reuseport(true)?;
if let Err(e) = tcp_socket.bind(*listening_on) {
tcp_socket.bind(*listening_on).map_err(|e| {
error!("Failed to bind TCP socket: {}", e); error!("Failed to bind TCP socket: {}", e);
return Err(RpxyError::Io(e)); RpxyError::Io(e)
}; })?;
Ok(tcp_socket) Ok(tcp_socket)
} }
@ -36,11 +38,10 @@ pub(super) fn bind_udp_socket(listening_on: &SocketAddr) -> RpxyResult<UdpSocket
socket.set_reuse_port(true)?; socket.set_reuse_port(true)?;
socket.set_nonblocking(true)?; // quinn already sets this internally, so it isn't strictly necessary here, but kept just in case. socket.set_nonblocking(true)?; // quinn already sets this internally, so it isn't strictly necessary here, but kept just in case.
if let Err(e) = socket.bind(&(*listening_on).into()) { socket.bind(&(*listening_on).into()).map_err(|e| {
error!("Failed to bind UDP socket: {}", e); error!("Failed to bind UDP socket: {}", e);
return Err(RpxyError::Io(e)); RpxyError::Io(e)
}; })?;
let udp_socket: UdpSocket = socket.into();
Ok(udp_socket) Ok(socket.into())
} }
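Both bind helpers now share the same map_err shape. The TCP variant with SO_REUSEADDR/SO_REUSEPORT as a standalone sketch (tokio with the net feature; set_reuseport is Unix-only):

use std::net::SocketAddr;
use tokio::net::TcpSocket;

/// Bind a TCP socket with address/port reuse so several acceptors can share one port.
fn bind_tcp(listening_on: &SocketAddr) -> std::io::Result<TcpSocket> {
    let socket = if listening_on.is_ipv4() {
        TcpSocket::new_v4()?
    } else {
        TcpSocket::new_v6()?
    };
    socket.set_reuseaddr(true)?;
    socket.set_reuseport(true)?; // Unix-only
    socket.bind(*listening_on).map_err(|e| {
        eprintln!("Failed to bind TCP socket: {e}");
        e
    })?;
    Ok(socket)
}

fn main() -> std::io::Result<()> {
    let socket = bind_tcp(&"127.0.0.1:0".parse().unwrap())?;
    println!("bound on {}", socket.local_addr()?);
    Ok(())
}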

@ -1 +1 @@
Subproject commit af2d016b6aa4e09586253a0459efc4af6635c79b Subproject commit cc7aeb870a62cd8d4b962de35927a241525ea30d

@ -1 +1 @@
Subproject commit d5b5efd9de4dab3c958c50be5380652d801cc65f Subproject commit 2500716b70bd6e548cdf690188ded7afe6726330

@ -1 +1 @@
Subproject commit ffeaac1eb32589599c9be357f2273a2824741c7d Subproject commit b2e9eac31c1b620d2fd0aa40753ca965a1ec1269