Merge pull request #309 from junkurihara/develop

0.10.1
This commit is contained in:
Jun Kurihara 2025-07-05 14:38:02 +09:00 committed by GitHub
commit 78fdc29127
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
67 changed files with 1497 additions and 1075 deletions

7
.build/Jenkinsfile vendored
View file

@ -37,9 +37,6 @@ pipeline {
dir('rust-rpxy') {
sh """
# Update submodule URLs to HTTPS (allows cloning without SSH keys)
sed -i 's|git@github.com:|https://github.com/|g' .gitmodules
# Initialize and update submodules
git submodule update --init
"""
@ -59,7 +56,7 @@ pipeline {
// Build the binary
sh 'cargo build --release'
// Prepare and stash files
sh """
# Move binary to workspace root for easier access
@ -81,7 +78,7 @@ pipeline {
stash includes: "${BINARY_NAME}.spec", name: "rpm-files"
stash includes: "rpxy.service, config.toml", name: "service-file"
stash includes: "LICENSE, README.md", name: "docs"
// Archive the binary as an artifact
archiveArtifacts artifacts: "${BINARY_NAME}", allowEmptyArchive: false, fingerprint: true
}

View file

@ -1,6 +1,3 @@
# Basic dependabot.yml file with
# minimum configuration for two package managers
version: 2
updates:
# Enable version updates for cargo
@ -9,26 +6,6 @@ updates:
schedule:
interval: "daily"
- package-ecosystem: "cargo"
directory: "/rpxy-bin"
schedule:
interval: "daily"
- package-ecosystem: "cargo"
directory: "/rpxy-lib"
schedule:
interval: "daily"
- package-ecosystem: "cargo"
directory: "/rpxy-certs"
schedule:
interval: "daily"
- package-ecosystem: "cargo"
directory: "/rpxy-acme"
schedule:
interval: "daily"
# Enable version updates for Docker
- package-ecosystem: "docker"
directory: "/docker"

View file

@ -5,6 +5,9 @@ on:
pull_request:
types: [synchronize, opened]
permissions:
contents: read
env:
CARGO_TERM_COLOR: always

View file

@ -14,6 +14,10 @@ on:
jobs:
on-success:
permissions:
contents: read
packages: read
runs-on: ubuntu-latest
if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success' }} || ${{ github.event_name == 'repository_dispatch' }}
strategy:
@ -98,12 +102,18 @@ jobs:
path: "/tmp/${{ steps.set-env.outputs.target_name }}"
on-failure:
permissions:
contents: read
runs-on: ubuntu-latest
if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'failure' }}
steps:
- run: echo 'The release triggering workflows failed'
release:
permissions:
contents: write
runs-on: ubuntu-latest
if: ${{ github.event_name == 'repository_dispatch' }}
needs: on-success

View file

@ -16,6 +16,10 @@ env:
jobs:
build_and_push:
permissions:
contents: read
packages: write
runs-on: ubuntu-22.04
if: ${{ github.event_name == 'push' }} || ${{ github.event_name == 'pull_request' && github.event.pull_request.merged == true }}
strategy:
@ -199,6 +203,10 @@ jobs:
labels: ${{ steps.meta.outputs.labels }}
dispatch_release_event:
permissions:
contents: write
actions: write
runs-on: ubuntu-latest
if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
needs: build_and_push

View file

@ -7,6 +7,8 @@ on:
jobs:
Scan-Build:
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

1
.gitignore vendored
View file

@ -1,5 +1,6 @@
.vscode
.private
.tmp
docker/log
docker/cache
docker/config

14
.gitmodules vendored
View file

@ -1,10 +1,10 @@
[submodule "submodules/rusty-http-cache-semantics"]
path = submodules/rusty-http-cache-semantics
url = git@github.com:junkurihara/rusty-http-cache-semantics.git
path = submodules/rusty-http-cache-semantics
url = https://github.com/junkurihara/rusty-http-cache-semantics.git
[submodule "submodules/rustls-acme"]
path = submodules/rustls-acme
url = git@github.com:junkurihara/rustls-acme.git
path = submodules/rustls-acme
url = https://github.com/junkurihara/rustls-acme.git
[submodule "submodules/s2n-quic"]
path = submodules/s2n-quic
url = git@github.com:junkurihara/s2n-quic.git
branch = rustls-pq
path = submodules/s2n-quic
url = https://github.com/junkurihara/s2n-quic.git
branch = rustls-pq

View file

@ -1,6 +1,15 @@
# CHANGELOG
## 0.10.1 or 0.11.0 (Unreleased)
## 0.10.2 or 0.11.0 (Unreleased)
## 0.10.1
### Improvement
- Feat: Support `Forwarded` header in addition to `X-Forwarded-For` header. This is to support the standard forwarding header for reverse proxy applications (RFC 7239). Use the `forwarded_header` upstream option to enable this feature.
By default, it is not appended to the outgoing header. However, if the incoming request has the forwarded header, it would be preserved and updated simultaneously with the `x-forwarded-for` header. If both the forwarded and x-forwarded-for headers exist (and they are inconsistent), x-forwarded-for is prioritized. This means that x-forwarded-for is first updated and then copied (overriding) to the `for` param of the forwarded header.
- Refactor: lots of minor improvements
- Deps
## 0.10.0

784
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
[workspace.package]
version = "0.10.0"
version = "0.10.1"
authors = ["Jun Kurihara"]
homepage = "https://github.com/junkurihara/rust-rpxy"
repository = "https://github.com/junkurihara/rust-rpxy"

118
README.md
View file

@ -13,20 +13,20 @@
## Introduction
`rpxy` [ahr-pik-see] is an implementation of simple and lightweight reverse-proxy with some additional features. The implementation is based on [`hyper`](https://github.com/hyperium/hyper), [`rustls`](https://github.com/rustls/rustls) and [`tokio`](https://github.com/tokio-rs/tokio), i.e., written in Rust [^pure_rust]. Our `rpxy` routes multiple host names to appropriate backend application servers while serving TLS connections.
`rpxy` [ahr-pik-see] is a simple and lightweight reverse-proxy implementation with additional features. The implementation is based on [`hyper`](https://github.com/hyperium/hyper), [`rustls`](https://github.com/rustls/rustls) and [`tokio`](https://github.com/tokio-rs/tokio), i.e., written in Rust [^pure_rust]. `rpxy` routes multiple hostnames to appropriate backend application servers while serving TLS connections.
[^pure_rust]: Doubtfully can be claimed to be written in pure Rust since current `rpxy` is based on `aws-lc-rs` for cryptographic operations.
[^pure_rust]: It is questionable whether this can be claimed to be written in pure Rust since the current `rpxy` is based on `aws-lc-rs` for cryptographic operations.
Supported features are summarized as follows:
The supported features are summarized as follows:
- Supported HTTP(S) protocols: HTTP/1.1, HTTP/2 and brand-new HTTP/3 [^h3lib]
- Supported HTTP(S) protocols: HTTP/1.1, HTTP/2, and the brand-new HTTP/3 [^h3lib]
- gRPC is also supported
- Serving multiple domain names with TLS termination
- Mutual TLS authentication with client certificates
- Automated certificate issuance and renewal via TLS-ALPN-01 ACME protocol [^acme]
- Post-quantum key exchange for TLS/QUIC [^kyber]
- TLS connection sanitization to avoid the domain fronting [^sanitization]
- Load balancing with round-robin, random, and sticky session
- TLS connection sanitization to avoid domain fronting [^sanitization]
- Load balancing with round-robin, random, and sticky sessions
- and more...
[^h3lib]: HTTP/3 is enabled thanks to [`quinn`](https://github.com/quinn-rs/quinn), [`s2n-quic`](https://github.com/aws/s2n-quic) and [`hyperium/h3`](https://github.com/hyperium/h3). HTTP/3 libraries are mutually exclusive. You need to explicitly specify `s2n-quic` with `--no-default-features` flag. Also note that if you build `rpxy` with `s2n-quic`, then it requires `openssl` just for building the package.
@ -35,9 +35,9 @@ Supported features are summarized as follows:
[^kyber]: `rpxy` supports the hybridized post-quantum key exchange [`X25519MLKEM768`](https://www.ietf.org/archive/id/draft-kwiatkowski-tls-ecdhe-mlkem-02.html) for TLS/QUIC incoming and outgoing initiation thanks to [`rustls-post-quantum`](https://docs.rs/rustls-post-quantum/latest/rustls_post_quantum/). This is already a default feature. Also note that `X25519MLKEM768` is still a draft version yet this is widely used on the Internet.
[^sanitization]: By default, `rpxy` provides the *TLS connection sanitization* by correctly binding a certificate used to establish a secure channel with the backend application. Specifically, it always keeps the consistency between the given SNI (server name indication) in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in Request line). We should note that NGINX doesn't guarantee such a consistency by default. To this end, you have to add `if` statement in the configuration file in NGINX.
[^sanitization]: By default, `rpxy` provides *TLS connection sanitization* by correctly binding a certificate used to establish a secure channel with the backend application. Specifically, it always maintains consistency between the given SNI (server name indication) in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in Request line). We should note that NGINX doesn't guarantee such consistency by default. To achieve this, you have to add an `if` statement in the NGINX configuration file.
This project is still *work-in-progress*. But it is already working in some production environments and serves a number of domain names. Furthermore it *significantly outperforms* NGINX and Caddy, e.g., *1.5x faster than NGINX*, in the setting of a very simple HTTP reverse-proxy scenario (See [`bench`](./bench/) directory).
This project is still *work-in-progress*. However, it is already working in some production environments and serves a number of domain names. Furthermore, it *significantly outperforms* NGINX and Caddy, e.g., *30% ~ 60% or more faster than NGINX*, in very simple HTTP reverse-proxy scenarios (See [`bench`](./bench/) directory).
## Installing/Building an Executable Binary of `rpxy`
@ -60,15 +60,15 @@ You can build an executable binary yourself by checking out this Git repository.
% cargo build --no-default-features --features http3-s2n --release
```
Then you have an executive binary `rust-rpxy/target/release/rpxy`.
Then you have an executable binary `rust-rpxy/target/release/rpxy`.
### Package Installation for Linux (RPM/DEB)
You can find the Jenkins CI/CD build scripts for `rpxy` in the [./build](./.build) directory.
You can find the Jenkins CI/CD build scripts for `rpxy` in the [./.build](./.build) directory.
Prebuilt packages for Linux RPM and DEB are available at [https://rpxy.gamerboy59.dev](https://rpxy.gamerboy59.dev), provided by [@Gamerboy59](https://github.com/Gamerboy59).
Note that we do not have an option of installation via [`crates.io`](https://crates.io/), i.e., `cargo install`, at this point since some dependencies are not published yet. Alternatively, you can use docker image (see below) as the easiest way for `amd64` environment.
Note that we do not have an installation option via [`crates.io`](https://crates.io/), i.e., `cargo install`, at this point since some dependencies are not yet published. Alternatively, you can use the docker image (see below) as the easiest way for `amd64` environments.
## Usage
@ -80,9 +80,9 @@ You can run `rpxy` with a configuration file like
% ./target/release/rpxy --config config.toml
```
`rpxy` tracks the change of `config.toml` in the real-time manner and apply the change immediately without restarting the process.
`rpxy` tracks changes to `config.toml` in real-time and applies changes immediately without restarting the process.
The full help messages are given follows.
The full help message is as follows.
```bash:
usage: rpxy [OPTIONS] --config <FILE>
@ -116,8 +116,8 @@ server_name = 'app1.example.com'
reverse_proxy = [{ upstream = [{ location = 'app1.local:8080' }] }]
```
In the above setting, `rpxy` listens on port 80 (TCP) and serves incoming cleartext HTTP request including a `app1.example.com` in its HOST header or URL in its Request line.
For example, request messages like the followings.
In the above setting, `rpxy` listens on port 80 (TCP) and serves incoming cleartext HTTP requests that include `app1.example.com` in their HOST header or URL in their Request line.
For example, request messages like the following.
```http
GET http://app1.example.com/path/to HTTP/1.1\r\n
@ -130,9 +130,9 @@ GET /path/to HTTP/1.1\r\n
HOST: app1.example.com\r\n
```
Otherwise, say, a request to `other.example.com` is simply rejected with the status code `40x`.
Otherwise, a request to `other.example.com` is simply rejected with status code `40x`.
If you want to host multiple and distinct domain names in a single IP address/port, simply create multiple `app."<app_name>"` entries in config file like
If you want to host multiple distinct domain names on a single IP address/port, simply create multiple `app."<app_name>"` entries in the config file like
```toml
default_app = "app1"
@ -146,11 +146,11 @@ server_name = "app2.example.org"
#...
```
Here we note that by specifying `default_app` entry, *HTTP* requests will be served by the specified application if HOST header or URL in Request line doesn't match any `server_name`s in `reverse_proxy` entries. For HTTPS requests, it will be rejected since the secure connection cannot be established for the unknown server name.
Note that by specifying a `default_app` entry, *HTTP* requests will be served by the specified application if the HOST header or URL in the Request line doesn't match any `server_name`s in `reverse_proxy` entries. For HTTPS requests, it will be rejected since a secure connection cannot be established for an unknown server name.
#### HTTPS to Backend Application
The request message will be routed to the backend application specified with the domain name `app1.localdomain:8080` or IP address over cleartext HTTP. If the backend channel needs to serve TLS like forwarding to `https://app1.localdomain:8080`, you need to enable a `tls` option for the location.
The request message will be routed to the backend application specified with the domain name `app1.localdomain:8080` or IP address over cleartext HTTP. If the backend channel needs to serve TLS, like forwarding to `https://app1.localdomain:8080`, you need to enable the `tls` option for the location.
```toml
reverse_proxy = [
@ -160,7 +160,7 @@ revese_proxy = [
#### Load Balancing
You can specify multiple backend locations in the `reverse_proxy` array for *load-balancing* with an appropriate `load_balance` option. Currently it works in the manner of round-robin, in the random fashion, or round-robin with *session-persistance* using cookie. if `load_balance` is not specified, the first backend location is always chosen.
You can specify multiple backend locations in the `reverse_proxy` array for *load-balancing* with an appropriate `load_balance` option. Currently it works in a round-robin manner, randomly, or round-robin with *session-persistence* using cookies. If `load_balance` is not specified, the first backend location is always chosen.
```toml
[apps."app_name"]
@ -174,7 +174,7 @@ load_balance = 'round_robin' # or 'random' or 'sticky'
### Second Step: Terminating TLS
First of all, you need to specify a port `listen_port_tls` listening the HTTPS traffic, separately from HTTP port (`listen_port`). Then, serving an HTTPS endpoint can be easily done for your desired application just by specifying TLS certificates and private keys in PEM files.
First of all, you need to specify a port `listen_port_tls` listening for HTTPS traffic, separately from the HTTP port (`listen_port`). Then, serving an HTTPS endpoint can be easily done for your desired application by simply specifying TLS certificates and private keys in PEM files.
```toml
listen_port = 80
@ -186,23 +186,23 @@ tls = { tls_cert_path = 'server.crt', tls_cert_key_path = 'server.key' }
reverse_proxy = [{ upstream = [{ location = 'app1.local:8080' }] }]
```
In the above setting, both cleartext HTTP requests to port 80 and ciphertext HTTPS requests to port 443 are routed to the backend `app1.local:8080` in the same fashion. If you don't need to serve cleartext requests, just remove `listen_port = 80` and specify only `listen_port_tls = 443`.
In the above setting, both cleartext HTTP requests to port 80 and encrypted HTTPS requests to port 443 are routed to the backend `app1.local:8080` in the same manner. If you don't need to serve cleartext requests, just remove `listen_port = 80` and specify only `listen_port_tls = 443`.
We should note that the private key specified by `tls_cert_key_path` must be *in PKCS8 format*. (See TIPS to convert PKCS1 formatted private key to PKCS8 one.)
Note that the private key specified by `tls_cert_key_path` must be *in PKCS8 format*. (See TIPS to convert PKCS1 formatted private keys to PKCS8 format.)
#### Redirecting Cleartext HTTP Requests to HTTPS
In the current Web, we believe it is common to serve everything through HTTPS rather than HTTP, and hence *https redirection* is often used for HTTP requests. When you specify both `listen_port` and `listen_port_tls`, you can enable an option of such redirection by making `https_redirection` true.
In the current Web, it is common to serve everything through HTTPS rather than HTTP, and hence *HTTPS redirection* is often used for HTTP requests. When you specify both `listen_port` and `listen_port_tls`, you can enable such redirection by setting `https_redirection` to true.
```toml
tls = { https_redirection = true, tls_cert_path = 'server.crt', tls_cert_key_path = 'server.key' }
```
If it is true, `rpxy` returns the status code `301` to the cleartext request with new location `https://<requested_host>/<requested_query_and_path>` served over TLS.
If it is true, `rpxy` returns status code `301` to the cleartext request with the new location `https://<requested_host>/<requested_query_and_path>` served over TLS.
### Third Step: More Flexible Routing Based on URL Path
`rpxy` can serves, of course, routes requests to multiple backend destination according to the path information. The routing information can be specified for each application (`server_name`) as follows.
`rpxy` can, of course, route requests to multiple backend destinations according to path information. The routing information can be specified for each application (`server_name`) as follows.
```toml
listen_port_tls = 443
@ -230,15 +230,15 @@ upstream = [
]
```
In the above example, a request to `https://app1.example.com/path/to?query=ok` matches the second `reverse_proxy` entry in the longest-prefix-matching manner, and will be routed to `path.backend.local` with preserving path and query information, i.e., served as `http://path.backend.local/path/to?query=ok`.
In the above example, a request to `https://app1.example.com/path/to?query=ok` matches the second `reverse_proxy` entry in a longest-prefix-matching manner, and will be routed to `path.backend.local` while preserving path and query information, i.e., served as `http://path.backend.local/path/to?query=ok`.
On the other hand, a request to `https://app1.example.com/path/another/xx?query=ng` matching the third entry is routed with *being rewritten its path information* specified by `replace_path` option. Namely, the matched `/path/another` part is rewritten with `/path`, and it is served as `http://another.backend.local/path/xx?query=ng`.
On the other hand, a request to `https://app1.example.com/path/another/xx?query=ng` matching the third entry is routed with *its path information being rewritten* as specified by the `replace_path` option. Namely, the matched `/path/another` part is rewritten to `/path`, and it is served as `http://another.backend.local/path/xx?query=ng`.
Requests that doesn't match any paths will be routed by the first entry that doesn't have the `path` option, which means the *default destination*. In other words, unless every `reverse_proxy` entry has an explicit `path` option, `rpxy` rejects requests that don't match any paths.
Requests that don't match any paths will be routed by the first entry that doesn't have the `path` option, which serves as the *default destination*. In other words, unless every `reverse_proxy` entry has an explicit `path` option, `rpxy` rejects requests that don't match any paths.
#### Simple Path-based Routing
This path-based routing option would be enough in many cases. For example, you can serve multiple applications with one domain by specifying unique path to each application. More specifically, see an example below.
This path-based routing option would be sufficient in many cases. For example, you can serve multiple applications with one domain by specifying a unique path for each application. More specifically, see the example below.
```toml
[apps.app]
@ -261,25 +261,25 @@ replace_path = '/'
upstream = [ { location = 'subapp3.local' } ]
```
This example configuration explains a very frequent situation of path-based routing. When a request to `app.example.com/subappN` routes to `sbappN.local` by replacing a path part `/subappN` to `/`.
This example configuration demonstrates a very common path-based routing situation. When a request to `app.example.com/subappN` is routed to `subappN.local` by replacing the path part `/subappN` with `/`.
## More Options
Since it is currently a work-in-progress project, we are frequently adding new options. We first add new option entries in the `config-example.toml` as examples. So please refer to it for up-to-date options. We will prepare a comprehensive documentation for all options.
Since this is currently a work-in-progress project, we are frequently adding new options. We first add new option entries in `config-example.toml` as examples. Please refer to it for up-to-date options. We will prepare comprehensive documentation for all options.
## Using Docker Image
You can also use `docker` image hosted on [Docker Hub](https://hub.docker.com/r/jqtype/rpxy) and [GitHub Container Registry](https://github.com/junkurihara/rust-rpxy/pkgs/container/rust-rpxy) instead of directly executing the binary. See [`./docker`](./docker/README.md) directory for more details.
You can also use the `docker` image hosted on [Docker Hub](https://hub.docker.com/r/jqtype/rpxy) and [GitHub Container Registry](https://github.com/junkurihara/rust-rpxy/pkgs/container/rust-rpxy) instead of directly executing the binary. See the [`./docker`](./docker/README.md) directory for more details.
## Example
[`./bench`](./bench/) directory could be a very simple example of configuration of `rpxy`. This can also be an example of an example of docker use case.
The [`./bench`](./bench/) directory contains a very simple example of `rpxy` configuration. This can also serve as an example of a docker use case.
## Experimental Features and Caveats
### HTTP/3
`rpxy` can serves HTTP/3 requests thanks to `quinn`, `s2n-quic` and `hyperium/h3`. To enable this experimental feature, add an entry `experimental.h3` in your `config.toml` like follows. Any values in the entry like `alt_svc_max_age` are optional.
`rpxy` can serve HTTP/3 requests thanks to `quinn`, `s2n-quic` and `hyperium/h3`. To enable this experimental feature, add an entry `experimental.h3` in your `config.toml` as follows. Any values in the entry like `alt_svc_max_age` are optional.
```toml
[experimental.h3]
@ -301,7 +301,7 @@ server_name = 'localhost' # Domain name
tls = { https_redirection = true, tls_cert_path = './server.crt', tls_cert_key_path = './server.key', client_ca_cert_path = './client_cert.ca.crt' }
```
However, currently we have a limitation on HTTP/3 support for applications that enables client authentication. If an application is set with client authentication, HTTP/3 doesn't work for the application.
However, currently we have a limitation on HTTP/3 support for applications that enable client authentication. If an application is configured with client authentication, HTTP/3 doesn't work for that application.
### Hybrid Caching Feature with Temporary File and On-Memory Cache
@ -316,11 +316,11 @@ max_cache_each_size = 65535 # optional. default is 64k
max_cache_each_size_on_memory = 4096 # optional. default is 4k. If 0, it is always file cache.
```
A *storable* (in the context of an HTTP message) response is stored if its size is less than or equal to `max_cache_each_size` in bytes. If it is also less than or equal to `max_cache_each_size_on_memory`, it is stored as an on-memory object. Otherwise, it is stored as a temporary file. Note that `max_cache_each_size` must be larger or equal to `max_cache_each_size_on_memory`. Also note that once `rpxy` restarts or the config is updated, the cache is totally eliminated not only from the on-memory table but also from the file system.
A *storable* (in the context of an HTTP message) response is stored if its size is less than or equal to `max_cache_each_size` in bytes. If it is also less than or equal to `max_cache_each_size_on_memory`, it is stored as an in-memory object. Otherwise, it is stored as a temporary file. Note that `max_cache_each_size` must be greater than or equal to `max_cache_each_size_on_memory`. Also note that once `rpxy` restarts or the config is updated, the cache is completely eliminated not only from the in-memory table but also from the file system.
### Automated Certificate Issuance and Renewal via TLS-ALPN-01 ACME protocol
### Automated Certificate Issuance and Renewal via TLS-ALPN-01 ACME Protocol
This is a brand-new feature and maybe still unstable. Thanks to the [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme), the automatic issuance and renewal of certificates are finally available in `rpxy`. To enable this feature, you need to specify the following entries in `config.toml`.
This is a brand-new feature and may still be unstable. Thanks to [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme), automatic issuance and renewal of certificates are finally available in `rpxy`. To enable this feature, you need to specify the following entries in `config.toml`.
```toml
# ACME enabled domain name.
@ -342,13 +342,13 @@ email = "test@example.com"
registry_path = "./acme_registry" # optional. default is "./acme_registry" relative to the current working directory
```
The above configuration is common to all ACME enabled domains. Note that the https port must be open to the public to verify the domain ownership.
The above configuration is common to all ACME-enabled domains. Note that the HTTPS port must be open to the public to verify domain ownership.
## TIPS
### Set custom port for HTTPS redirection
### Set Custom Port for HTTPS Redirection
Consider a case where `rpxy` is running on a container. Then when the container manager maps port A (e.g., 80/443) of the host to port B (e.g., 8080/8443) of the container for http and https, `rpxy` must be configured with port B for `listen_port` and `listen_port_tls`. However, when you want to set `http_redirection=true` for some backend apps, `rpxy` issues the redirection response 301 with the port B by default, which is not accessible from the outside of the container. To avoid this, you can set a custom port for the redirection response by specifying `https_redirection_port` in `config.toml`. In this case, port A should be set for `https_redirection_port`, then the redirection response 301 will be issued with the port A.
Consider a case where `rpxy` is running in a container. When the container manager maps port A (e.g., 80/443) of the host to port B (e.g., 8080/8443) of the container for HTTP and HTTPS, `rpxy` must be configured with port B for `listen_port` and `listen_port_tls`. However, when you want to set `https_redirection=true` for some backend apps, `rpxy` issues redirection response 301 with port B by default, which is not accessible from outside the container. To avoid this, you can set a custom port for the redirection response by specifying `https_redirection_port` in `config.toml`. In this case, port A should be set for `https_redirection_port`, then redirection response 301 will be issued with port A.
```toml
listen_port = 8080
@ -356,25 +356,25 @@ listen_port_tls = 8443
https_redirection_port = 443
```
### Using Private Key Issued by Let's Encrypt
### Using Private Keys Issued by Let's Encrypt
If you obtain certificates and private keys from [Let's Encrypt](https://letsencrypt.org/), you have PKCS1-formatted private keys. So you need to convert such retrieved private keys into PKCS8 format to use in `rpxy`.
If you obtain certificates and private keys from [Let's Encrypt](https://letsencrypt.org/), you have PKCS1-formatted private keys. You need to convert such retrieved private keys into PKCS8 format to use them in `rpxy`.
The easiest way is to use `openssl` by
The easiest way is to use `openssl`:
```bash
% openssl pkcs8 -topk8 -nocrypt \
-in yoru_domain_from_le.key \
-in your_domain_from_le.key \
-inform PEM \
-out your_domain_pkcs8.key.pem \
-outform PEM
```
### Client Authentication using Client Certificate Signed by Your Own Root CA
### Client Authentication Using Client Certificates Signed by Your Own Root CA
First, you need to prepare a CA certificate used to verify client certificate. If you do not have one, you can generate CA key and certificate by OpenSSL commands as follows. *Note that `rustls` accepts X509v3 certificates and reject SHA-1, and that `rpxy` relys on Version 3 extension fields of `KeyID`s of `Subject Key Identifier` and `Authority Key Identifier`.*
First, you need to prepare a CA certificate used to verify client certificates. If you do not have one, you can generate a CA key and certificate using OpenSSL commands as follows. *Note that `rustls` accepts X509v3 certificates and rejects SHA-1, and that `rpxy` relies on Version 3 extension fields of `KeyID`s of `Subject Key Identifier` and `Authority Key Identifier`.*
1. Generate CA key of `secp256v1`, CSR, and then generate CA certificate that will be set for `tls.client_ca_cert_path` for each server app in `config.toml`.
1. Generate a CA key of `secp256v1`, CSR, and then generate a CA certificate that will be set for `tls.client_ca_cert_path` for each server app in `config.toml`.
```bash
% openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:prime256v1 -out client.ca.key
@ -393,7 +393,7 @@ First, you need to prepare a CA certificate used to verify client certificate. I
% openssl x509 -req -days 3650 -sha256 -in client.ca.csr -signkey client.ca.key -out client.ca.crt -extfile client.ca.ext
```
2. Generate a client key of `secp256v1` and certificate signed by CA key.
2. Generate a client key of `secp256v1` and certificate signed by the CA key.
```bash
% openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:prime256v1 -out client.key
@ -412,32 +412,32 @@ First, you need to prepare a CA certificate used to verify client certificate. I
% openssl x509 -req -days 365 -sha256 -in client.csr -CA client.ca.crt -CAkey client.ca.key -CAcreateserial -out client.crt -extfile client.ext
```
Now you have a client key `client.key` and certificate `client.crt` (version 3). A `pfx` (`p12`) file can be generated as follows:
Now you have a client key `client.key` and certificate `client.crt` (version 3). A `pfx` (`p12`) file can be generated as follows:
```bash
% openssl pkcs12 -export -inkey client.key -in client.crt -certfile client.ca.crt -out client.pfx
```
Note that on macOS, a `pfx` generated by `OpenSSL 3.0.6` cannot be imported into macOS Keychain Access. We generated the sample `pfx` using `LibreSSL 2.8.3` instead of `OpenSSL`.
Note that on macOS, a `pfx` generated by `OpenSSL 3.0.6` cannot be imported to macOS Keychain Access. We generated the sample `pfx` using `LibreSSL 2.8.3` instead of `OpenSSL`.
All sample certificate files can be found in the `./example-certs/` directory.
All sample certificate files can be found in the `./example-certs/` directory.
### (Work Around) Deployment on Ubuntu 22.04LTS using docker behind `ufw`
### (Work Around) Deployment on Ubuntu 22.04 LTS Using Docker Behind `ufw`
Basically, Docker automatically manages your iptables if you use the port-mapping option, i.e., `--publish` for `docker run` or `ports` in `docker-compose.yml`. This means you do not need to manually expose your port, e.g., 443 TCP/UDP for HTTPS, using `ufw`-like management commands.
Basically, Docker automatically manages your iptables if you use the port-mapping option, i.e., `--publish` for `docker run` or `ports` in `docker-compose.yml`. This means you do not need to manually expose your port, e.g., 443 TCP/UDP for HTTPS, using `ufw`-like management commands.
However, we found that if you want to use the brand-new UDP-based protocol, HTTP/3, on `rpxy`, you need to explicitly expose your HTTPS port by using `ufw`-like command.
However, we found that if you want to use the brand-new UDP-based protocol, HTTP/3, on `rpxy`, you need to explicitly expose your HTTPS port using `ufw`-like commands.
```bash
% sudo ufw allow 443
% sudo ufw enable
```
Your Docker container can receive only TCP-based connections, i.e., HTTP/2 or earlier, unless you manually manage the port. We see that this is strange and expect that it is some kind of bug (of Docker? Ubuntu? or something else?). But at least for Ubuntu 22.04 LTS, you need to handle it as described above.
Your Docker container can receive only TCP-based connections, i.e., HTTP/2 or earlier, unless you manually manage the port. We see that this is strange and expect that it is some kind of bug (of Docker? Ubuntu? or something else?). But at least for Ubuntu 22.04 LTS, you need to handle it as described above.
### Managing `rpxy` via web interface
### Managing `rpxy` via Web Interface
Check the third-party project [`Gamerboy59/rpxy-webui`](https://github.com/Gamerboy59/rpxy-webui) to manage `rpxy` via a web interface.
Check the third-party project [`Gamerboy59/rpxy-webui`](https://github.com/Gamerboy59/rpxy-webui) to manage `rpxy` via a web interface.
### Other TIPS

View file

@ -2,9 +2,13 @@
auto_https off
}
:80 {
# Proxy everything else to the backend nginx
reverse_proxy backend-nginx
log {
level ERROR
}
}

View file

@ -3,49 +3,46 @@
This test simply measures the performance of several reverse proxies over HTTP/1.1 with the following command, using [`rewrk`](https://github.com/lnx-search/rewrk).
```sh:
$ rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
```
## Tests on `linux/arm64/v8`
Done at Jul. 15, 2023
Done at May. 17, 2025
### Environment
- `rpxy` commit id: `1da7e5bfb77d1ce4ee8d6cfc59b1c725556fc192`
- Docker Desktop 4.21.1 (114176)
- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
- Docker Desktop 4.41.2 (191736)
- ReWrk 0.3.2
- Macbook Pro '14 (2021, M1 Max, 64GB RAM)
- Mac mini (2024, M4 Pro, 64GB RAM)
The docker images of `nginx` and `caddy` for `linux/arm64/v8` are pulled from the official registry.
### Result for `rpxy`, `nginx` and `caddy`
```
----------------------------
```bash
Benchmark on rpxy
Beginning round 1...
Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
Latencies:
Avg Stdev Min Max
19.64ms 8.85ms 0.67ms 113.22ms
6.90ms 3.42ms 0.78ms 80.26ms
Requests:
Total: 390078 Req/Sec: 26011.25
Total: 1107885 Req/Sec: 73866.03
Transfer:
Total: 304.85 MB Transfer Rate: 20.33 MB/Sec
Total: 867.44 MB Transfer Rate: 57.83 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 79.24ms |
| 99% | 54.28ms |
| 95% | 42.50ms |
| 90% | 37.82ms |
| 75% | 31.54ms |
| 50% | 26.37ms |
| 99.9% | 49.76ms |
| 99% | 29.57ms |
| 95% | 15.78ms |
| 90% | 13.05ms |
| 75% | 10.41ms |
| 50% | 8.72ms |
+ --------------- + --------------- +
721 Errors: error shutting down connection: Socket is not connected (os error 57)
sleep 3 secs
----------------------------
Benchmark on nginx
@ -53,23 +50,23 @@ Beginning round 1...
Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
Latencies:
Avg Stdev Min Max
33.26ms 15.18ms 1.40ms 118.94ms
11.65ms 14.04ms 0.40ms 205.93ms
Requests:
Total: 230268 Req/Sec: 15356.08
Total: 654978 Req/Sec: 43666.56
Transfer:
Total: 186.77 MB Transfer Rate: 12.46 MB/Sec
Total: 532.81 MB Transfer Rate: 35.52 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 99.91ms |
| 99% | 83.74ms |
| 95% | 70.67ms |
| 90% | 64.03ms |
| 75% | 54.32ms |
| 50% | 45.19ms |
| 99.9% | 151.00ms |
| 99% | 102.80ms |
| 95% | 62.44ms |
| 90% | 42.98ms |
| 75% | 26.44ms |
| 50% | 18.25ms |
+ --------------- + --------------- +
677 Errors: error shutting down connection: Socket is not connected (os error 57)
512 Errors: connection closed
sleep 3 secs
----------------------------
@ -78,33 +75,31 @@ Beginning round 1...
Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
Latencies:
Avg Stdev Min Max
48.51ms 50.74ms 0.34ms 554.58ms
77.54ms 368.11ms 0.37ms 6770.73ms
Requests:
Total: 157239 Req/Sec: 10485.98
Total: 86963 Req/Sec: 5798.35
Transfer:
Total: 125.99 MB Transfer Rate: 8.40 MB/Sec
Total: 70.00 MB Transfer Rate: 4.67 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 473.82ms |
| 99% | 307.16ms |
| 95% | 212.28ms |
| 90% | 169.05ms |
| 75% | 115.92ms |
| 50% | 80.24ms |
| 99.9% | 5789.65ms |
| 99% | 3407.02ms |
| 95% | 1022.31ms |
| 90% | 608.17ms |
| 75% | 281.95ms |
| 50% | 149.29ms |
+ --------------- + --------------- +
708 Errors: error shutting down connection: Socket is not connected (os error 57)
```
## Results on `linux/amd64`
Done at Jul. 24, 2023
Done at May 20, 2025
### Environment
- `rpxy` commit id: `7c0945a5124418aa9a1024568c1989bb77cf312f`
- Docker Desktop 4.21.1 (114176)
- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
- Docker Desktop 4.41.2 (192736)
- ReWrk 0.3.2 and Wrk 0.4.2
- iMac '27 (2020, 10-Core Intel Core i9, 128GB RAM)
@ -112,8 +107,8 @@ The docker images of `nginx` and `caddy` for `linux/amd64` were pulled from the
Also, when `Sozu` is configured as an HTTP reverse proxy, it cannot handle HTTP request messages emit from `ReWrk` due to hostname parsing errors though it can correctly handle messages dispatched from `curl` and browsers. So, we additionally test using [`Wrk`](https://github.com/wg/wrk) to examine `Sozu` with the following command.
```sh:
$ wrk -c 512 -t 4 -d 15s http://localhost:8110
```bash
wrk -c 512 -t 4 -d 15s http://localhost:8110
```
<!-- ```
@ -124,7 +119,7 @@ ERROR Error connecting to backend: Could not get cluster id from request: Host
#### With ReWrk for `rpxy`, `nginx` and `caddy`
```
```bash
----------------------------
Benchmark [x86_64] with ReWrk
----------------------------
@ -133,24 +128,22 @@ Beginning round 1...
Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
Latencies:
Avg Stdev Min Max
20.37ms 8.95ms 1.63ms 160.27ms
15.75ms 6.75ms 1.75ms 124.25ms
Requests:
Total: 376345 Req/Sec: 25095.19
Total: 486635 Req/Sec: 32445.33
Transfer:
Total: 295.61 MB Transfer Rate: 19.71 MB/Sec
Total: 381.02 MB Transfer Rate: 25.40 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 112.50ms |
| 99% | 61.33ms |
| 95% | 44.26ms |
| 90% | 38.74ms |
| 75% | 32.00ms |
| 50% | 26.82ms |
| 99.9% | 91.91ms |
| 99% | 55.53ms |
| 95% | 34.87ms |
| 90% | 29.55ms |
| 75% | 23.99ms |
| 50% | 20.17ms |
+ --------------- + --------------- +
626 Errors: error shutting down connection: Socket is not connected (os error 57)
sleep 3 secs
----------------------------
Benchmark on nginx
@ -158,24 +151,22 @@ Beginning round 1...
Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
Latencies:
Avg Stdev Min Max
23.45ms 12.42ms 1.18ms 154.44ms
24.02ms 15.84ms 1.31ms 207.97ms
Requests:
Total: 326685 Req/Sec: 21784.73
Total: 318516 Req/Sec: 21236.67
Transfer:
Total: 265.22 MB Transfer Rate: 17.69 MB/Sec
Total: 259.11 MB Transfer Rate: 17.28 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 96.85ms |
| 99% | 73.93ms |
| 95% | 57.57ms |
| 90% | 50.36ms |
| 75% | 40.57ms |
| 50% | 32.70ms |
| 99.9% | 135.56ms |
| 99% | 92.59ms |
| 95% | 68.54ms |
| 90% | 58.75ms |
| 75% | 45.88ms |
| 50% | 35.64ms |
+ --------------- + --------------- +
657 Errors: error shutting down connection: Socket is not connected (os error 57)
sleep 3 secs
----------------------------
Benchmark on caddy
@ -183,30 +174,26 @@ Beginning round 1...
Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
Latencies:
Avg Stdev Min Max
45.71ms 50.47ms 0.88ms 908.49ms
74.60ms 181.26ms 0.94ms 2723.20ms
Requests:
Total: 166917 Req/Sec: 11129.80
Total: 101893 Req/Sec: 6792.16
Transfer:
Total: 133.77 MB Transfer Rate: 8.92 MB/Sec
Total: 82.03 MB Transfer Rate: 5.47 MB/Sec
+ --------------- + --------------- +
| Percentile | Avg Latency |
+ --------------- + --------------- +
| 99.9% | 608.92ms |
| 99% | 351.18ms |
| 95% | 210.56ms |
| 90% | 162.68ms |
| 75% | 106.97ms |
| 50% | 73.90ms |
| 99.9% | 2232.12ms |
| 99% | 1517.73ms |
| 95% | 624.63ms |
| 90% | 406.69ms |
| 75% | 222.42ms |
| 50% | 133.46ms |
+ --------------- + --------------- +
646 Errors: error shutting down connection: Socket is not connected (os error 57)
sleep 3 secs
```
#### With Wrk for `rpxy`, `nginx`, `caddy` and `sozu`
```
```bash
----------------------------
Benchmark [x86_64] with Wrk
----------------------------
@ -214,12 +201,11 @@ Benchmark on rpxy
Running 15s test @ http://localhost:8080
4 threads and 512 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 18.68ms 8.09ms 122.64ms 74.03%
Req/Sec 6.95k 815.23 8.45k 83.83%
414819 requests in 15.01s, 326.37MB read
Socket errors: connect 0, read 608, write 0, timeout 0
Requests/sec: 27627.79
Transfer/sec: 21.74MB
Latency 15.65ms 6.94ms 104.73ms 81.28%
Req/Sec 8.36k 0.90k 9.90k 77.83%
499550 requests in 15.02s, 391.14MB read
Requests/sec: 33267.61
Transfer/sec: 26.05MB
sleep 3 secs
----------------------------
@ -227,12 +213,11 @@ Benchmark on nginx
Running 15s test @ http://localhost:8090
4 threads and 512 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 23.34ms 13.80ms 126.06ms 74.66%
Req/Sec 5.71k 607.41 7.07k 73.17%
341127 requests in 15.03s, 277.50MB read
Socket errors: connect 0, read 641, write 0, timeout 0
Requests/sec: 22701.54
Transfer/sec: 18.47MB
Latency 24.26ms 15.29ms 167.43ms 73.34%
Req/Sec 5.53k 493.14 6.91k 69.67%
330569 requests in 15.02s, 268.91MB read
Requests/sec: 22014.96
Transfer/sec: 17.91MB
sleep 3 secs
----------------------------
@ -240,13 +225,13 @@ Benchmark on caddy
Running 15s test @ http://localhost:8100
4 threads and 512 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 54.19ms 55.63ms 674.53ms 88.55%
Req/Sec 2.92k 1.40k 5.57k 56.17%
174748 requests in 15.03s, 140.61MB read
Socket errors: connect 0, read 660, write 0, timeout 0
Non-2xx or 3xx responses: 70
Requests/sec: 11624.63
Transfer/sec: 9.35MB
Latency 212.89ms 300.23ms 1.99s 86.56%
Req/Sec 1.31k 1.64k 5.72k 78.79%
67749 requests in 15.04s, 51.97MB read
Socket errors: connect 0, read 0, write 0, timeout 222
Non-2xx or 3xx responses: 3686
Requests/sec: 4505.12
Transfer/sec: 3.46MB
sleep 3 secs
----------------------------
@ -254,10 +239,9 @@ Benchmark on sozu
Running 15s test @ http://localhost:8110
4 threads and 512 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 19.78ms 4.89ms 98.09ms 76.88%
Req/Sec 6.49k 824.75 8.11k 76.17%
387744 requests in 15.02s, 329.11MB read
Socket errors: connect 0, read 647, write 0, timeout 0
Requests/sec: 25821.93
Transfer/sec: 21.92MB
Latency 34.68ms 6.30ms 90.21ms 72.49%
Req/Sec 3.69k 397.85 5.08k 73.00%
220655 requests in 15.01s, 187.29MB read
Requests/sec: 14699.17
Transfer/sec: 12.48MB
```

View file

@ -1,4 +1,3 @@
version: "3"
services:
nginx:
image: nginx:alpine
@ -28,7 +27,7 @@ services:
dockerfile: docker/Dockerfile
restart: unless-stopped
environment:
- LOG_LEVEL=info
- LOG_LEVEL=error # almost nolog
- LOG_TO_FILE=false
ports:
- 127.0.0.1:8080:8080
@ -47,7 +46,7 @@ services:
tty: false
privileged: true
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
- /var/run/docker.sock:/tmp/docker.sock:ro
logging:
options:
@ -64,7 +63,7 @@ services:
restart: unless-stopped
tty: false
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
networks:
bench-nw:
@ -82,7 +81,7 @@ services:
max-size: "10m"
max-file: "3"
volumes:
- ./sozu-config.toml:/etc/sozu/config.toml
- ./sozu-config.toml:/etc/sozu/config.toml # set as almost nolog
networks:
bench-nw:

View file

@ -1,4 +1,3 @@
version: "3"
services:
nginx:
image: nginx:alpine
@ -28,7 +27,7 @@ services:
dockerfile: docker/Dockerfile
restart: unless-stopped
environment:
- LOG_LEVEL=info
- LOG_LEVEL=error # almost nolog
- LOG_TO_FILE=false
ports:
- 127.0.0.1:8080:8080
@ -47,7 +46,7 @@ services:
tty: false
privileged: true
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
- /var/run/docker.sock:/tmp/docker.sock:ro
logging:
options:
@ -64,7 +63,7 @@ services:
restart: unless-stopped
tty: false
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
networks:
bench-nw:

View file

@ -31,11 +31,14 @@
# '"$request" $status $body_bytes_sent '
# '"$http_referer" "$http_user_agent" '
# '"$upstream_addr"';
# access_log off;
access_log off;
# ssl_protocols TLSv1.2 TLSv1.3;
# ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
# ssl_prefer_server_ciphers off;
# error_log /dev/stderr;
error_log /dev/null crit;
# resolver 127.0.0.11;
# # HTTP 1.1 support
# proxy_http_version 1.1;

View file

@ -1,4 +1,4 @@
log_level = "info"
log_level = "error"
log_target = "stdout"
max_connections = 512
activate_listeners = true

View file

@ -83,7 +83,8 @@ load_balance = "random" # or "round_robin" or "sticky" (sticky session) or "none
upstream_options = [
"upgrade_insecure_requests",
"force_http11_upstream",
"set_upstream_host", # overwrite HOST value with upstream hostname (like www.yahoo.com)
"set_upstream_host", # overwrite HOST value with upstream hostname (like www.yahoo.com)
"forwarded_header" # add Forwarded header (by default, this is not added. However, if the incoming request has Forwarded header, it would be preserved and updated)
]
######################################################################

View file

@ -5,7 +5,7 @@ LABEL maintainer="Jun Kurihara"
ARG TARGETARCH
ARG CARGO_FEATURES
ENV CARGO_FEATURES ${CARGO_FEATURES}
ENV CARGO_FEATURES=${CARGO_FEATURES}
RUN if [ $TARGETARCH = "amd64" ]; then \
echo "x86_64" > /arch; \
@ -22,7 +22,7 @@ WORKDIR /tmp
COPY . /tmp/
ENV RUSTFLAGS "-C link-arg=-s"
ENV RUSTFLAGS="-C link-arg=-s"
RUN echo "Building rpxy from source" && \
cargo update && \
@ -34,7 +34,7 @@ RUN echo "Building rpxy from source" && \
FROM --platform=$TARGETPLATFORM alpine:latest AS runner
LABEL maintainer="Jun Kurihara"
ENV RUNTIME_DEPS logrotate ca-certificates su-exec
ENV RUNTIME_DEPS="logrotate ca-certificates su-exec"
RUN apk add --no-cache ${RUNTIME_DEPS} && \
update-ca-certificates && \

View file

@ -15,24 +15,27 @@ post-quantum = ["rustls-post-quantum"]
[dependencies]
url = { version = "2.5.4" }
ahash = "0.8.11"
ahash = "0.8.12"
thiserror = "2.0.12"
tracing = "0.1.41"
async-trait = "0.1.88"
base64 = "0.22.1"
aws-lc-rs = { version = "1.13.0", default-features = false, features = [
aws-lc-rs = { version = "1.13.1", default-features = false, features = [
"aws-lc-sys",
] }
blocking = "1.6.1"
rustls = { version = "0.23.26", default-features = false, features = [
rustls = { version = "0.23.28", default-features = false, features = [
"std",
"aws_lc_rs",
] }
rustls-platform-verifier = { version = "0.5.2" }
rustls-platform-verifier = { version = "0.6.0" }
rustls-acme = { path = "../submodules/rustls-acme/", default-features = false, features = [
"aws-lc-rs",
] }
rustls-post-quantum = { version = "0.2.2", optional = true }
tokio = { version = "1.44.2", default-features = false }
tokio = { version = "1.46.1", default-features = false, features = [
"rt",
"macros",
] }
tokio-util = { version = "0.7.15", default-features = false }
tokio-stream = { version = "0.1.17", default-features = false }

View file

@ -12,4 +12,7 @@ pub enum RpxyAcmeError {
/// IO error
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
/// TLS client configuration error
#[error("TLS client configuration error: {0}")]
TlsClientConfig(String),
}

View file

@ -79,11 +79,7 @@ impl AcmeManager {
&self,
cancel_token: tokio_util::sync::CancellationToken,
) -> (Vec<tokio::task::JoinHandle<()>>, HashMap<String, Arc<ServerConfig>>) {
let rustls_client_config = rustls::ClientConfig::builder()
.dangerous() // The `Verifier` we're using is actually safe
.with_custom_certificate_verifier(Arc::new(rustls_platform_verifier::Verifier::new()))
.with_no_client_auth();
let rustls_client_config = Arc::new(rustls_client_config);
let rustls_client_config = Self::create_tls_client_config().expect("Failed to create TLS client configuration for ACME");
let mut server_configs_for_challenge: HashMap<String, Arc<ServerConfig>> = HashMap::default();
let join_handles = self
@ -127,6 +123,26 @@ impl AcmeManager {
(join_handles, server_configs_for_challenge)
}
/// Creates a TLS client configuration with platform certificate verification.
///
/// This configuration uses the system's certificate store for verification,
/// which is appropriate for ACME certificate validation.
fn create_tls_client_config() -> Result<Arc<rustls::ClientConfig>, RpxyAcmeError> {
  // Resolve the process-wide default crypto provider; bail out early when none is installed.
  let Some(provider) = rustls::crypto::CryptoProvider::get_default() else {
    return Err(RpxyAcmeError::TlsClientConfig("No default crypto provider available".to_string()));
  };
  // Build a certificate verifier backed by the operating system's trust store.
  let platform_verifier = match rustls_platform_verifier::Verifier::new(provider.clone()) {
    Ok(v) => v,
    Err(e) => return Err(RpxyAcmeError::TlsClientConfig(format!("Failed to create certificate verifier: {}", e))),
  };
  let config = rustls::ClientConfig::builder()
    .dangerous() // Safe: the custom verifier delegates to the platform certificate store.
    .with_custom_certificate_verifier(Arc::new(platform_verifier))
    .with_no_client_auth();
  Ok(Arc::new(config))
}
}
#[cfg(test)]

View file

@ -13,8 +13,22 @@ publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["http3-quinn", "cache", "rustls-backend", "acme", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "post-quantum"]
default = [
"http3-quinn",
"cache",
"rustls-backend",
"sticky-cookie",
"acme",
"post-quantum",
]
# default = [
# "http3-s2n",
# "cache",
# "rustls-backend",
# "sticky-cookie",
# "acme",
# "post-quantum",
# ]
http3-quinn = ["rpxy-lib/http3-quinn"]
http3-s2n = ["rpxy-lib/http3-s2n"]
native-tls-backend = ["rpxy-lib/native-tls-backend"]
@ -23,19 +37,16 @@ webpki-roots = ["rpxy-lib/webpki-roots"]
cache = ["rpxy-lib/cache"]
acme = ["rpxy-lib/acme", "rpxy-acme"]
post-quantum = ["rpxy-lib/post-quantum"]
sticky-cookie = ["rpxy-lib/sticky-cookie"]
[dependencies]
rpxy-lib = { path = "../rpxy-lib/", default-features = false, features = [
"sticky-cookie",
] }
rpxy-lib = { path = "../rpxy-lib/", default-features = false }
# TODO: pin mimalloc due to compilation failure by musl
mimalloc = { version = "=0.1.44", default-features = false }
libmimalloc-sys = { version = "=0.1.40" }
mimalloc = { version = "0.1.47", default-features = false }
anyhow = "1.0.98"
ahash = "0.8.11"
ahash = "0.8.12"
serde = { version = "1.0.219", default-features = false, features = ["derive"] }
tokio = { version = "1.44.2", default-features = false, features = [
tokio = { version = "1.46.1", default-features = false, features = [
"net",
"rt-multi-thread",
"time",
@ -47,10 +58,10 @@ async-trait = "0.1.88"
futures-util = { version = "0.3.31", default-features = false }
# config
clap = { version = "4.5.37", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.22", default-features = false, features = ["parse"] }
hot_reload = "0.1.9"
serde_ignored = "0.1.11"
clap = { version = "4.5.40", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.23", default-features = false, features = ["parse"] }
hot_reload = "0.2.0"
serde_ignored = "0.1.12"
# logging
tracing = { version = "0.1.41" }

View file

@ -1,21 +1,29 @@
use super::toml::ConfigToml;
use super::toml::{ConfigToml, ConfigTomlExt};
use crate::error::{anyhow, ensure};
use ahash::HashMap;
use clap::Arg;
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_certs::{CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase, build_cert_reloader};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig};
use rpxy_lib::{AppConfigList, ProxyConfig};
#[cfg(feature = "acme")]
use rpxy_acme::{ACME_DIR_URL, ACME_REGISTRY_PATH, AcmeManager};
/// Parsed options
/// Parsed options from CLI
/// Options for configuring the application.
///
/// # Fields
/// - `config_file_path`: Path to the configuration file.
/// - `log_dir_path`: Optional path to the log directory.
pub struct Opts {
  /// Path to the TOML configuration file (taken from the required `--config` CLI argument).
  pub config_file_path: String,
  /// Optional path to the log directory (taken from the optional `--log-dir` CLI argument).
  pub log_dir_path: Option<String>,
}
/// Parse arg values passed from cli
/// Parses command-line arguments into an [`Opts`](rpxy-bin/src/config/parse.rs:13) struct.
///
/// Returns a populated [`Opts`](rpxy-bin/src/config/parse.rs:13) on success, or an error if parsing fails.
/// Expects a required `--config` argument and an optional `--log-dir` argument.
pub fn parse_opts() -> Result<Opts, anyhow::Error> {
let _ = include_str!("../../Cargo.toml");
let options = clap::command!()
@ -36,7 +44,6 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
);
let matches = options.get_matches();
///////////////////////////////////
let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
let log_dir_path = matches.get_one::<String>("log_dir").map(|v| v.to_owned());
@ -46,63 +53,43 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
})
}
pub fn build_settings(config: &ConfigToml) -> std::result::Result<(ProxyConfig, AppConfigList), anyhow::Error> {
// build proxy config
let proxy_config: ProxyConfig = config.try_into()?;
// backend_apps
let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;
// assertions for all backend apps
ensure!(!apps.0.is_empty(), "Wrong application spec.");
// if only https_port is specified, tls must be configured for all apps
if proxy_config.http_port.is_none() {
ensure!(
apps.0.iter().all(|(_, app)| app.tls.is_some()),
"Some apps serves only plaintext HTTP"
);
}
// https redirection port must be configured only when both http_port and https_port are configured.
if proxy_config.https_redirection_port.is_some() {
ensure!(
proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
"https_redirection_port can be specified only when both http_port and https_port are specified"
);
}
// https redirection can be configured if both ports are active
if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
ensure!(
apps.0.iter().all(|(_, app)| {
if let Some(tls) = app.tls.as_ref() {
tls.https_redirection.is_none()
} else {
true
}
}),
"https_redirection can be specified only when both http_port and https_port are specified"
);
}
// build applications
let mut app_config_list_inner = Vec::<AppConfig>::new();
for (app_name, app) in apps.0.iter() {
let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
let registered_app_name = app_name.to_ascii_lowercase();
let app_config = app.build_app_config(&registered_app_name)?;
app_config_list_inner.push(app_config);
}
let app_config_list = AppConfigList {
inner: app_config_list_inner,
default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
};
Ok((proxy_config, app_config_list))
/// Build proxy and app settings from config using ConfigTomlExt
pub fn build_settings(config: &ConfigToml) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
config.validate_and_build_settings()
}
/* ----------------------- */
/// Helper that rewrites an app's TLS options for ACME-managed certificates (when enabled).
///
/// When `tls.acme == Some(true)`, both `tls_cert_path` and `tls_cert_key_path` are pointed
/// at the single cached certificate file under the ACME registry directory; otherwise the
/// options are left untouched.
#[cfg(feature = "acme")]
fn build_tls_for_app_acme(
  tls: &mut super::toml::TlsOption,
  acme_option: &Option<super::toml::AcmeOption>,
  server_name: &str,
  acme_registry_path: &str,
  acme_dir_url: &str,
) -> Result<(), anyhow::Error> {
  // Nothing to do unless ACME is explicitly enabled for this app.
  if !matches!(tls.acme, Some(true)) {
    return Ok(());
  }
  // ACME requires the global acme option and forbids manually specified cert/key paths.
  ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
  let lower_name = server_name.to_ascii_lowercase();
  let file_name = rpxy_acme::DirCache::cached_cert_file_name(&[lower_name.clone()], acme_dir_url.to_ascii_lowercase());
  // Cert and key share a single cached file, so both paths are identical.
  let cached_path = format!("{}/{}/{}", acme_registry_path, lower_name, file_name);
  tls.tls_cert_path = Some(cached_path.clone());
  tls.tls_cert_key_path = Some(cached_path);
  Ok(())
}
/// Build cert map
/// Builds the certificate manager for TLS applications.
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
///
/// # Returns
/// Returns an option containing a tuple of certificate reloader service and receiver, or `None` if TLS is not enabled.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_cert_manager(
config: &ConfigToml,
) -> Result<
@ -139,19 +126,9 @@ pub async fn build_cert_manager(
ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());
#[cfg(feature = "acme")]
let tls = {
let mut tls = tls.clone();
if let Some(true) = tls.acme {
ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
// Both of tls_cert_key_path and tls_cert_path must be the same for ACME since it's a single file
let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
let file_name =
rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
tls.tls_cert_key_path = Some(format!("{}/{}", subdir, file_name));
tls.tls_cert_path = Some(format!("{}/{}", subdir, file_name));
}
tls
};
let mut tls = tls.clone();
#[cfg(feature = "acme")]
build_tls_for_app_acme(&mut tls, &acme_option, server_name, acme_registry_path, acme_dir_url)?;
let crypto_file_source = CryptoFileSourceBuilder::default()
.tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
@ -168,24 +145,31 @@ pub async fn build_cert_manager(
/* ----------------------- */
#[cfg(feature = "acme")]
/// Build acme manager
/// Builds the ACME manager for automatic certificate management (enabled with the `acme` feature).
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
/// * `runtime_handle` - Tokio runtime handle for async operations.
///
/// # Returns
/// Returns an option containing an [`AcmeManager`](rpxy-bin/src/config/parse.rs:153) if ACME is configured, or `None` otherwise.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_acme_manager(
config: &ConfigToml,
runtime_handle: tokio::runtime::Handle,
) -> Result<Option<AcmeManager>, anyhow::Error> {
let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone());
if acme_option.is_none() {
let Some(acme_option) = acme_option else {
return Ok(None);
}
let acme_option = acme_option.unwrap();
};
let domains = config
let domains: Vec<String> = config
.apps
.as_ref()
.unwrap()
.0
.values()
.filter_map(|app| {
//
if let Some(tls) = app.tls.as_ref() {
if let Some(true) = tls.acme {
return Some(app.server_name.as_ref().unwrap().to_owned());
@ -193,7 +177,7 @@ pub async fn build_acme_manager(
}
None
})
.collect::<Vec<_>>();
.collect();
if domains.is_empty() {
return Ok(None);

View file

@ -4,12 +4,25 @@ use crate::{
log::warn,
};
use ahash::HashMap;
use rpxy_lib::{AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use serde::Deserialize;
use std::{fs, net::SocketAddr};
use tokio::time::Duration;
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// Main configuration structure parsed from the TOML file.
///
/// # Fields
/// - `listen_port`: Optional TCP port for HTTP.
/// - `listen_port_tls`: Optional TCP port for HTTPS/TLS.
/// - `listen_ipv6`: Enable IPv6 listening.
/// - `https_redirection_port`: Optional port for HTTP to HTTPS redirection.
/// - `tcp_listen_backlog`: Optional TCP backlog size.
/// - `max_concurrent_streams`: Optional max concurrent streams.
/// - `max_clients`: Optional max client connections.
/// - `apps`: Optional application definitions.
/// - `default_app`: Optional default application name.
/// - `experimental`: Optional experimental features.
pub struct ConfigToml {
pub listen_port: Option<u16>,
pub listen_port_tls: Option<u16>,
@ -23,8 +36,75 @@ pub struct ConfigToml {
pub experimental: Option<Experimental>,
}
/// Extension trait for config validation and building
pub trait ConfigTomlExt {
  /// Validate the parsed TOML configuration and build the proxy settings and application list from it.
  /// Returns an error when the application spec is missing/empty or port/TLS constraints are violated.
  fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error>;
}
impl ConfigTomlExt for ConfigToml {
  /// Validate the configuration and build the proxy settings plus application list.
  fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
    let proxy_config: ProxyConfig = self.try_into()?;
    let apps = self.apps.as_ref().ok_or(anyhow!("Missing application spec"))?;

    // At least one application must be defined.
    ensure!(!apps.0.is_empty(), "Wrong application spec.");

    let every_app_has_tls = apps.0.values().all(|app| app.tls.is_some());
    let no_app_requests_https_redirection = apps
      .0
      .values()
      .all(|app| app.tls.as_ref().map_or(true, |tls| tls.https_redirection.is_none()));

    // Without a plaintext HTTP port, every app must terminate TLS.
    if proxy_config.http_port.is_none() {
      ensure!(every_app_has_tls, "Some apps serve only plaintext HTTP");
    }
    // A dedicated redirection port only makes sense when both listeners are active.
    if proxy_config.https_redirection_port.is_some() {
      ensure!(
        proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
        "https_redirection_port can be specified only when both http_port and https_port are specified"
      );
    }
    // Per-app https_redirection likewise requires both listeners.
    if proxy_config.https_port.is_none() || proxy_config.http_port.is_none() {
      ensure!(
        no_app_requests_https_redirection,
        "https_redirection can be specified only when both http_port and https_port are specified"
      );
    }

    // Build the per-app configuration list; app names are registered lowercase.
    let mut inner = Vec::<AppConfig>::with_capacity(apps.0.len());
    for (app_name, app) in apps.0.iter() {
      let _ = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
      inner.push(app.build_app_config(&app_name.to_ascii_lowercase())?);
    }

    Ok((
      proxy_config,
      AppConfigList {
        inner,
        // Default backend application for plaintext HTTP requests.
        default_app: self.default_app.clone().map(|v| v.to_ascii_lowercase()),
      },
    ))
  }
}
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// HTTP/3 protocol options for server configuration.
///
/// # Fields
/// - `alt_svc_max_age`: Optional max age for Alt-Svc header.
/// - `request_max_body_size`: Optional maximum request body size.
/// - `max_concurrent_connections`: Optional maximum concurrent connections.
/// - `max_concurrent_bidistream`: Optional maximum concurrent bidirectional streams.
/// - `max_concurrent_unistream`: Optional maximum concurrent unidirectional streams.
/// - `max_idle_timeout`: Optional maximum idle timeout in milliseconds.
pub struct Http3Option {
pub alt_svc_max_age: Option<u32>,
pub request_max_body_size: Option<usize>,

View file

@ -1,9 +1,12 @@
/// Default IPv4 listen addresses for the server.
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
/// Default IPv6 listen addresses for the server.
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
/// Delay in seconds before reloading the configuration after changes.
pub const CONFIG_WATCH_DELAY_SECS: u32 = 15;
#[cfg(feature = "cache")]
// Cache directory
/// Directory path for cache storage (enabled with "cache" feature).
pub const CACHE_DIR: &str = "./cache";
pub(crate) const ACCESS_LOG_FILE: &str = "access.log";

View file

@ -1,2 +1,2 @@
#[allow(unused)]
pub use anyhow::{anyhow, bail, ensure, Context};
pub use anyhow::{Context, anyhow, bail, ensure};

View file

@ -1,126 +1,106 @@
use crate::constants::{ACCESS_LOG_FILE, SYSTEM_LOG_FILE};
use rpxy_lib::log_event_names;
use std::str::FromStr;
use tracing_subscriber::{fmt, prelude::*};
use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};
#[allow(unused)]
pub use tracing::{debug, error, info, warn};
/// Initialize the logger with the RUST_LOG environment variable.
pub fn init_logger(log_dir_path: Option<&str>) {
let level_string = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string());
let level = tracing::Level::from_str(level_string.as_str()).unwrap_or(tracing::Level::INFO);
let level = std::env::var("RUST_LOG")
.ok()
.and_then(|s| tracing::Level::from_str(&s).ok())
.unwrap_or(tracing::Level::INFO);
match log_dir_path {
None => {
// log to stdout
init_stdio_logger(level);
}
Some(log_dir_path) => {
// log to files
println!("Activate logging to files: {log_dir_path}");
init_file_logger(level, log_dir_path);
}
None => init_stdio_logger(level),
Some(path) => init_file_logger(level, path),
}
}
/// file logging TODO:
/// file logging
fn init_file_logger(level: tracing::Level, log_dir_path: &str) {
let log_dir_path = std::path::PathBuf::from(log_dir_path);
// create the directory if it does not exist
if !log_dir_path.exists() {
println!("Directory does not exist, creating: {}", log_dir_path.display());
std::fs::create_dir_all(&log_dir_path).expect("Failed to create log directory");
println!("Activate logging to files: {}", log_dir_path);
let log_dir = std::path::Path::new(log_dir_path);
if !log_dir.exists() {
println!("Directory does not exist, creating: {}", log_dir.display());
std::fs::create_dir_all(log_dir).expect("Failed to create log directory");
}
let access_log_path = log_dir_path.join(ACCESS_LOG_FILE);
let system_log_path = log_dir_path.join(SYSTEM_LOG_FILE);
let access_log_path = log_dir.join(ACCESS_LOG_FILE);
let system_log_path = log_dir.join(SYSTEM_LOG_FILE);
println!("Access log: {}", access_log_path.display());
println!("System and error log: {}", system_log_path.display());
let access_log = open_log_file(&access_log_path);
let system_log = open_log_file(&system_log_path);
let reg = tracing_subscriber::registry();
let access_log_base = fmt::layer()
let access_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(false)
.compact()
.with_ansi(false);
let reg = reg.with(access_log_base.with_writer(access_log).with_filter(AccessLogFilter));
.with_ansi(false)
.with_writer(access_log)
.with_filter(AccessLogFilter);
let system_log_base = fmt::layer()
let system_layer = fmt::layer()
.with_line_number(false)
.with_thread_ids(false)
.with_thread_names(false)
.with_target(false)
.with_level(true) // with level for system log
.with_level(true)
.compact()
.with_ansi(false);
let reg = reg.with(
system_log_base
.with_writer(system_log)
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
(metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.name() != log_event_names::ACCESS_LOG
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
})),
);
.with_ansi(false)
.with_writer(system_log)
.with_filter(filter_fn(move |metadata| {
(is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
}));
reg.init();
tracing_subscriber::registry().with(access_layer).with(system_layer).init();
}
/// stdio logging
fn init_stdio_logger(level: tracing::Level) {
// This limits the logger to emits only this crate with any level above RUST_LOG, for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let stdio_layer = fmt::layer().with_level(true).with_thread_ids(false);
if level <= tracing::Level::INFO {
// in normal deployment environment
let stdio_layer = stdio_layer
.with_target(false)
.compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
(metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::WARN.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
} else {
// debugging
let stdio_layer = stdio_layer
// This limits the logger to emit only this crate with any level above RUST_LOG,
// for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
let base_layer = fmt::layer().with_level(true).with_thread_ids(false);
let debug = level > tracing::Level::INFO;
let filter = filter_fn(move |metadata| {
if debug {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
} else {
(is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
}
});
let stdio_layer = if debug {
base_layer
.with_line_number(true)
.with_target(true)
.with_thread_names(true)
.with_target(true)
.compact()
.with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
(metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.level() <= &level)
|| metadata.level() <= &tracing::Level::INFO.min(level)
}));
tracing_subscriber::registry().with(stdio_layer).init();
.with_filter(filter)
} else {
base_layer.with_target(false).compact().with_filter(filter)
};
tracing_subscriber::registry().with(stdio_layer).init();
}
/// Access log filter
struct AccessLogFilter;
impl<S> tracing_subscriber::layer::Filter<S> for AccessLogFilter {
fn enabled(&self, metadata: &tracing::Metadata<'_>, _: &tracing_subscriber::layer::Context<'_, S>) -> bool {
metadata
.target()
.starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
&& metadata.name().contains(log_event_names::ACCESS_LOG)
&& metadata.level() <= &tracing::Level::INFO
is_cargo_pkg(metadata) && metadata.name().contains(log_event_names::ACCESS_LOG) && metadata.level() <= &tracing::Level::INFO
}
}
@ -130,10 +110,17 @@ fn open_log_file<P>(path: P) -> std::fs::File
where
P: AsRef<std::path::Path>,
{
// crate a file if it does not exist
// create a file if it does not exist
std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(path)
.expect("Failed to open the log file")
}
#[inline]
/// True when the event target belongs to this crate
/// (cargo package name with `-` normalized to `_`).
fn is_cargo_pkg(metadata: &tracing::Metadata<'_>) -> bool {
  metadata
    .target()
    .starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
}

View file

@ -33,10 +33,9 @@ fn main() {
init_logger(parsed_opts.log_dir_path.as_deref());
let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml, String>::new(
let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml, String>::with_delay(
&parsed_opts.config_file_path,
CONFIG_WATCH_DELAY_SECS,
false,
)
.await
.unwrap();
@ -71,6 +70,7 @@ struct RpxyService {
}
impl RpxyService {
/// Create a new RpxyService from config and runtime handle.
async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> {
let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?;
@ -80,7 +80,7 @@ impl RpxyService {
.map(|(s, r)| (Some(Arc::new(s)), Some(r)))
.unwrap_or((None, None));
Ok(RpxyService {
Ok(Self {
runtime_handle: runtime_handle.clone(),
proxy_conf,
app_conf,
@ -255,7 +255,7 @@ async fn rpxy_service(
}
/* ---------- */
_ = config_rx.changed() => {
let Some(new_config_toml) = config_rx.borrow().clone() else {
let Some(new_config_toml) = config_rx.get() else {
error!("Something wrong in config reloader receiver");
return Err(anyhow!("Something wrong in config reloader receiver"));
};

View file

@ -16,18 +16,18 @@ post-quantum = ["rustls-post-quantum"]
http3 = []
[dependencies]
ahash = { version = "0.8.11" }
ahash = { version = "0.8.12" }
tracing = { version = "0.1.41" }
derive_builder = { version = "0.20.2" }
thiserror = { version = "2.0.12" }
hot_reload = { version = "0.1.9" }
hot_reload = { version = "0.2.0" }
async-trait = { version = "0.1.88" }
rustls = { version = "0.23.26", default-features = false, features = [
rustls = { version = "0.23.28", default-features = false, features = [
"std",
"aws_lc_rs",
] }
rustls-pemfile = { version = "2.2.0" }
rustls-webpki = { version = "0.103.1", default-features = false, features = [
rustls-webpki = { version = "0.103.3", default-features = false, features = [
"std",
"aws-lc-rs",
] }
@ -35,7 +35,7 @@ rustls-post-quantum = { version = "0.2.2", optional = true }
x509-parser = { version = "0.17.0" }
[dev-dependencies]
tokio = { version = "1.44.2", default-features = false, features = [
tokio = { version = "1.46.1", default-features = false, features = [
"rt-multi-thread",
"macros",
] }

View file

@ -75,9 +75,7 @@ impl SingleServerCertsKeys {
/* ------------------------------------------------ */
/// Parse the client CA certificates and return a hashmap of pairs of a subject key identifier (key) and a trust anchor (value)
pub fn rustls_client_certs_trust_anchors(&self) -> Result<TrustAnchors, RpxyCertError> {
let Some(certs) = self.client_ca_certs.as_ref() else {
return Err(RpxyCertError::NoClientCert);
};
let certs = self.client_ca_certs.as_ref().ok_or(RpxyCertError::NoClientCert)?;
let certs = certs.iter().map(|c| Certificate::from(c.to_vec())).collect::<Vec<_>>();
let trust_anchors = certs

View file

@ -80,77 +80,70 @@ fn read_certs_and_keys(
) -> Result<SingleServerCertsKeys, RpxyCertError> {
debug!("Read TLS server certificates and private key");
// ------------------------
// certificates
let raw_certs = {
let mut reader = BufReader::new(File::open(cert_path).map_err(|e| {
let mut reader = BufReader::new(File::open(cert_path).map_err(|e| {
io::Error::new(
e.kind(),
format!("Unable to load the certificates [{}]: {e}", cert_path.display()),
)
})?);
let raw_certs = rustls_pemfile::certs(&mut reader)
.collect::<Result<Vec<_>, _>>()
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the certificates"))?;
// ------------------------
// private keys
let mut encoded_keys = vec![];
File::open(cert_key_path)
.map_err(|e| {
io::Error::new(
e.kind(),
format!("Unable to load the certificates [{}]: {e}", cert_path.display()),
format!("Unable to load the certificate keys [{}]: {e}", cert_key_path.display()),
)
})?);
rustls_pemfile::certs(&mut reader)
.collect::<Result<Vec<_>, _>>()
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the certificates"))?
};
// private keys
let raw_cert_keys = {
let encoded_keys = {
let mut encoded_keys = vec![];
File::open(cert_key_path)
.map_err(|e| {
io::Error::new(
e.kind(),
format!("Unable to load the certificate keys [{}]: {e}", cert_key_path.display()),
)
})?
.read_to_end(&mut encoded_keys)?;
encoded_keys
};
let mut reader = Cursor::new(encoded_keys);
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader)
.map(|v| v.map(rustls::pki_types::PrivateKeyDer::Pkcs8))
.collect::<Result<Vec<_>, _>>()
.map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse the certificates private keys (PKCS8)",
)
})?;
reader.set_position(0);
let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader)
.map(|v| v.map(rustls::pki_types::PrivateKeyDer::Pkcs1))
.collect::<Result<Vec<_>, _>>()?;
let mut keys = pkcs8_keys;
keys.append(&mut rsa_keys);
if keys.is_empty() {
return Err(RpxyCertError::IoError(io::Error::new(
})?
.read_to_end(&mut encoded_keys)?;
let mut reader = Cursor::new(encoded_keys);
let pkcs8_keys = rustls_pemfile::pkcs8_private_keys(&mut reader)
.map(|v| v.map(rustls::pki_types::PrivateKeyDer::Pkcs8))
.collect::<Result<Vec<_>, _>>()
.map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
"No private keys found - Make sure that they are in PKCS#8/PEM format",
)));
}
keys
};
"Unable to parse the certificates private keys (PKCS8)",
)
})?;
reader.set_position(0);
let mut rsa_keys = rustls_pemfile::rsa_private_keys(&mut reader)
.map(|v| v.map(rustls::pki_types::PrivateKeyDer::Pkcs1))
.collect::<Result<Vec<_>, _>>()?;
let mut raw_cert_keys = pkcs8_keys;
raw_cert_keys.append(&mut rsa_keys);
if raw_cert_keys.is_empty() {
return Err(RpxyCertError::IoError(io::Error::new(
io::ErrorKind::InvalidInput,
"No private keys found - Make sure that they are in PKCS#8/PEM format",
)));
}
// ------------------------
// client ca certificates
let client_ca_certs = if let Some(path) = client_ca_cert_path {
debug!("Read CA certificates for client authentication");
// Reads client certificate and returns client
let certs = {
let mut reader = BufReader::new(File::open(path).map_err(|e| {
let client_ca_certs = client_ca_cert_path
.map(|path| {
debug!("Read CA certificates for client authentication");
// Reads client certificate and returns client
let inner = File::open(path).map_err(|e| {
io::Error::new(
e.kind(),
format!("Unable to load the client certificates [{}]: {e}", path.display()),
)
})?);
})?;
let mut reader = BufReader::new(inner);
rustls_pemfile::certs(&mut reader)
.collect::<Result<Vec<_>, _>>()
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the client certificates"))?
};
Some(certs)
} else {
None
};
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "Unable to parse the client certificates"))
})
.transpose()?;
Ok(SingleServerCertsKeys::new(
&raw_certs,

View file

@ -27,8 +27,6 @@ pub use crate::{
// Constants
/// Default delay in seconds to watch certificates
const DEFAULT_CERTS_WATCH_DELAY_SECS: u32 = 60;
/// Load certificates only when updated
const LOAD_CERTS_ONLY_WHEN_UPDATED: bool = true;
/// Result type inner of certificate reloader service
type ReloaderServiceResultInner = (
@ -62,6 +60,7 @@ where
let certs_watch_period = certs_watch_period.unwrap_or(DEFAULT_CERTS_WATCH_DELAY_SECS);
let (cert_reloader_service, cert_reloader_rx) =
ReloaderService::<CryptoReloader, ServerCryptoBase>::new(&source, certs_watch_period, !LOAD_CERTS_ONLY_WHEN_UPDATED).await?;
ReloaderService::<CryptoReloader, ServerCryptoBase>::with_delay(&source, certs_watch_period).await?;
Ok((cert_reloader_service, cert_reloader_rx))
}

View file

@ -1,9 +1,9 @@
use crate::{certs::SingleServerCertsKeys, error::*, log::*};
use ahash::HashMap;
use rustls::{
RootCertStore, ServerConfig,
crypto::CryptoProvider,
server::{ResolvesServerCertUsingSni, WebPkiClientVerifier},
RootCertStore, ServerConfig,
};
use std::sync::Arc;

View file

@ -37,11 +37,11 @@ post-quantum = [
[dependencies]
rand = "0.9.1"
ahash = "0.8.11"
ahash = "0.8.12"
bytes = "1.10.1"
derive_builder = "0.20.2"
futures = { version = "0.3.31", features = ["alloc", "async-await"] }
tokio = { version = "1.44.2", default-features = false, features = [
tokio = { version = "1.46.1", default-features = false, features = [
"net",
"rt-multi-thread",
"time",
@ -61,7 +61,7 @@ thiserror = "2.0.12"
http = "1.3.1"
http-body-util = "0.1.3"
hyper = { version = "1.6.0", default-features = false }
hyper-util = { version = "0.1.11", features = ["full"] }
hyper-util = { version = "0.1.14", features = ["full"] }
futures-util = { version = "0.3.31", default-features = false }
futures-channel = { version = "0.3.31", default-features = false }
@ -70,7 +70,7 @@ hyper-tls = { version = "0.6.0", features = [
"alpn",
"vendored",
], optional = true }
hyper-rustls = { version = "0.27.5", default-features = false, features = [
hyper-rustls = { version = "0.27.7", default-features = false, features = [
"aws-lc-rs",
"http1",
"http2",
@ -79,8 +79,8 @@ hyper-rustls = { version = "0.27.5", default-features = false, features = [
# tls and cert management for server
rpxy-certs = { path = "../rpxy-certs/", default-features = false }
hot_reload = "0.1.9"
rustls = { version = "0.23.26", default-features = false }
hot_reload = "0.2.0"
rustls = { version = "0.23.28", default-features = false }
rustls-post-quantum = { version = "0.2.2", optional = true }
tokio-rustls = { version = "0.26.2", features = ["early-data"] }
@ -91,25 +91,25 @@ rpxy-acme = { path = "../rpxy-acme/", default-features = false, optional = true
tracing = { version = "0.1.41" }
# http/3
quinn = { version = "0.11.7", optional = true }
h3 = { version = "0.0.7", features = ["tracing"], optional = true }
h3-quinn = { version = "0.0.9", optional = true }
s2n-quic = { version = "1.57.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [
quinn = { version = "0.11.8", optional = true }
h3 = { version = "0.0.8", features = ["tracing"], optional = true }
h3-quinn = { version = "0.0.10", optional = true }
s2n-quic = { version = "1.61.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [
"provider-tls-rustls",
], optional = true }
s2n-quic-core = { version = "0.57.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true }
s2n-quic-rustls = { version = "0.57.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true }
s2n-quic-core = { version = "0.61.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true }
s2n-quic-rustls = { version = "0.61.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true }
s2n-quic-h3 = { path = "../submodules/s2n-quic/quic/s2n-quic-h3/", features = [
"tracing",
], optional = true }
##########
# for UDP socket with SO_REUSEADDR when h3 with quinn
socket2 = { version = "0.5.9", features = ["all"], optional = true }
socket2 = { version = "0.5.10", features = ["all"], optional = true }
# cache
http-cache-semantics = { path = "../submodules/rusty-http-cache-semantics", default-features = false, optional = true }
lru = { version = "0.14.0", optional = true }
sha2 = { version = "0.10.8", default-features = false, optional = true }
lru = { version = "0.16.0", optional = true }
sha2 = { version = "0.10.9", default-features = false, optional = true }
# cookie handling for sticky cookie
chrono = { version = "0.4.41", default-features = false, features = [

View file

@ -1,8 +1,8 @@
use crate::{
AppConfig, AppConfigList,
error::*,
log::*,
name_exp::{ByteName, ServerName},
AppConfig, AppConfigList,
};
use ahash::HashMap;
use derive_builder::Builder;
@ -26,6 +26,7 @@ pub struct BackendApp {
pub https_redirection: Option<bool>,
/// tls settings: mutual TLS is enabled
#[builder(default)]
#[allow(unused)]
pub mutual_tls: Option<bool>,
}
impl<'a> BackendAppBuilder {
@ -84,24 +85,27 @@ impl TryFrom<&AppConfigList> for BackendAppManager {
}
// default backend application for plaintext http requests
if let Some(default_app_name) = &config_list.default_app {
let default_server_name = manager
.apps
.iter()
.filter(|(_k, v)| &v.app_name == default_app_name)
.map(|(_, v)| v.server_name.clone())
.collect::<Vec<_>>();
let Some(default_app_name) = &config_list.default_app else {
return Ok(manager);
};
if !default_server_name.is_empty() {
info!(
"Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
&default_app_name,
(&default_server_name[0]).try_into().unwrap_or_else(|_| "".to_string())
);
let default_server_name = manager
.apps
.iter()
.filter(|(_k, v)| &v.app_name == default_app_name)
.map(|(_, v)| v.server_name.clone())
.collect::<Vec<_>>();
manager.default_server_name = Some(default_server_name[0].clone());
}
if !default_server_name.is_empty() {
info!(
"Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
&default_app_name,
(&default_server_name[0]).try_into().unwrap_or_else(|_| "".to_string())
);
manager.default_server_name = Some(default_server_name[0].clone());
}
Ok(manager)
}
}

View file

@ -7,8 +7,8 @@ pub use super::{
use derive_builder::Builder;
use rand::Rng;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
atomic::{AtomicUsize, Ordering},
};
/// Constants to specify a load balance option
@ -131,6 +131,4 @@ impl LoadBalance {
pub struct LoadBalanceContext {
#[cfg(feature = "sticky-cookie")]
pub sticky_cookie: StickyCookie,
#[cfg(not(feature = "sticky-cookie"))]
pub sticky_cookie: (),
}

View file

@ -1,7 +1,7 @@
use super::{
Upstream,
load_balance_main::{LoadBalanceContext, LoadBalanceWithPointer, PointerToUpstream},
sticky_cookie::StickyCookieConfig,
Upstream,
};
use crate::{constants::STICKY_COOKIE_NAME, log::*};
use ahash::HashMap;
@ -9,8 +9,8 @@ use derive_builder::Builder;
use std::{
borrow::Cow,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
atomic::{AtomicUsize, Ordering},
},
};
@ -112,13 +112,16 @@ impl LoadBalanceWithPointer for LoadBalanceSticky {
}
Some(context) => {
let server_id = &context.sticky_cookie.value.value;
if let Some(server_index) = self.get_server_index_from_id(server_id) {
debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
server_index
} else {
debug!("Invalid sticky cookie: id={}", server_id);
self.simple_increment_ptr()
}
self.get_server_index_from_id(server_id).map_or_else(
|| {
debug!("Invalid sticky cookie: id={}", server_id);
self.simple_increment_ptr()
},
|server_index| {
debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
server_index
},
)
}
};

View file

@ -4,11 +4,12 @@ mod load_balance_sticky;
#[cfg(feature = "sticky-cookie")]
mod sticky_cookie;
#[cfg(feature = "sticky-cookie")]
use super::upstream::Upstream;
use thiserror::Error;
pub use load_balance_main::{
load_balance_options, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder,
LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options,
};
#[cfg(feature = "sticky-cookie")]
pub use load_balance_sticky::LoadBalanceStickyBuilder;
@ -16,6 +17,7 @@ pub use load_balance_sticky::LoadBalanceStickyBuilder;
pub use sticky_cookie::{StickyCookie, StickyCookieValue};
/// Result type for load balancing
#[cfg(feature = "sticky-cookie")]
type LoadBalanceResult<T> = std::result::Result<T, LoadBalanceError>;
/// Describes things that can go wrong in the Load Balance
#[derive(Debug, Error)]

View file

@ -91,12 +91,7 @@ impl<'a> StickyCookieBuilder {
self
}
/// Set the meta information of sticky cookie
pub fn info(
&mut self,
domain: impl Into<Cow<'a, str>>,
path: impl Into<Cow<'a, str>>,
duration_secs: i64,
) -> &mut Self {
pub fn info(&mut self, domain: impl Into<Cow<'a, str>>, path: impl Into<Cow<'a, str>>, duration_secs: i64) -> &mut Self {
let info = StickyCookieInfoBuilder::default()
.domain(domain)
.path(path)

View file

@ -1,7 +1,7 @@
#[cfg(feature = "sticky-cookie")]
use super::load_balance::LoadBalanceStickyBuilder;
use super::load_balance::{
load_balance_options as lb_opts, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder,
LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options as lb_opts,
};
// use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption};
use super::upstream_opts::UpstreamOption;
@ -13,7 +13,7 @@ use crate::{
};
use ahash::{HashMap, HashSet};
#[cfg(feature = "sticky-cookie")]
use base64::{engine::general_purpose, Engine as _};
use base64::{Engine as _, engine::general_purpose};
use derive_builder::Builder;
#[cfg(feature = "sticky-cookie")]
use sha2::{Digest, Sha256};
@ -72,27 +72,22 @@ impl PathManager {
.inner
.iter()
.filter(|(route_bytes, _)| {
match path_name.starts_with(route_bytes) {
true => {
route_bytes.len() == 1 // route = '/', i.e., default
|| match path_name.get(route_bytes.len()) {
None => true, // exact case
Some(p) => p == &b'/', // sub-path case
}
}
_ => false,
path_name.starts_with(route_bytes) && {
route_bytes.len() == 1 // route = '/', i.e., default
|| path_name.get(route_bytes.len()).map_or(
true, // exact case
|p| p == &b'/'
) // sub-path case
}
})
.max_by_key(|(route_bytes, _)| route_bytes.len());
if let Some((path, u)) = matched_upstream {
matched_upstream.map(|(path, u)| {
debug!(
"Found upstream: {:?}",
path.try_into().unwrap_or_else(|_| "<none>".to_string())
);
Some(u)
} else {
None
}
u
})
}
}
@ -211,14 +206,15 @@ impl UpstreamCandidatesBuilder {
}
/// Set the activated upstream options defined in [[UpstreamOption]]
pub fn options(&mut self, v: &Option<Vec<String>>) -> &mut Self {
let opts = if let Some(opts) = v {
opts
.iter()
.filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
.collect::<HashSet<UpstreamOption>>()
} else {
Default::default()
};
let opts = v.as_ref().map_or_else(
|| Default::default(),
|opts| {
opts
.iter()
.filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
.collect::<HashSet<UpstreamOption>>()
},
);
self.options = Some(opts);
self
}

View file

@ -13,6 +13,8 @@ pub enum UpstreamOption {
ForceHttp11Upstream,
/// Force HTTP/2 upstream
ForceHttp2Upstream,
/// Add RFC 7239 Forwarded header
ForwardedHeader,
// TODO: Add more options for header override
}
impl TryFrom<&str> for UpstreamOption {
@ -24,6 +26,7 @@ impl TryFrom<&str> for UpstreamOption {
"upgrade_insecure_requests" => Ok(Self::UpgradeInsecureRequests),
"force_http11_upstream" => Ok(Self::ForceHttp11Upstream),
"force_http2_upstream" => Ok(Self::ForceHttp2Upstream),
"forwarded_header" => Ok(Self::ForwardedHeader),
_ => Err(RpxyError::UnsupportedUpstreamOption),
}
}

View file

@ -1,6 +1,6 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
atomic::{AtomicUsize, Ordering},
};
#[derive(Debug, Clone, Default)]

View file

@ -37,8 +37,11 @@ pub enum RpxyError {
// http/3 errors
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[error("H3 error: {0}")]
H3Error(#[from] h3::Error),
#[error("h3 connection error: {0}")]
H3ConnectionError(#[from] h3::error::ConnectionError),
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[error("h3 connection error: {0}")]
H3StreamError(#[from] h3::error::StreamError),
// #[cfg(feature = "http3-s2n")]
// #[error("H3 error: {0}")]
// H3Error(#[from] s2n_quic_h3::h3::Error),

View file

@ -52,21 +52,30 @@ impl RpxyCache {
if !globals.proxy_config.cache_enabled {
return None;
}
let cache_dir = globals.proxy_config.cache_dir.as_ref().unwrap();
let cache_dir = match globals.proxy_config.cache_dir.as_ref() {
Some(dir) => dir,
None => {
warn!("Cache directory not set in proxy config");
return None;
}
};
let file_store = FileStore::new(&globals.runtime_handle).await;
let inner = LruCacheManager::new(globals.proxy_config.cache_max_entry);
let max_each_size = globals.proxy_config.cache_max_each_size;
let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory;
if max_each_size < max_each_size_on_memory {
warn!("Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache");
warn!("Maximum size of on-memory cache per entry must be smaller than or equal to the maximum of each file cache");
max_each_size_on_memory = max_each_size;
}
if let Err(e) = fs::remove_dir_all(cache_dir).await {
warn!("Failed to clean up the cache dir: {e}");
};
fs::create_dir_all(&cache_dir).await.unwrap();
}
if let Err(e) = fs::create_dir_all(&cache_dir).await {
error!("Failed to create cache dir: {e}");
return None;
}
Some(Self {
file_store,
@ -185,9 +194,7 @@ impl RpxyCache {
let cache_key = derive_cache_key_from_uri(req.uri());
// First check cache chance
let Ok(Some(cached_object)) = self.inner.get(&cache_key) else {
return None;
};
let cached_object = self.inner.get(&cache_key).ok()??;
// Secondly check the cache freshness as an HTTP message
let now = SystemTime::now();
@ -258,20 +265,19 @@ impl FileStore {
let inner = self.inner.read().await;
inner.cnt
}
/// Create a temporary file cache
/// Create a temporary file cache, returns error if file cannot be created or written
async fn create(&mut self, cache_object: &CacheObject, body_bytes: &Bytes) -> CacheResult<()> {
let mut inner = self.inner.write().await;
inner.create(cache_object, body_bytes).await
}
/// Evict a temporary file cache
/// Evict a temporary file cache, logs warning if removal fails
async fn evict(&self, path: impl AsRef<Path>) {
// Acquire the write lock
let mut inner = self.inner.write().await;
if let Err(e) = inner.remove(path).await {
warn!("Eviction failed during file object removal: {:?}", e);
};
}
}
/// Read a temporary file cache
/// Read a temporary file cache, returns error if file cannot be opened or hash mismatches
async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
let inner = self.inner.read().await;
inner.read(path, hash).await
@ -307,15 +313,15 @@ impl FileStoreInner {
return Err(CacheError::InvalidCacheTarget);
}
};
let Ok(mut file) = File::create(&cache_filepath).await else {
return Err(CacheError::FailedToCreateFileCache);
};
let mut file = File::create(&cache_filepath)
.await
.map_err(|_| CacheError::FailedToCreateFileCache)?;
let mut bytes_clone = body_bytes.clone();
while bytes_clone.has_remaining() {
if let Err(e) = file.write_buf(&mut bytes_clone).await {
file.write_buf(&mut bytes_clone).await.map_err(|e| {
error!("Failed to write file cache: {e}");
return Err(CacheError::FailedToWriteFileCache);
};
CacheError::FailedToWriteFileCache
})?;
}
self.cnt += 1;
Ok(())
@ -437,11 +443,14 @@ impl LruCacheManager {
self.cnt.load(Ordering::Relaxed)
}
/// Evict an entry
/// Evict an entry from the LRU cache, logs error if mutex cannot be acquired
fn evict(&self, cache_key: &str) -> Option<(String, CacheObject)> {
let Ok(mut lock) = self.inner.lock() else {
error!("Mutex can't be locked to evict a cache entry");
return None;
let mut lock = match self.inner.lock() {
Ok(lock) => lock,
Err(_) => {
error!("Mutex can't be locked to evict a cache entry");
return None;
}
};
let res = lock.pop_entry(cache_key);
// This may be inconsistent with the actual number of entries
@ -449,24 +458,24 @@ impl LruCacheManager {
res
}
/// Push an entry
/// Push an entry into the LRU cache, returns error if mutex cannot be acquired
fn push(&self, cache_key: &str, cache_object: &CacheObject) -> CacheResult<Option<(String, CacheObject)>> {
let Ok(mut lock) = self.inner.lock() else {
let mut lock = self.inner.lock().map_err(|_| {
error!("Failed to acquire mutex lock for writing cache entry");
return Err(CacheError::FailedToAcquiredMutexLockForCache);
};
CacheError::FailedToAcquiredMutexLockForCache
})?;
let res = Ok(lock.push(cache_key.to_string(), cache_object.clone()));
// This may be inconsistent with the actual number of entries
self.cnt.store(lock.len(), Ordering::Relaxed);
res
}
/// Get an entry
/// Get an entry from the LRU cache, returns error if mutex cannot be acquired
fn get(&self, cache_key: &str) -> CacheResult<Option<CacheObject>> {
let Ok(mut lock) = self.inner.lock() else {
let mut lock = self.inner.lock().map_err(|_| {
error!("Mutex can't be locked for checking cache entry");
return Err(CacheError::FailedToAcquiredMutexLockForCheck);
};
CacheError::FailedToAcquiredMutexLockForCheck
})?;
let Some(cached_object) = lock.get(cache_key) else {
return Ok(None);
};

View file

@ -2,4 +2,4 @@ mod cache_error;
mod cache_main;
pub use cache_error::CacheError;
pub(crate) use cache_main::{get_policy_if_cacheable, RpxyCache};
pub(crate) use cache_main::{RpxyCache, get_policy_if_cacheable};

View file

@ -9,13 +9,13 @@ use async_trait::async_trait;
use http::{Request, Response, Version};
use hyper::body::{Body, Incoming};
use hyper_util::client::legacy::{
connect::{Connect, HttpConnector},
Client,
connect::{Connect, HttpConnector},
};
use std::sync::Arc;
#[cfg(feature = "cache")]
use super::cache::{get_policy_if_cacheable, RpxyCache};
use super::cache::{RpxyCache, get_policy_if_cacheable};
#[async_trait]
/// Definition of the forwarder that simply forward requests from downstream client to upstream app servers.
@ -126,9 +126,9 @@ where
warn!(
"
--------------------------------------------------------------------------------------------------
Request forwarder is working without TLS support!!!
We recommend to use this just for testing.
Please enable native-tls-backend or rustls-backend feature to enable TLS support.
Request forwarder is working without TLS support!
This mode is intended for testing only.
Enable 'native-tls-backend' or 'rustls-backend' feature for TLS support.
--------------------------------------------------------------------------------------------------"
);
let executor = LocalExecutor::new(_globals.runtime_handle.clone());
@ -159,7 +159,7 @@ where
/// Build forwarder
pub async fn try_new(_globals: &Arc<Globals>) -> RpxyResult<Self> {
// build hyper client with hyper-tls
info!("Native TLS support is enabled for the connection to backend applications");
info!("Native TLS support enabled for backend connections (native-tls)");
let executor = LocalExecutor::new(_globals.runtime_handle.clone());
let try_build_connector = |alpns: &[&str]| {
@ -209,14 +209,14 @@ where
#[cfg(feature = "webpki-roots")]
let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots();
#[cfg(feature = "webpki-roots")]
info!("Mozilla WebPKI root certs with rustls is used for the connection to backend applications");
info!("Rustls backend: Mozilla WebPKI root certs used for backend connections");
#[cfg(not(feature = "webpki-roots"))]
let builder = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
#[cfg(not(feature = "webpki-roots"))]
let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
#[cfg(not(feature = "webpki-roots"))]
info!("Platform verifier with rustls is used for the connection to backend applications");
info!("Rustls backend: Platform verifier used for backend connections");
let mut http = HttpConnector::new();
http.enforce_http(false);

View file

@ -1,7 +1,7 @@
use super::watch;
use crate::error::*;
use futures_channel::{mpsc, oneshot};
use futures_util::{stream::FusedStream, Future, Stream};
use futures_util::{Future, Stream, stream::FusedStream};
use http::HeaderMap;
use hyper::body::{Body, Bytes, Frame, SizeHint};
use std::{

View file

@ -1,7 +1,7 @@
use super::body::IncomingLike;
use crate::error::RpxyError;
use futures::channel::mpsc::UnboundedReceiver;
use http_body_util::{combinators, BodyExt, Empty, Full, StreamBody};
use http_body_util::{BodyExt, Empty, Full, StreamBody, combinators};
use hyper::body::{Body, Bytes, Frame, Incoming};
use std::pin::Pin;

View file

@ -12,5 +12,5 @@ pub(crate) mod rt {
#[allow(unused)]
pub(crate) mod body {
pub(crate) use super::body_incoming_like::IncomingLike;
pub(crate) use super::body_type::{empty, full, BoxBody, RequestBody, ResponseBody, UnboundedStreamBody};
pub(crate) use super::body_type::{BoxBody, RequestBody, ResponseBody, UnboundedStreamBody, empty, full};
}

View file

@ -7,8 +7,8 @@
use futures_util::task::AtomicWaker;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
atomic::{AtomicUsize, Ordering},
};
use std::task;

View file

@ -180,9 +180,5 @@ pub async fn entrypoint(
}
});
// returns the first error as the representative error
if let Some(e) = errs.next() {
return Err(e);
}
Ok(())
errs.next().map_or(Ok(()), |e| Err(e))
}

View file

@ -44,10 +44,7 @@ mod tests {
}
#[test]
fn ipv6_to_canonical() {
let socket = SocketAddr::new(
IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)),
8080,
);
let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)), 8080);
assert_eq!(socket.to_canonical(), socket);
}
#[test]

View file

@ -24,10 +24,7 @@ use tokio::io::copy_bidirectional;
#[derive(Debug)]
/// Context object to handle sticky cookies at HTTP message handler
pub(super) struct HandlerContext {
#[cfg(feature = "sticky-cookie")]
pub(super) context_lb: Option<LoadBalanceContext>,
#[cfg(not(feature = "sticky-cookie"))]
pub(super) context_lb: Option<()>,
}
#[derive(Clone, Builder)]
@ -107,9 +104,11 @@ where
let backend_app = match self.app_manager.apps.get(&server_name) {
Some(backend_app) => backend_app,
None => {
let Some(default_server_name) = &self.app_manager.default_server_name else {
return Err(HttpError::NoMatchingBackendApp);
};
let default_server_name = self
.app_manager
.default_server_name
.as_ref()
.ok_or(HttpError::NoMatchingBackendApp)?;
debug!("Serving by default app");
self.app_manager.apps.get(default_server_name).unwrap()
}
@ -131,9 +130,7 @@ where
// Find reverse proxy for given path and choose one of upstream host
// Longest prefix match
let path = req.uri().path();
let Some(upstream_candidates) = backend_app.path_manager.get(path) else {
return Err(HttpError::NoUpstreamCandidates);
};
let upstream_candidates = backend_app.path_manager.get(path).ok_or(HttpError::NoUpstreamCandidates)?;
// Upgrade in request header
let upgrade_in_request = extract_upgrade(req.headers());
@ -147,19 +144,17 @@ where
let req_on_upgrade = hyper::upgrade::on(&mut req);
// Build request from destination information
let _context = match self.generate_request_forwarded(
&client_addr,
&listen_addr,
&mut req,
&upgrade_in_request,
upstream_candidates,
tls_enabled,
) {
Err(e) => {
return Err(HttpError::FailedToGenerateUpstreamRequest(e.to_string()));
}
Ok(v) => v,
};
let _context = self
.generate_request_forwarded(
&client_addr,
&listen_addr,
&mut req,
&upgrade_in_request,
upstream_candidates,
tls_enabled,
)
.map_err(|e| HttpError::FailedToGenerateUpstreamRequest(e.to_string()))?;
debug!(
"Request to be forwarded: [uri {}, method: {}, version {:?}, headers {:?}]",
req.uri(),
@ -173,12 +168,12 @@ where
//////////////
// Forward request to a chosen backend
let mut res_backend = match self.forwarder.request(req).await {
Ok(v) => v,
Err(e) => {
return Err(HttpError::FailedToGetResponseFromBackend(e.to_string()));
}
};
let mut res_backend = self
.forwarder
.request(req)
.await
.map_err(|e| HttpError::FailedToGetResponseFromBackend(e.to_string()))?;
//////////////
// Process reverse proxy context generated during the forwarding request generation.
#[cfg(feature = "sticky-cookie")]
@ -191,9 +186,9 @@ where
if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS {
// Generate response to client
if let Err(e) = self.generate_response_forwarded(&mut res_backend, backend_app) {
return Err(HttpError::FailedToGenerateDownstreamResponse(e.to_string()));
}
self
.generate_response_forwarded(&mut res_backend, backend_app)
.map_err(|e| HttpError::FailedToGenerateDownstreamResponse(e.to_string()))?;
return Ok(res_backend);
}

View file

@ -70,22 +70,24 @@ where
// Add te: trailer if contained in original request
let contains_te_trailers = {
if let Some(te) = req.headers().get(header::TE) {
te.as_bytes()
.split(|v| v == &b',' || v == &b' ')
.any(|x| x == "trailers".as_bytes())
} else {
false
}
req
.headers()
.get(header::TE)
.map(|te| {
te.as_bytes()
.split(|v| v == &b',' || v == &b' ')
.any(|x| x == "trailers".as_bytes())
})
.unwrap_or(false)
};
let original_uri = req.uri().to_string();
let original_uri = req.uri().clone();
let headers = req.headers_mut();
// delete headers specified in header.connection
remove_connection_header(headers);
// delete hop headers including header.connection
remove_hop_header(headers);
// X-Forwarded-For
// X-Forwarded-For (and Forwarded if exists)
add_forwarding_header(headers, client_addr, listen_addr, tls_enabled, &original_uri)?;
// Add te: trailer if te_trailer
@ -124,8 +126,8 @@ where
// apply upstream-specific headers given in upstream_option
let headers = req.headers_mut();
// apply upstream options to header
apply_upstream_options_to_header(headers, &upstream_chosen.uri, upstream_candidates)?;
// apply upstream options to header, after X-Forwarded-For is added
apply_upstream_options_to_header(headers, &upstream_chosen.uri, upstream_candidates, &original_uri)?;
// update uri in request
ensure!(
@ -136,11 +138,7 @@ where
let new_uri = Uri::builder()
.scheme(upstream_chosen.uri.scheme().unwrap().as_str())
.authority(upstream_chosen.uri.authority().unwrap().as_str());
let org_pq = match req.uri().path_and_query() {
Some(pq) => pq.to_string(),
None => "/".to_string(),
}
.into_bytes();
let org_pq = req.uri().path_and_query().map(|pq| pq.as_str()).unwrap_or("/").as_bytes();
// replace some parts of path if opt_replace_path is enabled for chosen upstream
let new_pq = match &upstream_candidates.replace_path {
@ -155,7 +153,7 @@ where
new_pq.extend_from_slice(&org_pq[matched_path.len()..]);
new_pq
}
None => org_pq,
None => org_pq.to_vec(),
};
*req.uri_mut() = new_uri.path_and_query(new_pq).build()?;

View file

@ -1,5 +1,5 @@
use super::canonical_address::ToCanonical;
use crate::log::*;
use crate::{log::*, message_handler::utils_headers};
use http::header;
use std::net::SocketAddr;
@ -12,10 +12,11 @@ pub struct HttpMessageLog {
pub host: String,
pub p_and_q: String,
pub version: http::Version,
pub uri_scheme: String,
pub uri_host: String,
pub scheme: String,
pub path: String,
pub ua: String,
pub xff: String,
pub forwarded: String,
pub status: String,
pub upstream: String,
}
@ -29,17 +30,21 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
.map_or_else(|| "", |s| s.to_str().unwrap_or(""))
.to_string()
};
let host =
utils_headers::host_from_uri_or_host_header(req.uri(), req.headers().get(header::HOST).cloned()).unwrap_or_default();
Self {
// tls_server_name: "".to_string(),
client_addr: "".to_string(),
method: req.method().to_string(),
host: header_mapper(header::HOST),
host,
p_and_q: req.uri().path_and_query().map_or_else(|| "", |v| v.as_str()).to_string(),
version: req.version(),
uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(),
uri_host: req.uri().host().unwrap_or("").to_string(),
scheme: req.uri().scheme_str().unwrap_or("").to_string(),
path: req.uri().path().to_string(),
ua: header_mapper(header::USER_AGENT),
xff: header_mapper(header::HeaderName::from_static("x-forwarded-for")),
forwarded: header_mapper(header::FORWARDED),
status: "".to_string(),
upstream: "".to_string(),
}
@ -48,26 +53,29 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
impl std::fmt::Display for HttpMessageLog {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let forwarded_part = if !self.forwarded.is_empty() {
format!(" \"{}\"", self.forwarded)
} else {
"".to_string()
};
write!(
f,
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
if !self.host.is_empty() {
self.host.as_str()
} else {
self.uri_host.as_str()
},
"{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\"{} \"{}\"",
self.host,
self.client_addr,
self.method,
self.p_and_q,
self.version,
self.status,
if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
format!("{}://{}", self.uri_scheme, self.uri_host)
if !self.scheme.is_empty() && !self.host.is_empty() {
format!("{}://{}{}", self.scheme, self.host, self.path)
} else {
"".to_string()
self.path.clone()
},
self.ua,
self.xff,
forwarded_part,
self.upstream
)
}
@ -102,3 +110,56 @@ impl HttpMessageLog {
);
}
}
#[cfg(test)]
mod tests {
  use super::*;
  use http::{Method, Version};

  /// Build a representative log entry for the Display tests.
  /// `forwarded` is the raw RFC 7239 `Forwarded` header value ("" = header absent).
  fn sample_log(forwarded: &str) -> HttpMessageLog {
    HttpMessageLog {
      client_addr: "192.168.1.1:8080".to_string(),
      method: Method::GET.to_string(),
      host: "example.com".to_string(),
      p_and_q: "/path?query=value".to_string(),
      version: Version::HTTP_11,
      scheme: "https".to_string(),
      path: "/path".to_string(),
      ua: "Mozilla/5.0".to_string(),
      xff: "10.0.0.1".to_string(),
      forwarded: forwarded.to_string(),
      status: "200".to_string(),
      upstream: "https://backend.example.com".to_string(),
    }
  }

  #[test]
  fn test_log_format_without_forwarded() {
    // With no Forwarded value, the log line must not contain an empty quoted segment,
    // and the xff field should be followed directly by the upstream field.
    let line = sample_log("").to_string();
    assert!(!line.contains(" \"\""));
    assert!(line.contains("\"Mozilla/5.0\", \"10.0.0.1\" \"https://backend.example.com\""));
  }

  #[test]
  fn test_log_format_with_forwarded() {
    // A non-empty Forwarded value is rendered as its own quoted segment
    // between the xff field and the upstream field.
    let fwd = "for=192.0.2.60;proto=http;by=203.0.113.43";
    let line = sample_log(fwd).to_string();
    assert!(line.contains(" \"for=192.0.2.60;proto=http;by=203.0.113.43\""));
    assert!(
      line.contains("\"Mozilla/5.0\", \"10.0.0.1\" \"for=192.0.2.60;proto=http;by=203.0.113.43\" \"https://backend.example.com\"")
    );
  }
}

View file

@ -1,7 +1,7 @@
use super::http_result::{HttpError, HttpResult};
use crate::{
error::*,
hyper_ext::body::{empty, ResponseBody},
hyper_ext::body::{ResponseBody, empty},
name_exp::ServerName,
};
use http::{Request, Response, StatusCode, Uri};

View file

@ -3,15 +3,22 @@ use crate::{
backend::{UpstreamCandidates, UpstreamOption},
log::*,
};
use anyhow::{anyhow, ensure, Result};
use anyhow::{Result, anyhow};
use bytes::BufMut;
use http::{header, HeaderMap, HeaderName, HeaderValue, Uri};
use http::{HeaderMap, HeaderName, HeaderValue, Uri, header};
use std::{borrow::Cow, net::SocketAddr};
#[cfg(feature = "sticky-cookie")]
use crate::backend::{LoadBalanceContext, StickyCookie, StickyCookieValue};
// use crate::backend::{UpstreamGroup, UpstreamOption};
const X_FORWARDED_FOR: &str = "x-forwarded-for";
const X_FORWARDED_PROTO: &str = "x-forwarded-proto";
const X_FORWARDED_PORT: &str = "x-forwarded-port";
const X_FORWARDED_SSL: &str = "x-forwarded-ssl";
const X_ORIGINAL_URI: &str = "x-original-uri";
const X_REAL_IP: &str = "x-real-ip";
// ////////////////////////////////////////////////////
// // Functions to manipulate headers
#[cfg(feature = "sticky-cookie")]
@ -35,7 +42,7 @@ pub(super) fn takeout_sticky_cookie_lb_context(
if sticky_cookies.is_empty() {
return Ok(None);
}
ensure!(sticky_cookies.len() == 1, "Invalid cookie: Multiple sticky cookie values");
anyhow::ensure!(sticky_cookies.len() == 1, "Invalid cookie: Multiple sticky cookie values");
let cookies_passed_to_upstream = without_sticky_cookies.join("; ");
let cookie_passed_to_lb = sticky_cookies.first().unwrap();
@ -96,11 +103,13 @@ fn override_host_header(headers: &mut HeaderMap, upstream_base_uri: &Uri) -> Res
}
/// Apply options to request header, which are specified in the configuration
/// This function is called after almost all other headers has been set and updated.
pub(super) fn apply_upstream_options_to_header(
headers: &mut HeaderMap,
upstream_base_uri: &Uri,
// _client_addr: &SocketAddr,
upstream: &UpstreamCandidates,
original_uri: &Uri,
) -> Result<()> {
for opt in upstream.options.iter() {
match opt {
@ -117,6 +126,21 @@ pub(super) fn apply_upstream_options_to_header(
.entry(header::UPGRADE_INSECURE_REQUESTS)
.or_insert(HeaderValue::from_bytes(b"1").unwrap());
}
UpstreamOption::ForwardedHeader => {
// This is called after X-Forwarded-For is added
// Generate RFC 7239 Forwarded header
let tls = upstream_base_uri.scheme_str() == Some("https");
match generate_forwarded_header(headers, tls, original_uri) {
Ok(forwarded_value) => {
add_header_entry_overwrite_if_exist(headers, header::FORWARDED.as_str(), forwarded_value)?;
}
Err(e) => {
// Log warning but don't fail the request if Forwarded generation fails
warn!("Failed to generate Forwarded header: {}", e);
}
}
}
_ => (),
}
}
@ -194,18 +218,45 @@ pub(super) fn make_cookie_single_line(headers: &mut HeaderMap) -> Result<()> {
Ok(())
}
/// Add forwarding headers like `x-forwarded-for`.
/// Add or update forwarding headers like `x-forwarded-for`.
/// If only `forwarded` header exists, it will update `x-forwarded-for` with the proxy chain.
/// If both `x-forwarded-for` and `forwarded` headers exist, it will update `x-forwarded-for` first and then add `forwarded` header.
pub(super) fn add_forwarding_header(
headers: &mut HeaderMap,
client_addr: &SocketAddr,
listen_addr: &SocketAddr,
tls: bool,
uri_str: &str,
original_uri: &Uri,
) -> Result<()> {
// default process
// optional process defined by upstream_option is applied in fn apply_upstream_options
let canonical_client_addr = client_addr.to_canonical().ip().to_string();
append_header_entry_with_comma(headers, "x-forwarded-for", &canonical_client_addr)?;
let has_forwarded = headers.contains_key(header::FORWARDED);
let has_xff = headers.contains_key(X_FORWARDED_FOR);
// Handle incoming Forwarded header (Case 2: only Forwarded exists)
if has_forwarded && !has_xff {
// Extract proxy chain from Forwarded header and update X-Forwarded-For for consistency
update_xff_from_forwarded(headers, client_addr)?;
} else {
// Case 1: only X-Forwarded-For exists, or Case 3: both exist (conservative: use X-Forwarded-For)
// TODO: In future PR, implement proper RFC 7239 precedence
// where Forwarded header should take priority over X-Forwarded-For
// This requires careful testing to ensure no breaking changes
append_header_entry_with_comma(headers, X_FORWARDED_FOR, &canonical_client_addr)?;
}
// IMPORTANT: If Forwarded header exists, always update it for consistency
// This ensures headers remain consistent even when forwarded_header upstream option is not specified
if has_forwarded {
match generate_forwarded_header(headers, tls, original_uri) {
Ok(forwarded_value) => {
add_header_entry_overwrite_if_exist(headers, header::FORWARDED.as_str(), forwarded_value)?;
}
Err(e) => {
// Log warning but don't fail the request if Forwarded generation fails
warn!("Failed to update existing Forwarded header for consistency: {}", e);
}
}
}
// Single line cookie header
// TODO: This should be only for HTTP/1.1. For 2+, this can be multi-lined.
@ -214,32 +265,134 @@ pub(super) fn add_forwarding_header(
/////////// As Nginx
// If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
// scheme used to connect to this server
add_header_entry_if_not_exist(headers, "x-forwarded-proto", if tls { "https" } else { "http" })?;
add_header_entry_if_not_exist(headers, X_FORWARDED_PROTO, if tls { "https" } else { "http" })?;
// If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
// server port the client connected to
add_header_entry_if_not_exist(headers, "x-forwarded-port", listen_addr.port().to_string())?;
add_header_entry_if_not_exist(headers, X_FORWARDED_PORT, listen_addr.port().to_string())?;
/////////// As Nginx-Proxy
// x-real-ip
add_header_entry_overwrite_if_exist(headers, "x-real-ip", canonical_client_addr)?;
add_header_entry_overwrite_if_exist(headers, X_REAL_IP, canonical_client_addr)?;
// x-forwarded-ssl
add_header_entry_overwrite_if_exist(headers, "x-forwarded-ssl", if tls { "on" } else { "off" })?;
add_header_entry_overwrite_if_exist(headers, X_FORWARDED_SSL, if tls { "on" } else { "off" })?;
// x-original-uri
add_header_entry_overwrite_if_exist(headers, "x-original-uri", uri_str.to_string())?;
add_header_entry_overwrite_if_exist(headers, X_ORIGINAL_URI, original_uri.to_string())?;
// proxy
add_header_entry_overwrite_if_exist(headers, "proxy", "")?;
Ok(())
}
/// Extract the chain of `for=` node identifiers from an existing RFC 7239 `Forwarded` header.
///
/// Returns the identifiers in order of appearance, or an empty vector when the header is
/// absent or not valid ASCII. Parameter names are matched case-insensitively, as required
/// by RFC 7239 §4 (`For=...` and `for=...` are equivalent tokens) — the previous
/// `strip_prefix("for=")` silently dropped capitalized variants. Quoted/bracketed IPv6
/// node identifiers (`"[::1]"`) are unwrapped so the values can be reused directly in an
/// `X-Forwarded-For` list.
fn extract_forwarded_chain(headers: &HeaderMap) -> Vec<String> {
  headers
    .get(header::FORWARDED)
    .and_then(|h| h.to_str().ok())
    .map(|forwarded_str| {
      // A Forwarded header is a comma-separated list of elements; each element is a
      // semicolon-separated list of `name=value` parameters.
      forwarded_str
        .split(',')
        .flat_map(|element| element.split(';'))
        .map(str::trim)
        .filter_map(|param| {
          // RFC 7239 parameter names are case-insensitive tokens.
          let (name, value) = param.split_once('=')?;
          name.trim().eq_ignore_ascii_case("for").then(|| value.trim())
        })
        .map(|for_value| {
          // Remove quotes/brackets from IPv6 addresses for consistency with X-Forwarded-For
          if let Some(ipv6) = for_value.strip_prefix("\"[").and_then(|s| s.strip_suffix("]\"")) {
            ipv6.to_string()
          } else {
            for_value.to_string()
          }
        })
        .collect()
    })
    .unwrap_or_default()
}
/// Rebuild `X-Forwarded-For` so it mirrors the proxy chain carried by an existing
/// RFC 7239 `Forwarded` header, then append the current client's canonical IP.
///
/// When the `Forwarded` header yields no usable `for=` entries, the existing
/// `X-Forwarded-For` value (if any) is left untouched and only the client IP is appended.
fn update_xff_from_forwarded(headers: &mut HeaderMap, client_addr: &SocketAddr) -> Result<()> {
  let proxy_chain = extract_forwarded_chain(headers);
  // Only replace X-Forwarded-For when the Forwarded header actually produced a chain.
  if !proxy_chain.is_empty() {
    headers.remove(X_FORWARDED_FOR);
    for hop in &proxy_chain {
      append_header_entry_with_comma(headers, X_FORWARDED_FOR, hop)?;
    }
  }
  // Standard proxy behavior: always append the immediate client as the last hop.
  let client_ip = client_addr.to_canonical().ip().to_string();
  append_header_entry_with_comma(headers, X_FORWARDED_FOR, &client_ip)?;
  Ok(())
}
/// Build an RFC 7239 `Forwarded` header value from the current `X-Forwarded-For` header.
///
/// The `for=` chain is taken from `X-Forwarded-For` (IPv6 entries are quoted and
/// bracketed as RFC 7239 requires); `proto` reflects the TLS flag and `host` is derived
/// from the original URI or, failing that, the `Host` header.
///
/// # Errors
/// Fails when no `X-Forwarded-For` entries are available, or when no host can be
/// determined from the URI / `Host` header.
fn generate_forwarded_header(headers: &HeaderMap, tls: bool, original_uri: &Uri) -> Result<String> {
  // Collect X-Forwarded-For hops, formatting each per RFC 7239 (quote + bracket IPv6).
  let hops: Vec<String> = headers
    .get(X_FORWARDED_FOR)
    .and_then(|h| h.to_str().ok())
    .into_iter()
    .flat_map(|xff_str| xff_str.split(','))
    .map(str::trim)
    .filter(|ip| !ip.is_empty())
    .map(|ip| {
      if ip.contains(':') {
        format!("\"[{}]\"", ip)
      } else {
        ip.to_string()
      }
    })
    .collect();

  if hops.is_empty() {
    return Err(anyhow!("No X-Forwarded-For header found for Forwarded generation"));
  }

  let proto = if tls { "https" } else { "http" };
  let host = host_from_uri_or_host_header(original_uri, headers.get(header::HOST).cloned())?;
  Ok(format!("for={};proto={};host={}", hops.join(",for="), proto, host))
}
/// Derive the request authority (`host[:port]`), preferring the URI's host component
/// (with its explicit port, when present) and falling back to the `Host` header value.
///
/// # Errors
/// Fails when neither source provides a host, or when the `Host` header cannot be
/// represented as a string.
pub(super) fn host_from_uri_or_host_header(uri: &Uri, host_header_value: Option<header::HeaderValue>) -> Result<String> {
  // The URI authority wins over the Host header when both are present.
  if let Some(host) = uri.host() {
    let authority = match uri.port_u16() {
      Some(port) => format!("{}:{}", host, port),
      None => host.to_string(),
    };
    return Ok(authority);
  }
  // Fall back to the Host header; a non-string header value propagates as an error.
  let from_header = host_header_value.as_ref().map(|v| v.to_str()).transpose()?;
  from_header
    .map(str::to_string)
    .ok_or_else(|| anyhow!("No host found in URI or Host header"))
}
/// Remove connection header
pub(super) fn remove_connection_header(headers: &mut HeaderMap) {
if let Some(values) = headers.get(header::CONNECTION) {
if let Ok(v) = values.clone().to_str() {
for m in v.split(',') {
if !m.is_empty() {
headers.remove(m.trim());
}
let keys = v.split(',').map(|m| m.trim()).filter(|m| !m.is_empty());
for m in keys {
headers.remove(m);
}
}
}
@ -274,11 +427,9 @@ pub(super) fn extract_upgrade(headers: &HeaderMap) -> Option<String> {
.split(',')
.any(|w| w.trim().eq_ignore_ascii_case(header::UPGRADE.as_str()))
{
if let Some(u) = headers.get(header::UPGRADE) {
if let Ok(m) = u.to_str() {
debug!("Upgrade in request header: {}", m);
return Some(m.to_owned());
}
if let Some(Ok(m)) = headers.get(header::UPGRADE).map(|u| u.to_str()) {
debug!("Upgrade in request header: {}", m);
return Some(m.to_owned());
}
}
}

View file

@ -2,8 +2,8 @@ use crate::{
backend::{Upstream, UpstreamCandidates, UpstreamOption},
log::*,
};
use anyhow::{anyhow, ensure, Result};
use http::{header, uri::Scheme, Request, Version};
use anyhow::{Result, anyhow, ensure};
use http::{Request, Version, header, uri::Scheme};
/// Trait defining parser of hostname
/// Inspect and extract hostname from either the request HOST header or request line

View file

@ -49,12 +49,17 @@ where
}
Err(e) => {
warn!("HTTP/3 error on accept incoming connection: {}", e);
match e.get_error_level() {
h3::error::ErrorLevel::ConnectionError => break,
h3::error::ErrorLevel::StreamError => continue,
}
break;
}
Ok(Some((req, stream))) => {
// Ok(Some((req, stream))) => {
Ok(Some(req_resolver)) => {
let (req, stream) = match req_resolver.resolve_request().await {
Ok((req, stream)) => (req, stream),
Err(e) => {
warn!("HTTP/3 error on resolve request in stream: {}", e);
continue;
}
};
// We consider the connection count separately from the stream count.
// Max clients for h1/h2 = max 'stream' for h3.
let request_count = self.globals.request_count.clone();
@ -63,7 +68,7 @@ where
h3_conn.shutdown(0).await?;
break;
}
debug!("Request incoming: current # {}", request_count.current());
trace!("Request incoming: current # {}", request_count.current());
let self_inner = self.clone();
let tls_server_name_inner = tls_server_name.clone();
@ -77,7 +82,7 @@ where
warn!("HTTP/3 error on serve stream: {}", e);
}
request_count.decrement();
debug!("Request processed: current # {}", request_count.current());
trace!("Request processed: current # {}", request_count.current());
});
}
}

View file

@ -11,7 +11,7 @@ use crate::{
message_handler::HttpMessageHandler,
name_exp::ServerName,
};
use futures::{select, FutureExt};
use futures::{FutureExt, select};
use http::{Request, Response};
use hyper::{
body::Incoming,
@ -80,7 +80,7 @@ where
request_count.decrement();
return;
}
debug!("Request incoming: current # {}", request_count.current());
trace!("Request incoming: current # {}", request_count.current());
let server_clone = self.connection_builder.clone();
let message_handler_clone = self.message_handler.clone();
@ -110,7 +110,7 @@ where
}
request_count.decrement();
debug!("Request processed: current # {}", request_count.current());
trace!("Request processed: current # {}", request_count.current());
});
}
@ -131,52 +131,55 @@ where
/// Start with TLS (HTTPS)
pub(super) async fn start_with_tls(&self, cancel_token: CancellationToken) -> RpxyResult<()> {
// By default, TLS listener is spawned
let join_handle_tls = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
let cancel_token = cancel_token.clone();
async move {
select! {
_ = self_clone.tls_listener_service().fuse() => {
error!("TCP proxy service for TLS exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for TLS listener");
}
}
}
});
#[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))]
{
self.tls_listener_service().await?;
error!("TCP proxy service for TLS exited");
let _ = join_handle_tls.await;
Ok(())
}
#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
{
if self.globals.proxy_config.http3 {
let jh_tls = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
let cancel_token = cancel_token.clone();
async move {
select! {
_ = self_clone.tls_listener_service().fuse() => {
error!("TCP proxy service for TLS exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for TLS listener");
}
}
}
});
let jh_h3 = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
async move {
select! {
_ = self_clone.h3_listener_service().fuse() => {
error!("UDP proxy service for QUIC exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for QUIC listener");
}
}
}
});
let _ = futures::future::join(jh_tls, jh_h3).await;
Ok(())
} else {
self.tls_listener_service().await?;
error!("TCP proxy service for TLS exited");
Ok(())
// If HTTP/3 is not enabled, wait for TLS listener to finish
if !self.globals.proxy_config.http3 {
let _ = join_handle_tls.await;
return Ok(());
}
// If HTTP/3 is enabled, spawn a task to handle HTTP/3 connections
let join_handle_h3 = self.globals.runtime_handle.spawn({
let self_clone = self.clone();
async move {
select! {
_ = self_clone.h3_listener_service().fuse() => {
error!("UDP proxy service for QUIC exited");
cancel_token.cancel();
},
_ = cancel_token.cancelled().fuse() => {
debug!("Cancel token is called for QUIC listener");
}
}
}
});
let _ = futures::future::join(join_handle_tls, join_handle_h3).await;
Ok(())
}
}
@ -310,7 +313,7 @@ where
error!("Reloader is broken");
break;
}
let server_crypto_base = server_crypto_rx.borrow().clone().unwrap();
let server_crypto_base = server_crypto_rx.get().unwrap();
let Some(server_config): Option<Arc<ServerCrypto>> = (&server_crypto_base).try_into().ok() else {
error!("Failed to update server crypto");
break;

View file

@ -102,7 +102,7 @@ where
error!("Reloader is broken");
break;
}
let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
let cert_keys_map = server_crypto_rx.get().unwrap();
server_crypto = (&cert_keys_map).try_into().ok();
let Some(inner) = server_crypto.clone() else {

View file

@ -52,7 +52,7 @@ where
/// Receive server crypto from reloader
fn receive_server_crypto(&self, server_crypto_rx: ReloaderReceiver<ServerCryptoBase>) -> RpxyResult<s2n_quic_rustls::Server> {
let cert_keys_map = server_crypto_rx.borrow().clone().ok_or_else(|| {
let cert_keys_map = server_crypto_rx.get().ok_or_else(|| {
error!("Reloader is broken");
RpxyError::CertificateReloadError(anyhow!("Reloader is broken").into())
})?;

View file

@ -16,10 +16,12 @@ pub(super) fn bind_tcp_socket(listening_on: &SocketAddr) -> RpxyResult<TcpSocket
}?;
tcp_socket.set_reuseaddr(true)?;
tcp_socket.set_reuseport(true)?;
if let Err(e) = tcp_socket.bind(*listening_on) {
tcp_socket.bind(*listening_on).map_err(|e| {
error!("Failed to bind TCP socket: {}", e);
return Err(RpxyError::Io(e));
};
RpxyError::Io(e)
})?;
Ok(tcp_socket)
}
@ -36,11 +38,10 @@ pub(super) fn bind_udp_socket(listening_on: &SocketAddr) -> RpxyResult<UdpSocket
socket.set_reuse_port(true)?;
socket.set_nonblocking(true)?; // This was made true inside quinn. so this line isn't necessary here. but just in case.
if let Err(e) = socket.bind(&(*listening_on).into()) {
socket.bind(&(*listening_on).into()).map_err(|e| {
error!("Failed to bind UDP socket: {}", e);
return Err(RpxyError::Io(e));
};
let udp_socket: UdpSocket = socket.into();
RpxyError::Io(e)
})?;
Ok(udp_socket)
Ok(socket.into())
}

@ -1 +1 @@
Subproject commit 2500716b70bd6e548cdf690188ded7afe6726330
Subproject commit e5069f9dbb9db846b29d6dafa4a0186c930cf906

@ -1 +1 @@
Subproject commit f9d0c4feb83160b6fe66fe34da76c443fc2b381c
Subproject commit 814e0348b5ee3aa72083a722c34d7446c91cfc69