Merge branch 'tmp/sticky-cookie' into feat/sticky-cookie-feature
This commit is contained in: commit d8cadf06af
80 changed files with 4870 additions and 867 deletions

.build/Jenkinsfile (vendored, 7 changes)

@@ -37,9 +37,6 @@ pipeline {
         dir('rust-rpxy') {
           sh """
-            # Update submodule URLs to HTTPS (allows cloning without SSH keys)
-            sed -i 's|git@github.com:|https://github.com/|g' .gitmodules
-
             # Initialize and update submodules
             git submodule update --init
           """
@@ -59,7 +56,7 @@ pipeline {
         // Build the binary
         sh 'cargo build --release'

         // Prepare and stash files
         sh """
           # Move binary to workspace root for easier access
@@ -81,7 +78,7 @@ pipeline {
         stash includes: "${BINARY_NAME}.spec", name: "rpm-files"
         stash includes: "rpxy.service, config.toml", name: "service-file"
         stash includes: "LICENSE, README.md", name: "docs"

         // Archive the binary as an artifact
         archiveArtifacts artifacts: "${BINARY_NAME}", allowEmptyArchive: false, fingerprint: true
       }

.github/dependabot.yml (vendored, 13 changes)

@@ -1,6 +1,3 @@
-# Basic dependabot.yml file with
-# minimum configuration for two package managers
-
 version: 2
 updates:
   # Enable version updates for cargo
@@ -9,16 +6,6 @@ updates:
     schedule:
       interval: "daily"

-  - package-ecosystem: "cargo"
-    directory: "/rpxy-bin"
-    schedule:
-      interval: "daily"
-
-  - package-ecosystem: "cargo"
-    directory: "/rpxy-lib"
-    schedule:
-      interval: "daily"
-
   # Enable version updates for Docker
   - package-ecosystem: "docker"
     directory: "/docker"

.github/workflows/ci.yml (vendored, 3 changes)

@@ -5,6 +5,9 @@ on:
   pull_request:
     types: [synchronize, opened]

+permissions:
+  contents: read
+
 env:
   CARGO_TERM_COLOR: always

.github/workflows/release.yml (vendored, 40 changes)

@@ -14,6 +14,10 @@ on:

 jobs:
   on-success:
+    permissions:
+      contents: read
+      packages: read
+
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success' }} || ${{ github.event_name == 'repository_dispatch' }}
     strategy:
@@ -34,16 +38,6 @@ jobs:
           platform: linux/arm64
           tags-suffix: "-slim"

-        - target: "musl"
-          build-feature: "-slim-pq"
-          platform: linux/amd64
-          tags-suffix: "-slim-pq"
-
-        - target: "musl"
-          build-feature: "-slim-pq"
-          platform: linux/arm64
-          tags-suffix: "-slim-pq"
-
         - target: "gnu"
           build-feature: "-s2n"
           platform: linux/amd64
@@ -54,26 +48,6 @@ jobs:
           platform: linux/arm64
           tags-suffix: "-s2n"

-        - target: "gnu"
-          build-feature: "-pq"
-          platform: linux/amd64
-          tags-suffix: "-pq"
-
-        - target: "gnu"
-          build-feature: "-pq"
-          platform: linux/arm64
-          tags-suffix: "-pq"
-
-        - target: "gnu"
-          build-feature: "-s2n-pq"
-          platform: linux/amd64
-          tags-suffix: "-s2n-pq"
-
-        - target: "gnu"
-          build-feature: "-s2n-pq"
-          platform: linux/arm64
-          tags-suffix: "-s2n-pq"
-
         - target: "gnu"
           build-feature: "-webpki-roots"
           platform: linux/amd64
@@ -128,12 +102,18 @@ jobs:
           path: "/tmp/${{ steps.set-env.outputs.target_name }}"

   on-failure:
+    permissions:
+      contents: read
+
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'failure' }}
     steps:
       - run: echo 'The release triggering workflows failed'

   release:
+    permissions:
+      contents: write
+
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'repository_dispatch' }}
     needs: on-success

.github/workflows/release_docker.yml (vendored, 65 changes)

@@ -16,7 +16,11 @@ env:

 jobs:
   build_and_push:
-    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+
+    runs-on: ubuntu-22.04
     if: ${{ github.event_name == 'push' }} || ${{ github.event_name == 'pull_request' && github.event.pull_request.merged == true }}
     strategy:
       fail-fast: false
@@ -30,17 +34,6 @@ jobs:
             jqtype/rpxy:latest
             ghcr.io/junkurihara/rust-rpxy:latest

-      - target: "default-pq"
-        dockerfile: ./docker/Dockerfile
-        platforms: linux/amd64,linux/arm64
-        build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,rustls-backend,acme,post-quantum"
-        tags-suffix: "-pq"
-        # Aliases must be used only for release builds
-        aliases: |
-          jqtype/rpxy:pq
-          ghcr.io/junkurihara/rust-rpxy:pq
-
       - target: "default-slim"
         dockerfile: ./docker/Dockerfile-slim
         build-contexts: |
@@ -53,24 +46,10 @@ jobs:
            jqtype/rpxy:slim
            ghcr.io/junkurihara/rust-rpxy:slim

-      - target: "default-slim-pq"
-        dockerfile: ./docker/Dockerfile-slim
-        build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,rustls-backend,acme,post-quantum"
-        build-contexts: |
-          messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
-          messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
-        platforms: linux/amd64,linux/arm64
-        tags-suffix: "-slim-pq"
-        # Aliases must be used only for release builds
-        aliases: |
-          jqtype/rpxy:slim-pq
-          ghcr.io/junkurihara/rust-rpxy:slim-pq
-
       - target: "s2n"
         dockerfile: ./docker/Dockerfile
         build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme"
+          "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme,post-quantum"
           "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
         platforms: linux/amd64,linux/arm64
         tags-suffix: "-s2n"
@@ -79,23 +58,11 @@ jobs:
            jqtype/rpxy:s2n
            ghcr.io/junkurihara/rust-rpxy:s2n

-      - target: "s2n-pq"
-        dockerfile: ./docker/Dockerfile
-        build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,rustls-backend,acme,post-quantum"
-          "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
-        platforms: linux/amd64,linux/arm64
-        tags-suffix: "-s2n-pq"
-        # Aliases must be used only for release builds
-        aliases: |
-          jqtype/rpxy:s2n-pq
-          ghcr.io/junkurihara/rust-rpxy:s2n-pq
-
       - target: "webpki-roots"
         dockerfile: ./docker/Dockerfile
         platforms: linux/amd64,linux/arm64
         build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme"
+          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme,post-quantum"
         tags-suffix: "-webpki-roots"
         # Aliases must be used only for release builds
         aliases: |
@@ -105,7 +72,7 @@ jobs:
       - target: "slim-webpki-roots"
         dockerfile: ./docker/Dockerfile-slim
         build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme"
+          "CARGO_FEATURES=--no-default-features --features=http3-quinn,cache,webpki-roots,acme,post-quantum"
         build-contexts: |
           messense/rust-musl-cross:amd64-musl=docker-image://messense/rust-musl-cross:x86_64-musl
           messense/rust-musl-cross:arm64-musl=docker-image://messense/rust-musl-cross:aarch64-musl
@@ -119,7 +86,7 @@ jobs:
       - target: "s2n-webpki-roots"
         dockerfile: ./docker/Dockerfile
         build-args: |
-          "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,webpki-roots,acme"
+          "CARGO_FEATURES=--no-default-features --features=http3-s2n,cache,webpki-roots,acme,post-quantum"
           "ADDITIONAL_DEPS=pkg-config libssl-dev cmake libclang1 gcc g++"
         platforms: linux/amd64,linux/arm64
         tags-suffix: "-s2n-webpki-roots"
@@ -207,6 +174,14 @@ jobs:
           platforms: ${{ matrix.platforms }}
           labels: ${{ steps.meta.outputs.labels }}

+      - name: check pull_request title
+        if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
+        uses: kaisugi/action-regex-match@v1.0.1
+        id: regex-match
+        with:
+          text: ${{ github.event.pull_request.title }}
+          regex: "^(\\d+\\.\\d+\\.\\d+)$"
+
       - name: Release build and push from main branch
         if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
         uses: docker/build-push-action@v6
@@ -218,6 +193,8 @@ jobs:
            ${{ env.GHCR }}/${{ env.GHCR_IMAGE_NAME }}:latest${{ matrix.tags-suffix }}
            ${{ env.DH_REGISTRY_NAME }}:latest${{ matrix.tags-suffix }}
            ${{ matrix.aliases }}
+           ${{ env.GHCR }}/${{ env.GHCR_IMAGE_NAME }}:${{ github.event.pull_request.title }}${{ matrix.tags-suffix }}
+           ${{ env.DH_REGISTRY_NAME }}:${{ github.event.pull_request.title }}${{ matrix.tags-suffix }}
          build-contexts: ${{ matrix.build-contexts }}
          file: ${{ matrix.dockerfile }}
          cache-from: type=gha,scope=rpxy-latest-${{ matrix.target }}
@@ -226,6 +203,10 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}

   dispatch_release_event:
+    permissions:
+      contents: write
+      actions: write
+
     runs-on: ubuntu-latest
     if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.ref == 'develop' && github.event.pull_request.base.ref == 'main' && github.event.pull_request.merged == true }}
     needs: build_and_push

.github/workflows/shift_left.yml (vendored, 2 changes)

@@ -7,6 +7,8 @@ on:

 jobs:
   Scan-Build:
+    permissions:
+      contents: read
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4

.gitignore (vendored, 5 changes)

@@ -3,14 +3,11 @@
 docker/log
 docker/cache
 docker/config
 docker/acme_registry

 # Generated by Cargo
 # will have compiled files and executables
 /target/

-# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
-# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
-Cargo.lock

 # These are backup files generated by rustfmt
 **/*.rs.bk

.gitmodules (vendored, 14 changes)

@@ -1,10 +1,10 @@
 [submodule "submodules/rusty-http-cache-semantics"]
-  path = submodules/rusty-http-cache-semantics
-  url = git@github.com:junkurihara/rusty-http-cache-semantics.git
+  path = submodules/rusty-http-cache-semantics
+  url = https://github.com/junkurihara/rusty-http-cache-semantics.git
 [submodule "submodules/rustls-acme"]
-  path = submodules/rustls-acme
-  url = git@github.com:junkurihara/rustls-acme.git
+  path = submodules/rustls-acme
+  url = https://github.com/junkurihara/rustls-acme.git
 [submodule "submodules/s2n-quic"]
-  path = submodules/s2n-quic
-  url = git@github.com:junkurihara/s2n-quic.git
-  branch = rustls-pq
+  path = submodules/s2n-quic
+  url = https://github.com/junkurihara/s2n-quic.git
+  branch = rustls-pq
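With the submodule URLs switched from SSH to HTTPS, a fresh clone no longer needs SSH keys, which is exactly what the Jenkinsfile `sed` workaround above used to emulate. A minimal sketch of the effect (the clone URL is the repository this diff belongs to):

```sh
# Clones and initializes submodules over HTTPS, no SSH key required
git clone https://github.com/junkurihara/rust-rpxy.git
cd rust-rpxy
git submodule update --init
```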

CHANGELOG.md (50 changes)

@@ -1,6 +1,54 @@
 # CHANGELOG

-## 0.9.4 or 0.10.0 (Unreleased)
+## 0.10.1 or 0.11.0 (Unreleased)
+
+## 0.10.0
+
+### Important Changes
+
+- [Breaking] We removed the non-`watch` execution option and enabled dynamic reloading of the config file by default.
+- We newly added the `log-dir` execution option to specify the directory for `access.log`, `error.log` and `rpxy.log`. This is optional; if not specified, the logs are written to the standard output by default.
+
+### Improvement
+
+- Refactor: lots of minor improvements
+- Deps
+
+## 0.9.7
+
+### Improvement
+
+- Feat: add version tags for docker images via GitHub Actions
+- Feat: support gRPC. This lets rpxy serve gRPC requests on the same ports as HTTP and HTTPS, i.e., listen_port and listen_port_tls. By using different subdomains for HTTP(S) and gRPC, we can multiplex them on the same ports without opening another port dedicated to gRPC. To this end, this update made the forwarder force HTTP/2 for gRPC requests towards the backend (gRPC) app.
+- Deps and refactor
+
+### Bugfix
+
+- Fixed a bug in the upstream option "force_http2_upstream"
+
+### Other
+
+- Tentative downgrade of GitHub Actions `runs-on` from ubuntu-latest to ubuntu-22.04.
+
+## 0.9.6
+
+### Improvement
+
+- Feat: Change the default hashing algorithm for internal hashmaps and hashsets from FxHash to aHash. This change improves security against HashDoS attacks on colliding domain names and paths, and improves the speed of hash operations on string keys (c.f., [the performance comparison](https://github.com/tkaitchuck/aHash/blob/master/compare/readme.md)).
+- Deps and refactor
+
+## 0.9.5
+
+### Bugfix
+
+- Fix docker image build options with the `post-quantum` feature.
+
+## 0.9.4
+
+### Improvement
+
+- Feat: Enable the hybrid post-quantum key exchange for TLS and QUIC with `X25519MLKEM768` by default.
+- Deps and refactor
+
 ## 0.9.3
|||
3799
Cargo.lock
generated
Normal file
3799
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
|
|
@@ -1,11 +1,11 @@
 [workspace.package]
-version = "0.9.4"
+version = "0.10.0"
 authors = ["Jun Kurihara"]
 homepage = "https://github.com/junkurihara/rust-rpxy"
 repository = "https://github.com/junkurihara/rust-rpxy"
 license = "MIT"
 readme = "./README.md"
-edition = "2021"
+edition = "2024"
 publish = false

 [workspace]

LICENSE (2 changes)

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2024 Jun Kurihara
+Copyright (c) 2025 Jun Kurihara

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

README.md (52 changes)

@@ -8,21 +8,36 @@

 > **WIP Project**

 > [!NOTE]
 > This project is an HTTP, i.e., Layer 7, reverse-proxy. If you are looking for a TCP/UDP, i.e., Layer 4, reverse-proxy, please check another project of mine, [`rpxy-l4`](https://github.com/junkurihara/rust-rpxy-l4).

 ## Introduction

 `rpxy` [ahr-pik-see] is an implementation of a simple and lightweight reverse-proxy with several additional features. The implementation is based on [`hyper`](https://github.com/hyperium/hyper), [`rustls`](https://github.com/rustls/rustls) and [`tokio`](https://github.com/tokio-rs/tokio), i.e., written in Rust [^pure_rust]. `rpxy` routes multiple host names to the appropriate backend application servers while serving TLS connections.

 [^pure_rust]: It can only doubtfully be claimed to be written in pure Rust, since the current `rpxy` relies on `aws-lc-rs` for cryptographic operations.

-By default, `rpxy` provides the *TLS connection sanitization* by correctly binding a certificate used to establish a secure channel with the backend application. Specifically, it always keeps the consistency between the given SNI (server name indication) in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in Request line) [^1]. Additionally, as a somewhat unstable feature, our `rpxy` can handle the brand-new HTTP/3 connection thanks to [`quinn`](https://github.com/quinn-rs/quinn), [`s2n-quic`](https://github.com/aws/s2n-quic) and [`hyperium/h3`](https://github.com/hyperium/h3).[^h3lib] Furthermore, `rpxy` supports the automatic issuance and renewal of certificates via [TLS-ALPN-01 (RFC8737)](https://www.rfc-editor.org/rfc/rfc8737) of the [ACME protocol (RFC8555)](https://www.rfc-editor.org/rfc/rfc8555) thanks to [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme), and the hybridized post-quantum key exchange [`X25519Kyber768Draft00`](https://datatracker.ietf.org/doc/draft-tls-westerbaan-xyber768d00/)[^kyber] for TLS incoming and outgoing initiation thanks to [`rustls-post-quantum`](https://docs.rs/rustls-post-quantum/latest/rustls_post_quantum/).
+Supported features are summarized as follows:

-[^h3lib]: HTTP/3 libraries are mutually exclusive. You need to explicitly specify `s2n-quic` with the `--no-default-features` flag. Also note that if you build `rpxy` with `s2n-quic`, it requires `openssl` just for building the package.
+- Supported HTTP(S) protocols: HTTP/1.1, HTTP/2 and the brand-new HTTP/3 [^h3lib]
+  - gRPC is also supported
+- Serving multiple domain names with TLS termination
+- Mutual TLS authentication with client certificates
+- Automated certificate issuance and renewal via the TLS-ALPN-01 ACME protocol [^acme]
+- Post-quantum key exchange for TLS/QUIC [^kyber]
+- TLS connection sanitization to avoid domain fronting [^sanitization]
+- Load balancing with round-robin, random, and sticky sessions
+- and more...

-[^kyber]: This is not yet a default feature. You need to specify `--features post-quantum` when building `rpxy`. Also note that `X25519Kyber768Draft00` is a draft version, yet it is widely used on the Internet. We will update the feature when the newest version (`X25519MLKEM768` in [`ECDHE-MLKEM`](https://www.ietf.org/archive/id/draft-kwiatkowski-tls-ecdhe-mlkem-02.html)) is available.
+[^h3lib]: HTTP/3 is enabled thanks to [`quinn`](https://github.com/quinn-rs/quinn), [`s2n-quic`](https://github.com/aws/s2n-quic) and [`hyperium/h3`](https://github.com/hyperium/h3). The HTTP/3 libraries are mutually exclusive. You need to explicitly specify `s2n-quic` with the `--no-default-features` flag. Also note that if you build `rpxy` with `s2n-quic`, it requires `openssl` just for building the package.

-This project is still *work-in-progress*. But it is already working in some production environments and serves a number of domain names. Furthermore, it *significantly outperforms* NGINX and Caddy, e.g., *1.5x faster than NGINX*, in a very simple HTTP reverse-proxy scenario (see the [`bench`](./bench/) directory).
+[^acme]: `rpxy` supports the automatic issuance and renewal of certificates via [TLS-ALPN-01 (RFC8737)](https://www.rfc-editor.org/rfc/rfc8737) of the [ACME protocol (RFC8555)](https://www.rfc-editor.org/rfc/rfc8555) thanks to [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme).

-[^1]: We should note that NGINX doesn't guarantee such consistency by default. To this end, you have to add an `if` statement in the NGINX configuration file.
+[^kyber]: `rpxy` supports the hybridized post-quantum key exchange [`X25519MLKEM768`](https://www.ietf.org/archive/id/draft-kwiatkowski-tls-ecdhe-mlkem-02.html) for TLS/QUIC incoming and outgoing initiation thanks to [`rustls-post-quantum`](https://docs.rs/rustls-post-quantum/latest/rustls_post_quantum/). This is already a default feature. Also note that `X25519MLKEM768` is still a draft version, yet it is widely used on the Internet.

+[^sanitization]: By default, `rpxy` provides *TLS connection sanitization* by correctly binding the certificate used to establish a secure channel with the backend application. Specifically, it always keeps consistency between the SNI (server name indication) given in `ClientHello` of the underlying TLS and the domain name given by the overlaid HTTP HOST header (or URL in the Request line). We should note that NGINX doesn't guarantee such consistency by default; to this end, you have to add an `if` statement in the NGINX configuration file.

+This project is still *work-in-progress*. But it is already working in some production environments and serves a number of domain names. Furthermore, it *significantly outperforms* NGINX and Caddy, e.g., *30% ~ 60% or more faster than NGINX*, in a very simple HTTP reverse-proxy scenario (see the [`bench`](./bench/) directory).

 ## Installing/Building an Executable Binary of `rpxy`
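Since this merge centers on the sticky-cookie feature, here is a minimal, hypothetical sketch of the sticky-session load balancing listed above. The app and host names are invented and the exact field names should be checked against config-example.toml; `"sticky"` mirrors the `sticky-cookie` build feature enabled in rpxy-bin/Cargo.toml later in this diff.

```toml
# Hypothetical sketch: two upstreams with cookie-based session stickiness
[apps.example_app]
server_name = "app.example.com"

[[apps.example_app.reverse_proxy]]
upstream = [
  { location = "backend1.local:8080" },
  { location = "backend2.local:8080" },
]
load_balance = "sticky" # alternatives per the README: round-robin, random
```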
@@ -65,7 +80,7 @@ You can run `rpxy` with a configuration file like
 % ./target/release/rpxy --config config.toml
 ```

-If you specify the `-w` option along with the config file path, `rpxy` tracks changes of `config.toml` in real time and applies them immediately without restarting the process.
+`rpxy` tracks changes of `config.toml` in real time and applies them immediately without restarting the process.

 The full help messages are given as follows.
@@ -73,12 +88,18 @@ The full help messages are given as follows.
 usage: rpxy [OPTIONS] --config <FILE>

 Options:
-  -c, --config <FILE>  Configuration file path like ./config.toml
-  -w, --watch          Activate dynamic reloading of the config file via continuous monitoring
-  -h, --help           Print help
-  -V, --version        Print version
+  -c, --config <FILE>      Configuration file path like ./config.toml
+  -l, --log-dir <LOG_DIR>  Directory for log files. If not specified, logs are printed to stdout.
+  -h, --help               Print help
+  -V, --version            Print version
 ```

+If you set `--log-dir=<log_dir>`, the log files are created in the specified directory. Otherwise, the log is printed to stdout.
+
+- `${log_dir}/access.log` for access log
+<!-- - `${log_dir}/error.log` for error log -->
+- `${log_dir}/rpxy.log` for system and error log
+
 That's all!

 ## Basic Configuration
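A minimal invocation sketch of the new `--log-dir` flag described above (the directory path is a placeholder):

```sh
# Write access.log and rpxy.log under /var/log/rpxy instead of stdout
mkdir -p /var/log/rpxy
./target/release/rpxy --config config.toml --log-dir /var/log/rpxy
```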
@@ -422,6 +443,17 @@ Check a third party project [`Gamerboy59/rpxy-webui`](https://github.com/Gamerboy59/rpxy-webui) to use `rpxy` with a web interface.

 todo!

+## Credits
+
+`rpxy` cannot be built without the following projects and inspirations:
+
+- [`hyper`](https://github.com/hyperium/hyper) and [`hyperium/h3`](https://github.com/hyperium/h3)
+- [`rustls`](https://github.com/rustls/rustls)
+- [`tokio`](https://github.com/tokio-rs/tokio)
+- [`quinn`](https://github.com/quinn-rs/quinn)
+- [`s2n-quic`](https://github.com/aws/s2n-quic)
+- [`rustls-acme`](https://github.com/FlorianUekermann/rustls-acme)
+
 ## License

 `rpxy` is free, open-source software licensed under MIT License.

bench/Caddyfile
@@ -2,9 +2,13 @@
   auto_https off
 }

 :80 {

   # Proxy everything else to Rocket
   reverse_proxy backend-nginx
+  log {
+    level ERROR
+  }
+
 }

bench/README.md (196 changes)

@@ -3,49 +3,46 @@
 This test simply measures the performance of several reverse proxies over HTTP/1.1 by the following command using [`rewrk`](https://github.com/lnx-search/rewrk).

 ```sh:
-$ rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
+rewrk -c 512 -t 4 -d 15s -h http://localhost:8080 --pct
 ```

 ## Tests on `linux/arm64/v8`

-Done at Jul. 15, 2023
+Done at May 17, 2025

 ### Environment

-- `rpxy` commit id: `1da7e5bfb77d1ce4ee8d6cfc59b1c725556fc192`
-- Docker Desktop 4.21.1 (114176)
+- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
+- Docker Desktop 4.41.2 (191736)
 - ReWrk 0.3.2
-- Macbook Pro '14 (2021, M1 Max, 64GB RAM)
+- Mac mini (2024, M4 Pro, 64GB RAM)

 The docker images of `nginx` and `caddy` for `linux/arm64/v8` are pulled from the official registry.

 ### Result for `rpxy`, `nginx` and `caddy`

-```
-----------------------------
+```bash
 Benchmark on rpxy
 Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
 Latencies:
   Avg      Stdev    Min      Max
-  19.64ms  8.85ms   0.67ms   113.22ms
+  6.90ms   3.42ms   0.78ms   80.26ms
 Requests:
-  Total: 390078  Req/Sec: 26011.25
+  Total: 1107885  Req/Sec: 73866.03
 Transfer:
-  Total: 304.85 MB  Transfer Rate: 20.33 MB/Sec
+  Total: 867.44 MB  Transfer Rate: 57.83 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |     79.24ms     |
-|       99%       |     54.28ms     |
-|       95%       |     42.50ms     |
-|       90%       |     37.82ms     |
-|       75%       |     31.54ms     |
-|       50%       |     26.37ms     |
+|      99.9%      |     49.76ms     |
+|       99%       |     29.57ms     |
+|       95%       |     15.78ms     |
+|       90%       |     13.05ms     |
+|       75%       |     10.41ms     |
+|       50%       |      8.72ms     |
 + --------------- + --------------- +

-721 Errors: error shutting down connection: Socket is not connected (os error 57)
-
 sleep 3 secs
 ----------------------------
 Benchmark on nginx
@@ -53,23 +50,23 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
 Latencies:
   Avg      Stdev    Min      Max
-  33.26ms  15.18ms  1.40ms   118.94ms
+  11.65ms  14.04ms  0.40ms   205.93ms
 Requests:
-  Total: 230268  Req/Sec: 15356.08
+  Total: 654978  Req/Sec: 43666.56
 Transfer:
-  Total: 186.77 MB  Transfer Rate: 12.46 MB/Sec
+  Total: 532.81 MB  Transfer Rate: 35.52 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |     99.91ms     |
-|       99%       |     83.74ms     |
-|       95%       |     70.67ms     |
-|       90%       |     64.03ms     |
-|       75%       |     54.32ms     |
-|       50%       |     45.19ms     |
+|      99.9%      |    151.00ms     |
+|       99%       |    102.80ms     |
+|       95%       |     62.44ms     |
+|       90%       |     42.98ms     |
+|       75%       |     26.44ms     |
+|       50%       |     18.25ms     |
 + --------------- + --------------- +

-677 Errors: error shutting down connection: Socket is not connected (os error 57)
+512 Errors: connection closed

 sleep 3 secs
 ----------------------------
@@ -78,33 +75,31 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
 Latencies:
   Avg      Stdev     Min      Max
-  48.51ms  50.74ms   0.34ms   554.58ms
+  77.54ms  368.11ms  0.37ms   6770.73ms
 Requests:
-  Total: 157239  Req/Sec: 10485.98
+  Total: 86963  Req/Sec: 5798.35
 Transfer:
-  Total: 125.99 MB  Transfer Rate: 8.40 MB/Sec
+  Total: 70.00 MB  Transfer Rate: 4.67 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |    473.82ms     |
-|       99%       |    307.16ms     |
-|       95%       |    212.28ms     |
-|       90%       |    169.05ms     |
-|       75%       |    115.92ms     |
-|       50%       |     80.24ms     |
+|      99.9%      |   5789.65ms     |
+|       99%       |   3407.02ms     |
+|       95%       |   1022.31ms     |
+|       90%       |    608.17ms     |
+|       75%       |    281.95ms     |
+|       50%       |    149.29ms     |
 + --------------- + --------------- +

-708 Errors: error shutting down connection: Socket is not connected (os error 57)
-
 ```

 ## Results on `linux/amd64`

-Done at Jul. 24, 2023
+Done at May 20, 2025

 ### Environment

-- `rpxy` commit id: `7c0945a5124418aa9a1024568c1989bb77cf312f`
-- Docker Desktop 4.21.1 (114176)
+- `rpxy` commit id: `e259e0b58897258d98fdb7504a1cbcbd7c5b37db`
+- Docker Desktop 4.41.2 (192736)
 - ReWrk 0.3.2 and Wrk 0.4.2
 - iMac '27 (2020, 10-Core Intel Core i9, 128GB RAM)
@@ -112,8 +107,8 @@ The docker images of `nginx` and `caddy` for `linux/amd64` were pulled from the official registry.

 Also, when `Sozu` is configured as an HTTP reverse proxy, it cannot handle HTTP request messages emitted from `ReWrk` due to hostname parsing errors, though it correctly handles messages dispatched from `curl` and browsers. So, we additionally test using [`Wrk`](https://github.com/wg/wrk) to examine `Sozu` with the following command.

-```sh:
-$ wrk -c 512 -t 4 -d 15s http://localhost:8110
+```bash
+wrk -c 512 -t 4 -d 15s http://localhost:8110
 ```

 <!-- ```
|
|||
|
||||
#### With ReWrk for `rpxy`, `nginx` and `caddy`
|
||||
|
||||
```
|
||||
```bash
|
||||
----------------------------
|
||||
Benchmark [x86_64] with ReWrk
|
||||
----------------------------
|
||||
|
|
@@ -133,24 +128,22 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8080 for 15 second(s)
 Latencies:
   Avg      Stdev    Min      Max
-  20.37ms  8.95ms   1.63ms   160.27ms
+  15.75ms  6.75ms   1.75ms   124.25ms
 Requests:
-  Total: 376345  Req/Sec: 25095.19
+  Total: 486635  Req/Sec: 32445.33
 Transfer:
-  Total: 295.61 MB  Transfer Rate: 19.71 MB/Sec
+  Total: 381.02 MB  Transfer Rate: 25.40 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |    112.50ms     |
-|       99%       |     61.33ms     |
-|       95%       |     44.26ms     |
-|       90%       |     38.74ms     |
-|       75%       |     32.00ms     |
-|       50%       |     26.82ms     |
+|      99.9%      |     91.91ms     |
+|       99%       |     55.53ms     |
+|       95%       |     34.87ms     |
+|       90%       |     29.55ms     |
+|       75%       |     23.99ms     |
+|       50%       |     20.17ms     |
 + --------------- + --------------- +

-626 Errors: error shutting down connection: Socket is not connected (os error 57)
-
 sleep 3 secs
 ----------------------------
 Benchmark on nginx
@@ -158,24 +151,22 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8090 for 15 second(s)
 Latencies:
   Avg      Stdev    Min      Max
-  23.45ms  12.42ms  1.18ms   154.44ms
+  24.02ms  15.84ms  1.31ms   207.97ms
 Requests:
-  Total: 326685  Req/Sec: 21784.73
+  Total: 318516  Req/Sec: 21236.67
 Transfer:
-  Total: 265.22 MB  Transfer Rate: 17.69 MB/Sec
+  Total: 259.11 MB  Transfer Rate: 17.28 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |     96.85ms     |
-|       99%       |     73.93ms     |
-|       95%       |     57.57ms     |
-|       90%       |     50.36ms     |
-|       75%       |     40.57ms     |
-|       50%       |     32.70ms     |
+|      99.9%      |    135.56ms     |
+|       99%       |     92.59ms     |
+|       95%       |     68.54ms     |
+|       90%       |     58.75ms     |
+|       75%       |     45.88ms     |
+|       50%       |     35.64ms     |
 + --------------- + --------------- +

-657 Errors: error shutting down connection: Socket is not connected (os error 57)
-
 sleep 3 secs
 ----------------------------
 Benchmark on caddy
@@ -183,30 +174,26 @@ Beginning round 1...
 Benchmarking 512 connections @ http://localhost:8100 for 15 second(s)
 Latencies:
   Avg      Stdev     Min      Max
-  45.71ms  50.47ms   0.88ms   908.49ms
+  74.60ms  181.26ms  0.94ms   2723.20ms
 Requests:
-  Total: 166917  Req/Sec: 11129.80
+  Total: 101893  Req/Sec: 6792.16
 Transfer:
-  Total: 133.77 MB  Transfer Rate: 8.92 MB/Sec
+  Total: 82.03 MB  Transfer Rate: 5.47 MB/Sec
 + --------------- + --------------- +
 |   Percentile    |   Avg Latency   |
 + --------------- + --------------- +
-|      99.9%      |    608.92ms     |
-|       99%       |    351.18ms     |
-|       95%       |    210.56ms     |
-|       90%       |    162.68ms     |
-|       75%       |    106.97ms     |
-|       50%       |     73.90ms     |
+|      99.9%      |   2232.12ms     |
+|       99%       |   1517.73ms     |
+|       95%       |    624.63ms     |
+|       90%       |    406.69ms     |
+|       75%       |    222.42ms     |
+|       50%       |    133.46ms     |
 + --------------- + --------------- +

-646 Errors: error shutting down connection: Socket is not connected (os error 57)
-
 sleep 3 secs
 ```

 #### With Wrk for `rpxy`, `nginx`, `caddy` and `sozu`

-```
+```bash
 ----------------------------
 Benchmark [x86_64] with Wrk
 ----------------------------
@@ -214,12 +201,11 @@ Benchmark on rpxy
 Running 15s test @ http://localhost:8080
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    18.68ms    8.09ms 122.64ms   74.03%
-    Req/Sec     6.95k   815.23     8.45k    83.83%
-  414819 requests in 15.01s, 326.37MB read
-  Socket errors: connect 0, read 608, write 0, timeout 0
-Requests/sec:  27627.79
-Transfer/sec:     21.74MB
+    Latency    15.65ms    6.94ms 104.73ms   81.28%
+    Req/Sec     8.36k     0.90k    9.90k    77.83%
+  499550 requests in 15.02s, 391.14MB read
+Requests/sec:  33267.61
+Transfer/sec:     26.05MB

 sleep 3 secs
 ----------------------------
@@ -227,12 +213,11 @@ Benchmark on nginx
 Running 15s test @ http://localhost:8090
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    23.34ms   13.80ms 126.06ms   74.66%
-    Req/Sec     5.71k   607.41     7.07k    73.17%
-  341127 requests in 15.03s, 277.50MB read
-  Socket errors: connect 0, read 641, write 0, timeout 0
-Requests/sec:  22701.54
-Transfer/sec:     18.47MB
+    Latency    24.26ms   15.29ms 167.43ms   73.34%
+    Req/Sec     5.53k   493.14     6.91k    69.67%
+  330569 requests in 15.02s, 268.91MB read
+Requests/sec:  22014.96
+Transfer/sec:     17.91MB

 sleep 3 secs
 ----------------------------
@@ -240,13 +225,13 @@ Benchmark on caddy
 Running 15s test @ http://localhost:8100
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    54.19ms   55.63ms 674.53ms   88.55%
-    Req/Sec     2.92k     1.40k    5.57k    56.17%
-  174748 requests in 15.03s, 140.61MB read
-  Socket errors: connect 0, read 660, write 0, timeout 0
-  Non-2xx or 3xx responses: 70
-Requests/sec:  11624.63
-Transfer/sec:      9.35MB
+    Latency   212.89ms  300.23ms   1.99s    86.56%
+    Req/Sec     1.31k     1.64k    5.72k    78.79%
+  67749 requests in 15.04s, 51.97MB read
+  Socket errors: connect 0, read 0, write 0, timeout 222
+  Non-2xx or 3xx responses: 3686
+Requests/sec:   4505.12
+Transfer/sec:      3.46MB

 sleep 3 secs
 ----------------------------
@@ -254,10 +239,9 @@ Benchmark on sozu
 Running 15s test @ http://localhost:8110
   4 threads and 512 connections
   Thread Stats   Avg      Stdev     Max   +/- Stdev
-    Latency    19.78ms    4.89ms  98.09ms   76.88%
-    Req/Sec     6.49k   824.75     8.11k    76.17%
-  387744 requests in 15.02s, 329.11MB read
-  Socket errors: connect 0, read 647, write 0, timeout 0
-Requests/sec:  25821.93
-Transfer/sec:     21.92MB
+    Latency    34.68ms    6.30ms  90.21ms   72.49%
+    Req/Sec     3.69k   397.85     5.08k    73.00%
+  220655 requests in 15.01s, 187.29MB read
+Requests/sec:  14699.17
+Transfer/sec:     12.48MB
 ```

bench/docker-compose.yml
@@ -1,4 +1,3 @@
-version: "3"
 services:
   nginx:
     image: nginx:alpine
@@ -28,7 +27,7 @@ services:
       dockerfile: docker/Dockerfile
     restart: unless-stopped
     environment:
-      - LOG_LEVEL=info
+      - LOG_LEVEL=error # almost nolog
      - LOG_TO_FILE=false
    ports:
      - 127.0.0.1:8080:8080
@@ -47,7 +46,7 @@ services:
    tty: false
    privileged: true
    volumes:
-      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
+      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
      - /var/run/docker.sock:/tmp/docker.sock:ro
    logging:
      options:
@@ -64,7 +63,7 @@ services:
    restart: unless-stopped
    tty: false
    volumes:
-      - ./Caddyfile:/etc/caddy/Caddyfile:ro
+      - ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
    networks:
      bench-nw:
@@ -82,7 +81,7 @@ services:
        max-size: "10m"
        max-file: "3"
    volumes:
-      - ./sozu-config.toml:/etc/sozu/config.toml
+      - ./sozu-config.toml:/etc/sozu/config.toml # set as almost nolog
    networks:
      bench-nw:
@@ -1,4 +1,3 @@
-version: "3"
 services:
   nginx:
     image: nginx:alpine
@@ -28,7 +27,7 @@ services:
      dockerfile: docker/Dockerfile
    restart: unless-stopped
    environment:
-      - LOG_LEVEL=info
+      - LOG_LEVEL=error # almost nolog
      - LOG_TO_FILE=false
    ports:
      - 127.0.0.1:8080:8080
@@ -47,7 +46,7 @@ services:
    tty: false
    privileged: true
    volumes:
-      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
+      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro # set as almost nolog
      - /var/run/docker.sock:/tmp/docker.sock:ro
    logging:
      options:
@@ -64,7 +63,7 @@ services:
    restart: unless-stopped
    tty: false
    volumes:
-      - ./Caddyfile:/etc/caddy/Caddyfile:ro
+      - ./Caddyfile:/etc/caddy/Caddyfile:ro # set as almost no log
    networks:
      bench-nw:

bench/nginx.conf
@@ -31,11 +31,14 @@
   # '"$request" $status $body_bytes_sent '
   # '"$http_referer" "$http_user_agent" '
   # '"$upstream_addr"';
   # access_log off;
+  access_log off;

   # ssl_protocols TLSv1.2 TLSv1.3;
   # ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
   # ssl_prefer_server_ciphers off;
   # error_log /dev/stderr;
+  error_log /dev/null crit;

   # resolver 127.0.0.11;
   # # HTTP 1.1 support
   # proxy_http_version 1.1;

bench/sozu-config.toml
@@ -1,4 +1,4 @@
-log_level = "info"
+log_level = "error"
 log_target = "stdout"
 max_connections = 512
 activate_listeners = true

config-example.toml
@@ -28,10 +28,10 @@ max_clients = 512
 listen_ipv6 = false

 # Optional: App that serves all plaintext http request by referring to HOSTS or request header
-# execpt for configured application.
+# except for configured application.
 # Note that this is only for http.
 # Note that nothing is served for requests via https since secure channel cannot be
-# established for unconfigured server_name, and they are always rejected by checking SNI.
+# established for non-configured server_name, and they are always rejected by checking SNI.
 default_app = 'another_localhost'

 ###################################
@@ -106,7 +106,7 @@ tls = { https_redirection = true, acme = true }
 # Experimantal settings #
 ###################################
 [experimental]
-# Higly recommend not to be true. If true, you ignore RFC. if not specified, it is always false.
+# Highly recommend not to be true. If true, you ignore RFC. If not specified, it is always false.
 # This might be required to be true when a certificate is used by multiple backend hosts, especially in case where a TLS connection is re-used.
 # We should note that this strongly depends on the client implementation.
 ignore_sni_consistency = false

docker/Dockerfile
@@ -2,13 +2,13 @@ FROM ubuntu:24.04 AS base
 LABEL maintainer="Jun Kurihara"

 SHELL ["/bin/sh", "-x", "-c"]
-ENV SERIAL 2
+ENV SERIAL=2

 ########################################
 FROM --platform=$BUILDPLATFORM base AS builder

 ENV CFLAGS=-Ofast
-ENV BUILD_DEPS curl make ca-certificates build-essential
+ENV BUILD_DEPS="curl make ca-certificates build-essential"
 ENV TARGET_SUFFIX=unknown-linux-gnu

 WORKDIR /tmp
@@ -17,9 +17,9 @@ COPY . /tmp/

 ARG TARGETARCH
 ARG CARGO_FEATURES
-ENV CARGO_FEATURES ${CARGO_FEATURES}
+ENV CARGO_FEATURES="${CARGO_FEATURES}"
 ARG ADDITIONAL_DEPS
-ENV ADDITIONAL_DEPS ${ADDITIONAL_DEPS}
+ENV ADDITIONAL_DEPS="${ADDITIONAL_DEPS}"

 RUN if [ $TARGETARCH = "amd64" ]; then \
   echo "x86_64" > /arch; \
@@ -30,7 +30,7 @@ RUN if [ $TARGETARCH = "amd64" ]; then \
   exit 1; \
   fi

-ENV RUSTFLAGS "-C link-arg=-s"
+ENV RUSTFLAGS="-C link-arg=-s"

 RUN update-ca-certificates 2> /dev/null || true
@@ -40,6 +40,7 @@ RUN apt-get update && apt-get install -qy --no-install-recommends $BUILD_DEPS ${ADDITIONAL_DEPS} && \
   echo "Install toolchain" && \
   rustup target add $(cat /arch)-${TARGET_SUFFIX} && \
   echo "Building rpxy from source" && \
+  cargo update && \
   cargo build --release --target=$(cat /arch)-${TARGET_SUFFIX} ${CARGO_FEATURES} && \
   strip --strip-all /tmp/target/$(cat /arch)-${TARGET_SUFFIX}/release/rpxy && \
   cp /tmp/target/$(cat /arch)-${TARGET_SUFFIX}/release/rpxy /tmp/target/release/rpxy
@@ -47,7 +48,7 @@ RUN apt-get update && apt-get install -qy --no-install-recommends $BUILD_DEPS ${ADDITIONAL_DEPS} && \
 ########################################
 FROM --platform=$TARGETPLATFORM base AS runner

-ENV RUNTIME_DEPS logrotate ca-certificates gosu
+ENV RUNTIME_DEPS="logrotate ca-certificates gosu"

 RUN apt-get update && \
   apt-get install -qy --no-install-recommends $RUNTIME_DEPS && \

docker/Dockerfile-slim
@@ -5,7 +5,7 @@ LABEL maintainer="Jun Kurihara"

 ARG TARGETARCH
 ARG CARGO_FEATURES
-ENV CARGO_FEATURES ${CARGO_FEATURES}
+ENV CARGO_FEATURES=${CARGO_FEATURES}

 RUN if [ $TARGETARCH = "amd64" ]; then \
   echo "x86_64" > /arch; \
@@ -22,9 +22,10 @@ WORKDIR /tmp

 COPY . /tmp/

-ENV RUSTFLAGS "-C link-arg=-s"
+ENV RUSTFLAGS="-C link-arg=-s"

 RUN echo "Building rpxy from source" && \
+  cargo update && \
   cargo build --release --target $(cat /arch)-unknown-linux-musl ${CARGO_FEATURES} && \
   musl-strip --strip-all /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy && \
   cp /tmp/target/$(cat /arch)-unknown-linux-musl/release/rpxy /tmp/target/release/rpxy
@@ -33,7 +34,7 @@ RUN echo "Building rpxy from source" && \
 ########################################
 FROM --platform=$TARGETPLATFORM alpine:latest AS runner
 LABEL maintainer="Jun Kurihara"

-ENV RUNTIME_DEPS logrotate ca-certificates su-exec
+ENV RUNTIME_DEPS="logrotate ca-certificates su-exec"

 RUN apk add --no-cache ${RUNTIME_DEPS} && \
   update-ca-certificates && \

docker/README.md
@@ -9,11 +9,10 @@ There are several docker-specific environment variables.
 - `HOST_USER` (default: `user`): User name executing `rpxy` inside the container.
 - `HOST_UID` (default: `900`): `UID` of `HOST_USER`.
 - `HOST_GID` (default: `900`): `GID` of `HOST_USER`
-- `LOG_LEVEL=debug|info|warn|error`: Log level
-- `LOG_TO_FILE=true|false`: Enable logging to the log file `/rpxy/log/rpxy.log` using `logrotate`. You should mount `/rpxy/log` via docker volume option if enabled. The log dir and file will be owned by the `HOST_USER` with `HOST_UID:HOST_GID` on the host machine. Hence, `HOST_USER`, `HOST_UID` and `HOST_GID` should be the same as ones of the user who executes the `rpxy` docker container on the host.
-- `WATCH=true|false` (default: `false`): Activate continuous watching of the config file if true.
+- `LOG_LEVEL=trace|debug|info|warn|error`: Log level
+- `LOG_TO_FILE=true|false`: Enable logging to the log files using `logrotate` (locations: system/error log = `/rpxy/log/rpxy.log`, and access log = `/rpxy/log/access.log`). You should mount `/rpxy/log` via docker volume option if enabled. The log dir and file will be owned by the `HOST_USER` with `HOST_UID:HOST_GID` on the host machine. Hence, `HOST_USER`, `HOST_UID` and `HOST_GID` should be the same as ones of the user who executes the `rpxy` docker container on the host.

-Then, all you need is to mount your `config.toml` as `/etc/rpxy.toml` and certificates/private keys as you like through the docker volume option. **If `WATCH=true`, you need to mount a directory, e.g., `./rpxy-config/`, including `rpxy.toml` on `/rpxy/config` instead of a file to correctly track file changes**. This is a docker limitation. Even if `WATCH=false`, you can mount the dir onto `/rpxy/config` rather than `/etc/rpxy.toml`. A file mounted on `/etc/rpxy` is prioritized over a dir mounted on `/rpxy/config`.
+Then, all you need is to mount your `config.toml` as `/etc/rpxy.toml` and certificates/private keys as you like through the docker volume option. **You need to mount a directory, e.g., `./rpxy-config/`, including `rpxy.toml` on `/rpxy/config` instead of a file to dynamically track file changes**. This is a docker limitation. You can mount the dir onto `/rpxy/config` rather than `/etc/rpxy.toml`. A file mounted on `/etc/rpxy` is prioritized over a dir mounted on `/rpxy/config`.

 See [`docker-compose.yml`](./docker-compose.yml) for the detailed configuration. Note that the file path of keys and certificates must be ones in your docker container.
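A hypothetical `docker run` sketch of the mounting rules above. Host paths and published ports are placeholders, and the container-side listen ports must match your `rpxy.toml`:

```sh
# Mount a config *directory* on /rpxy/config so file changes are tracked,
# and a log directory on /rpxy/log for LOG_TO_FILE=true
docker run -d --name rpxy \
  -v "$(pwd)/rpxy-config:/rpxy/config" \
  -v "$(pwd)/rpxy-log:/rpxy/log" \
  -e LOG_LEVEL=info -e LOG_TO_FILE=true \
  -e HOST_UID="$(id -u)" -e HOST_GID="$(id -g)" \
  -p 80:8080 -p 443:8443 \
  jqtype/rpxy:latest
```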
@@ -27,19 +26,25 @@ e.g. `-v rpxy/ca-certificates:/usr/local/share/ca-certificates`

 Differences among tags are summarized as follows.

-### Latest Builds
+### Latest and versioned builds

-- `latest`: Built from the `main` branch with default features, running on Ubuntu.
-- `latest-slim`, `slim`: Built by `musl` from the `main` branch with default features, running on Alpine.
-- `latest-s2n`, `s2n`: Built from the `main` branch with the `http3-s2n` feature, running on Ubuntu.
-- `*-pq`: Built with the `post-quantum` feature. This feature supports the post-quantum key exchange using `rustls-post-quantum` crate.
+Latest builds are shipped from the `main` branch when a new version is released. For example, when version `x.y.z` is released, the following images are provided.

-### Nightly Builds
+- `latest`, `x.y.z`: Built with default features, running on Ubuntu.
+- `latest-slim`, `slim`, `x.y.z-slim`: Built by `musl` with default features, running on Alpine.
+- `latest-s2n`, `s2n`, `x.y.z-s2n`: Built with the `http3-s2n` feature, running on Ubuntu.

-- `nightly`: Built from the `develop` branch with default features, running on Ubuntu.
-- `nightly-slim`: Built by `musl` from the `develop` branch with default features, running on Alpine.
-- `nightly-s2n`: Built from the `develop` branch with the `http3-s2n` feature, running on Ubuntu.
-- `*-pq`: Built with the `post-quantum` feature. This feature supports the hybridized post-quantum key exchange using `rustls-post-quantum` crate.
+Additionally, images built with `webpki-roots` are provided in a similar manner to the above (e.g., `latest-s2n-webpki-roots` and `s2n-webpki-roots` tagged for the same image).

+### Nightly builds
+
+Nightly builds are shipped from the `develop` branch for every push.
+
+- `nightly`: Built with default features, running on Ubuntu.
+- `nightly-slim`: Built by `musl` with default features, running on Alpine.
+- `nightly-s2n`: Built with the `http3-s2n` feature, running on Ubuntu.
+
+Additionally, images built with `webpki-roots` are provided in a similar manner to the above (e.g., `nightly-s2n-webpki-roots`).

 ## Caveats
@@ -20,12 +20,11 @@ services:
       # - "linux/amd64"
       - "linux/arm64"
     environment:
-      - LOG_LEVEL=debug
+      - LOG_LEVEL=trace
       - LOG_TO_FILE=true
       - HOST_USER=jun
       - HOST_UID=501
       - HOST_GID=501
-      # - WATCH=true
     tty: false
     privileged: true
     volumes:
@@ -20,12 +20,11 @@ services:
       # - "linux/amd64"
       - "linux/arm64"
     environment:
-      - LOG_LEVEL=debug
+      - LOG_LEVEL=trace
       - LOG_TO_FILE=true
       - HOST_USER=jun
       - HOST_UID=501
       - HOST_GID=501
-      # - WATCH=true
     tty: false
     privileged: true
     volumes:
@@ -1,6 +1,7 @@
 #!/usr/bin/env sh
 LOG_DIR=/rpxy/log
-LOG_FILE=${LOG_DIR}/rpxy.log
+SYSTEM_LOG_FILE=${LOG_DIR}/rpxy.log
+ACCESS_LOG_FILE=${LOG_DIR}/access.log
 LOG_SIZE=10M
 LOG_NUM=10
@@ -43,8 +44,24 @@ include /etc/logrotate.d
 # system-specific logs may be also be configured here.
 EOF

-cat > /etc/logrotate.d/rpxy.conf << EOF
-${LOG_FILE} {
+cat > /etc/logrotate.d/rpxy-system.conf << EOF
+${SYSTEM_LOG_FILE} {
   dateext
   daily
   missingok
+  rotate ${LOG_NUM}
+  notifempty
+  compress
+  delaycompress
+  dateformat -%Y-%m-%d-%s
+  size ${LOG_SIZE}
+  copytruncate
+  su ${USER} ${USER}
+}
+EOF
+
+cat > /etc/logrotate.d/rpxy-access.conf << EOF
+${ACCESS_LOG_FILE} {
+  dateext
+  daily
+  missingok
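A quick way to sanity-check the two generated rotation configs without touching any logs, using logrotate's standard debug flag:

```sh
# Debug mode parses the config and prints what would be rotated, changing nothing
logrotate -d /etc/logrotate.d/rpxy-system.conf
logrotate -d /etc/logrotate.d/rpxy-access.conf
```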
@@ -157,10 +174,4 @@ fi
 # Run rpxy
 cd /rpxy
 echo "rpxy: Start with user: ${USER} (${USER_ID}:${GROUP_ID})"
-if "${LOGGING}"; then
-  echo "rpxy: Start with writing log file"
-  gosu ${USER} sh -c "/rpxy/run.sh 2>&1 | tee ${LOG_FILE}"
-else
-  echo "rpxy: Start without writing log file"
-  gosu ${USER} sh -c "/rpxy/run.sh 2>&1"
-fi
+gosu ${USER} sh -c "/rpxy/run.sh 2>&1"
@@ -1,5 +1,7 @@
 #!/usr/bin/env sh
 CONFIG_FILE=/etc/rpxy.toml
+LOG_DIR=/rpxy/log
+LOGGING=${LOG_TO_FILE:-false}

 # debug level logging
 if [ -z $LOG_LEVEL ]; then
@@ -7,19 +9,11 @@ if [ -z $LOG_LEVEL ]; then
 fi
 echo "rpxy: Logging with level ${LOG_LEVEL}"

-# continuously watch and reload the config file
-if [ -z $WATCH ]; then
-  WATCH=false
-else
-  if [ "$WATCH" = "true" ]; then
-    WATCH=true
-  else
-    WATCH=false
-  fi
-fi
-
-if $WATCH ; then
-  RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE} -w
+if "${LOGGING}"; then
+  echo "rpxy: Start with writing log files"
+  RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE} --log-dir ${LOG_DIR}
 else
+  echo "rpxy: Start without writing log files"
   RUST_LOG=${LOG_LEVEL} /rpxy/bin/rpxy --config ${CONFIG_FILE}
 fi

rpxy-acme/Cargo.toml
@@ -14,25 +14,28 @@ publish.workspace = true
 post-quantum = ["rustls-post-quantum"]

 [dependencies]
-url = { version = "2.5.2" }
-rustc-hash = "2.0.0"
-thiserror = "1.0.66"
-tracing = "0.1.40"
-async-trait = "0.1.83"
+url = { version = "2.5.4" }
+ahash = "0.8.12"
+thiserror = "2.0.12"
+tracing = "0.1.41"
+async-trait = "0.1.88"
 base64 = "0.22.1"
-aws-lc-rs = { version = "1.10.0", default-features = false, features = [
+aws-lc-rs = { version = "1.13.1", default-features = false, features = [
   "aws-lc-sys",
 ] }
 blocking = "1.6.1"
-rustls = { version = "0.23.16", default-features = false, features = [
+rustls = { version = "0.23.27", default-features = false, features = [
   "std",
   "aws_lc_rs",
 ] }
-rustls-platform-verifier = { version = "0.3.4" }
+rustls-platform-verifier = { version = "0.6.0" }
 rustls-acme = { path = "../submodules/rustls-acme/", default-features = false, features = [
   "aws-lc-rs",
 ] }
-rustls-post-quantum = { version = "0.1.0", optional = true }
-tokio = { version = "1.41.0", default-features = false }
-tokio-util = { version = "0.7.12", default-features = false }
-tokio-stream = { version = "0.1.16", default-features = false }
+rustls-post-quantum = { version = "0.2.2", optional = true }
+tokio = { version = "1.45.1", default-features = false, features = [
+  "rt",
+  "macros",
+] }
+tokio-util = { version = "0.7.15", default-features = false }
+tokio-stream = { version = "0.1.17", default-features = false }
@@ -12,4 +12,7 @@ pub enum RpxyAcmeError {
   /// IO error
   #[error("IO error: {0}")]
   Io(#[from] std::io::Error),
+  /// TLS client configuration error
+  #[error("TLS client configuration error: {0}")]
+  TlsClientConfig(String),
 }
@@ -4,7 +4,7 @@ use crate::{
   error::RpxyAcmeError,
   log::*,
 };
-use rustc_hash::FxHashMap as HashMap;
+use ahash::HashMap;
 use rustls::ServerConfig;
 use rustls_acme::AcmeConfig;
 use std::{path::PathBuf, sync::Arc};
@@ -77,13 +77,9 @@ impl AcmeManager {
   /// Returns a Vec<JoinHandle<()>> as a tasks handles and a map of domain to ServerConfig for challenge.
   pub fn spawn_manager_tasks(
     &self,
-    cancel_token: Option<tokio_util::sync::CancellationToken>,
+    cancel_token: tokio_util::sync::CancellationToken,
   ) -> (Vec<tokio::task::JoinHandle<()>>, HashMap<String, Arc<ServerConfig>>) {
-    let rustls_client_config = rustls::ClientConfig::builder()
-      .dangerous() // The `Verifier` we're using is actually safe
-      .with_custom_certificate_verifier(Arc::new(rustls_platform_verifier::Verifier::new()))
-      .with_no_client_auth();
-    let rustls_client_config = Arc::new(rustls_client_config);
+    let rustls_client_config = Self::create_tls_client_config().expect("Failed to create TLS client configuration for ACME");

     let mut server_configs_for_challenge: HashMap<String, Arc<ServerConfig>> = HashMap::default();
     let join_handles = self
@@ -115,13 +111,10 @@ impl AcmeManager {
           }
         }
       };
-      if let Some(cancel_token) = cancel_token.as_ref() {
-        tokio::select! {
-          _ = task => {},
-          _ = cancel_token.cancelled() => { debug!("rpxy ACME manager task for {domain} terminated") }
-        }
-      } else {
-        task.await;
+
+      tokio::select! {
+        _ = task => {},
+        _ = cancel_token.cancelled() => { debug!("rpxy ACME manager task for {domain} terminated") }
       }
     })
@@ -130,6 +123,26 @@ impl AcmeManager {

     (join_handles, server_configs_for_challenge)
   }
+
+  /// Creates a TLS client configuration with platform certificate verification.
+  ///
+  /// This configuration uses the system's certificate store for verification,
+  /// which is appropriate for ACME certificate validation.
+  fn create_tls_client_config() -> Result<Arc<rustls::ClientConfig>, RpxyAcmeError> {
+    let crypto_provider = rustls::crypto::CryptoProvider::get_default().ok_or(RpxyAcmeError::TlsClientConfig(
+      "No default crypto provider available".to_string(),
+    ))?;
+
+    let verifier = rustls_platform_verifier::Verifier::new(crypto_provider.clone())
+      .map_err(|e| RpxyAcmeError::TlsClientConfig(format!("Failed to create certificate verifier: {}", e)))?;
+
+    let client_config = rustls::ClientConfig::builder()
+      .dangerous() // Safe: using platform certificate verifier
+      .with_custom_certificate_verifier(Arc::new(verifier))
+      .with_no_client_auth();
+
+    Ok(Arc::new(client_config))
+  }
 }

 #[cfg(test)]

rpxy-bin/Cargo.toml
@ -13,10 +13,8 @@ publish.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
# default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie", "post-quantum"]
default = ["http3-quinn", "cache", "rustls-backend", "acme", "sticky-cookie"]
# default = ["http3-s2n", "cache", "rustls-backend", "acme", "sticky-cookie"]
default = ["http3-quinn", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
# default = ["http3-s2n", "cache", "rustls-backend", "sticky-cookie", "acme", "post-quantum"]
http3-quinn = ["rpxy-lib/http3-quinn"]
http3-s2n = ["rpxy-lib/http3-s2n"]
native-tls-backend = ["rpxy-lib/native-tls-backend"]
@ -30,30 +28,32 @@ sticky-cookie = ["rpxy-lib/sticky-cookie"]
[dependencies]
rpxy-lib = { path = "../rpxy-lib/", default-features = false }

mimalloc = { version = "*", default-features = false }
anyhow = "1.0.91"
rustc-hash = "2.0.0"
serde = { version = "1.0.214", default-features = false, features = ["derive"] }
tokio = { version = "1.41.0", default-features = false, features = [
# TODO: pin mimalloc due to compilation failure by musl
mimalloc = { version = "=0.1.44", default-features = false }
libmimalloc-sys = { version = "=0.1.40" }
anyhow = "1.0.98"
ahash = "0.8.12"
serde = { version = "1.0.219", default-features = false, features = ["derive"] }
tokio = { version = "1.45.1", default-features = false, features = [
  "net",
  "rt-multi-thread",
  "time",
  "sync",
  "macros",
] }
tokio-util = { version = "0.7.12", default-features = false }
async-trait = "0.1.83"
tokio-util = { version = "0.7.15", default-features = false }
async-trait = "0.1.88"
futures-util = { version = "0.3.31", default-features = false }

# config
clap = { version = "4.5.20", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.19", default-features = false, features = ["parse"] }
hot_reload = "0.1.6"
serde_ignored = "0.1.10"
clap = { version = "4.5.39", features = ["std", "cargo", "wrap_help"] }
toml = { version = "0.8.22", default-features = false, features = ["parse"] }
hot_reload = "0.1.9"
serde_ignored = "0.1.12"

# logging
tracing = { version = "0.1.40" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
tracing = { version = "0.1.41" }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }

################################
# cert management
@ -1,21 +1,29 @@
use super::toml::ConfigToml;
use super::toml::{ConfigToml, ConfigTomlExt};
use crate::error::{anyhow, ensure};
use clap::{Arg, ArgAction};
use ahash::HashMap;
use clap::Arg;
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_certs::{build_cert_reloader, CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase};
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig};
use rustc_hash::FxHashMap as HashMap;
use rpxy_certs::{CryptoFileSourceBuilder, CryptoReloader, ServerCryptoBase, build_cert_reloader};
use rpxy_lib::{AppConfigList, ProxyConfig};

#[cfg(feature = "acme")]
use rpxy_acme::{AcmeManager, ACME_DIR_URL, ACME_REGISTRY_PATH};
use rpxy_acme::{ACME_DIR_URL, ACME_REGISTRY_PATH, AcmeManager};

/// Parsed options
/// Parsed options from CLI
/// Options for configuring the application.
///
/// # Fields
/// - `config_file_path`: Path to the configuration file.
/// - `log_dir_path`: Optional path to the log directory.
pub struct Opts {
  pub config_file_path: String,
  pub watch: bool,
  pub log_dir_path: Option<String>,
}

/// Parse arg values passed from cli
/// Parses command-line arguments into an [`Opts`](rpxy-bin/src/config/parse.rs:13) struct.
///
/// Returns a populated [`Opts`](rpxy-bin/src/config/parse.rs:13) on success, or an error if parsing fails.
/// Expects a required `--config` argument and an optional `--log-dir` argument.
pub fn parse_opts() -> Result<Opts, anyhow::Error> {
  let _ = include_str!("../../Cargo.toml");
  let options = clap::command!()
@ -28,78 +36,60 @@ pub fn parse_opts() -> Result<Opts, anyhow::Error> {
        .help("Configuration file path like ./config.toml"),
    )
    .arg(
      Arg::new("watch")
        .long("watch")
        .short('w')
        .action(ArgAction::SetTrue)
        .help("Activate dynamic reloading of the config file via continuous monitoring"),
      Arg::new("log_dir")
        .long("log-dir")
        .short('l')
        .value_name("LOG_DIR")
        .help("Directory for log files. If not specified, logs are printed to stdout."),
    );
  let matches = options.get_matches();

  ///////////////////////////////////
  let config_file_path = matches.get_one::<String>("config_file").unwrap().to_owned();
  let watch = matches.get_one::<bool>("watch").unwrap().to_owned();
  let log_dir_path = matches.get_one::<String>("log_dir").map(|v| v.to_owned());

  Ok(Opts { config_file_path, watch })
  Ok(Opts {
    config_file_path,
    log_dir_path,
  })
}

pub fn build_settings(config: &ConfigToml) -> std::result::Result<(ProxyConfig, AppConfigList), anyhow::Error> {
  // build proxy config
  let proxy_config: ProxyConfig = config.try_into()?;

  // backend_apps
  let apps = config.apps.clone().ok_or(anyhow!("Missing application spec"))?;

  // assertions for all backend apps
  ensure!(!apps.0.is_empty(), "Wrong application spec.");
  // if only https_port is specified, tls must be configured for all apps
  if proxy_config.http_port.is_none() {
    ensure!(
      apps.0.iter().all(|(_, app)| app.tls.is_some()),
      "Some apps serves only plaintext HTTP"
    );
  }
  // https redirection port must be configured only when both http_port and https_port are configured.
  if proxy_config.https_redirection_port.is_some() {
    ensure!(
      proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
      "https_redirection_port can be specified only when both http_port and https_port are specified"
    );
  }
  // https redirection can be configured if both ports are active
  if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
    ensure!(
      apps.0.iter().all(|(_, app)| {
        if let Some(tls) = app.tls.as_ref() {
          tls.https_redirection.is_none()
        } else {
          true
        }
      }),
      "https_redirection can be specified only when both http_port and https_port are specified"
    );
  }

  // build applications
  let mut app_config_list_inner = Vec::<AppConfig>::new();

  for (app_name, app) in apps.0.iter() {
    let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
    let registered_app_name = app_name.to_ascii_lowercase();
    let app_config = app.build_app_config(&registered_app_name)?;
    app_config_list_inner.push(app_config);
  }

  let app_config_list = AppConfigList {
    inner: app_config_list_inner,
    default_app: config.default_app.clone().map(|v| v.to_ascii_lowercase()), // default backend application for plaintext http requests
  };

  Ok((proxy_config, app_config_list))
/// Build proxy and app settings from config using ConfigTomlExt
pub fn build_settings(config: &ConfigToml) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
  config.validate_and_build_settings()
}

/* ----------------------- */

/// Helper to build a CryptoFileSource for an app, handling ACME if enabled
#[cfg(feature = "acme")]
fn build_tls_for_app_acme(
  tls: &mut super::toml::TlsOption,
  acme_option: &Option<super::toml::AcmeOption>,
  server_name: &str,
  acme_registry_path: &str,
  acme_dir_url: &str,
) -> Result<(), anyhow::Error> {
  if let Some(true) = tls.acme {
    ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
    let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
    let file_name =
      rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
    let cert_path = format!("{}/{}", subdir, file_name);
    tls.tls_cert_key_path = Some(cert_path.clone());
    tls.tls_cert_path = Some(cert_path);
  }
  Ok(())
}

/// Build cert map
/// Builds the certificate manager for TLS applications.
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
///
/// # Returns
/// Returns an option containing a tuple of certificate reloader service and receiver, or `None` if TLS is not enabled.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_cert_manager(
  config: &ConfigToml,
) -> Result<
@ -136,19 +126,9 @@ pub async fn build_cert_manager(
    ensure!(tls.tls_cert_key_path.is_some() && tls.tls_cert_path.is_some());

    #[cfg(feature = "acme")]
    let tls = {
      let mut tls = tls.clone();
      if let Some(true) = tls.acme {
        ensure!(acme_option.is_some() && tls.tls_cert_key_path.is_none() && tls.tls_cert_path.is_none());
        // Both of tls_cert_key_path and tls_cert_path must be the same for ACME since it's a single file
        let subdir = format!("{}/{}", acme_registry_path, server_name.to_ascii_lowercase());
        let file_name =
          rpxy_acme::DirCache::cached_cert_file_name(&[server_name.to_ascii_lowercase()], acme_dir_url.to_ascii_lowercase());
        tls.tls_cert_key_path = Some(format!("{}/{}", subdir, file_name));
        tls.tls_cert_path = Some(format!("{}/{}", subdir, file_name));
      }
      tls
    };
    let mut tls = tls.clone();
    #[cfg(feature = "acme")]
    build_tls_for_app_acme(&mut tls, &acme_option, server_name, acme_registry_path, acme_dir_url)?;

    let crypto_file_source = CryptoFileSourceBuilder::default()
      .tls_cert_path(tls.tls_cert_path.as_ref().unwrap())
@ -165,24 +145,31 @@
/* ----------------------- */
#[cfg(feature = "acme")]
/// Build acme manager
/// Builds the ACME manager for automatic certificate management (enabled with the `acme` feature).
///
/// # Arguments
/// * `config` - Reference to the parsed configuration.
/// * `runtime_handle` - Tokio runtime handle for async operations.
///
/// # Returns
/// Returns an option containing an [`AcmeManager`](rpxy-bin/src/config/parse.rs:153) if ACME is configured, or `None` otherwise.
/// Returns an error if configuration is invalid or required fields are missing.
pub async fn build_acme_manager(
  config: &ConfigToml,
  runtime_handle: tokio::runtime::Handle,
) -> Result<Option<AcmeManager>, anyhow::Error> {
  let acme_option = config.experimental.as_ref().and_then(|v| v.acme.clone());
  if acme_option.is_none() {
  let Some(acme_option) = acme_option else {
    return Ok(None);
  }
  let acme_option = acme_option.unwrap();
  };

  let domains = config
  let domains: Vec<String> = config
    .apps
    .as_ref()
    .unwrap()
    .0
    .values()
    .filter_map(|app| {
      //
      if let Some(tls) = app.tls.as_ref() {
        if let Some(true) = tls.acme {
          return Some(app.server_name.as_ref().unwrap().to_owned());

@ -190,7 +177,7 @@ pub async fn build_acme_manager(
      }
      None
    })
    .collect::<Vec<_>>();
    .collect();

  if domains.is_empty() {
    return Ok(None);
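The refactor above swaps the `is_none()`/`unwrap()` pair for a let-else binding, which checks and unwraps in one step. A tiny standalone sketch of the pattern (toy names, not rpxy's):

fn lookup(maybe_option: Option<String>) -> Result<Option<String>, anyhow::Error> {
  // let-else: bind on Some(..), or take the divergent else-branch.
  let Some(acme_option) = maybe_option else {
    return Ok(None); // no unwrap() needed afterwards
  };
  Ok(Some(acme_option))
}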
@ -8,17 +8,16 @@ pub struct ConfigTomlReloader {
}

#[async_trait]
impl Reload<ConfigToml> for ConfigTomlReloader {
impl Reload<ConfigToml, String> for ConfigTomlReloader {
  type Source = String;
  async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml>> {
  async fn new(source: &Self::Source) -> Result<Self, ReloaderError<ConfigToml, String>> {
    Ok(Self {
      config_path: source.clone(),
    })
  }

  async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml>> {
    let conf = ConfigToml::new(&self.config_path)
      .map_err(|_e| ReloaderError::<ConfigToml>::Reload("Failed to reload config toml"))?;
  async fn reload(&self) -> Result<Option<ConfigToml>, ReloaderError<ConfigToml, String>> {
    let conf = ConfigToml::new(&self.config_path).map_err(|e| ReloaderError::<ConfigToml, String>::Reload(e.to_string()))?;
    Ok(Some(conf))
  }
}
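hot_reload 0.1.9 adds a second generic parameter for the reload error's payload, which is why every `Reload`, `ReloaderError`, `ReloaderService`, and `ReloaderReceiver` in this diff gains a `String` argument; the error can now carry the real failure text instead of a static message. A condensed sketch of the wiring, assuming the 0.1.9 API exactly as it is used in this commit (toy `Config` type, hypothetical path):

use async_trait::async_trait;
use hot_reload::{Reload, ReloaderError, ReloaderService};

#[derive(Clone, PartialEq, Eq)]
struct Config(String);

struct ConfigReloader { path: String }

#[async_trait]
impl Reload<Config, String> for ConfigReloader {
  type Source = String;
  async fn new(source: &Self::Source) -> Result<Self, ReloaderError<Config, String>> {
    Ok(Self { path: source.clone() })
  }
  async fn reload(&self) -> Result<Option<Config>, ReloaderError<Config, String>> {
    // Dynamic error text flows through the String parameter.
    let body = std::fs::read_to_string(&self.path)
      .map_err(|e| ReloaderError::<Config, String>::Reload(e.to_string()))?;
    Ok(Some(Config(body)))
  }
}

async fn run() {
  // (service, receiver), polling every 15 seconds, mirroring main.rs below.
  let (_service, _rx) =
    ReloaderService::<ConfigReloader, Config, String>::new(&"config.toml".to_string(), 15, false)
      .await
      .unwrap();
}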
@ -3,13 +3,26 @@ use crate::{
  error::{anyhow, ensure},
  log::warn,
};
use rpxy_lib::{reexports::Uri, AppConfig, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
use rustc_hash::FxHashMap as HashMap;
use ahash::HashMap;
use rpxy_lib::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri, reexports::Uri};
use serde::Deserialize;
use std::{fs, net::SocketAddr};
use tokio::time::Duration;

#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// Main configuration structure parsed from the TOML file.
///
/// # Fields
/// - `listen_port`: Optional TCP port for HTTP.
/// - `listen_port_tls`: Optional TCP port for HTTPS/TLS.
/// - `listen_ipv6`: Enable IPv6 listening.
/// - `https_redirection_port`: Optional port for HTTP to HTTPS redirection.
/// - `tcp_listen_backlog`: Optional TCP backlog size.
/// - `max_concurrent_streams`: Optional max concurrent streams.
/// - `max_clients`: Optional max client connections.
/// - `apps`: Optional application definitions.
/// - `default_app`: Optional default application name.
/// - `experimental`: Optional experimental features.
pub struct ConfigToml {
  pub listen_port: Option<u16>,
  pub listen_port_tls: Option<u16>,
@ -23,8 +36,75 @@ pub struct ConfigToml {
  pub experimental: Option<Experimental>,
}

/// Extension trait for config validation and building
pub trait ConfigTomlExt {
  fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error>;
}

impl ConfigTomlExt for ConfigToml {
  fn validate_and_build_settings(&self) -> Result<(ProxyConfig, AppConfigList), anyhow::Error> {
    let proxy_config: ProxyConfig = self.try_into()?;
    let apps = self.apps.as_ref().ok_or(anyhow!("Missing application spec"))?;

    // Ensure at least one app is defined
    ensure!(!apps.0.is_empty(), "Wrong application spec.");

    // Helper: all apps have TLS
    let all_apps_have_tls = apps.0.values().all(|app| app.tls.is_some());

    // Helper: all apps have https_redirection unset
    let all_apps_no_https_redirection = apps.0.values().all(|app| {
      if let Some(tls) = app.tls.as_ref() {
        tls.https_redirection.is_none()
      } else {
        true
      }
    });

    if proxy_config.http_port.is_none() {
      ensure!(all_apps_have_tls, "Some apps serve only plaintext HTTP");
    }
    if proxy_config.https_redirection_port.is_some() {
      ensure!(
        proxy_config.https_port.is_some() && proxy_config.http_port.is_some(),
        "https_redirection_port can be specified only when both http_port and https_port are specified"
      );
    }
    if !(proxy_config.https_port.is_some() && proxy_config.http_port.is_some()) {
      ensure!(
        all_apps_no_https_redirection,
        "https_redirection can be specified only when both http_port and https_port are specified"
      );
    }

    // Build AppConfigList
    let mut app_config_list_inner = Vec::<AppConfig>::new();
    for (app_name, app) in apps.0.iter() {
      let _server_name_string = app.server_name.as_ref().ok_or(anyhow!("No server name"))?;
      let registered_app_name = app_name.to_ascii_lowercase();
      let app_config = app.build_app_config(&registered_app_name)?;
      app_config_list_inner.push(app_config);
    }
    let app_config_list = AppConfigList {
      inner: app_config_list_inner,
      default_app: self.default_app.clone().map(|v| v.to_ascii_lowercase()),
    };

    Ok((proxy_config, app_config_list))
  }
}

#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
#[derive(Deserialize, Debug, Default, PartialEq, Eq, Clone)]
/// HTTP/3 protocol options for server configuration.
///
/// # Fields
/// - `alt_svc_max_age`: Optional max age for Alt-Svc header.
/// - `request_max_body_size`: Optional maximum request body size.
/// - `max_concurrent_connections`: Optional maximum concurrent connections.
/// - `max_concurrent_bidistream`: Optional maximum concurrent bidirectional streams.
/// - `max_concurrent_unistream`: Optional maximum concurrent unidirectional streams.
/// - `max_idle_timeout`: Optional maximum idle timeout in milliseconds.
pub struct Http3Option {
  pub alt_svc_max_age: Option<u32>,
  pub request_max_body_size: Option<usize>,
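The `ConfigTomlExt` trait introduced in the hunk above moves validation onto the config type itself, so parse.rs shrinks to a thin forwarder. A minimal sketch of the extension-trait pattern on toy types (not the rpxy ones):

struct Settings { port: Option<u16> }

// Extension trait: adds behavior to a type without touching its definition.
trait SettingsExt {
  fn validate(&self) -> Result<u16, String>;
}

impl SettingsExt for Settings {
  fn validate(&self) -> Result<u16, String> {
    self.port.ok_or_else(|| "missing port".to_string())
  }
}

fn main() {
  let s = Settings { port: Some(8080) };
  // Callers just bring the trait into scope to use the method.
  assert_eq!(s.validate(), Ok(8080));
}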
@ -232,7 +312,7 @@ impl ConfigToml {

    // Check unused fields during deserialization
    let t = toml::de::Deserializer::new(&config_str);
    let mut unused = rustc_hash::FxHashSet::default();
    let mut unused = ahash::HashSet::default();

    let res = serde_ignored::deserialize(t, |path| {
      unused.insert(path.to_string());
@ -1,7 +1,13 @@
/// Default IPv4 listen addresses for the server.
pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
/// Default IPv6 listen addresses for the server.
pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
/// Delay in seconds before reloading the configuration after changes.
pub const CONFIG_WATCH_DELAY_SECS: u32 = 15;

#[cfg(feature = "cache")]
// Cache directory
/// Directory path for cache storage (enabled with "cache" feature).
pub const CACHE_DIR: &str = "./cache";

pub(crate) const ACCESS_LOG_FILE: &str = "access.log";
pub(crate) const SYSTEM_LOG_FILE: &str = "rpxy.log";
@ -1,2 +1,2 @@
#[allow(unused)]
pub use anyhow::{anyhow, bail, ensure, Context};
pub use anyhow::{Context, anyhow, bail, ensure};
@ -1,44 +1,126 @@
use crate::constants::{ACCESS_LOG_FILE, SYSTEM_LOG_FILE};
use rpxy_lib::log_event_names;
use std::str::FromStr;
use tracing_subscriber::{fmt, prelude::*};
use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};

#[allow(unused)]
pub use tracing::{debug, error, info, warn};

/// Initialize the logger with the RUST_LOG environment variable.
pub fn init_logger() {
  let level_string = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string());
  let level = tracing::Level::from_str(level_string.as_str()).unwrap_or(tracing::Level::INFO);
pub fn init_logger(log_dir_path: Option<&str>) {
  let level = std::env::var("RUST_LOG")
    .ok()
    .and_then(|s| tracing::Level::from_str(&s).ok())
    .unwrap_or(tracing::Level::INFO);

  // This limits the logger to emits only this crate with any level above RUST_LOG, for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
  let stdio_layer = fmt::layer().with_level(true).with_thread_ids(false);
  if level <= tracing::Level::INFO {
    // in normal deployment environment
    let stdio_layer = stdio_layer
      .with_target(false)
      .compact()
      .with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
        (metadata
          .target()
          .starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
          && metadata.level() <= &level)
          || metadata.level() <= &tracing::Level::ERROR.min(level)
      }));
    tracing_subscriber::registry().with(stdio_layer).init();
  } else {
    // debugging
    let stdio_layer = stdio_layer
  match log_dir_path {
    None => init_stdio_logger(level),
    Some(path) => init_file_logger(level, path),
  }
}

/// file logging
fn init_file_logger(level: tracing::Level, log_dir_path: &str) {
  println!("Activate logging to files: {}", log_dir_path);
  let log_dir = std::path::Path::new(log_dir_path);

  if !log_dir.exists() {
    println!("Directory does not exist, creating: {}", log_dir.display());
    std::fs::create_dir_all(log_dir).expect("Failed to create log directory");
  }

  let access_log_path = log_dir.join(ACCESS_LOG_FILE);
  let system_log_path = log_dir.join(SYSTEM_LOG_FILE);

  println!("Access log: {}", access_log_path.display());
  println!("System and error log: {}", system_log_path.display());

  let access_log = open_log_file(&access_log_path);
  let system_log = open_log_file(&system_log_path);

  let access_layer = fmt::layer()
    .with_line_number(false)
    .with_thread_ids(false)
    .with_thread_names(false)
    .with_target(false)
    .with_level(false)
    .compact()
    .with_ansi(false)
    .with_writer(access_log)
    .with_filter(AccessLogFilter);

  let system_layer = fmt::layer()
    .with_line_number(false)
    .with_thread_ids(false)
    .with_thread_names(false)
    .with_target(false)
    .with_level(true)
    .compact()
    .with_ansi(false)
    .with_writer(system_log)
    .with_filter(filter_fn(move |metadata| {
      (is_cargo_pkg(metadata) && metadata.name() != log_event_names::ACCESS_LOG && metadata.level() <= &level)
        || metadata.level() <= &tracing::Level::WARN.min(level)
    }));

  tracing_subscriber::registry().with(access_layer).with(system_layer).init();
}

/// stdio logging
fn init_stdio_logger(level: tracing::Level) {
  // This limits the logger to emit only this crate with any level above RUST_LOG,
  // for included crates it will emit only ERROR (in prod)/INFO (in dev) or above level.
  let base_layer = fmt::layer().with_level(true).with_thread_ids(false);

  let debug = level > tracing::Level::INFO;
  let filter = filter_fn(move |metadata| {
    if debug {
      (is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::INFO.min(level)
    } else {
      (is_cargo_pkg(metadata) && metadata.level() <= &level) || metadata.level() <= &tracing::Level::WARN.min(level)
    }
  });

  let stdio_layer = if debug {
    base_layer
      .with_line_number(true)
      .with_target(true)
      .with_thread_names(true)
      .with_target(true)
      .compact()
      .with_filter(tracing_subscriber::filter::filter_fn(move |metadata| {
        (metadata
          .target()
          .starts_with(env!("CARGO_PKG_NAME").replace('-', "_").as_str())
          && metadata.level() <= &level)
          || metadata.level() <= &tracing::Level::INFO.min(level)
      }));
    tracing_subscriber::registry().with(stdio_layer).init();
      .with_filter(filter)
  } else {
    base_layer.with_target(false).compact().with_filter(filter)
  };

  tracing_subscriber::registry().with(stdio_layer).init();
}

/// Access log filter
struct AccessLogFilter;
impl<S> tracing_subscriber::layer::Filter<S> for AccessLogFilter {
  fn enabled(&self, metadata: &tracing::Metadata<'_>, _: &tracing_subscriber::layer::Context<'_, S>) -> bool {
    is_cargo_pkg(metadata) && metadata.name().contains(log_event_names::ACCESS_LOG) && metadata.level() <= &tracing::Level::INFO
  }
}

#[inline]
/// Create a file for logging
fn open_log_file<P>(path: P) -> std::fs::File
where
  P: AsRef<std::path::Path>,
{
  // create a file if it does not exist
  std::fs::OpenOptions::new()
    .create(true)
    .append(true)
    .open(path)
    .expect("Failed to open the log file")
}

#[inline]
/// Matches cargo package name with `_` instead of `-`
fn is_cargo_pkg(metadata: &tracing::Metadata<'_>) -> bool {
  let pkg_name = env!("CARGO_PKG_NAME").replace('-', "_");
  metadata.target().starts_with(&pkg_name)
}
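The rewritten logger composes per-layer filters instead of one global subscriber. A minimal, runnable sketch of the same filter_fn + registry composition with tracing-subscriber (the target prefix and levels are illustrative):

use tracing_subscriber::{filter::filter_fn, fmt, prelude::*};

fn main() {
  let level = tracing::Level::INFO;
  // Keep this crate's events up to `level`; cap everything else at WARN,
  // mirroring the shape of the filters in the hunk above.
  let layer = fmt::layer().compact().with_filter(filter_fn(move |metadata| {
    (metadata.target().starts_with("my_crate") && metadata.level() <= &level)
      || metadata.level() <= &tracing::Level::WARN
  }));
  tracing_subscriber::registry().with(layer).init();
  tracing::info!("logger initialized");
}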
@ -9,19 +9,17 @@ mod log;
#[cfg(feature = "acme")]
use crate::config::build_acme_manager;
use crate::{
  config::{build_cert_manager, build_settings, parse_opts, ConfigToml, ConfigTomlReloader},
  config::{ConfigToml, ConfigTomlReloader, build_cert_manager, build_settings, parse_opts},
  constants::CONFIG_WATCH_DELAY_SECS,
  error::*,
  log::*,
};
use hot_reload::{ReloaderReceiver, ReloaderService};
use rpxy_lib::{entrypoint, RpxyOptions, RpxyOptionsBuilder};
use rpxy_lib::{RpxyOptions, RpxyOptionsBuilder, entrypoint};
use std::sync::Arc;
use tokio_util::sync::CancellationToken;

fn main() {
  init_logger();

  let mut runtime_builder = tokio::runtime::Builder::new_multi_thread();
  runtime_builder.enable_all();
  runtime_builder.thread_name("rpxy");
@ -30,37 +28,34 @@ fn main() {
  runtime.block_on(async {
    // Initially load options
    let Ok(parsed_opts) = parse_opts() else {
      error!("Invalid toml file");
      std::process::exit(1);
    };

    if !parsed_opts.watch {
      if let Err(e) = rpxy_service_without_watcher(&parsed_opts.config_file_path, runtime.handle().clone()).await {
        error!("rpxy service existed: {e}");
        std::process::exit(1);
      }
    } else {
      let (config_service, config_rx) =
        ReloaderService::<ConfigTomlReloader, ConfigToml>::new(&parsed_opts.config_file_path, CONFIG_WATCH_DELAY_SECS, false)
          .await
          .unwrap();
    init_logger(parsed_opts.log_dir_path.as_deref());

      tokio::select! {
        config_res = config_service.start() => {
          if let Err(e) = config_res {
            error!("config reloader service exited: {e}");
            std::process::exit(1);
          }
        }
        rpxy_res = rpxy_service_with_watcher(config_rx, runtime.handle().clone()) => {
          if let Err(e) = rpxy_res {
            error!("rpxy service existed: {e}");
            std::process::exit(1);
          }
    let (config_service, config_rx) = ReloaderService::<ConfigTomlReloader, ConfigToml, String>::new(
      &parsed_opts.config_file_path,
      CONFIG_WATCH_DELAY_SECS,
      false,
    )
    .await
    .unwrap();

    tokio::select! {
      config_res = config_service.start() => {
        if let Err(e) = config_res {
          error!("config reloader service exited: {e}");
          std::process::exit(1);
        }
      }
      rpxy_res = rpxy_service(config_rx, runtime.handle().clone()) => {
        if let Err(e) = rpxy_res {
          error!("rpxy service exited: {e}");
          std::process::exit(1);
        }
      }
      std::process::exit(0);
    }
    std::process::exit(0);
  });
}

@ -76,6 +71,7 @@ struct RpxyService {
}

impl RpxyService {
  /// Create a new RpxyService from config and runtime handle.
  async fn new(config_toml: &ConfigToml, runtime_handle: tokio::runtime::Handle) -> Result<Self, anyhow::Error> {
    let (proxy_conf, app_conf) = build_settings(config_toml).map_err(|e| anyhow!("Invalid configuration: {e}"))?;

@ -85,7 +81,7 @@ impl RpxyService {
      .map(|(s, r)| (Some(Arc::new(s)), Some(r)))
      .unwrap_or((None, None));

    Ok(RpxyService {
    Ok(Self {
      runtime_handle: runtime_handle.clone(),
      proxy_conf,
      app_conf,
@ -96,7 +92,7 @@ impl RpxyService {
    })
  }

  async fn start(&self, cancel_token: Option<CancellationToken>) -> Result<(), anyhow::Error> {
  async fn start(&self, cancel_token: CancellationToken) -> Result<(), anyhow::Error> {
    let RpxyService {
      runtime_handle,
      proxy_conf,
@ -111,17 +107,19 @@ impl RpxyService {
    {
      let (acme_join_handles, server_config_acme_challenge) = acme_manager
        .as_ref()
        .map(|m| m.spawn_manager_tasks(cancel_token.as_ref().map(|t| t.child_token())))
        .map(|m| m.spawn_manager_tasks(cancel_token.child_token()))
        .unwrap_or((vec![], Default::default()));
      let rpxy_opts = RpxyOptionsBuilder::default()
        .proxy_config(proxy_conf.clone())
        .app_config_list(app_conf.clone())
        .cert_rx(cert_rx.clone())
        .runtime_handle(runtime_handle.clone())
        .cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
        .server_configs_acme_challenge(Arc::new(server_config_acme_challenge))
        .build()?;
      self.start_inner(rpxy_opts, acme_join_handles).await.map_err(|e| anyhow!(e))
      self
        .start_inner(rpxy_opts, cancel_token, acme_join_handles)
        .await
        .map_err(|e| anyhow!(e))
    }

    #[cfg(not(feature = "acme"))]
@ -131,9 +129,8 @@ impl RpxyService {
        .app_config_list(app_conf.clone())
        .cert_rx(cert_rx.clone())
        .runtime_handle(runtime_handle.clone())
        .cancel_token(cancel_token.as_ref().map(|t| t.child_token()))
        .build()?;
      self.start_inner(rpxy_opts).await.map_err(|e| anyhow!(e))
      self.start_inner(rpxy_opts, cancel_token).await.map_err(|e| anyhow!(e))
    }
  }

@ -141,19 +138,19 @@ impl RpxyService {
  async fn start_inner(
    &self,
    rpxy_opts: RpxyOptions,
    cancel_token: CancellationToken,
    #[cfg(feature = "acme")] acme_task_handles: Vec<tokio::task::JoinHandle<()>>,
  ) -> Result<(), anyhow::Error> {
    let cancel_token = rpxy_opts.cancel_token.clone();
    let cancel_token = cancel_token.clone();
    let runtime_handle = rpxy_opts.runtime_handle.clone();

    // spawn rpxy entrypoint, where cancellation token is possibly contained inside the service
    let cancel_token_clone = cancel_token.clone();
    let child_cancel_token = cancel_token.child_token();
    let rpxy_handle = runtime_handle.spawn(async move {
      if let Err(e) = entrypoint(&rpxy_opts).await {
      if let Err(e) = entrypoint(&rpxy_opts, child_cancel_token).await {
        error!("rpxy entrypoint exited on error: {e}");
        if let Some(cancel_token) = cancel_token_clone {
          cancel_token.cancel();
        }
        cancel_token_clone.cancel();
        return Err(anyhow!(e));
      }
      Ok(())
@ -166,24 +163,20 @@ impl RpxyService {
    // spawn certificate reloader service, where cert service does not have cancellation token inside the service
    let cert_service = self.cert_service.as_ref().unwrap().clone();
    let cancel_token_clone = cancel_token.clone();
    let child_cancel_token = cancel_token.as_ref().map(|c| c.child_token());
    let child_cancel_token = cancel_token.child_token();
    let cert_handle = runtime_handle.spawn(async move {
      if let Some(child_cancel_token) = child_cancel_token {
        tokio::select! {
          cert_res = cert_service.start() => {
            if let Err(ref e) = cert_res {
              error!("cert reloader service exited on error: {e}");
            }
            cancel_token_clone.unwrap().cancel();
            cert_res.map_err(|e| anyhow!(e))
          }
          _ = child_cancel_token.cancelled() => {
            debug!("cert reloader service terminated");
            Ok(())
      tokio::select! {
        cert_res = cert_service.start() => {
          if let Err(ref e) = cert_res {
            error!("cert reloader service exited on error: {e}");
          }
          cancel_token_clone.cancel();
          cert_res.map_err(|e| anyhow!(e))
        }
        _ = child_cancel_token.cancelled() => {
          debug!("cert reloader service terminated");
          Ok(())
        }
      } else {
        cert_service.start().await.map_err(|e| anyhow!(e))
      }
    });

@ -218,9 +211,7 @@ impl RpxyService {
      if let Err(ref e) = acme_res {
        error!("acme manager exited on error: {e}");
      }
      if let Some(cancel_token) = cancel_token_clone {
        cancel_token.cancel();
      }
      cancel_token_clone.cancel();
      acme_res.map_err(|e| anyhow!(e))
    });
    let (rpxy_res, cert_res, acme_res) = tokio::join!(rpxy_handle, cert_handle, acme_handle);
@ -235,18 +226,8 @@ impl RpxyService {
  }
}

async fn rpxy_service_without_watcher(
  config_file_path: &str,
  runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> {
  info!("Start rpxy service");
  let config_toml = ConfigToml::new(config_file_path).map_err(|e| anyhow!("Invalid toml file: {e}"))?;
  let service = RpxyService::new(&config_toml, runtime_handle).await?;
  service.start(None).await
}

async fn rpxy_service_with_watcher(
  mut config_rx: ReloaderReceiver<ConfigToml>,
async fn rpxy_service(
  mut config_rx: ReloaderReceiver<ConfigToml, String>,
  runtime_handle: tokio::runtime::Handle,
) -> Result<(), anyhow::Error> {
  info!("Start rpxy service with dynamic config reloader");
@ -265,7 +246,7 @@ async fn rpxy_service_with_watcher(

    tokio::select! {
      /* ---------- */
      rpxy_res = service.start(Some(cancel_token.clone())) => {
      rpxy_res = service.start(cancel_token.clone()) => {
        if let Err(ref e) = rpxy_res {
          error!("rpxy service exited on error: {e}");
        } else {
@ -16,26 +16,26 @@ post-quantum = ["rustls-post-quantum"]
http3 = []

[dependencies]
rustc-hash = { version = "2.0.0" }
tracing = { version = "0.1.40" }
ahash = { version = "0.8.12" }
tracing = { version = "0.1.41" }
derive_builder = { version = "0.20.2" }
thiserror = { version = "1.0.66" }
hot_reload = { version = "0.1.6" }
async-trait = { version = "0.1.83" }
rustls = { version = "0.23.16", default-features = false, features = [
thiserror = { version = "2.0.12" }
hot_reload = { version = "0.1.9" }
async-trait = { version = "0.1.88" }
rustls = { version = "0.23.27", default-features = false, features = [
  "std",
  "aws_lc_rs",
] }
rustls-pemfile = { version = "2.2.0" }
rustls-webpki = { version = "0.102.8", default-features = false, features = [
rustls-webpki = { version = "0.103.3", default-features = false, features = [
  "std",
  "aws_lc_rs",
  "aws-lc-rs",
] }
rustls-post-quantum = { version = "0.1.0", optional = true }
x509-parser = { version = "0.16.0" }
rustls-post-quantum = { version = "0.2.2", optional = true }
x509-parser = { version = "0.17.0" }

[dev-dependencies]
tokio = { version = "1.41.0", default-features = false, features = [
tokio = { version = "1.45.1", default-features = false, features = [
  "rt-multi-thread",
  "macros",
] }
@ -1,5 +1,5 @@
use crate::error::*;
use rustc_hash::FxHashMap as HashMap;
use ahash::HashMap;
use rustls::{crypto::aws_lc_rs::sign::any_supported_type, pki_types, sign::CertifiedKey};
use std::sync::Arc;
use x509_parser::prelude::*;

@ -65,7 +65,7 @@ impl SingleServerCertsKeys {
      .cert_keys
      .clone()
      .iter()
      .find_map(|k| if let Ok(sk) = any_supported_type(k) { Some(sk) } else { None })
      .find_map(|k| any_supported_type(k).ok())
      .ok_or_else(|| RpxyCertError::InvalidCertificateAndKey)?;

    let cert = self.certs.iter().map(|c| Certificate::from(c.to_vec())).collect::<Vec<_>>();
@ -10,8 +10,8 @@ mod log {
}

use crate::{error::*, log::*, reloader_service::DynCryptoSource};
use ahash::HashMap;
use hot_reload::{ReloaderReceiver, ReloaderService};
use rustc_hash::FxHashMap as HashMap;
use rustls::crypto::CryptoProvider;
use std::sync::Arc;

@ -4,9 +4,9 @@ use crate::{
  log::*,
  server_crypto::{ServerCryptoBase, ServerNameBytes},
};
use ahash::HashMap;
use async_trait::async_trait;
use hot_reload::{Reload, ReloaderError};
use rustc_hash::FxHashMap as HashMap;
use std::sync::Arc;

/* ------------------------------------------------ */

@ -1,9 +1,9 @@
use crate::{certs::SingleServerCertsKeys, error::*, log::*};
use rustc_hash::FxHashMap as HashMap;
use ahash::HashMap;
use rustls::{
  RootCertStore, ServerConfig,
  crypto::CryptoProvider,
  server::{ResolvesServerCertUsingSni, WebPkiClientVerifier},
  RootCertStore, ServerConfig,
};
use std::sync::Arc;

@ -36,12 +36,12 @@ post-quantum = [
]

[dependencies]
rand = "0.8.5"
rustc-hash = "2.0.0"
bytes = "1.8.0"
rand = "0.9.1"
ahash = "0.8.12"
bytes = "1.10.1"
derive_builder = "0.20.2"
futures = { version = "0.3.31", features = ["alloc", "async-await"] }
tokio = { version = "1.41.0", default-features = false, features = [
tokio = { version = "1.45.1", default-features = false, features = [
  "net",
  "rt-multi-thread",
  "time",
@ -49,19 +49,19 @@ tokio = { version = "1.41.0", default-features = false, features = [
  "macros",
  "fs",
] }
tokio-util = { version = "0.7.12", default-features = false }
pin-project-lite = "0.2.15"
async-trait = "0.1.83"
tokio-util = { version = "0.7.15", default-features = false }
pin-project-lite = "0.2.16"
async-trait = "0.1.88"

# Error handling
anyhow = "1.0.91"
thiserror = "1.0.66"
anyhow = "1.0.98"
thiserror = "2.0.12"

# http for both server and client
http = "1.1.0"
http-body-util = "0.1.2"
hyper = { version = "1.5.0", default-features = false }
hyper-util = { version = "0.1.10", features = ["full"] }
http = "1.3.1"
http-body-util = "0.1.3"
hyper = { version = "1.6.0", default-features = false }
hyper-util = { version = "0.1.13", features = ["full"] }
futures-util = { version = "0.3.31", default-features = false }
futures-channel = { version = "0.3.31", default-features = false }

@ -70,7 +70,7 @@ hyper-tls = { version = "0.6.0", features = [
  "alpn",
  "vendored",
], optional = true }
hyper-rustls = { version = "0.27.3", default-features = false, features = [
hyper-rustls = { version = "0.27.6", default-features = false, features = [
  "aws-lc-rs",
  "http1",
  "http2",
@ -79,40 +79,40 @@ hyper-rustls = { version = "0.27.3", default-features = false, features = [

# tls and cert management for server
rpxy-certs = { path = "../rpxy-certs/", default-features = false }
hot_reload = "0.1.6"
rustls = { version = "0.23.16", default-features = false }
rustls-post-quantum = { version = "0.1.0", optional = true }
tokio-rustls = { version = "0.26.0", features = ["early-data"] }
hot_reload = "0.1.9"
rustls = { version = "0.23.27", default-features = false }
rustls-post-quantum = { version = "0.2.2", optional = true }
tokio-rustls = { version = "0.26.2", features = ["early-data"] }

# acme
rpxy-acme = { path = "../rpxy-acme/", default-features = false, optional = true }

# logging
tracing = { version = "0.1.40" }
tracing = { version = "0.1.41" }

# http/3
quinn = { version = "0.11.5", optional = true }
h3 = { version = "0.0.6", features = ["tracing"], optional = true }
h3-quinn = { version = "0.0.7", optional = true }
s2n-quic = { version = "1.48.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [
quinn = { version = "0.11.8", optional = true }
h3 = { version = "0.0.8", features = ["tracing"], optional = true }
h3-quinn = { version = "0.0.10", optional = true }
s2n-quic = { version = "1.59.0", path = "../submodules/s2n-quic/quic/s2n-quic/", default-features = false, features = [
  "provider-tls-rustls",
], optional = true }
s2n-quic-core = { version = "0.48.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true }
s2n-quic-rustls = { version = "0.48.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true }
s2n-quic-core = { version = "0.59.0", path = "../submodules/s2n-quic/quic/s2n-quic-core", default-features = false, optional = true }
s2n-quic-rustls = { version = "0.59.0", path = "../submodules/s2n-quic/quic/s2n-quic-rustls", optional = true }
s2n-quic-h3 = { path = "../submodules/s2n-quic/quic/s2n-quic-h3/", features = [
  "tracing",
], optional = true }
##########
# for UDP socket with SO_REUSEADDR when h3 with quinn
socket2 = { version = "0.5.7", features = ["all"], optional = true }
socket2 = { version = "0.5.10", features = ["all"], optional = true }

# cache
http-cache-semantics = { path = "../submodules/rusty-http-cache-semantics", default-features = false, optional = true }
lru = { version = "0.12.5", optional = true }
sha2 = { version = "0.10.8", default-features = false, optional = true }
lru = { version = "0.14.0", optional = true }
sha2 = { version = "0.10.9", default-features = false, optional = true }

# cookie handling for sticky cookie
chrono = { version = "0.4.38", default-features = false, features = [
chrono = { version = "0.4.41", default-features = false, features = [
  "unstable-locales",
  "alloc",
  "clock",
@ -1,11 +1,11 @@
use crate::{
  AppConfig, AppConfigList,
  error::*,
  log::*,
  name_exp::{ByteName, ServerName},
  AppConfig, AppConfigList,
};
use ahash::HashMap;
use derive_builder::Builder;
use rustc_hash::FxHashMap as HashMap;
use std::borrow::Cow;

use super::upstream::PathManager;
@ -26,6 +26,7 @@ pub struct BackendApp {
  pub https_redirection: Option<bool>,
  /// tls settings: mutual TLS is enabled
  #[builder(default)]
  #[allow(unused)]
  pub mutual_tls: Option<bool>,
}
impl<'a> BackendAppBuilder {
@ -7,8 +7,8 @@ pub use super::{
use derive_builder::Builder;
use rand::Rng;
use std::sync::{
  atomic::{AtomicUsize, Ordering},
  Arc,
  atomic::{AtomicUsize, Ordering},
};

/// Constants to specify a load balance option
@ -80,8 +80,8 @@ impl LoadBalanceRandomBuilder {
impl LoadBalanceWithPointer for LoadBalanceRandom {
  /// Returns the random index within the range
  fn get_ptr(&self, _info: Option<&LoadBalanceContext>) -> PointerToUpstream {
    let mut rng = rand::thread_rng();
    let ptr = rng.gen_range(0..self.num_upstreams);
    let mut rng = rand::rng();
    let ptr = rng.random_range(0..self.num_upstreams);
    PointerToUpstream { ptr, context: None }
  }
}
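The rand 0.8 to 0.9 bump renames the entry points used here: `thread_rng()` becomes `rand::rng()` and `gen_range()` becomes `random_range()`. A minimal sketch of the new calls:

use rand::Rng;

fn pick_upstream(num_upstreams: usize) -> usize {
  // rand 0.9: `rand::rng()` replaces `thread_rng()`,
  // and `random_range` replaces `gen_range`.
  let mut rng = rand::rng();
  rng.random_range(0..num_upstreams)
}

fn main() {
  println!("picked index {}", pick_upstream(4));
}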
@ -1,16 +1,16 @@
use super::{
  Upstream,
  load_balance_main::{LoadBalanceContext, LoadBalanceWithPointer, PointerToUpstream},
  sticky_cookie::StickyCookieConfig,
  Upstream,
};
use crate::{constants::STICKY_COOKIE_NAME, log::*};
use ahash::HashMap;
use derive_builder::Builder;
use rustc_hash::FxHashMap as HashMap;
use std::{
  borrow::Cow,
  sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
    atomic::{AtomicUsize, Ordering},
  },
};

@ -112,13 +112,16 @@ impl LoadBalanceWithPointer for LoadBalanceSticky {
      }
      Some(context) => {
        let server_id = &context.sticky_cookie.value.value;
        if let Some(server_index) = self.get_server_index_from_id(server_id) {
          debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
          server_index
        } else {
          debug!("Invalid sticky cookie: id={}", server_id);
          self.simple_increment_ptr()
        }
        self.get_server_index_from_id(server_id).map_or_else(
          || {
            debug!("Invalid sticky cookie: id={}", server_id);
            self.simple_increment_ptr()
          },
          |server_index| {
            debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
            server_index
          },
        )
      }
    };

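The sticky-cookie lookup above collapses the if let/else into `Option::map_or_else`, which takes the None-handler first and the Some-handler second. A tiny standalone sketch of that argument ordering:

fn resolve(found: Option<usize>, fallback: usize) -> usize {
  // map_or_else(default_fn, map_fn): the first closure handles None,
  // the second receives the Some value, as in the hunk above.
  found.map_or_else(|| fallback, |server_index| server_index)
}

fn main() {
  assert_eq!(resolve(Some(2), 0), 2);
  assert_eq!(resolve(None, 7), 7);
}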
@ -9,7 +9,7 @@ use super::upstream::Upstream;
use thiserror::Error;

pub use load_balance_main::{
  load_balance_options, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder,
  LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options,
};
#[cfg(feature = "sticky-cookie")]
pub use load_balance_sticky::LoadBalanceStickyBuilder;
@ -91,12 +91,7 @@ impl<'a> StickyCookieBuilder {
    self
  }
  /// Set the meta information of sticky cookie
  pub fn info(
    &mut self,
    domain: impl Into<Cow<'a, str>>,
    path: impl Into<Cow<'a, str>>,
    duration_secs: i64,
  ) -> &mut Self {
  pub fn info(&mut self, domain: impl Into<Cow<'a, str>>, path: impl Into<Cow<'a, str>>, duration_secs: i64) -> &mut Self {
    let info = StickyCookieInfoBuilder::default()
      .domain(domain)
      .path(path)
@ -1,7 +1,7 @@
#[cfg(feature = "sticky-cookie")]
use super::load_balance::LoadBalanceStickyBuilder;
use super::load_balance::{
  load_balance_options as lb_opts, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder,
  LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder, load_balance_options as lb_opts,
};
// use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption};
use super::upstream_opts::UpstreamOption;
@ -11,10 +11,10 @@ use crate::{
  log::*,
  name_exp::{ByteName, PathName},
};
use ahash::{HashMap, HashSet};
#[cfg(feature = "sticky-cookie")]
use base64::{engine::general_purpose, Engine as _};
use base64::{Engine as _, engine::general_purpose};
use derive_builder::Builder;
use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
#[cfg(feature = "sticky-cookie")]
use sha2::{Digest, Sha256};
use std::borrow::Cow;
@ -72,27 +72,22 @@ impl PathManager {
      .inner
      .iter()
      .filter(|(route_bytes, _)| {
        match path_name.starts_with(route_bytes) {
          true => {
            route_bytes.len() == 1 // route = '/', i.e., default
              || match path_name.get(route_bytes.len()) {
                None => true, // exact case
                Some(p) => p == &b'/', // sub-path case
              }
          }
          _ => false,
        path_name.starts_with(route_bytes) && {
          route_bytes.len() == 1 // route = '/', i.e., default
          || path_name.get(route_bytes.len()).map_or(
            true, // exact case
            |p| p == &b'/'
          ) // sub-path case
        }
      })
      .max_by_key(|(route_bytes, _)| route_bytes.len());
    if let Some((path, u)) = matched_upstream {
    matched_upstream.map(|(path, u)| {
      debug!(
        "Found upstream: {:?}",
        path.try_into().unwrap_or_else(|_| "<none>".to_string())
      );
      Some(u)
    } else {
      None
    }
      u
    })
  }
}

@ -211,14 +206,15 @@ impl UpstreamCandidatesBuilder {
  }
  /// Set the activated upstream options defined in [[UpstreamOption]]
  pub fn options(&mut self, v: &Option<Vec<String>>) -> &mut Self {
    let opts = if let Some(opts) = v {
      opts
        .iter()
        .filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
        .collect::<HashSet<UpstreamOption>>()
    } else {
      Default::default()
    };
    let opts = v.as_ref().map_or_else(
      || Default::default(),
      |opts| {
        opts
          .iter()
          .filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
          .collect::<HashSet<UpstreamOption>>()
      },
    );
    self.options = Some(opts);
    self
  }
@ -32,3 +32,9 @@ pub const MAX_CACHE_EACH_SIZE: usize = 65_535;
pub const MAX_CACHE_EACH_SIZE_ON_MEMORY: usize = 4_096;

// TODO: max cache size in total

/// Logging event name TODO: Other separated logs?
pub mod log_event_names {
  /// access log
  pub const ACCESS_LOG: &str = "rpxy::access";
}
@ -1,6 +1,6 @@
use std::sync::{
  atomic::{AtomicUsize, Ordering},
  Arc,
  atomic::{AtomicUsize, Ordering},
};

#[derive(Debug, Clone, Default)]
@ -37,8 +37,11 @@ pub enum RpxyError {

  // http/3 errors
  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
  #[error("H3 error: {0}")]
  H3Error(#[from] h3::Error),
  #[error("h3 connection error: {0}")]
  H3ConnectionError(#[from] h3::error::ConnectionError),
  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
  #[error("h3 stream error: {0}")]
  H3StreamError(#[from] h3::error::StreamError),
  // #[cfg(feature = "http3-s2n")]
  // #[error("H3 error: {0}")]
  // H3Error(#[from] s2n_quic_h3::h3::Error),
103
rpxy-lib/src/forwarder/cache/cache_main.rs
vendored
@ -1,10 +1,10 @@
use super::cache_error::*;
use crate::{
  globals::Globals,
  hyper_ext::body::{full, BoxBody, ResponseBody, UnboundedStreamBody},
  hyper_ext::body::{BoxBody, ResponseBody, UnboundedStreamBody, full},
  log::*,
};
use base64::{engine::general_purpose, Engine as _};
use base64::{Engine as _, engine::general_purpose};
use bytes::{Buf, Bytes, BytesMut};
use futures::channel::mpsc;
use http::{Request, Response, Uri};
@ -16,8 +16,8 @@ use sha2::{Digest, Sha256};
use std::{
  path::{Path, PathBuf},
  sync::{
    atomic::{AtomicUsize, Ordering},
    Arc, Mutex,
    atomic::{AtomicUsize, Ordering},
  },
  time::SystemTime,
};
@ -52,23 +52,30 @@ impl RpxyCache {
    if !globals.proxy_config.cache_enabled {
      return None;
    }
    let cache_dir = globals.proxy_config.cache_dir.as_ref().unwrap();
    let cache_dir = match globals.proxy_config.cache_dir.as_ref() {
      Some(dir) => dir,
      None => {
        warn!("Cache directory not set in proxy config");
        return None;
      }
    };
    let file_store = FileStore::new(&globals.runtime_handle).await;
    let inner = LruCacheManager::new(globals.proxy_config.cache_max_entry);

    let max_each_size = globals.proxy_config.cache_max_each_size;
    let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory;
    if max_each_size < max_each_size_on_memory {
      warn!(
        "Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache"
      );
      warn!("Maximum size of on-memory cache per entry must be smaller than or equal to the maximum of each file cache");
      max_each_size_on_memory = max_each_size;
    }

    if let Err(e) = fs::remove_dir_all(cache_dir).await {
      warn!("Failed to clean up the cache dir: {e}");
    };
    fs::create_dir_all(&cache_dir).await.unwrap();
    }
    if let Err(e) = fs::create_dir_all(&cache_dir).await {
      error!("Failed to create cache dir: {e}");
      return None;
    }

    Some(Self {
      file_store,
@ -89,12 +96,7 @@ impl RpxyCache {
  }

  /// Put response into the cache
  pub(crate) async fn put(
    &self,
    uri: &hyper::Uri,
    mut body: Incoming,
    policy: &CachePolicy,
  ) -> CacheResult<UnboundedStreamBody> {
  pub(crate) async fn put(&self, uri: &hyper::Uri, mut body: Incoming, policy: &CachePolicy) -> CacheResult<UnboundedStreamBody> {
    let cache_manager = self.inner.clone();
    let mut file_store = self.file_store.clone();
    let uri = uri.clone();
@ -155,7 +157,7 @@ impl RpxyCache {
    let mut hasher = Sha256::new();
    hasher.update(buf.as_ref());
    let hash_bytes = Bytes::copy_from_slice(hasher.finalize().as_ref());
    debug!("Cached data: {} bytes, hash = {:?}", size, hash_bytes);
    trace!("Cached data: {} bytes, hash = {:?}", size, hash_bytes);

    // Create cache object
    let cache_key = derive_cache_key_from_uri(&uri);
@ -188,16 +190,11 @@ impl RpxyCache {

  /// Get cached response
  pub(crate) async fn get<R>(&self, req: &Request<R>) -> Option<Response<ResponseBody>> {
    debug!(
      "Current cache status: (total, on-memory, file) = {:?}",
      self.count().await
    );
    trace!("Current cache status: (total, on-memory, file) = {:?}", self.count().await);
    let cache_key = derive_cache_key_from_uri(req.uri());

    // First check cache chance
    let Ok(Some(cached_object)) = self.inner.get(&cache_key) else {
      return None;
    };
    let cached_object = self.inner.get(&cache_key).ok()??;

    // Secondly check the cache freshness as an HTTP message
    let now = SystemTime::now();
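The `let Ok(Some(..)) else` block above becomes a single `.ok()??`: `.ok()` turns `Result<Option<T>, E>` into `Option<Option<T>>`, and the doubled `?` unwraps both layers inside a function returning `Option`. A minimal sketch:

fn fetch(raw: Result<Option<u32>, String>) -> Option<u32> {
  // First `?` exits on None from `.ok()` (the Err case);
  // second `?` exits on the inner None.
  let value = raw.ok()??;
  Some(value + 1)
}

fn main() {
  assert_eq!(fetch(Ok(Some(1))), Some(2));
  assert_eq!(fetch(Ok(None)), None);
  assert_eq!(fetch(Err("boom".into())), None);
}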
@ -268,25 +265,20 @@ impl FileStore {
    let inner = self.inner.read().await;
    inner.cnt
  }
  /// Create a temporary file cache
  /// Create a temporary file cache, returns error if file cannot be created or written
  async fn create(&mut self, cache_object: &CacheObject, body_bytes: &Bytes) -> CacheResult<()> {
    let mut inner = self.inner.write().await;
    inner.create(cache_object, body_bytes).await
  }
  /// Evict a temporary file cache
  /// Evict a temporary file cache, logs warning if removal fails
  async fn evict(&self, path: impl AsRef<Path>) {
    // Acquire the write lock
    let mut inner = self.inner.write().await;
    if let Err(e) = inner.remove(path).await {
      warn!("Eviction failed during file object removal: {:?}", e);
    };
  }
}
  /// Read a temporary file cache
  async fn read(
    &self,
    path: impl AsRef<Path> + Send + Sync + 'static,
    hash: &Bytes,
  ) -> CacheResult<UnboundedStreamBody> {
  /// Read a temporary file cache, returns error if file cannot be opened or hash mismatches
  async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
    let inner = self.inner.read().await;
    inner.read(path, hash).await
  }
@ -321,26 +313,22 @@ impl FileStoreInner {
|
|||
return Err(CacheError::InvalidCacheTarget);
|
||||
}
|
||||
};
|
||||
let Ok(mut file) = File::create(&cache_filepath).await else {
|
||||
return Err(CacheError::FailedToCreateFileCache);
|
||||
};
|
||||
let mut file = File::create(&cache_filepath)
|
||||
.await
|
||||
.map_err(|_| CacheError::FailedToCreateFileCache)?;
|
||||
let mut bytes_clone = body_bytes.clone();
|
||||
while bytes_clone.has_remaining() {
|
||||
if let Err(e) = file.write_buf(&mut bytes_clone).await {
|
||||
file.write_buf(&mut bytes_clone).await.map_err(|e| {
|
||||
error!("Failed to write file cache: {e}");
|
||||
return Err(CacheError::FailedToWriteFileCache);
|
||||
};
|
||||
CacheError::FailedToWriteFileCache
|
||||
})?;
|
||||
}
|
||||
self.cnt += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieve a stored temporary file cache
|
||||
async fn read(
|
||||
&self,
|
||||
path: impl AsRef<Path> + Send + Sync + 'static,
|
||||
hash: &Bytes,
|
||||
) -> CacheResult<UnboundedStreamBody> {
|
||||
async fn read(&self, path: impl AsRef<Path> + Send + Sync + 'static, hash: &Bytes) -> CacheResult<UnboundedStreamBody> {
|
||||
let Ok(mut file) = File::open(&path).await else {
|
||||
warn!("Cache file object cannot be opened");
|
||||
return Err(CacheError::FailedToOpenCacheFile);
|
||||
|
|
@ -455,11 +443,14 @@ impl LruCacheManager {
|
|||
self.cnt.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Evict an entry
|
||||
/// Evict an entry from the LRU cache, logs error if mutex cannot be acquired
|
||||
fn evict(&self, cache_key: &str) -> Option<(String, CacheObject)> {
|
||||
let Ok(mut lock) = self.inner.lock() else {
|
||||
error!("Mutex can't be locked to evict a cache entry");
|
||||
return None;
|
||||
let mut lock = match self.inner.lock() {
|
||||
Ok(lock) => lock,
|
||||
Err(_) => {
|
||||
error!("Mutex can't be locked to evict a cache entry");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let res = lock.pop_entry(cache_key);
|
||||
// This may be inconsistent with the actual number of entries
|
||||
|
|
@ -467,24 +458,24 @@ impl LruCacheManager {
|
|||
res
|
||||
}
|
||||
|
||||
/// Push an entry
|
||||
/// Push an entry into the LRU cache, returns error if mutex cannot be acquired
|
||||
fn push(&self, cache_key: &str, cache_object: &CacheObject) -> CacheResult<Option<(String, CacheObject)>> {
|
||||
let Ok(mut lock) = self.inner.lock() else {
|
||||
let mut lock = self.inner.lock().map_err(|_| {
|
||||
error!("Failed to acquire mutex lock for writing cache entry");
|
||||
return Err(CacheError::FailedToAcquiredMutexLockForCache);
|
||||
};
|
||||
CacheError::FailedToAcquiredMutexLockForCache
|
||||
})?;
|
||||
let res = Ok(lock.push(cache_key.to_string(), cache_object.clone()));
|
||||
// This may be inconsistent with the actual number of entries
|
||||
self.cnt.store(lock.len(), Ordering::Relaxed);
|
||||
res
|
||||
}
|
||||
|
||||
/// Get an entry
|
||||
/// Get an entry from the LRU cache, returns error if mutex cannot be acquired
|
||||
fn get(&self, cache_key: &str) -> CacheResult<Option<CacheObject>> {
|
||||
let Ok(mut lock) = self.inner.lock() else {
|
||||
let mut lock = self.inner.lock().map_err(|_| {
|
||||
error!("Mutex can't be locked for checking cache entry");
|
||||
return Err(CacheError::FailedToAcquiredMutexLockForCheck);
|
||||
};
|
||||
CacheError::FailedToAcquiredMutexLockForCheck
|
||||
})?;
|
||||
let Some(cached_object) = lock.get(cache_key) else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
|
|
|||
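Note on the recurring pattern in the LruCacheManager hunks above: the `let Ok(..) = self.inner.lock() else { .. }` blocks become `match` or `map_err` closures so the poisoned-mutex case is logged once and then returned. A minimal, self-contained sketch of the `map_err` form (the types here are illustrative stand-ins, not the crate's own):

use std::sync::{Arc, Mutex};

#[derive(Debug)]
enum CacheError {
  FailedToAcquireMutexLock,
}

struct LruLike {
  inner: Arc<Mutex<Vec<(String, u64)>>>,
}

impl LruLike {
  /// Log the lock failure once, convert it into a typed error, and bubble it up with `?`.
  fn push(&self, key: &str, value: u64) -> Result<(), CacheError> {
    let mut lock = self.inner.lock().map_err(|_| {
      eprintln!("Failed to acquire mutex lock for writing cache entry");
      CacheError::FailedToAcquireMutexLock
    })?;
    lock.push((key.to_string(), value));
    Ok(())
  }
}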
2 rpxy-lib/src/forwarder/cache/mod.rs vendored

@@ -2,4 +2,4 @@ mod cache_error;
mod cache_main;

pub use cache_error::CacheError;
pub(crate) use cache_main::{get_policy_if_cacheable, RpxyCache};
pub(crate) use cache_main::{RpxyCache, get_policy_if_cacheable};
@@ -9,13 +9,13 @@ use async_trait::async_trait;
use http::{Request, Response, Version};
use hyper::body::{Body, Incoming};
use hyper_util::client::legacy::{
  connect::{Connect, HttpConnector},
  Client,
  connect::{Connect, HttpConnector},
};
use std::sync::Arc;

#[cfg(feature = "cache")]
use super::cache::{get_policy_if_cacheable, RpxyCache};
use super::cache::{RpxyCache, get_policy_if_cacheable};

#[async_trait]
/// Definition of the forwarder that simply forward requests from downstream client to upstream app servers.

@@ -126,9 +126,9 @@ where
    warn!(
      "
--------------------------------------------------------------------------------------------------
Request forwarder is working without TLS support!!!
We recommend to use this just for testing.
Please enable native-tls-backend or rustls-backend feature to enable TLS support.
Request forwarder is working without TLS support!
This mode is intended for testing only.
Enable 'native-tls-backend' or 'rustls-backend' feature for TLS support.
--------------------------------------------------------------------------------------------------"
    );
    let executor = LocalExecutor::new(_globals.runtime_handle.clone());

@@ -159,7 +159,7 @@ where
  /// Build forwarder
  pub async fn try_new(_globals: &Arc<Globals>) -> RpxyResult<Self> {
    // build hyper client with hyper-tls
    info!("Native TLS support is enabled for the connection to backend applications");
    info!("Native TLS support enabled for backend connections (native-tls)");
    let executor = LocalExecutor::new(_globals.runtime_handle.clone());

    let try_build_connector = |alpns: &[&str]| {

@@ -209,14 +209,14 @@ where
    #[cfg(feature = "webpki-roots")]
    let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots();
    #[cfg(feature = "webpki-roots")]
    info!("Mozilla WebPKI root certs with rustls is used for the connection to backend applications");
    info!("Rustls backend: Mozilla WebPKI root certs used for backend connections");

    #[cfg(not(feature = "webpki-roots"))]
    let builder = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
    #[cfg(not(feature = "webpki-roots"))]
    let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_platform_verifier();
    #[cfg(not(feature = "webpki-roots"))]
    info!("Platform verifier with rustls is used for the connection to backend applications");
    info!("Rustls backend: Platform verifier used for backend connections");

    let mut http = HttpConnector::new();
    http.enforce_http(false);

@@ -226,7 +226,9 @@ where
    let connector = builder.https_or_http().enable_all_versions().wrap_connector(http.clone());
    let connector_h2 = builder_h2.https_or_http().enable_http2().wrap_connector(http);
    let inner = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone())).build::<_, B1>(connector);
    let inner_h2 = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone())).build::<_, B1>(connector_h2);
    let inner_h2 = Client::builder(LocalExecutor::new(_globals.runtime_handle.clone()))
      .http2_only(true)
      .build::<_, B1>(connector_h2);

    Ok(Self {
      inner,
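The final hunk pins the dedicated h2 client with `.http2_only(true)`: without it, a cleartext `http://` upstream reached through `connector_h2` would still be negotiated as HTTP/1.1. A rough sketch of an HTTP/2-only hyper-util legacy client, assuming the public `TokioExecutor` in place of the crate's internal `LocalExecutor`:

use http_body_util::Full;
use hyper::body::Bytes;
use hyper_util::client::legacy::{connect::HttpConnector, Client};
use hyper_util::rt::TokioExecutor;

fn build_h2_only_client() -> Client<HttpConnector, Full<Bytes>> {
  // Plain TCP connector; rpxy wraps this in a TLS connector for https upstreams.
  let http = HttpConnector::new();
  // Every request on this client is sent as prior-knowledge HTTP/2.
  Client::builder(TokioExecutor::new()).http2_only(true).build(http)
}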
@@ -2,7 +2,6 @@ use crate::{constants::*, count::RequestCount};
use hot_reload::ReloaderReceiver;
use rpxy_certs::ServerCryptoBase;
use std::{net::SocketAddr, time::Duration};
use tokio_util::sync::CancellationToken;

/// Global object containing proxy configurations and shared object like counters.
/// But note that in Globals, we do not have Mutex and RwLock. It is indeed, the context shared among async tasks.

@@ -13,14 +12,12 @@ pub struct Globals {
  pub request_count: RequestCount,
  /// Shared context - Async task runtime handler
  pub runtime_handle: tokio::runtime::Handle,
  /// Shared context - Notify object to stop async tasks
  pub cancel_token: Option<CancellationToken>,
  /// Shared context - Certificate reloader service receiver // TODO: newer one
  pub cert_reloader_rx: Option<ReloaderReceiver<ServerCryptoBase>>,

  #[cfg(feature = "acme")]
  /// ServerConfig used for only ACME challenge for ACME domains
  pub server_configs_acme_challenge: std::sync::Arc<rustc_hash::FxHashMap<String, std::sync::Arc<rustls::ServerConfig>>>,
  pub server_configs_acme_challenge: std::sync::Arc<ahash::HashMap<String, std::sync::Arc<rustls::ServerConfig>>>,
}

/// Configuration parameters for proxy transport and request handlers
@@ -1,7 +1,7 @@
use super::watch;
use crate::error::*;
use futures_channel::{mpsc, oneshot};
use futures_util::{stream::FusedStream, Future, Stream};
use futures_util::{Future, Stream, stream::FusedStream};
use http::HeaderMap;
use hyper::body::{Body, Bytes, Frame, SizeHint};
use std::{

@@ -1,7 +1,7 @@
use super::body::IncomingLike;
use crate::error::RpxyError;
use futures::channel::mpsc::UnboundedReceiver;
use http_body_util::{combinators, BodyExt, Empty, Full, StreamBody};
use http_body_util::{BodyExt, Empty, Full, StreamBody, combinators};
use hyper::body::{Body, Bytes, Frame, Incoming};
use std::pin::Pin;

@@ -12,5 +12,5 @@ pub(crate) mod rt {
  #[allow(unused)]
  pub(crate) mod body {
    pub(crate) use super::body_incoming_like::IncomingLike;
    pub(crate) use super::body_type::{empty, full, BoxBody, RequestBody, ResponseBody, UnboundedStreamBody};
    pub(crate) use super::body_type::{BoxBody, RequestBody, ResponseBody, UnboundedStreamBody, empty, full};
  }

@@ -7,8 +7,8 @@
use futures_util::task::AtomicWaker;
use std::sync::{
  atomic::{AtomicUsize, Ordering},
  Arc,
  atomic::{AtomicUsize, Ordering},
};
use std::task;
@@ -27,6 +27,7 @@ use std::sync::Arc;
use tokio_util::sync::CancellationToken;

/* ------------------------------------------------ */
pub use crate::constants::log_event_names;
pub use crate::globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri};
pub mod reexports {
  pub use hyper::Uri;

@@ -43,12 +44,10 @@ pub struct RpxyOptions {
  pub cert_rx: Option<ReloaderReceiver<ServerCryptoBase>>, // TODO:
  /// Async task runtime handler
  pub runtime_handle: tokio::runtime::Handle,
  /// Notify object to stop async tasks
  pub cancel_token: Option<CancellationToken>,

  #[cfg(feature = "acme")]
  /// ServerConfig used for only ACME challenge for ACME domains
  pub server_configs_acme_challenge: Arc<rustc_hash::FxHashMap<String, Arc<rustls::ServerConfig>>>,
  pub server_configs_acme_challenge: Arc<ahash::HashMap<String, Arc<rustls::ServerConfig>>>,
}

/// Entrypoint that creates and spawns tasks of reverse proxy services

@@ -58,10 +57,10 @@ pub async fn entrypoint(
    app_config_list,
    cert_rx, // TODO:
    runtime_handle,
    cancel_token,
    #[cfg(feature = "acme")]
    server_configs_acme_challenge,
  }: &RpxyOptions,
  cancel_token: CancellationToken,
) -> RpxyResult<()> {
  #[cfg(all(feature = "http3-quinn", feature = "http3-s2n"))]
  warn!("Both \"http3-quinn\" and \"http3-s2n\" features are enabled. \"http3-quinn\" will be used");

@@ -117,7 +116,6 @@ pub async fn entrypoint(
    proxy_config: proxy_config.clone(),
    request_count: Default::default(),
    runtime_handle: runtime_handle.clone(),
    cancel_token: cancel_token.clone(),
    cert_reloader_rx: cert_rx.clone(),

    #[cfg(feature = "acme")]

@@ -153,25 +151,21 @@ pub async fn entrypoint(
      message_handler: message_handler.clone(),
    };

    let cancel_token = globals.cancel_token.as_ref().map(|t| t.child_token());
    let parent_cancel_token_clone = globals.cancel_token.clone();
    let cancel_token = cancel_token.clone();
    globals.runtime_handle.spawn(async move {
      info!("rpxy proxy service for {listening_on} started");
      if let Some(cancel_token) = cancel_token {
        tokio::select! {
          _ = cancel_token.cancelled() => {
            debug!("rpxy proxy service for {listening_on} terminated");
            Ok(())
          },
          proxy_res = proxy.start() => {
            info!("rpxy proxy service for {listening_on} exited");
            // cancel other proxy tasks
            parent_cancel_token_clone.unwrap().cancel();
            proxy_res
          }

      tokio::select! {
        _ = cancel_token.cancelled() => {
          debug!("rpxy proxy service for {listening_on} terminated");
          Ok(())
        },
        proxy_res = proxy.start(cancel_token.child_token()) => {
          info!("rpxy proxy service for {listening_on} exited");
          // cancel other proxy tasks
          cancel_token.cancel();
          proxy_res
        }
      } else {
        proxy.start().await
      }
      })
    });

@@ -186,9 +180,5 @@ pub async fn entrypoint(
    }
  });
  // returns the first error as the representative error
  if let Some(e) = errs.next() {
    return Err(e);
  }

  Ok(())
  errs.next().map_or(Ok(()), |e| Err(e))
}
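The entrypoint hunks replace the optional `Option<CancellationToken>` plumbing with a mandatory token and tokio-util's token hierarchy: each proxy runs against a `child_token()`, and whichever task exits first cancels the shared token to stop its siblings. A self-contained sketch of that shape (service names are illustrative):

use tokio_util::sync::CancellationToken;

async fn run_service(_own_token: CancellationToken) {
  // Stand-in for proxy.start(cancel_token.child_token()).
  tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}

#[tokio::main]
async fn main() {
  let root = CancellationToken::new();
  for i in 0..3 {
    let token = root.clone();
    tokio::spawn(async move {
      tokio::select! {
        _ = token.cancelled() => println!("service {i} terminated"),
        _ = run_service(token.child_token()) => {
          // One service exiting tears down the others: all clones share state.
          println!("service {i} exited; cancelling siblings");
          token.cancel();
        }
      }
    });
  }
  root.cancelled().await;
}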
@@ -1 +1 @@
pub use tracing::{debug, error, info, warn};
pub use tracing::{debug, error, info, trace, warn};
@@ -44,10 +44,7 @@ mod tests {
  }
  #[test]
  fn ipv6_to_canonical() {
    let socket = SocketAddr::new(
      IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)),
      8080,
    );
    let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)), 8080);
    assert_eq!(socket.to_canonical(), socket);
  }
  #[test]
@@ -71,7 +71,7 @@ where
        Ok(v)
      }
      Err(e) => {
        error!("{e}");
        error!("{e}: {log_data}");
        let code = StatusCode::from(e);
        log_data.status_code(&code).output();
        synthetic_error_response(code)

@@ -107,9 +107,11 @@ where
    let backend_app = match self.app_manager.apps.get(&server_name) {
      Some(backend_app) => backend_app,
      None => {
        let Some(default_server_name) = &self.app_manager.default_server_name else {
          return Err(HttpError::NoMatchingBackendApp);
        };
        let default_server_name = self
          .app_manager
          .default_server_name
          .as_ref()
          .ok_or(HttpError::NoMatchingBackendApp)?;
        debug!("Serving by default app");
        self.app_manager.apps.get(default_server_name).unwrap()
      }

@@ -131,9 +133,7 @@ where
    // Find reverse proxy for given path and choose one of upstream host
    // Longest prefix match
    let path = req.uri().path();
    let Some(upstream_candidates) = backend_app.path_manager.get(path) else {
      return Err(HttpError::NoUpstreamCandidates);
    };
    let upstream_candidates = backend_app.path_manager.get(path).ok_or(HttpError::NoUpstreamCandidates)?;

    // Upgrade in request header
    let upgrade_in_request = extract_upgrade(req.headers());

@@ -147,19 +147,17 @@ where
    let req_on_upgrade = hyper::upgrade::on(&mut req);

    // Build request from destination information
    let _context = match self.generate_request_forwarded(
      &client_addr,
      &listen_addr,
      &mut req,
      &upgrade_in_request,
      upstream_candidates,
      tls_enabled,
    ) {
      Err(e) => {
        return Err(HttpError::FailedToGenerateUpstreamRequest(e.to_string()));
      }
      Ok(v) => v,
    };
    let _context = self
      .generate_request_forwarded(
        &client_addr,
        &listen_addr,
        &mut req,
        &upgrade_in_request,
        upstream_candidates,
        tls_enabled,
      )
      .map_err(|e| HttpError::FailedToGenerateUpstreamRequest(e.to_string()))?;

    debug!(
      "Request to be forwarded: [uri {}, method: {}, version {:?}, headers {:?}]",
      req.uri(),

@@ -173,12 +171,12 @@ where

    //////////////
    // Forward request to a chosen backend
    let mut res_backend = match self.forwarder.request(req).await {
      Ok(v) => v,
      Err(e) => {
        return Err(HttpError::FailedToGetResponseFromBackend(e.to_string()));
      }
    };
    let mut res_backend = self
      .forwarder
      .request(req)
      .await
      .map_err(|e| HttpError::FailedToGetResponseFromBackend(e.to_string()))?;

    //////////////
    // Process reverse proxy context generated during the forwarding request generation.
    #[cfg(feature = "sticky-cookie")]

@@ -191,16 +189,16 @@ where

    if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS {
      // Generate response to client
      if let Err(e) = self.generate_response_forwarded(&mut res_backend, backend_app) {
        return Err(HttpError::FailedToGenerateDownstreamResponse(e.to_string()));
      }
      self
        .generate_response_forwarded(&mut res_backend, backend_app)
        .map_err(|e| HttpError::FailedToGenerateDownstreamResponse(e.to_string()))?;
      return Ok(res_backend);
    }

    // Handle StatusCode::SWITCHING_PROTOCOLS in response
    let upgrade_in_response = extract_upgrade(res_backend.headers());
    let should_upgrade = match (upgrade_in_request.as_ref(), upgrade_in_response.as_ref()) {
      (Some(u_req), Some(u_res)) => u_req.to_ascii_lowercase() == u_res.to_ascii_lowercase(),
      (Some(u_req), Some(u_res)) => u_req.eq_ignore_ascii_case(u_res),
      _ => false,
    };
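These handler hunks repeatedly collapse `match` and `let-else` error handling into `ok_or(..)?` and `map_err(..)?`, and swap allocating case-insensitive comparison for `eq_ignore_ascii_case`. A condensed sketch of the conversions over illustrative types:

use std::collections::HashMap;

#[derive(Debug)]
enum HttpError {
  NoMatchingBackendApp,
  FailedToGetResponseFromBackend(String),
}

/// Option -> Result: `ok_or` + `?` replaces a `let Some(..) = .. else { return Err(..) }` block.
fn pick_app<'a>(apps: &'a HashMap<String, u32>, default: Option<&String>) -> Result<&'a u32, HttpError> {
  let name = default.ok_or(HttpError::NoMatchingBackendApp)?;
  apps.get(name).ok_or(HttpError::NoMatchingBackendApp)
}

/// Result -> Result: `map_err` + `?` replaces a `match` that only rewraps the error.
fn forward(raw: Result<u32, String>) -> Result<u32, HttpError> {
  raw.map_err(HttpError::FailedToGetResponseFromBackend)
}

fn main() {
  let apps = HashMap::from([("default".to_string(), 1u32)]);
  assert!(pick_app(&apps, None).is_err());
  assert_eq!(forward(Ok(7)).unwrap(), 7);
  // `eq_ignore_ascii_case` compares in place, with no intermediate lowercase Strings.
  assert!("WebSocket".eq_ignore_ascii_case("websocket"));
}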
@@ -1,11 +1,11 @@
use super::{handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line, HttpMessageHandler};
use super::{HttpMessageHandler, handler_main::HandlerContext, utils_headers::*, utils_request::update_request_line};
use crate::{
  backend::{BackendApp, UpstreamCandidates},
  constants::RESPONSE_HEADER_SERVER,
  log::*,
};
use anyhow::{anyhow, ensure, Result};
use http::{header, HeaderValue, Request, Response, Uri};
use anyhow::{Result, anyhow, ensure};
use http::{HeaderValue, Request, Response, Uri, header};
use hyper_util::client::legacy::connect::Connect;
use std::net::SocketAddr;

@@ -66,17 +66,19 @@ where
    upstream_candidates: &UpstreamCandidates,
    tls_enabled: bool,
  ) -> Result<HandlerContext> {
    debug!("Generate request to be forwarded");
    trace!("Generate request to be forwarded");

    // Add te: trailer if contained in original request
    let contains_te_trailers = {
      if let Some(te) = req.headers().get(header::TE) {
        te.as_bytes()
          .split(|v| v == &b',' || v == &b' ')
          .any(|x| x == "trailers".as_bytes())
      } else {
        false
      }
      req
        .headers()
        .get(header::TE)
        .map(|te| {
          te.as_bytes()
            .split(|v| v == &b',' || v == &b' ')
            .any(|x| x == "trailers".as_bytes())
        })
        .unwrap_or(false)
    };

    let original_uri = req.uri().to_string();
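The `contains_te_trailers` rewrite turns the `if let / else false` chain into `Option::map` + `unwrap_or(false)`. A standalone version of the same check against the http crate's HeaderMap:

use http::{header, HeaderMap, HeaderValue};

/// True when the TE header advertises `trailers` in its comma/space separated list.
fn wants_trailers(headers: &HeaderMap) -> bool {
  headers
    .get(header::TE)
    .map(|te| {
      te.as_bytes()
        .split(|v| v == &b',' || v == &b' ')
        .any(|x| x == "trailers".as_bytes())
    })
    .unwrap_or(false)
}

fn main() {
  let mut headers = HeaderMap::new();
  headers.insert(header::TE, HeaderValue::from_static("trailers, deflate"));
  assert!(wants_trailers(&headers));
}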
@@ -136,11 +138,7 @@ where
    let new_uri = Uri::builder()
      .scheme(upstream_chosen.uri.scheme().unwrap().as_str())
      .authority(upstream_chosen.uri.authority().unwrap().as_str());
    let org_pq = match req.uri().path_and_query() {
      Some(pq) => pq.to_string(),
      None => "/".to_string(),
    }
    .into_bytes();
    let org_pq = req.uri().path_and_query().map(|pq| pq.as_str()).unwrap_or("/").as_bytes();

    // replace some parts of path if opt_replace_path is enabled for chosen upstream
    let new_pq = match &upstream_candidates.replace_path {

@@ -155,7 +153,7 @@ where
        new_pq.extend_from_slice(&org_pq[matched_path.len()..]);
        new_pq
      }
      None => org_pq,
      None => org_pq.to_vec(),
    };
    *req.uri_mut() = new_uri.path_and_query(new_pq).build()?;
@@ -34,11 +34,7 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
      client_addr: "".to_string(),
      method: req.method().to_string(),
      host: header_mapper(header::HOST),
      p_and_q: req
        .uri()
        .path_and_query()
        .map_or_else(|| "", |v| v.as_str())
        .to_string(),
      p_and_q: req.uri().path_and_query().map_or_else(|| "", |v| v.as_str()).to_string(),
      version: req.version(),
      uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(),
      uri_host: req.uri().host().unwrap_or("").to_string(),
@@ -50,6 +46,33 @@ impl<T> From<&http::Request<T>> for HttpMessageLog {
  }
}

impl std::fmt::Display for HttpMessageLog {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(
      f,
      "{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
      if !self.host.is_empty() {
        self.host.as_str()
      } else {
        self.uri_host.as_str()
      },
      self.client_addr,
      self.method,
      self.p_and_q,
      self.version,
      self.status,
      if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
        format!("{}://{}", self.uri_scheme, self.uri_host)
      } else {
        "".to_string()
      },
      self.ua,
      self.xff,
      self.upstream
    )
  }
}

impl HttpMessageLog {
  pub fn client_addr(&mut self, client_addr: &SocketAddr) -> &mut Self {
    self.client_addr = client_addr.to_canonical().to_string();
@@ -74,26 +97,8 @@ impl HttpMessageLog {

  pub fn output(&self) {
    info!(
      "{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"",
      if !self.host.is_empty() {
        self.host.as_str()
      } else {
        self.uri_host.as_str()
      },
      self.client_addr,
      self.method,
      self.p_and_q,
      self.version,
      self.status,
      if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() {
        format!("{}://{}", self.uri_scheme, self.uri_host)
      } else {
        "".to_string()
      },
      self.ua,
      self.xff,
      self.upstream,
      // self.tls_server_name
      name: crate::constants::log_event_names::ACCESS_LOG,
      "{}", self
    );
  }
}
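The two hunks above deduplicate the access-log format string: it now lives in a single `Display` impl, and `output()` emits a named tracing event so subscribers can filter access logs from other `info!` output. A reduced sketch, assuming a tracing release recent enough to accept the `name:` prefix in event macros (tracing-subscriber is used only for the demo):

use std::fmt;
use tracing::info;

struct AccessLog {
  host: String,
  method: String,
  status: u16,
}

// The formatting lives in one place ...
impl fmt::Display for AccessLog {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "{} -- {} -- {}", self.host, self.method, self.status)
  }
}

impl AccessLog {
  // ... and the log site only names the event and delegates to Display.
  fn output(&self) {
    info!(name: "access_log", "{}", self);
  }
}

fn main() {
  tracing_subscriber::fmt::init();
  AccessLog { host: "example.com".into(), method: "GET".into(), status: 200 }.output();
}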
@@ -53,6 +53,7 @@ impl From<HttpError> for StatusCode {
      HttpError::FailedToAddSetCookeInResponse(_) => StatusCode::INTERNAL_SERVER_ERROR,
      HttpError::FailedToGenerateDownstreamResponse(_) => StatusCode::INTERNAL_SERVER_ERROR,
      HttpError::FailedToUpgrade(_) => StatusCode::INTERNAL_SERVER_ERROR,
      HttpError::FailedToGetResponseFromBackend(_) => StatusCode::BAD_GATEWAY,
      // HttpError::NoUpgradeExtensionInRequest => StatusCode::BAD_REQUEST,
      // HttpError::NoUpgradeExtensionInResponse => StatusCode::BAD_GATEWAY,
      _ => StatusCode::INTERNAL_SERVER_ERROR,
@@ -1,7 +1,7 @@
use super::http_result::{HttpError, HttpResult};
use crate::{
  error::*,
  hyper_ext::body::{empty, ResponseBody},
  hyper_ext::body::{ResponseBody, empty},
  name_exp::ServerName,
};
use http::{Request, Response, StatusCode, Uri};
@@ -3,9 +3,9 @@ use crate::{
  backend::{UpstreamCandidates, UpstreamOption},
  log::*,
};
use anyhow::{anyhow, Result};
use anyhow::{Result, anyhow, ensure};
use bytes::BufMut;
use http::{header, HeaderMap, HeaderName, HeaderValue, Uri};
use http::{HeaderMap, HeaderName, HeaderValue, Uri, header};
use std::{borrow::Cow, net::SocketAddr};

#[cfg(feature = "sticky-cookie")]

@@ -238,10 +238,9 @@ pub(super) fn add_forwarding_header(
pub(super) fn remove_connection_header(headers: &mut HeaderMap) {
  if let Some(values) = headers.get(header::CONNECTION) {
    if let Ok(v) = values.clone().to_str() {
      for m in v.split(',') {
        if !m.is_empty() {
          headers.remove(m.trim());
        }
      let keys = v.split(',').map(|m| m.trim()).filter(|m| !m.is_empty());
      for m in keys {
        headers.remove(m);
      }
    }
  }
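The `remove_connection_header` rewrite flattens the nested `for`/`if` into one iterator chain. A standalone sketch of the hop-by-hop cleanup; removing the Connection header itself at the end is an extra illustrative step, not shown in the hunk:

use http::{header, HeaderMap, HeaderValue};

fn remove_connection_headers(headers: &mut HeaderMap) {
  if let Some(values) = headers.get(header::CONNECTION) {
    // Cloning the value first ends the immutable borrow on `headers`,
    // so the removals below are permitted.
    if let Ok(v) = values.clone().to_str() {
      for key in v.split(',').map(str::trim).filter(|k| !k.is_empty()) {
        headers.remove(key);
      }
    }
  }
  // Connection is itself hop-by-hop (illustrative extra step).
  headers.remove(header::CONNECTION);
}

fn main() {
  let mut headers = HeaderMap::new();
  headers.insert(header::CONNECTION, HeaderValue::from_static("keep-alive, upgrade"));
  headers.insert(header::UPGRADE, HeaderValue::from_static("websocket"));
  remove_connection_headers(&mut headers);
  assert!(headers.get(header::UPGRADE).is_none());
}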
@@ -274,13 +273,11 @@ pub(super) fn extract_upgrade(headers: &HeaderMap) -> Option<String> {
      .to_str()
      .unwrap_or("")
      .split(',')
      .any(|w| w.trim().to_ascii_lowercase() == header::UPGRADE.as_str().to_ascii_lowercase())
      .any(|w| w.trim().eq_ignore_ascii_case(header::UPGRADE.as_str()))
    {
      if let Some(u) = headers.get(header::UPGRADE) {
        if let Ok(m) = u.to_str() {
          debug!("Upgrade in request header: {}", m);
          return Some(m.to_owned());
        }
      if let Some(Ok(m)) = headers.get(header::UPGRADE).map(|u| u.to_str()) {
        debug!("Upgrade in request header: {}", m);
        return Some(m.to_owned());
      }
    }
  }
@@ -2,8 +2,8 @@ use crate::{
  backend::{Upstream, UpstreamCandidates, UpstreamOption},
  log::*,
};
use anyhow::{anyhow, ensure, Result};
use http::{header, uri::Scheme, Request, Version};
use anyhow::{Result, anyhow, ensure};
use http::{Request, Version, header, uri::Scheme};

/// Trait defining parser of hostname
/// Inspect and extract hostname from either the request HOST header or request line

@@ -59,6 +59,18 @@ pub(super) fn update_request_line<B>(
  upstream_chosen: &Upstream,
  upstream_candidates: &UpstreamCandidates,
) -> anyhow::Result<()> {
  // If request is grpc, HTTP/2 is required
  if req
    .headers()
    .get(header::CONTENT_TYPE)
    .map(|v| v.as_bytes().starts_with(b"application/grpc"))
    == Some(true)
  {
    debug!("Must be http/2 for gRPC request.");
    *req.version_mut() = Version::HTTP_2;
    return Ok(());
  }

  // If not specified (force_httpXX_upstream) and https, version is preserved except for http/3
  if upstream_chosen.uri.scheme() == Some(&Scheme::HTTP) {
    // Change version to http/1.1 when destination scheme is http
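gRPC needs end-to-end HTTP/2 (it depends on trailers), so the new guard pins the upstream version whenever the content type starts with `application/grpc`; the prefix match also covers suffixed variants like `application/grpc+proto`. A standalone sketch of the check:

use http::{header, Request, Version};

/// Force HTTP/2 for gRPC requests; returns whether the version was pinned.
fn pin_h2_for_grpc<B>(req: &mut Request<B>) -> bool {
  let is_grpc = req
    .headers()
    .get(header::CONTENT_TYPE)
    .map(|v| v.as_bytes().starts_with(b"application/grpc"))
    == Some(true);
  if is_grpc {
    *req.version_mut() = Version::HTTP_2;
  }
  is_grpc
}

fn main() {
  let mut req = Request::builder()
    .header(header::CONTENT_TYPE, "application/grpc+proto")
    .body(())
    .unwrap();
  assert!(pin_h2_for_grpc(&mut req));
  assert_eq!(req.version(), Version::HTTP_2);
}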
@@ -14,12 +14,11 @@ use crate::{
  name_exp::ServerName,
};
use hyper_util::server::{self, conn::auto::Builder as ConnectionBuilder};
use rustc_hash::FxHashMap as HashMap;
use rustls::ServerConfig;
use std::sync::Arc;

/// SNI to ServerConfig map type
pub type SniServerCryptoMap = HashMap<ServerName, Arc<ServerConfig>>;
pub type SniServerCryptoMap = std::collections::HashMap<ServerName, Arc<ServerConfig>, ahash::RandomState>;

pub(crate) use proxy_main::Proxy;
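The maps are migrating from rustc-hash to ahash here and in the globals; supplying the hasher through HashMap's third type parameter keeps the std API and changes only the hash function. A minimal sketch (ahash is the only non-std dependency):

use std::collections::HashMap;

// Same std HashMap, different hasher.
type FastMap<K, V> = HashMap<K, V, ahash::RandomState>;

fn main() {
  // `default()` works because ahash::RandomState implements Default;
  // `HashMap::new()` is only defined for the std SipHash state.
  let mut m: FastMap<String, u16> = FastMap::default();
  m.insert("example.com".to_string(), 443);
  assert_eq!(m.get("example.com"), Some(&443));

  // collect() also works, since FromIterator only requires the hasher to be Default.
  let m2: FastMap<&str, u16> = [("a", 1), ("b", 2)].into_iter().collect();
  assert_eq!(m2.len(), 2);
}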
@@ -33,7 +33,7 @@ where
  <<C as OpenStreams<Bytes>>::BidiStream as BidiStream<Bytes>>::SendStream: Send,
{
  let mut h3_conn = h3::server::Connection::<_, Bytes>::new(quic_connection).await?;
  info!(
  debug!(
    "QUIC/HTTP3 connection established from {:?} {}",
    client_addr,
    <&ServerName as TryInto<String>>::try_into(&tls_server_name).unwrap_or_default()

@@ -49,12 +49,17 @@ where
      }
      Err(e) => {
        warn!("HTTP/3 error on accept incoming connection: {}", e);
        match e.get_error_level() {
          h3::error::ErrorLevel::ConnectionError => break,
          h3::error::ErrorLevel::StreamError => continue,
        }
        break;
      }
      Ok(Some((req, stream))) => {
      // Ok(Some((req, stream))) => {
      Ok(Some(req_resolver)) => {
        let (req, stream) = match req_resolver.resolve_request().await {
          Ok((req, stream)) => (req, stream),
          Err(e) => {
            warn!("HTTP/3 error on resolve request in stream: {}", e);
            continue;
          }
        };
        // We consider the connection count separately from the stream count.
        // Max clients for h1/h2 = max 'stream' for h3.
        let request_count = self.globals.request_count.clone();

@@ -63,7 +68,7 @@ where
          h3_conn.shutdown(0).await?;
          break;
        }
        debug!("Request incoming: current # {}", request_count.current());
        trace!("Request incoming: current # {}", request_count.current());

        let self_inner = self.clone();
        let tls_server_name_inner = tls_server_name.clone();

@@ -77,7 +82,7 @@ where
            warn!("HTTP/3 error on serve stream: {}", e);
          }
          request_count.decrement();
          debug!("Request processed: current # {}", request_count.current());
          trace!("Request processed: current # {}", request_count.current());
        });
      }
    }

@@ -115,7 +120,7 @@ where
    let mut sender = body_sender;
    let mut size = 0usize;
    while let Some(mut body) = recv_stream.recv_data().await? {
      debug!("HTTP/3 incoming request body: remaining {}", body.remaining());
      trace!("HTTP/3 incoming request body: remaining {}", body.remaining());
      size += body.remaining();
      if size > max_body_size {
        error!(

@@ -129,9 +134,9 @@ where
    }

    // trailers: use inner for work around. (directly get trailer)
    let trailers = recv_stream.as_mut().recv_trailers().await?;
    let trailers = futures_util::future::poll_fn(|cx| recv_stream.as_mut().poll_recv_trailers(cx)).await?;
    if trailers.is_some() {
      debug!("HTTP/3 incoming request trailers");
      trace!("HTTP/3 incoming request trailers");
      sender.send_trailers(trailers.unwrap()).await?;
    }
    Ok(()) as RpxyResult<()>

@@ -154,13 +159,13 @@ where

  match send_stream.send_response(new_res).await {
    Ok(_) => {
      debug!("HTTP/3 response to connection successful");
      trace!("HTTP/3 response to connection successful");
      // on-demand body streaming to downstream without expanding the object onto memory.
      loop {
        let frame = match new_body.frame().await {
          Some(frame) => frame,
          None => {
            debug!("Response body finished");
            trace!("Response body finished");
            break;
          }
        }
@@ -11,7 +11,7 @@ use crate::{
  message_handler::HttpMessageHandler,
  name_exp::ServerName,
};
use futures::{select, FutureExt};
use futures::{FutureExt, select};
use http::{Request, Response};
use hyper::{
  body::Incoming,

@@ -22,6 +22,7 @@ use hyper_util::{client::legacy::connect::Connect, rt::TokioIo, server::conn::au
use rpxy_certs::ServerCrypto;
use std::{net::SocketAddr, sync::Arc, time::Duration};
use tokio::time::timeout;
use tokio_util::sync::CancellationToken;

/// Wrapper function to handle request for HTTP/1.1 and HTTP/2
/// HTTP/3 is handled in proxy_h3.rs which directly calls the message handler

@@ -79,7 +80,7 @@ where
      request_count.decrement();
      return;
    }
    debug!("Request incoming: current # {}", request_count.current());
    trace!("Request incoming: current # {}", request_count.current());

    let server_clone = self.connection_builder.clone();
    let message_handler_clone = self.message_handler.clone();

@@ -109,7 +110,7 @@ where
      }

      request_count.decrement();
      debug!("Request processed: current # {}", request_count.current());
      trace!("Request processed: current # {}", request_count.current());
    });
  }

@@ -129,30 +130,56 @@ where
  }

  /// Start with TLS (HTTPS)
  pub(super) async fn start_with_tls(&self) -> RpxyResult<()> {
  pub(super) async fn start_with_tls(&self, cancel_token: CancellationToken) -> RpxyResult<()> {
    // By default, TLS listener is spawned
    let join_handle_tls = self.globals.runtime_handle.spawn({
      let self_clone = self.clone();
      let cancel_token = cancel_token.clone();
      async move {
        select! {
          _ = self_clone.tls_listener_service().fuse() => {
            error!("TCP proxy service for TLS exited");
            cancel_token.cancel();
          },
          _ = cancel_token.cancelled().fuse() => {
            debug!("Cancel token is called for TLS listener");
          }
        }
      }
    });

    #[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))]
    {
      self.tls_listener_service().await?;
      error!("TCP proxy service for TLS exited");
      let _ = join_handle_tls.await;
      Ok(())
    }

    #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
    {
      if self.globals.proxy_config.http3 {
        select! {
          _ = self.tls_listener_service().fuse() => {
            error!("TCP proxy service for TLS exited");
          },
          _ = self.h3_listener_service().fuse() => {
            error!("UDP proxy service for QUIC exited");
          }
        };
        Ok(())
      } else {
        self.tls_listener_service().await?;
        error!("TCP proxy service for TLS exited");
        Ok(())
      // If HTTP/3 is not enabled, wait for TLS listener to finish
      if !self.globals.proxy_config.http3 {
        let _ = join_handle_tls.await;
        return Ok(());
      }

      // If HTTP/3 is enabled, spawn a task to handle HTTP/3 connections
      let join_handle_h3 = self.globals.runtime_handle.spawn({
        let self_clone = self.clone();
        async move {
          select! {
            _ = self_clone.h3_listener_service().fuse() => {
              error!("UDP proxy service for QUIC exited");
              cancel_token.cancel();
            },
            _ = cancel_token.cancelled().fuse() => {
              debug!("Cancel token is called for QUIC listener");
            }
          }
        }
      });
      let _ = futures::future::join(join_handle_tls, join_handle_h3).await;

      Ok(())
    }
  }

@@ -294,7 +321,7 @@ where
    let map = server_config.individual_config_map.clone().iter().map(|(k,v)| {
      let server_name = ServerName::from(k.as_slice());
      (server_name, v.clone())
    }).collect::<rustc_hash::FxHashMap<_,_>>();
    }).collect::<std::collections::HashMap<_,_,ahash::RandomState>>();
    server_crypto_map = Some(Arc::new(map));
  }
}

@@ -303,10 +330,10 @@ where
  }

  /// Entrypoint for HTTP/1.1, 2 and 3 servers
  pub async fn start(&self) -> RpxyResult<()> {
  pub async fn start(&self, cancel_token: CancellationToken) -> RpxyResult<()> {
    let proxy_service = async {
      if self.tls_enabled {
        self.start_with_tls().await
        self.start_with_tls(cancel_token).await
      } else {
        self.start_without_tls().await
      }
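`start_with_tls` now spawns each listener as its own task, wires both to one cancellation token, and joins them, so a crash in either the TCP/TLS or the UDP/QUIC path tears down the other. A reduced sketch of that shape, with stand-in listener bodies and the same `futures::select!`/`.fuse()` pattern the hunk uses:

use futures::{select, FutureExt};
use tokio_util::sync::CancellationToken;

async fn tls_listener() { tokio::time::sleep(std::time::Duration::from_millis(50)).await }
async fn h3_listener() { tokio::time::sleep(std::time::Duration::from_millis(50)).await }

async fn run_listeners(cancel_token: CancellationToken) {
  let tls = tokio::spawn({
    let token = cancel_token.clone();
    async move {
      select! {
        _ = tls_listener().fuse() => { eprintln!("TCP proxy service for TLS exited"); token.cancel(); },
        _ = token.cancelled().fuse() => {}
      }
    }
  });
  let h3 = tokio::spawn({
    let token = cancel_token.clone();
    async move {
      select! {
        _ = h3_listener().fuse() => { eprintln!("UDP proxy service for QUIC exited"); token.cancel(); },
        _ = token.cancelled().fuse() => {}
      }
    }
  });
  // Either branch cancelling the token releases both joins.
  let _ = futures::future::join(tls, h3).await;
}

#[tokio::main]
async fn main() {
  run_listeners(CancellationToken::new()).await;
}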
@@ -2,8 +2,8 @@ use super::{proxy_main::Proxy, socket::bind_udp_socket};
use crate::{error::*, log::*, name_exp::ByteName};
use hyper_util::client::legacy::connect::Connect;
use quinn::{
  crypto::rustls::{HandshakeData, QuicServerConfig},
  Endpoint, TransportConfig,
  crypto::rustls::{HandshakeData, QuicServerConfig},
};
use rpxy_certs::ServerCrypto;
use rustls::ServerConfig;
@@ -82,7 +82,7 @@ where
    let client_addr = incoming.remote_address();
    let quic_connection = match incoming.await {
      Ok(new_conn) => {
        info!("New connection established");
        trace!("New connection established");
        h3_quinn::Connection::new(new_conn)
      },
      Err(e) => {

@@ -110,7 +110,7 @@ where

    // quic event loop. this immediately cancels when crypto is updated by tokio::select!
    while let Some(new_conn) = server.accept().await {
      debug!("New QUIC connection established");
      trace!("New QUIC connection established");
      let Ok(Some(new_server_name)) = new_conn.server_name() else {
        warn!("HTTP/3 no SNI is given");
        continue;

@@ -16,10 +16,12 @@ pub(super) fn bind_tcp_socket(listening_on: &SocketAddr) -> RpxyResult<TcpSocket
  }?;
  tcp_socket.set_reuseaddr(true)?;
  tcp_socket.set_reuseport(true)?;
  if let Err(e) = tcp_socket.bind(*listening_on) {

  tcp_socket.bind(*listening_on).map_err(|e| {
    error!("Failed to bind TCP socket: {}", e);
    return Err(RpxyError::Io(e));
  };
    RpxyError::Io(e)
  })?;

  Ok(tcp_socket)
}

@@ -36,11 +38,10 @@ pub(super) fn bind_udp_socket(listening_on: &SocketAddr) -> RpxyResult<UdpSocket
  socket.set_reuse_port(true)?;
  socket.set_nonblocking(true)?; // This was made true inside quinn. so this line isn't necessary here. but just in case.

  if let Err(e) = socket.bind(&(*listening_on).into()) {
  socket.bind(&(*listening_on).into()).map_err(|e| {
    error!("Failed to bind UDP socket: {}", e);
    return Err(RpxyError::Io(e));
  };
  let udp_socket: UdpSocket = socket.into();
    RpxyError::Io(e)
  })?;

  Ok(udp_socket)
  Ok(socket.into())
}
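Both bind helpers now log inside a `map_err` closure and propagate with `?`, replacing `if let Err` blocks that needed a stray `;` after them. A self-contained sketch of the same shape using tokio's TcpSocket (ProxyError stands in for RpxyError):

use std::net::SocketAddr;
use tokio::net::TcpSocket;

#[derive(Debug)]
enum ProxyError {
  Io(std::io::Error),
}

fn bind_tcp_socket(listening_on: &SocketAddr) -> Result<TcpSocket, ProxyError> {
  let socket = TcpSocket::new_v4().map_err(ProxyError::Io)?;
  socket.set_reuseaddr(true).map_err(ProxyError::Io)?;
  // Log-and-wrap in one closure, then bubble up with `?`.
  socket.bind(*listening_on).map_err(|e| {
    eprintln!("Failed to bind TCP socket: {e}");
    ProxyError::Io(e)
  })?;
  Ok(socket)
}

#[tokio::main]
async fn main() -> Result<(), ProxyError> {
  let socket = bind_tcp_socket(&"127.0.0.1:0".parse().unwrap())?;
  let listener = socket.listen(1024).map_err(ProxyError::Io)?;
  println!("listening on {:?}", listener.local_addr());
  Ok(())
}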
@@ -1 +1 @@
Subproject commit af2d016b6aa4e09586253a0459efc4af6635c79b
Subproject commit cc7aeb870a62cd8d4b962de35927a241525ea30d

@@ -1 +1 @@
Subproject commit d5b5efd9de4dab3c958c50be5380652d801cc65f
Subproject commit 2500716b70bd6e548cdf690188ded7afe6726330

@@ -1 +1 @@
Subproject commit ffeaac1eb32589599c9be357f2273a2824741c7d
Subproject commit b2e9eac31c1b620d2fd0aa40753ca965a1ec1269