wip: feat: define response body enum

This commit is contained in:
Jun Kurihara 2023-12-12 19:58:33 +09:00
commit 008b62a925
No known key found for this signature in database
GPG key ID: 48ADFD173ED22B03
12 changed files with 215 additions and 104 deletions

View file

@ -0,0 +1,35 @@
use thiserror::Error;
pub type CacheResult<T> = std::result::Result<T, CacheError>;
/// Describes things that can go wrong in the Rpxy
#[derive(Debug, Error)]
pub enum CacheError {
// Cache errors,
#[error("Invalid null request and/or response")]
NullRequestOrResponse,
#[error("Failed to write byte buffer")]
FailedToWriteByteBufferForCache,
#[error("Failed to acquire mutex lock for cache")]
FailedToAcquiredMutexLockForCache,
#[error("Failed to create file cache")]
FailedToCreateFileCache,
#[error("Failed to write file cache")]
FailedToWriteFileCache,
#[error("Failed to open cache file")]
FailedToOpenCacheFile,
#[error("Too large to cache")]
TooLargeToCache,
#[error("Failed to cache bytes: {0}")]
FailedToCacheBytes(String),
#[error("Failed to send frame to cache {0}")]
FailedToSendFrameToCache(String),
}

View file

@ -1,8 +1,11 @@
use crate::{error::*, globals::Globals, log::*};
use super::cache_error::*;
use crate::{globals::Globals, hyper_ext::body::UnboundedStreamBody, log::*};
use bytes::{Buf, Bytes, BytesMut};
use futures::channel::mpsc;
use http::{Request, Response};
use http_body_util::StreamBody;
use http_body_util::{BodyExt, StreamBody};
use http_cache_semantics::CachePolicy;
use hyper::body::{Body, Frame, Incoming};
use lru::LruCache;
use std::{
convert::Infallible,
@ -69,6 +72,73 @@ impl RpxyCache {
let on_memory = total - file;
(total, on_memory, file)
}
/// Put a response body into the cache while simultaneously streaming it downstream.
///
/// Returns an [`UnboundedStreamBody`] that yields the frames of `body` as they
/// arrive. A background task reads frames from `body`, forwards each one into the
/// returned stream, and (once implemented — see the TODO below) persists the data
/// frames into the cache. If the accumulated size exceeds `self.max_each_size`,
/// the task aborts with [`CacheError::TooLargeToCache`], which closes the channel
/// feeding the downstream body.
pub async fn put(
  &self,
  uri: &hyper::Uri,
  mut body: Incoming,
  policy: &CachePolicy,
) -> CacheResult<UnboundedStreamBody> {
  // Clones captured for the not-yet-implemented cache-write path.
  // Underscore-prefixed until the TODO below is resolved, to avoid unused warnings.
  let _my_cache = self.inner.clone();
  let _file_store = self.file_store.clone();
  let _uri = uri.clone();
  let _policy = policy.clone();
  let max_each_size = self.max_each_size;
  let _max_each_size_on_memory = self.max_each_size_on_memory;
  // Channel carrying frames to the response body handed back to the caller.
  let (body_tx, body_rx) = mpsc::unbounded::<Result<Frame<Bytes>, hyper::Error>>();
  self.runtime_handle.spawn(async move {
    let mut size = 0usize;
    loop {
      let frame = match body.frame().await {
        Some(frame) => frame,
        None => {
          debug!("Response body finished");
          break;
        }
      };
      // Only data frames contribute to the size check; trailer frames count as zero.
      let frame_size = frame.as_ref().map(|f| {
        if f.is_data() {
          f.data_ref().map(|bytes| bytes.remaining()).unwrap_or_default()
        } else {
          0
        }
      });
      size += frame_size.unwrap_or_default();
      // Abort both caching and the downstream stream if the body grows too large.
      if size > max_each_size {
        warn!("Too large to cache");
        return Err(CacheError::TooLargeToCache);
      }
      frame
        .as_ref()
        .map(|f| {
          if f.is_data() {
            // TODO: cache the data bytes as a file or on memory.
            // Deciding file vs. memory will require buffering up to a threshold.
            // We also need a strategy for partially cached bodies, and a way to
            // bind the on-disk file to its cache object.
          }
        })
        .map_err(|e| CacheError::FailedToCacheBytes(e.to_string()))?;
      // Forward the frame to the response body consumed downstream.
      body_tx
        .unbounded_send(frame)
        .map_err(|e| CacheError::FailedToSendFrameToCache(e.to_string()))?;
    }
    Ok(()) as CacheResult<()>
  });
  Ok(StreamBody::new(body_rx))
}
}
/* ---------------------------------------------- */
@ -93,7 +163,7 @@ impl FileStore {
inner.cnt
}
/// Create a temporary file cache
async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> RpxyResult<CacheFileOrOnMemory> {
async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> CacheResult<CacheFileOrOnMemory> {
let mut inner = self.inner.write().await;
inner.create(cache_filename, body_bytes).await
}
@ -106,7 +176,7 @@ impl FileStore {
// };
// }
// /// Read a temporary file cache
// async fn read(&self, path: impl AsRef<Path>) -> RpxyResult<Bytes> {
// async fn read(&self, path: impl AsRef<Path>) -> CacheResult<Bytes> {
// let inner = self.inner.read().await;
// inner.read(&path).await
// }
@ -141,16 +211,16 @@ impl FileStoreInner {
}
/// Create a new temporary file cache
async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> RpxyResult<CacheFileOrOnMemory> {
async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> CacheResult<CacheFileOrOnMemory> {
let cache_filepath = self.cache_dir.join(cache_filename);
let Ok(mut file) = File::create(&cache_filepath).await else {
return Err(RpxyError::FailedToCreateFileCache);
return Err(CacheError::FailedToCreateFileCache);
};
let mut bytes_clone = body_bytes.clone();
while bytes_clone.has_remaining() {
if let Err(e) = file.write_buf(&mut bytes_clone).await {
error!("Failed to write file cache: {e}");
return Err(RpxyError::FailedToWriteFileCache);
return Err(CacheError::FailedToWriteFileCache);
};
}
self.cnt += 1;
@ -158,15 +228,14 @@ impl FileStoreInner {
}
/// Retrieve a stored temporary file cache
async fn read(&self, path: impl AsRef<Path>) -> RpxyResult<()> {
async fn read(&self, path: impl AsRef<Path>) -> CacheResult<()> {
let Ok(mut file) = File::open(&path).await else {
warn!("Cache file object cannot be opened");
return Err(RpxyError::FailedToOpenCacheFile);
return Err(CacheError::FailedToOpenCacheFile);
};
/* ----------------------------- */
// PoC for streaming body
use futures::channel::mpsc;
let (tx, rx) = mpsc::unbounded::<Result<hyper::body::Frame<bytes::Bytes>, Infallible>>();
// let (body_sender, res_body) = Body::channel();
@ -263,10 +332,10 @@ impl LruCacheManager {
}
/// Push an entry
fn push(&self, cache_key: &str, cache_object: CacheObject) -> RpxyResult<Option<(String, CacheObject)>> {
fn push(&self, cache_key: &str, cache_object: CacheObject) -> CacheResult<Option<(String, CacheObject)>> {
let Ok(mut lock) = self.inner.lock() else {
error!("Failed to acquire mutex lock for writing cache entry");
return Err(RpxyError::FailedToAcquiredMutexLockForCache);
return Err(CacheError::FailedToAcquiredMutexLockForCache);
};
let res = Ok(lock.push(cache_key.to_string(), cache_object));
// This may be inconsistent with the actual number of entries
@ -280,13 +349,13 @@ impl LruCacheManager {
pub fn get_policy_if_cacheable<B1, B2>(
req: Option<&Request<B1>>,
res: Option<&Response<B2>>,
) -> RpxyResult<Option<CachePolicy>>
) -> CacheResult<Option<CachePolicy>>
// where
// B1: core::fmt::Debug,
{
// deduce cache policy from req and res
let (Some(req), Some(res)) = (req, res) else {
return Err(RpxyError::NullRequestOrResponse);
return Err(CacheError::NullRequestOrResponse);
};
let new_policy = CachePolicy::new(req, res);

5
rpxy-lib/src/forwarder/cache/mod.rs vendored Normal file
View file

@ -0,0 +1,5 @@
//! Response-cache module for the forwarder.
//! NOTE(review): appears to be compiled only with the `cache` feature — confirm at the call sites.
mod cache_error;
mod cache_main;
/// Error type covering all fallible cache operations.
pub use cache_error::CacheError;
/// Cache entry point (`RpxyCache`), cacheability check, and the file/memory entry representation.
pub use cache_main::{get_policy_if_cacheable, CacheFileOrOnMemory, RpxyCache};

View file

@ -1,14 +1,10 @@
use crate::{
error::{RpxyError, RpxyResult},
globals::Globals,
hyper_ext::{
body::{wrap_incoming_body_response, BoxBody, IncomingOr},
rt::LocalExecutor,
},
hyper_ext::{body::ResponseBody, rt::LocalExecutor},
log::*,
};
use async_trait::async_trait;
use chrono::Duration;
use http::{Request, Response, Version};
use hyper::body::{Body, Incoming};
use hyper_util::client::legacy::{
@ -19,10 +15,6 @@ use std::sync::Arc;
#[cfg(feature = "cache")]
use super::cache::{get_policy_if_cacheable, RpxyCache};
#[cfg(feature = "cache")]
use crate::hyper_ext::body::{full, wrap_synthetic_body_response};
#[cfg(feature = "cache")]
use http_body_util::BodyExt;
#[async_trait]
/// Definition of the forwarder that simply forward requests from downstream client to upstream app servers.
@ -40,7 +32,7 @@ pub struct Forwarder<C, B> {
}
#[async_trait]
impl<C, B1> ForwardRequest<B1, IncomingOr<BoxBody>> for Forwarder<C, B1>
impl<C, B1> ForwardRequest<B1, ResponseBody> for Forwarder<C, B1>
where
C: Send + Sync + Connect + Clone + 'static,
B1: Body + Send + Sync + Unpin + 'static,
@ -49,7 +41,7 @@ where
{
type Error = RpxyError;
async fn request(&self, req: Request<B1>) -> Result<Response<IncomingOr<BoxBody>>, Self::Error> {
async fn request(&self, req: Request<B1>) -> Result<Response<ResponseBody>, Self::Error> {
// TODO: cache handling
#[cfg(feature = "cache")]
{
@ -67,38 +59,27 @@ where
let res = self.request_directly(req).await;
if self.cache.is_none() {
return res.map(wrap_incoming_body_response::<BoxBody>);
return res.map(|inner| inner.map(ResponseBody::Incoming));
}
// check cacheability and store it if cacheable
let Ok(Some(cache_policy)) = get_policy_if_cacheable(synth_req.as_ref(), res.as_ref().ok()) else {
return res.map(wrap_incoming_body_response::<BoxBody>);
return res.map(|inner| inner.map(ResponseBody::Incoming));
};
let (parts, body) = res.unwrap().into_parts();
// TODO: This is inefficient since current strategy needs to copy the whole body onto memory to cache it.
// This should be handled by copying buffer simultaneously while forwarding response to downstream.
let Ok(bytes) = body.collect().await.map(|v| v.to_bytes()) else {
return Err(RpxyError::FailedToWriteByteBufferForCache);
};
let bytes_clone = bytes.clone();
// Get streamed body without waiting for the arrival of the body,
// which is done simultaneously with caching.
let stream_body = self
.cache
.as_ref()
.unwrap()
.put(synth_req.unwrap().uri(), body, &cache_policy)
.await?;
// TODO: this is inefficient. needs to be reconsidered to avoid unnecessary copy and should spawn async task to store cache.
// We may need to use the same logic as h3.
// Is bytes.clone() enough?
// if let Err(cache_err) = self
// .cache
// .as_ref()
// .unwrap()
// .put(synth_req.unwrap().uri(), &bytes, &cache_policy)
// .await
// {
// error!("{:?}", cache_err);
// };
// response with cached body
Ok(wrap_synthetic_body_response(Response::from_parts(parts, full(bytes))))
// response with body being cached in background
let new_res = Response::from_parts(parts, ResponseBody::Streamed(stream_body));
Ok(new_res)
}
// No cache handling
@ -107,7 +88,7 @@ where
self
.request_directly(req)
.await
.map(wrap_incoming_body_response::<BoxBody>)
.map(|inner| inner.map(ResponseBody::Incoming))
}
}
}

View file

@ -3,6 +3,9 @@ mod cache;
mod client;
use crate::hyper_ext::body::{IncomingLike, IncomingOr};
pub type Forwarder<C> = client::Forwarder<C, IncomingOr<IncomingLike>>;
pub use client::ForwardRequest;
pub(crate) type Forwarder<C> = client::Forwarder<C, IncomingOr<IncomingLike>>;
pub(crate) use client::ForwardRequest;
#[cfg(feature = "cache")]
pub(crate) use cache::CacheError;