pub async fn make_streaming_llm_request(
chat_request: LLMRequest,
model_deployment_name: &str,
app_config: &ApplicationConfiguration,
) -> Result<Response>

Makes a streaming request to an LLM.
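To show how this entry point is typically consumed, below is a minimal usage sketch, not a confirmed part of this crate's documentation. It assumes the crate's Result alias is anyhow-compatible and that Response is a reqwest::Response (so bytes_stream(), from reqwest's stream feature, is available); the deployment name "gpt-4o" and the wrapper print_streamed_reply are hypothetical.

use anyhow::Result;
use futures_util::StreamExt;

// Assumed helper: forwards a caller-prepared LLMRequest and prints the reply
// as it streams in. LLMRequest construction is crate-specific, so it is left
// to the caller here.
async fn print_streamed_reply(
    chat_request: LLMRequest,
    app_config: &ApplicationConfiguration,
) -> Result<()> {
    // Start the streaming request against a named model deployment.
    // "gpt-4o" is a placeholder, not a value mandated by this API.
    let response =
        make_streaming_llm_request(chat_request, "gpt-4o", app_config).await?;

    // Assuming Response is a reqwest::Response, consume the body chunk by
    // chunk as the LLM emits tokens instead of waiting for the full reply.
    let mut body = response.bytes_stream();
    while let Some(chunk) = body.next().await {
        let bytes = chunk?;
        print!("{}", String::from_utf8_lossy(&bytes));
    }

    Ok(())
}

Incremental consumption like this keeps time-to-first-token low, which is the usual reason to reach for a streaming request rather than a buffered one.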