More smoothing, as they say

This commit is contained in:
Dave
2026-02-16 16:35:25 +00:00
parent 5923165fcf
commit f76376b203
10 changed files with 256 additions and 265 deletions

View File

@@ -1,27 +1,41 @@
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::http::payloads::ModelPayload;
use crate::io::fs;
use crate::llm::chat;
use poem_openapi::{param::Query, payload::Json};
use poem_openapi::{Object, OpenApi, param::Query, payload::Json};
use serde::Deserialize;
use std::sync::Arc;
pub async fn get_model_preference(ctx: &AppContext) -> OpenApiResult<Json<Option<String>>> {
let result = fs::get_model_preference(ctx.store.as_ref()).map_err(bad_request)?;
Ok(Json(result))
/// Request body for `POST /model`: carries the model identifier the
/// caller wants stored as the preferred model.
#[derive(Deserialize, Object)]
struct ModelPayload {
    /// Identifier of the model to persist as the preference.
    model: String,
}
pub async fn set_model_preference(
payload: Json<ModelPayload>,
ctx: &AppContext,
) -> OpenApiResult<Json<bool>> {
fs::set_model_preference(payload.0.model, ctx.store.as_ref()).map_err(bad_request)?;
Ok(Json(true))
/// OpenAPI endpoint group for model-related routes (`/model`,
/// `/ollama/models`); every handler reads shared state through `ctx`.
pub struct ModelApi {
    /// Shared application context; `Arc` because the same context is
    /// presumably shared with other API groups — confirm at construction site.
    pub ctx: Arc<AppContext>,
}
pub async fn get_ollama_models(
base_url: Query<Option<String>>,
) -> OpenApiResult<Json<Vec<String>>> {
let models = chat::get_ollama_models(base_url.0)
.await
.map_err(bad_request)?;
Ok(Json(models))
#[OpenApi]
impl ModelApi {
#[oai(path = "/model", method = "get")]
async fn get_model_preference(&self) -> OpenApiResult<Json<Option<String>>> {
let result = fs::get_model_preference(self.ctx.store.as_ref()).map_err(bad_request)?;
Ok(Json(result))
}
#[oai(path = "/model", method = "post")]
async fn set_model_preference(&self, payload: Json<ModelPayload>) -> OpenApiResult<Json<bool>> {
fs::set_model_preference(payload.0.model, self.ctx.store.as_ref()).map_err(bad_request)?;
Ok(Json(true))
}
#[oai(path = "/ollama/models", method = "get")]
async fn get_ollama_models(
&self,
base_url: Query<Option<String>>,
) -> OpenApiResult<Json<Vec<String>>> {
let models = chat::get_ollama_models(base_url.0)
.await
.map_err(bad_request)?;
Ok(Json(models))
}
}