//! HTTP routes for model preference and Ollama model discovery.
//!
//! Original path: storkit/server/src/http/model.rs
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::io::fs;
use crate::llm::chat;
use poem_openapi::{Object, OpenApi, param::Query, payload::Json};
use serde::Deserialize;
use std::sync::Arc;
/// Request body for `POST /model`: the model identifier to persist
/// as the user's preference.
#[derive(Deserialize, Object)]
struct ModelPayload {
    /// Name/identifier of the model to store.
    model: String,
}
2026-02-16 16:35:25 +00:00
pub struct ModelApi {
pub ctx: Arc<AppContext>,
2026-02-16 16:24:21 +00:00
}
2026-02-16 16:35:25 +00:00
#[OpenApi]
impl ModelApi {
    /// `GET /model` — returns the persisted model preference, or `None`
    /// when no preference has been stored yet.
    ///
    /// # Errors
    /// Store read failures are mapped to a 400 Bad Request.
    #[oai(path = "/model", method = "get")]
    async fn get_model_preference(&self) -> OpenApiResult<Json<Option<String>>> {
        let result = fs::get_model_preference(self.ctx.store.as_ref()).map_err(bad_request)?;
        Ok(Json(result))
    }

    /// `POST /model` — persists the model named in the request body as the
    /// current preference. Returns `true` on success.
    ///
    /// # Errors
    /// Store write failures are mapped to a 400 Bad Request.
    #[oai(path = "/model", method = "post")]
    async fn set_model_preference(&self, payload: Json<ModelPayload>) -> OpenApiResult<Json<bool>> {
        fs::set_model_preference(payload.0.model, self.ctx.store.as_ref()).map_err(bad_request)?;
        Ok(Json(true))
    }

    /// `GET /ollama/models` — lists model names available from an Ollama
    /// server. The optional `base_url` query parameter overrides the
    /// default endpoint used by `chat::get_ollama_models`.
    ///
    /// # Errors
    /// Upstream request failures are mapped to a 400 Bad Request.
    #[oai(path = "/ollama/models", method = "get")]
    async fn get_ollama_models(
        &self,
        base_url: Query<Option<String>>,
    ) -> OpenApiResult<Json<Vec<String>>> {
        let models = chat::get_ollama_models(base_url.0)
            .await
            .map_err(bad_request)?;
        Ok(Json(models))
    }
}