feat(api): add reasoning_effort field to MessageRequest and OpenAI-compat path

Users of OpenAI-compatible reasoning models (o4-mini, o3, deepseek-r1,
etc.) had no way to control reasoning effort — the field was missing from
MessageRequest and never emitted in the request body.

Changes:
- Add `reasoning_effort: Option<String>` to `MessageRequest` in types.rs
  - Annotated with skip_serializing_if = "Option::is_none" for clean JSON
  - Accepted values: "low", "medium", "high" (passed through verbatim)
- In `build_chat_completion_request`, emit `"reasoning_effort"` when set
- Two unit tests:
  - `reasoning_effort_is_included_when_set`: o4-mini + "high" → field present
  - `reasoning_effort_omitted_when_not_set`: gpt-4o, no field → absent

Existing callers use `..Default::default()` and are unaffected.
One struct-literal test that listed all fields explicitly was updated with
`reasoning_effort: None`.

The CLI flag to expose this to users is a follow-up (ROADMAP #34 partial).
This commit lands the foundational API-layer plumbing needed for that.

Partial ROADMAP #34.
This commit is contained in:
Jobdori
2026-04-09 04:02:59 +09:00
parent beb09df4b8
commit e4c3871882
2 changed files with 39 additions and 0 deletions

View File

@@ -801,6 +801,10 @@ fn build_chat_completion_request(request: &MessageRequest, config: OpenAiCompatC
payload["stop"] = json!(stop); payload["stop"] = json!(stop);
} }
} }
// reasoning_effort for OpenAI-compatible reasoning models (o4-mini, o3, etc.)
if let Some(effort) = &request.reasoning_effort {
payload["reasoning_effort"] = json!(effort);
}
payload payload
} }
@@ -1216,6 +1220,35 @@ mod tests {
); );
} }
#[test]
fn reasoning_effort_is_included_when_set() {
    // A request for a reasoning model that carries an explicit effort level
    // must pass that value through to the wire payload verbatim.
    let request = MessageRequest {
        model: "o4-mini".to_string(),
        max_tokens: 1024,
        messages: vec![InputMessage::user_text("think hard")],
        reasoning_effort: Some("high".to_string()),
        ..Default::default()
    };
    let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
    assert_eq!(payload["reasoning_effort"], json!("high"));
}
#[test]
fn reasoning_effort_omitted_when_not_set() {
    // With reasoning_effort left at its default (None), the key must be
    // absent from the payload entirely — not serialized as null.
    let request = MessageRequest {
        model: "gpt-4o".to_string(),
        max_tokens: 64,
        messages: vec![InputMessage::user_text("hello")],
        ..Default::default()
    };
    let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
    assert!(payload.get("reasoning_effort").is_none());
}
#[test] #[test]
fn openai_streaming_requests_include_usage_opt_in() { fn openai_streaming_requests_include_usage_opt_in() {
let payload = build_chat_completion_request( let payload = build_chat_completion_request(
@@ -1333,6 +1366,7 @@ mod tests {
frequency_penalty: Some(0.5), frequency_penalty: Some(0.5),
presence_penalty: Some(0.3), presence_penalty: Some(0.3),
stop: Some(vec!["\n".to_string()]), stop: Some(vec!["\n".to_string()]),
reasoning_effort: None,
}; };
let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai()); let payload = build_chat_completion_request(&request, OpenAiCompatConfig::openai());
assert_eq!(payload["temperature"], 0.7); assert_eq!(payload["temperature"], 0.7);

View File

@@ -26,6 +26,11 @@ pub struct MessageRequest {
pub presence_penalty: Option<f64>, pub presence_penalty: Option<f64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub stop: Option<Vec<String>>, pub stop: Option<Vec<String>>,
/// Reasoning effort level for OpenAI-compatible reasoning models (e.g. `o4-mini`).
/// Accepted values: `"low"`, `"medium"`, `"high"`. Omitted when `None`.
/// Silently ignored by backends that do not support it.
#[serde(skip_serializing_if = "Option::is_none")]
pub reasoning_effort: Option<String>,
} }
impl MessageRequest { impl MessageRequest {