mirror of
https://github.com/instructkr/claw-code.git
synced 2026-04-09 01:24:49 +08:00
fix(api): route DashScope models to dashscope config, not openai
ProviderClient::from_model_with_anthropic_auth was dispatching every
ProviderKind::OpenAi match to OpenAiCompatConfig::openai(), which reads
OPENAI_API_KEY and points at api.openai.com. But DashScope models
(qwen-plus, qwen/qwen3-coder, etc.) also return ProviderKind::OpenAi
from detect_provider_kind because DashScope speaks the OpenAI wire
format. The metadata layer correctly identifies them as needing
DASHSCOPE_API_KEY and the DashScope compatible-mode endpoint, but that
metadata was being ignored at dispatch time.
Result: users running `claw --model qwen-plus` with DASHSCOPE_API_KEY
set would get a "missing OPENAI_API_KEY" error instead of being routed
to DashScope.
Fix: consult providers::metadata_for_model in the OpenAi dispatch arm
and pick dashscope() vs openai() based on metadata.auth_env.
Adds a regression test asserting ProviderClient::from_model("qwen-plus")
builds with the DashScope base URL. Exposes a pub base_url() accessor
on OpenAiCompatClient so the test can verify the routing.
Authored by droid (Kimi K2.5 Turbo) via acpx, cleaned up by Jobdori
(removed unsafe blocks unnecessary under edition 2021, imported
ProviderClient from super, adopted EnvVarGuard pattern from
providers/mod.rs tests).
Co-Authored-By: Droid <noreply@factory.ai>
This commit is contained in:
@@ -31,9 +31,18 @@ impl ProviderClient {
|
||||
ProviderKind::Xai => Ok(Self::Xai(OpenAiCompatClient::from_env(
|
||||
OpenAiCompatConfig::xai(),
|
||||
)?)),
|
||||
ProviderKind::OpenAi => Ok(Self::OpenAi(OpenAiCompatClient::from_env(
|
||||
OpenAiCompatConfig::openai(),
|
||||
)?)),
|
||||
ProviderKind::OpenAi => {
|
||||
// DashScope models (qwen-*) also return ProviderKind::OpenAi because they
|
||||
// speak the OpenAI wire format, but they need the DashScope config which
|
||||
// reads DASHSCOPE_API_KEY and points at dashscope.aliyuncs.com.
|
||||
let config = match providers::metadata_for_model(&resolved_model) {
|
||||
Some(meta) if meta.auth_env == "DASHSCOPE_API_KEY" => {
|
||||
OpenAiCompatConfig::dashscope()
|
||||
}
|
||||
_ => OpenAiCompatConfig::openai(),
|
||||
};
|
||||
Ok(Self::OpenAi(OpenAiCompatClient::from_env(config)?))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,8 +144,21 @@ pub fn read_xai_base_url() -> String {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
|
||||
use super::ProviderClient;
|
||||
use crate::providers::{detect_provider_kind, resolve_model_alias, ProviderKind};
|
||||
|
||||
/// Serializes every test in this module that mutates process-wide
/// environment variables so concurrent test threads cannot observe
/// each other's partially-applied state.
///
/// A poisoned mutex (a previous test panicked while holding the
/// guard) is recovered rather than propagated: the lock exists only
/// for mutual exclusion, so the poison flag carries no information
/// we care about.
fn env_lock() -> std::sync::MutexGuard<'static, ()> {
    static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
    let mutex = LOCK.get_or_init(|| Mutex::new(()));
    match mutex.lock() {
        Ok(guard) => guard,
        Err(poisoned) => poisoned.into_inner(),
    }
}
|
||||
|
||||
#[test]
|
||||
fn resolves_existing_and_grok_aliases() {
|
||||
assert_eq!(resolve_model_alias("opus"), "claude-opus-4-6");
|
||||
@@ -152,4 +174,68 @@ mod tests {
|
||||
ProviderKind::Anthropic
|
||||
);
|
||||
}
|
||||
|
||||
/// Snapshot-restore guard for a single environment variable. Mirrors
/// the pattern used in `providers/mod.rs` tests: captures the original
/// value on construction, applies the override, and restores on drop so
/// tests leave the process env untouched even when they panic.
struct EnvVarGuard {
    // Which variable this guard owns for its lifetime.
    key: &'static str,
    // Value present before the override; `None` means "was unset".
    original: Option<std::ffi::OsString>,
}

impl EnvVarGuard {
    /// Snapshots the current value of `key`, then overrides it:
    /// `Some(v)` sets the variable, `None` removes it.
    fn set(key: &'static str, value: Option<&str>) -> Self {
        // Capture the snapshot *before* touching the variable so the
        // Drop impl can restore exactly what was there.
        let guard = Self {
            key,
            original: std::env::var_os(key),
        };
        if let Some(value) = value {
            std::env::set_var(key, value);
        } else {
            std::env::remove_var(key);
        }
        guard
    }
}

impl Drop for EnvVarGuard {
    /// Restores the snapshot: re-set the original value if there was
    /// one, otherwise remove the variable again.
    fn drop(&mut self) {
        if let Some(value) = self.original.take() {
            std::env::set_var(self.key, value);
        } else {
            std::env::remove_var(self.key);
        }
    }
}
|
||||
|
||||
#[test]
fn dashscope_model_uses_dashscope_config_not_openai() {
    // Regression: qwen-plus was previously dispatched through
    // OpenAiCompatConfig::openai() (reads OPENAI_API_KEY, targets
    // api.openai.com) even though it must use OpenAiCompatConfig::dashscope()
    // (reads DASHSCOPE_API_KEY, targets dashscope.aliyuncs.com).
    let _lock = env_lock();
    let _dashscope = EnvVarGuard::set("DASHSCOPE_API_KEY", Some("test-dashscope-key"));
    let _openai = EnvVarGuard::set("OPENAI_API_KEY", None);

    let built = ProviderClient::from_model("qwen-plus");

    // Must succeed (not fail with "missing OPENAI_API_KEY")
    assert!(
        built.is_ok(),
        "qwen-plus with DASHSCOPE_API_KEY set should build successfully, got: {:?}",
        built.err()
    );

    // Verify it's the OpenAi variant pointed at the DashScope base URL.
    match built.unwrap() {
        ProviderClient::OpenAi(inner) => {
            let url = inner.base_url();
            assert!(
                url.contains("dashscope.aliyuncs.com"),
                "qwen-plus should route to DashScope base URL (contains 'dashscope.aliyuncs.com'), got: {}",
                url
            );
        }
        other => panic!(
            "Expected ProviderClient::OpenAi for qwen-plus, got: {:?}",
            other
        ),
    }
}
|
||||
}
|
||||
|
||||
@@ -98,6 +98,11 @@ impl OpenAiCompatClient {
|
||||
/// Read-only accessor for the provider configuration this client was
/// built with. Returns by value from `&self`, which implies
/// `OpenAiCompatConfig` is `Copy`; being `const fn`, it is also usable
/// in const contexts.
const fn config(&self) -> OpenAiCompatConfig {
    self.config
}
|
||||
|
||||
/// The resolved base URL this client sends requests to.
///
/// Public so callers (notably provider-routing tests) can assert that
/// a model was dispatched to the expected endpoint — e.g. DashScope's
/// compatible-mode URL versus api.openai.com.
#[must_use]
pub fn base_url(&self) -> &str {
    &self.base_url
}
|
||||
#[must_use]
|
||||
pub fn new(api_key: impl Into<String>, config: OpenAiCompatConfig) -> Self {
|
||||
Self {
|
||||
|
||||
Reference in New Issue
Block a user