Coverage for providers/openai.py: 33%

36 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-03-29 02:55 +0800

from openai import OpenAI

from qrclaw.providers.base import LLMProvider, LLMResponse, ToolCall
from qrclaw.config import OPENAI_API_KEY, OPENAI_MODEL, OPENAI_BASE_URL
from qrclaw.logger import get_logger

# Module-level logger for this provider; name mirrors the module path.
logger = get_logger("qrclaw.providers.openai")

7 

8 

class OpenAIProvider(LLMProvider):
    """LLM provider backed by the OpenAI Chat Completions API.

    Credentials, model name, and endpoint come from ``qrclaw.config``.
    Responses are normalized into the project's ``LLMResponse``/``ToolCall``
    types so callers never touch the raw OpenAI SDK objects.
    """

    def __init__(self) -> None:
        # An empty OPENAI_BASE_URL falls back to the SDK's default endpoint
        # (passing "" would be rejected, hence the `or None`).
        self._client = OpenAI(
            api_key=OPENAI_API_KEY,
            base_url=OPENAI_BASE_URL or None,
        )
        logger.info("OpenAI 渠道已初始化")

    @staticmethod
    def _sanitize(messages: list[dict]) -> list[dict]:
        """Strip top-level null fields to avoid HTTP 400 from the Gemini
        compatibility layer.

        Only the keys in ``drop_if_null`` are removed, and only when their
        value is ``None``; a null ``content`` is coerced to ``""`` because
        some backends reject null content outright.
        """
        drop_if_null = {"refusal", "annotations", "audio", "function_call"}
        result = []
        for msg in messages:
            cleaned = {k: v for k, v in msg.items() if not (k in drop_if_null and v is None)}
            if cleaned.get("content") is None:
                cleaned["content"] = ""
            result.append(cleaned)
        return result

    def chat(self, messages: list[dict], tools: list[dict] | None = None) -> LLMResponse:
        """Send one chat-completion request and normalize the first choice.

        Args:
            messages: OpenAI-format message dicts; sanitized before sending.
            tools: Optional OpenAI tool schemas; omitted from the request
                when falsy.

        Returns:
            An ``LLMResponse`` carrying content, tool calls, finish reason,
            token usage, and the raw SDK response.
        """
        kwargs = {"model": OPENAI_MODEL, "messages": self._sanitize(messages)}
        if tools:
            kwargs["tools"] = tools

        response = self._client.chat.completions.create(**kwargs)
        choice = response.choices[0]
        message = choice.message

        tool_calls = []
        if message.tool_calls:
            for tc in message.tool_calls:
                tool_calls.append(ToolCall(
                    id=tc.id,
                    name=tc.function.name,
                    arguments=tc.function.arguments,
                ))

        # Some compatibility layers report a plain "stop" (or other reason)
        # even when tool calls are present; normalize so callers can rely
        # on finish_reason == "tool_calls" implying tool_calls is non-empty.
        finish_reason = choice.finish_reason
        if tool_calls and finish_reason not in ("tool_calls", "stop"):
            finish_reason = "tool_calls"

        # usage can be None on some OpenAI-compatible backends; fall back
        # to zeros instead of raising AttributeError.
        usage = response.usage
        prompt_tokens = usage.prompt_tokens if usage else 0
        completion_tokens = usage.completion_tokens if usage else 0
        total_tokens = usage.total_tokens if usage else 0

        # Lazy %-style args: formatting is skipped when INFO is disabled.
        logger.info("LLM 响应成功,使用 %s tokens", total_tokens)
        return LLMResponse(
            content=message.content or "",
            tool_calls=tool_calls,
            finish_reason=finish_reason,
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=total_tokens,
            raw=response,
        )