Coverage for python / weflayr / sdk / openai / client.py: 100%

36 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-03-23 15:28 +0100

1"""Weflayr instrumented wrapper for the OpenAI SDK. 

2 

3Connector: **OpenAI** 

4Provider: https://openai.com 

5 

6Available connectors 

7-------------------- 

8- :class:`OpenAI` — synchronous client (drop-in for ``openai.OpenAI``) 

9- :class:`AsyncOpenAI` — async client (drop-in for ``openai.AsyncOpenAI``) 

10 

11Available methods 

12----------------- 

13Via ``client.chat.completions``: 

14 

15- :meth:`Completions.create` — synchronous chat completion with telemetry 

16- :meth:`AsyncCompletions.create` — async chat completion with telemetry 

17 

18Example:: 

19 

20 from weflayr.sdk.openai.client import OpenAI, AsyncOpenAI 

21 

22 # Sync 

23 client = OpenAI(api_key="sk-...") 

24 response = client.chat.completions.create( 

25 model="gpt-4o-mini", 

26 messages=[{"role": "user", "content": "Hello"}], 

27 ) 

28 

29 # Async 

30 async_client = AsyncOpenAI(api_key="sk-...") 

31 response = await async_client.chat.completions.create( 

32 model="gpt-4o-mini", 

33 messages=[{"role": "user", "content": "Hello"}], 

34 ) 

35""" 

36 

37from __future__ import annotations 

38 

39from typing import Any 

40 

41from openai import AsyncOpenAI as _AsyncOpenAI 

42from openai import OpenAI as _OpenAI 

43from openai.resources.chat.completions import AsyncCompletions as _AsyncCompletions 

44from openai.resources.chat.completions import Completions as _Completions 

45 

46from weflayr.sdk.helpers import CLIENT_ID, CLIENT_SECRET, INTAKE_URL, track_async, track_sync 

47 

48 

49def _usage(response) -> dict: 

50 """Extract token usage from an OpenAI chat completion response. 

51 

52 Args: 

53 response: A ``ChatCompletion`` returned by the OpenAI SDK. 

54 

55 Returns: 

56 A dict with ``prompt_tokens`` and ``completion_tokens`` (both ``int | None``). 

57 """ 

58 usage = getattr(response, "usage", None) 

59 return { 

60 "prompt_tokens": getattr(usage, "prompt_tokens", None), 

61 "completion_tokens": getattr(usage, "completion_tokens", None), 

62 } 

63 

64 

class Completions(_Completions):
    """Synchronous chat-completions resource instrumented with Weflayr telemetry.

    Args:
        client: The parent ``openai.OpenAI`` instance.
        intake_url: Weflayr intake API base URL.
        client_id: Client identifier sent in the endpoint path.
        bearer_token: Bearer token for the Authorization header.
    """

    def __init__(
        self,
        client: _OpenAI,
        *,
        intake_url: str = INTAKE_URL,
        client_id: str = CLIENT_ID,
        bearer_token: str = CLIENT_SECRET,
    ) -> None:
        super().__init__(client)
        # Telemetry configuration reused by every create() call.
        self._intake_url = intake_url
        self._client_id = client_id
        self._bearer_token = bearer_token

    def create(self, **kwargs: Any):
        """Send a synchronous chat completion request with telemetry.

        Args:
            model (str): OpenAI model identifier (e.g. ``"gpt-4o-mini"``).
            messages (list[dict]): Conversation history in OpenAI message format.
            tags (dict, optional): Arbitrary key/value metadata. Stripped before the upstream call.
            **kwargs: Any other kwargs accepted by ``openai`` ``chat.completions.create()``.

        Returns:
            ``ChatCompletion``: The upstream OpenAI response, unmodified.
        """
        # Telemetry-only argument; must not reach the OpenAI SDK.
        tags = kwargs.pop("tags", {})
        before_payload = {
            "model": kwargs.get("model"),
            "message_count": len(kwargs.get("messages", [])),
            "tags": tags,
        }

        def _upstream():
            # Two-argument super(): the zero-argument form is unavailable
            # inside a nested function.
            return super(Completions, self).create(**kwargs)

        return track_sync(
            url=self._intake_url,
            call="chat.completions.create",
            before=before_payload,
            fn=_upstream,
            after_extra=_usage,
            client_id=self._client_id,
            bearer_token=self._bearer_token,
        )

110 

111 

class AsyncCompletions(_AsyncCompletions):
    """Async chat-completions resource instrumented with Weflayr telemetry.

    Args:
        client: The parent ``openai.AsyncOpenAI`` instance.
        intake_url: Weflayr intake API base URL.
        client_id: Client identifier sent in the endpoint path.
        bearer_token: Bearer token for the Authorization header.
    """

    def __init__(
        self,
        client: _AsyncOpenAI,
        *,
        intake_url: str = INTAKE_URL,
        client_id: str = CLIENT_ID,
        bearer_token: str = CLIENT_SECRET,
    ) -> None:
        super().__init__(client)
        # Telemetry configuration reused by every create() call.
        self._intake_url = intake_url
        self._client_id = client_id
        self._bearer_token = bearer_token

    async def create(self, **kwargs: Any):
        """Send an async chat completion request with telemetry.

        Args:
            model (str): OpenAI model identifier (e.g. ``"gpt-4o-mini"``).
            messages (list[dict]): Conversation history in OpenAI message format.
            tags (dict, optional): Arbitrary key/value metadata. Stripped before the upstream call.
            **kwargs: Any other kwargs accepted by ``openai`` ``chat.completions.create()``.

        Returns:
            ``ChatCompletion``: The upstream OpenAI response, unmodified.
        """
        # Telemetry-only argument; must not reach the OpenAI SDK.
        tags = kwargs.pop("tags", {})
        before_payload = {
            "model": kwargs.get("model"),
            "message_count": len(kwargs.get("messages", [])),
            "tags": tags,
        }

        def _upstream():
            # Returns the coroutine un-awaited; track_async awaits it.
            # Two-argument super(): the zero-argument form is unavailable
            # inside a nested function.
            return super(AsyncCompletions, self).create(**kwargs)

        return await track_async(
            url=self._intake_url,
            call="chat.completions.create_async",
            before=before_payload,
            fn=_upstream,
            after_extra=_usage,
            client_id=self._client_id,
            bearer_token=self._bearer_token,
        )

157 

158 

class OpenAI(_OpenAI):
    """Drop-in replacement for ``openai.OpenAI`` with Weflayr telemetry.

    Args:
        api_key (str): Your OpenAI API key.
        intake_url (str, optional): Weflayr intake API base URL.
        client_id (str, optional): Client identifier sent in the endpoint path.
        bearer_token (str, optional): Bearer token for the Authorization header.
        **kwargs: Forwarded unchanged to ``openai.OpenAI``.

    Example::

        client = OpenAI(api_key="sk-...")
        client.chat.completions.create(model="gpt-4o-mini", messages=[...])
    """

    def __init__(
        self,
        *args: Any,
        intake_url: str = INTAKE_URL,
        client_id: str = CLIENT_ID,
        bearer_token: str = CLIENT_SECRET,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)
        # Swap in the instrumented resource so every completion call is tracked.
        instrumented = Completions(
            self,
            intake_url=intake_url,
            client_id=client_id,
            bearer_token=bearer_token,
        )
        self.chat.completions = instrumented

187 

188 

class AsyncOpenAI(_AsyncOpenAI):
    """Drop-in replacement for ``openai.AsyncOpenAI`` with Weflayr telemetry.

    Args:
        api_key (str): Your OpenAI API key.
        intake_url (str, optional): Weflayr intake API base URL.
        client_id (str, optional): Client identifier sent in the endpoint path.
        bearer_token (str, optional): Bearer token for the Authorization header.
        **kwargs: Forwarded unchanged to ``openai.AsyncOpenAI``.

    Example::

        client = AsyncOpenAI(api_key="sk-...")
        await client.chat.completions.create(model="gpt-4o-mini", messages=[...])
    """

    def __init__(
        self,
        *args: Any,
        intake_url: str = INTAKE_URL,
        client_id: str = CLIENT_ID,
        bearer_token: str = CLIENT_SECRET,
        **kwargs: Any,
    ) -> None:
        super().__init__(*args, **kwargs)
        # Swap in the instrumented resource so every completion call is tracked.
        instrumented = AsyncCompletions(
            self,
            intake_url=intake_url,
            client_id=client_id,
            bearer_token=bearer_token,
        )
        self.chat.completions = instrumented