Commit 6e078bf

Fix Gemini API content filter handling (#746)
## Summary

- avoid AttributeError when Gemini API returns `None` for chat message
- return empty output if message is filtered
- add regression test

## Testing

- `make format`
- `make lint`
- `make mypy`
- `make tests`

Towards #744
1 parent a96108e commit 6e078bf
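
For context, here is a minimal, self-contained sketch of the failure mode this commit guards against. `FakeMessage` and `FakeChoice` below are hypothetical stand-ins for the OpenAI SDK's `ChatCompletionMessage` and `Choice`, not types from this repo; the real fix applies the same `message is not None` guard inside `get_response`:

```python
# Sketch of the failure mode (hypothetical stand-ins, not SDK types):
# Gemini's OpenAI-compatible endpoint can return a choice whose message is
# None when the response is content-filtered, so unconditionally calling
# choice.message.model_dump() raises AttributeError.
from dataclasses import dataclass
from typing import Optional


@dataclass
class FakeMessage:
    content: str

    def model_dump(self) -> dict:
        # Mimics pydantic's model_dump() on ChatCompletionMessage.
        return {"role": "assistant", "content": self.content}


@dataclass
class FakeChoice:
    finish_reason: str
    message: Optional[FakeMessage] = None


def output_for(choice: FakeChoice) -> list[dict]:
    # Before the fix: choice.message.model_dump() -> AttributeError.
    # After the fix: a filtered (None) message yields an empty output.
    if choice.message is None:
        return []
    return [choice.message.model_dump()]


assert output_for(FakeChoice(finish_reason="content_filter")) == []
assert output_for(FakeChoice(finish_reason="stop", message=FakeMessage("hi"))) == [
    {"role": "assistant", "content": "hi"}
]
```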

File tree

2 files changed: +51 −5 lines

- src/agents/models/openai_chatcompletions.py
- tests/test_openai_chatcompletions.py


src/agents/models/openai_chatcompletions.py

Lines changed: 17 additions & 5 deletions
```diff
@@ -71,12 +71,22 @@ async def get_response(
             stream=False,
         )
 
+        first_choice = response.choices[0]
+        message = first_choice.message
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Received model response")
         else:
-            logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
-            )
+            if message is not None:
+                logger.debug(
+                    "LLM resp:\n%s\n",
+                    json.dumps(message.model_dump(), indent=2),
+                )
+            else:
+                logger.debug(
+                    "LLM resp had no message. finish_reason: %s",
+                    first_choice.finish_reason,
+                )
 
         usage = (
             Usage(
@@ -101,13 +111,15 @@ async def get_response(
             else Usage()
         )
         if tracing.include_data():
-            span_generation.span_data.output = [response.choices[0].message.model_dump()]
+            span_generation.span_data.output = (
+                [message.model_dump()] if message is not None else []
+            )
             span_generation.span_data.usage = {
                 "input_tokens": usage.input_tokens,
                 "output_tokens": usage.output_tokens,
             }
 
-        items = Converter.message_to_output_items(response.choices[0].message)
+        items = Converter.message_to_output_items(message) if message is not None else []
 
         return ModelResponse(
             output=items,
```
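
A note on the logging rewrite above: besides the `None` guard, it switches from an f-string to `logging`'s %-style arguments, so string interpolation is deferred until a handler actually emits the record; `json.dumps(...)` itself is still evaluated eagerly at the call site, but only on the non-`None` branch where `model_dump()` is safe to call.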

tests/test_openai_chatcompletions.py

Lines changed: 34 additions & 0 deletions
```diff
@@ -191,6 +191,40 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert fn_call_item.arguments == "{'x':1}"
 
 
+@pytest.mark.allow_call_model_methods
+@pytest.mark.asyncio
+async def test_get_response_with_no_message(monkeypatch) -> None:
+    """If the model returns no message, get_response should return an empty output."""
+    msg = ChatCompletionMessage(role="assistant", content="ignored")
+    choice = Choice(index=0, finish_reason="content_filter", message=msg)
+    choice.message = None  # type: ignore[assignment]
+    chat = ChatCompletion(
+        id="resp-id",
+        created=0,
+        model="fake",
+        object="chat.completion",
+        choices=[choice],
+        usage=None,
+    )
+
+    async def patched_fetch_response(self, *args, **kwargs):
+        return chat
+
+    monkeypatch.setattr(OpenAIChatCompletionsModel, "_fetch_response", patched_fetch_response)
+    model = OpenAIProvider(use_responses=False).get_model("gpt-4")
+    resp: ModelResponse = await model.get_response(
+        system_instructions=None,
+        input="",
+        model_settings=ModelSettings(),
+        tools=[],
+        output_schema=None,
+        handoffs=[],
+        tracing=ModelTracing.DISABLED,
+        previous_response_id=None,
+    )
+    assert resp.output == []
+
+
 @pytest.mark.asyncio
 async def test_fetch_response_non_stream(monkeypatch) -> None:
     """
```
