diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py
index 8853a0749f..78ee10db2f 100644
--- a/src/openai/lib/_parsing/_responses.py
+++ b/src/openai/lib/_parsing/_responses.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import json
-from typing import TYPE_CHECKING, List, Iterable, cast
+from typing import TYPE_CHECKING, List, Iterable, Optional, cast
 from typing_extensions import TypeVar, assert_never
 
 import pydantic
@@ -138,10 +138,13 @@ def parse_response(
     )
 
 
-def parse_text(text: str, text_format: type[TextFormatT] | Omit) -> TextFormatT | None:
+def parse_text(text: Optional[str], text_format: type[TextFormatT] | Omit) -> TextFormatT | None:
     if not is_given(text_format):
         return None
 
+    if text is None:
+        return None
+
     if is_basemodel_type(text_format):
         return cast(TextFormatT, model_parse_json(text_format, text))
 
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index ada0783bce..e32a017e85 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -315,7 +315,7 @@ def output_text(self) -> str:
         for output in self.output:
             if output.type == "message":
                 for content in output.content:
-                    if content.type == "output_text":
+                    if content.type == "output_text" and content.text is not None:
                         texts.append(content.text)
 
         return "".join(texts)
diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py
index 2386fcb3c0..cc739e450c 100644
--- a/src/openai/types/responses/response_output_text.py
+++ b/src/openai/types/responses/response_output_text.py
@@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
 
-    text: str
+    text: Optional[str]
     """The text output from the model."""
 
     type: Literal["output_text"]
diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py
index 8e5f16df95..c83f2fd6dd 100644
--- a/tests/lib/responses/test_responses.py
+++ b/tests/lib/responses/test_responses.py
@@ -4,6 +4,7 @@
 
 import pytest
 from respx import MockRouter
+from pydantic import BaseModel
 from inline_snapshot import snapshot
 
 from openai import OpenAI, AsyncOpenAI
@@ -41,6 +42,47 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
     )
 
 
+@pytest.mark.respx(base_url=base_url)
+def test_output_text_ignores_null_text_items(client: OpenAI, respx_mock: MockRouter) -> None:
+    response = make_snapshot_request(
+        lambda c: c.responses.create(
+            model="gpt-4o-mini",
+            input="hi",
+        ),
+        content_snapshot=snapshot(
+            '{"id": "resp_null_output_text", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_null_output_text", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": null}, {"type": "output_text", "annotations": [], "logprobs": [], "text": "hello"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 1, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 1, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 2}, "user": null, "metadata": {}}'
+        ),
+        path="/responses",
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert response.output_text == "hello"
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_ignores_null_output_text_items(client: OpenAI, respx_mock: MockRouter) -> None:
+    class Payload(BaseModel):
+        message: str
+
+    response = make_snapshot_request(
+        lambda c: c.responses.parse(
+            model="gpt-4o-mini",
+            input="hi",
+            text_format=Payload,
+        ),
+        content_snapshot=snapshot(
+            '{"id": "resp_null_output_text_parse", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_null_output_text_parse", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": null}, {"type": "output_text", "annotations": [], "logprobs": [], "text": "{\\"message\\":\\"hello\\"}"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 1, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 1, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 2}, "user": null, "metadata": {}}'
+        ),
+        path="/responses",
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert response.output[0].content[0].parsed is None
+    assert response.output[0].content[1].parsed == Payload(message="hello")
+
+
 @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
 def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client