
Commit 30ee31b

fix(core): backfill OpenAI responses model for streaming responses
1 parent 7e3614c commit 30ee31b
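
Streamed Responses API events carry the model on the response object, so once the stream has been consumed the instrumentation can backfill gen_ai.request.model and rename the span to "chat <model>", even when no request-side model was recorded. A minimal sketch of that backfill step, using an illustrative span-like interface and helper name rather than the SDK's actual Span type:

// Illustrative sketch only, not the SDK internals: SpanLike and
// backfillRequestModel are hypothetical names used for this example.
interface SpanLike {
  setAttributes(attributes: Record<string, string>): void;
  updateName(name: string): void;
}

interface StreamState {
  sawResponsesApiEvent: boolean;
  responseModel?: string;
}

// Once the stream has finished, copy the model reported by the Responses API
// onto the span as the request model and rename the span to match.
function backfillRequestModel(span: SpanLike, state: StreamState): void {
  if (state.sawResponsesApiEvent && state.responseModel) {
    span.setAttributes({ 'gen_ai.request.model': state.responseModel });
    span.updateName(`chat ${state.responseModel}`);
  }
}

The actual change below does the same work inside instrumentStream's finally block, guarded by a new sawResponsesApiEvent flag so that plain Chat Completions streams are left untouched.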

File tree

2 files changed: +89 -0 lines changed


packages/core/src/tracing/openai/streaming.ts

Lines changed: 13 additions & 0 deletions
@@ -1,11 +1,14 @@
 import { captureException } from '../../exports';
 import { SPAN_STATUS_ERROR } from '../../tracing';
 import type { Span } from '../../types-hoist/span';
+import { updateSpanName } from '../../utils/spanUtils';
 import {
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
   GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
   GEN_AI_RESPONSE_STREAMING_ATTRIBUTE,
   GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
   GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
+  OPENAI_OPERATIONS,
 } from '../ai/gen-ai-attributes';
 import { RESPONSE_EVENT_TYPES } from './constants';
 import type {
@@ -26,6 +29,8 @@ import {
  * State object used to accumulate information from a stream of OpenAI events/chunks.
  */
 interface StreamingState {
+  /** Whether this stream contained Responses API events. */
+  sawResponsesApiEvent: boolean;
   /** Types of events encountered in the stream. */
   eventTypes: string[];
   /** Collected response text fragments (for output recording). */
@@ -222,6 +227,7 @@ export async function* instrumentStream<T>(
   recordOutputs: boolean,
 ): AsyncGenerator<T, void, unknown> {
   const state: StreamingState = {
+    sawResponsesApiEvent: false,
     eventTypes: [],
     responseTexts: [],
     finishReasons: [],
@@ -240,12 +246,19 @@ export async function* instrumentStream<T>(
       if (isChatCompletionChunk(event)) {
         processChatCompletionChunk(event as ChatCompletionChunk, state, recordOutputs);
       } else if (isResponsesApiStreamEvent(event)) {
+        state.sawResponsesApiEvent = true;
         processResponsesApiEvent(event as ResponseStreamingEvent, state, recordOutputs, span);
       }
       yield event;
     }
   } finally {
     setCommonResponseAttributes(span, state.responseId, state.responseModel, state.responseTimestamp);
+    if (state.sawResponsesApiEvent && state.responseModel) {
+      span.setAttributes({
+        [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: state.responseModel,
+      });
+      updateSpanName(span, `${OPENAI_OPERATIONS.CHAT} ${state.responseModel}`);
+    }
     setTokenUsageAttributes(span, state.promptTokens, state.completionTokens, state.totalTokens);
 
     span.setAttributes({
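
Because the backfill sits in the finally block, it only runs after the wrapped stream has been fully consumed (or torn down). A hedged usage sketch follows; only the instrumentStream import comes from this change, while the surrounding function, the import path, and the stream/span arguments are assumptions about how callers wire it up:

// Hypothetical glue code for illustration; the file location and names are
// assumed, not part of this commit.
import { instrumentStream } from './streaming';

async function forwardInstrumented(
  responsesStream: Parameters<typeof instrumentStream>[0],
  span: Parameters<typeof instrumentStream>[1],
): Promise<void> {
  // Events pass through unchanged; gen_ai.request.model and the span name
  // are backfilled only once iteration completes.
  for await (const event of instrumentStream(responsesStream, span, true)) {
    void event; // forward the event to the caller here
  }
}

The new tests below assert exactly this behavior for a Responses API stream, and its absence for Chat Completions chunks.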
Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
+import { describe, expect, it, vi } from 'vitest';
+import { instrumentStream } from '../../../src/tracing/openai/streaming';
+import type { ChatCompletionChunk, ResponseStreamingEvent } from '../../../src/tracing/openai/types';
+
+async function collectStream<T>(stream: AsyncIterable<T>): Promise<T[]> {
+  const events: T[] = [];
+  for await (const event of stream) {
+    events.push(event);
+  }
+  return events;
+}
+
+describe('openai-streaming', () => {
+  it('should backfill the request model and span name for streamed Responses API events', async () => {
+    async function* createStream(): AsyncGenerator<ResponseStreamingEvent> {
+      yield {
+        type: 'response.completed',
+        response: {
+          object: 'response',
+          id: 'resp_123',
+          model: 'gpt-4.1-mini',
+          created_at: 1704067200,
+          status: 'completed',
+        },
+      } as ResponseStreamingEvent;
+    }
+
+    const span = {
+      setAttributes: vi.fn(),
+      setStatus: vi.fn(),
+      updateName: vi.fn(),
+      end: vi.fn(),
+    };
+
+    const events = await collectStream(
+      instrumentStream(createStream(), span as unknown as Parameters<typeof instrumentStream>[1], false),
+    );
+
+    expect(events).toHaveLength(1);
+    expect(span.setAttributes).toHaveBeenCalledWith({
+      'gen_ai.request.model': 'gpt-4.1-mini',
+    });
+    expect(span.updateName).toHaveBeenCalledWith('chat gpt-4.1-mini');
+    expect(span.end).toHaveBeenCalled();
+  });
+
+  it('should not backfill the request model or rename the span for chat completion streams', async () => {
+    async function* createStream(): AsyncGenerator<ChatCompletionChunk> {
+      yield {
+        object: 'chat.completion.chunk',
+        id: 'chatcmpl_123',
+        created: 1704067200,
+        model: 'gpt-4o-2024-08-06',
+        choices: [],
+      } as ChatCompletionChunk;
+    }
+
+    const span = {
+      setAttributes: vi.fn(),
+      setStatus: vi.fn(),
+      updateName: vi.fn(),
+      end: vi.fn(),
+    };
+
+    const events = await collectStream(
+      instrumentStream(createStream(), span as unknown as Parameters<typeof instrumentStream>[1], false),
+    );
+
+    expect(events).toHaveLength(1);
+    expect(span.setAttributes).not.toHaveBeenCalledWith({
+      'gen_ai.request.model': 'gpt-4o-2024-08-06',
+    });
+    expect(span.updateName).not.toHaveBeenCalledWith('chat gpt-4o-2024-08-06');
+    expect(span.end).toHaveBeenCalled();
+  });
+});
