Skip to content

Commit e46afd3

Browse files
committed
test: update output.usage -> output.generation.usage
Rebased, and now the new tests need updating.

Signed-off-by: Mark Sturdevant <mark.sturdevant@ibm.com>
1 parent f3c9d85 commit e46afd3

1 file changed

Lines changed: 16 additions & 4 deletions

File tree

test/cli/test_serve_streaming_tool_calls.py

Lines changed: 16 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,7 @@
55
"""
66

77
import json
8-
from unittest.mock import AsyncMock, Mock
8+
from unittest.mock import Mock
99

1010
import pytest
1111

@@ -328,7 +328,11 @@ class TestStreamingUsageField:
328328
async def test_usage_included_when_stream_options_set(self):
329329
"""Test that usage is included in final chunk when stream_options.include_usage=True."""
330330
output = ModelOutputThunk("Response")
331-
output.usage = {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
331+
output.generation.usage = {
332+
"prompt_tokens": 10,
333+
"completion_tokens": 5,
334+
"total_tokens": 15,
335+
}
332336

333337
chunks = []
334338
async for chunk_data in stream_chat_completion_chunks(
@@ -353,7 +357,11 @@ async def test_usage_included_when_stream_options_set(self):
353357
async def test_usage_excluded_when_stream_options_not_set(self):
354358
"""Test that usage is excluded when stream_options is None."""
355359
output = ModelOutputThunk("Response")
356-
output.usage = {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
360+
output.generation.usage = {
361+
"prompt_tokens": 10,
362+
"completion_tokens": 5,
363+
"total_tokens": 15,
364+
}
357365

358366
chunks = []
359367
async for chunk_data in stream_chat_completion_chunks(
@@ -375,7 +383,11 @@ async def test_usage_excluded_when_stream_options_not_set(self):
375383
async def test_usage_excluded_when_include_usage_false(self):
376384
"""Test that usage is excluded when stream_options.include_usage=False."""
377385
output = ModelOutputThunk("Response")
378-
output.usage = {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
386+
output.generation.usage = {
387+
"prompt_tokens": 10,
388+
"completion_tokens": 5,
389+
"total_tokens": 15,
390+
}
379391

380392
chunks = []
381393
async for chunk_data in stream_chat_completion_chunks(

0 commit comments

Comments (0)