@@ -4889,6 +4889,135 @@ [new ChatMessage(ChatRole.User, "test")],
48894889 } ) ;
48904890 }
48914891
[Fact]
public async Task StreamingErrorUpdate_DocumentedFormat_ParsesCorrectly()
{
    // Request body the client is expected to send for a streaming call.
    const string Input = """
        {
        "model":"gpt-4o-mini",
        "input":[{"type":"message","role":"user","content":[{"type":"input_text","text":"test"}]}],
        "stream":true
        }
        """;

    // SSE stream using the documented error-event shape: message/code/param
    // appear as top-level properties on the "error" event payload.
    const string Output = """
        event: response.created
        data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_001","object":"response","created_at":1741892091,"status":"in_progress","model":"gpt-4o-mini","output":[]}}

        event: error
        data: {"type":"error","sequence_number":1,"message":"Rate limit exceeded","code":"rate_limit_exceeded","param":"requests"}

        event: response.failed
        data: {"type":"response.failed","sequence_number":2,"response":{"id":"resp_001","object":"response","created_at":1741892091,"status":"failed","model":"gpt-4o-mini","output":[],"error":{"code":"rate_limit_exceeded","message":"Rate limit exceeded"}}}


        """;

    using VerbatimHttpHandler handler = new(Input, Output);
    using HttpClient httpClient = new(handler);
    using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini");

    // Drain the streaming response so every update is observable.
    List<ChatResponseUpdate> collected = [];
    await foreach (var item in client.GetStreamingResponseAsync("test"))
    {
        collected.Add(item);
    }

    // Exactly which update carries the error is not asserted; we only require
    // that some update surfaced an ErrorContent.
    var withError = collected.FirstOrDefault(static u => u.Contents.Any(static c => c is ErrorContent));
    Assert.NotNull(withError);

    ErrorContent error = withError.Contents.OfType<ErrorContent>().First();
    Assert.Equal("Rate limit exceeded", error.Message);
    Assert.Equal("rate_limit_exceeded", error.ErrorCode);
    Assert.Equal("requests", error.Details);
}
4934+
[Fact]
public async Task StreamingErrorUpdate_ActualErroneousFormat_ParsesCorrectly()
{
    // Request body the client is expected to send for a streaming call.
    const string Input = """
        {
        "model":"gpt-4o-mini",
        "input":[{"type":"message","role":"user","content":[{"type":"input_text","text":"test"}]}],
        "stream":true
        }
        """;

    // SSE stream using the shape observed in practice: the "error" event nests
    // message/code/param under an "error" object rather than at the top level.
    const string Output = """
        event: response.created
        data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_002","object":"response","created_at":1741892091,"status":"in_progress","model":"gpt-4o-mini","output":[]}}

        event: error
        data: {"type":"error","sequence_number":1,"error":{"message":"Content filter triggered","code":"content_filter","param":"safety"}}

        event: response.failed
        data: {"type":"response.failed","sequence_number":2,"response":{"id":"resp_002","object":"response","created_at":1741892091,"status":"failed","model":"gpt-4o-mini","output":[],"error":{"code":"content_filter","message":"Content filter triggered"}}}


        """;

    using VerbatimHttpHandler handler = new(Input, Output);
    using HttpClient httpClient = new(handler);
    using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini");

    // Drain the streaming response so every update is observable.
    List<ChatResponseUpdate> collected = [];
    await foreach (var item in client.GetStreamingResponseAsync("test"))
    {
        collected.Add(item);
    }

    // The nested-error shape must still be surfaced as an ErrorContent.
    var withError = collected.FirstOrDefault(static u => u.Contents.Any(static c => c is ErrorContent));
    Assert.NotNull(withError);

    ErrorContent error = withError.Contents.OfType<ErrorContent>().First();
    Assert.Equal("Content filter triggered", error.Message);
    Assert.Equal("content_filter", error.ErrorCode);
    Assert.Equal("safety", error.Details);
}
4977+
[Fact]
public async Task StreamingErrorUpdate_NoErrorInformation_HandlesGracefully()
{
    // Request body the client is expected to send for a streaming call.
    const string Input = """
        {
        "model":"gpt-4o-mini",
        "input":[{"type":"message","role":"user","content":[{"type":"input_text","text":"test"}]}],
        "stream":true
        }
        """;

    // SSE stream whose "error" event carries no message/code/param at all;
    // parsing must not throw and must still yield an (empty) ErrorContent.
    const string Output = """
        event: response.created
        data: {"type":"response.created","sequence_number":0,"response":{"id":"resp_003","object":"response","created_at":1741892091,"status":"in_progress","model":"gpt-4o-mini","output":[]}}

        event: error
        data: {"type":"error","sequence_number":1}

        event: response.failed
        data: {"type":"response.failed","sequence_number":2,"response":{"id":"resp_003","object":"response","created_at":1741892091,"status":"failed","model":"gpt-4o-mini","output":[]}}


        """;

    using VerbatimHttpHandler handler = new(Input, Output);
    using HttpClient httpClient = new(handler);
    using IChatClient client = CreateResponseClient(httpClient, "gpt-4o-mini");

    // Drain the streaming response so every update is observable.
    List<ChatResponseUpdate> collected = [];
    await foreach (var item in client.GetStreamingResponseAsync("test"))
    {
        collected.Add(item);
    }

    var withError = collected.FirstOrDefault(static u => u.Contents.Any(static c => c is ErrorContent));
    Assert.NotNull(withError);

    // With nothing supplied on the event, all fields should be null or empty.
    ErrorContent error = withError.Contents.OfType<ErrorContent>().First();
    Assert.True(string.IsNullOrEmpty(error.Message));
    Assert.True(string.IsNullOrEmpty(error.ErrorCode));
    Assert.True(string.IsNullOrEmpty(error.Details));
}
5020+
48925021 [ Fact ]
48935022 public async Task StreamingResponseWithAnnotations_HandlesCorrectly ( )
48945023 {
0 commit comments