-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Expand file tree
/
Copy pathmultimodal.text.with.voice.html
More file actions
197 lines (163 loc) · 8.36 KB
/
multimodal.text.with.voice.html
File metadata and controls
197 lines (163 loc) · 8.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
<!doctype html>
<html lang="en-US">
<head>
<link href="/assets/index.css" rel="stylesheet" type="text/css" />
<script crossorigin="anonymous" src="https://unpkg.com/@babel/standalone@7.8.7/babel.min.js"></script>
<script crossorigin="anonymous" src="https://unpkg.com/react@16.8.6/umd/react.production.min.js"></script>
<script crossorigin="anonymous" src="https://unpkg.com/react-dom@16.8.6/umd/react-dom.production.min.js"></script>
<script crossorigin="anonymous" src="/test-harness.js"></script>
<script crossorigin="anonymous" src="/test-page-object.js"></script>
<script crossorigin="anonymous" src="/__dist__/webchat-es5.js"></script>
<script crossorigin="anonymous" src="/__dist__/botframework-webchat-fluent-theme.production.min.js"></script>
</head>
<body>
<main id="webchat"></main>
<!--
Test: Multi-modal experience — text and voice coexist in the same send box.
Verifies the realistic interleaving:
1. Server announces audio capability + consumer opts into voice mode (`enableVoiceMode`).
2. Text turn: user types → bot replies as text. Both ride the WebSocket fire-and-forget,
saga renders user message optimistically, bot text arrives as a normal incoming activity.
3. Voice turn: user clicks mic → user speaks → bot replies via media.end transcript.
While recording, the text input is read-only and the send button is disabled.
4. Mic toggled off → text turn again (user types → bot replies as text).
5. Snapshot captures the full mixed transcript.
-->
<script type="module">
// Install test doubles for the browser media APIs before Web Chat starts its voice turn.
// NOTE(review): behavior inferred from module names — presumably mockMediaDevices fakes
// getUserMedia/microphone capture and mockAudioPlayback fakes audio output, so the voice
// portion of this test can run headless without real audio hardware; confirm in the modules.
import { setupMockMediaDevices } from '/assets/esm/speechToSpeech/mockMediaDevices.js';
import { setupMockAudioPlayback } from '/assets/esm/speechToSpeech/mockAudioPlayback.js';
// Run synchronously at module load so the mocks are in place before the babel script below.
setupMockMediaDevices();
setupMockAudioPlayback();
</script>
<script type="text/babel">
run(async function () {
const {
React,
ReactDOM: { render },
WebChat: { FluentThemeProvider, ReactWebChat, testIds }
} = window;
const { directLine, store } = testHelpers.createDirectLineEmulator();
// Mirror real DirectLine when `enableVoiceMode` is true: server announces audio,
// and outgoing traffic flows over the WebSocket without echo back.
directLine.setCapability('getVoiceConfiguration', { sampleRate: 24000, chunkIntervalMs: 100 }, { emitEvent: false });
directLine.setCapability('getIsVoiceModeEnabled', true, { emitEvent: false });
// Capture outgoing activities to assert WebSocket-style fire-and-forget delivery.
const outgoingActivities = [];
const originalPostActivity = directLine.postActivity.bind(directLine);
directLine.postActivity = activity => {
outgoingActivities.push(activity);
return originalPostActivity(activity);
};
render(
<FluentThemeProvider variant="fluent">
<ReactWebChat directLine={directLine} store={store} />
</FluentThemeProvider>,
document.getElementById('webchat')
);
await pageConditions.uiConnected();
const micButton = document.querySelector(`[data-testid="${testIds.sendBoxMicrophoneButton}"]`);
const sendButton = document.querySelector(`[data-testid="${testIds.sendBoxSendButton}"]`);
const textArea = document.querySelector(`[data-testid="${testIds.sendBoxTextBox}"]`);
const isSendDisabled = () => sendButton.getAttribute('aria-disabled') === 'true';
// GIVEN: Multi-modal idle — mic, send button and a writable text box all coexist.
expect(micButton).toBeTruthy();
expect(sendButton).toBeTruthy();
expect(isSendDisabled()).toBe(false);
expect(textArea.hasAttribute('readonly')).toBe(false);
// ===== TURN 1: Text in → Text out =====
await pageObjects.sendMessageViaSendBox('What is the weather today?', { waitForSend: false });
await pageConditions.became(
'Outgoing text activity captured',
() => outgoingActivities.some(a => a.type === 'message' && a.text === 'What is the weather today?'),
1000
);
await pageConditions.numActivitiesShown(1);
await directLine.emulateIncomingActivity('The weather today is sunny with a high of 75 degrees.');
await pageConditions.numActivitiesShown(2);
// ===== TURN 2: Voice in → Voice out =====
await host.click(micButton);
await pageConditions.became(
'Recording started',
() => micButton.getAttribute('aria-label')?.includes('Microphone on'),
2000
);
// While recording, text path is locked down.
await pageConditions.became(
'Recording active disables text path',
() => isSendDisabled() && textArea.hasAttribute('readonly'),
2000
);
// User speech is identified, processed, then transcript arrives.
await directLine.emulateIncomingVoiceActivity({
type: 'event',
name: 'request.update',
from: { role: 'bot' },
value: { state: 'detected', message: 'Your request is identified' },
valueType: 'application/vnd.microsoft.activity.azure.directline.audio.state'
});
await directLine.emulateIncomingVoiceActivity({
type: 'event',
name: 'request.update',
from: { role: 'bot' },
value: { state: 'processing', message: 'Your request is being processed' },
valueType: 'application/vnd.microsoft.activity.azure.directline.audio.state'
});
await directLine.emulateIncomingVoiceActivity({
type: 'event',
name: 'media.end',
value: { transcription: 'Will it rain tomorrow?', origin: 'user' },
valueType: 'application/vnd.microsoft.activity.azure.directline.audio.transcript'
});
await pageConditions.numActivitiesShown(3);
// Bot replies as voice (audio chunk + transcript).
await directLine.emulateIncomingVoiceActivity({
type: 'event',
name: 'media.chunk',
from: { role: 'bot' },
value: { content: 'AAAAAA==', contentType: 'audio/webm' },
valueType: 'application/vnd.microsoft.activity.azure.directline.audio.chunk'
});
await directLine.emulateIncomingVoiceActivity({
type: 'event',
name: 'media.end',
from: { role: 'bot' },
value: { transcription: 'No rain expected tomorrow.', origin: 'agent' },
valueType: 'application/vnd.microsoft.activity.azure.directline.audio.transcript'
});
await pageConditions.numActivitiesShown(4);
// Toggle mic off — back to idle text mode.
await host.click(micButton);
await pageConditions.became(
'Recording stopped',
() => micButton.getAttribute('aria-label')?.includes('Microphone off'),
2000
);
await pageConditions.became(
'Idle re-enables text path',
() => !isSendDisabled() && !textArea.hasAttribute('readonly'),
2000
);
// ===== TURN 3: Text in → Text out =====
await pageObjects.sendMessageViaSendBox('Thanks!', { waitForSend: false });
await pageConditions.became(
'Second outgoing text captured',
() => outgoingActivities.some(a => a.type === 'message' && a.text === 'Thanks!'),
1000
);
await pageConditions.numActivitiesShown(5);
await directLine.emulateIncomingActivity("You're welcome!");
await pageConditions.numActivitiesShown(6);
// ===== Verify final transcript order =====
const activities = pageElements.activityContents();
expect(activities[0]).toHaveProperty('textContent', 'What is the weather today?');
expect(activities[1]).toHaveProperty('textContent', 'The weather today is sunny with a high of 75 degrees.');
expect(activities[2]).toHaveProperty('textContent', 'Will it rain tomorrow?');
expect(activities[3]).toHaveProperty('textContent', 'No rain expected tomorrow.');
expect(activities[4]).toHaveProperty('textContent', 'Thanks!');
expect(activities[5]).toHaveProperty('textContent', "You're welcome!");
await pageConditions.scrollToBottomCompleted();
await host.snapshot('local');
});
</script>
</body>
</html>