-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Expand file tree
/
Copy pathoutgoing.audio.interval.html
More file actions
141 lines (119 loc) · 5.53 KB
/
outgoing.audio.interval.html
File metadata and controls
141 lines (119 loc) · 5.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
<!doctype html>
<html lang="en-US">
<head>
<link href="/assets/index.css" rel="stylesheet" type="text/css" />
<script crossorigin="anonymous" src="https://unpkg.com/@babel/standalone@7.8.7/babel.min.js"></script>
<script crossorigin="anonymous" src="https://unpkg.com/react@16.8.6/umd/react.production.min.js"></script>
<script crossorigin="anonymous" src="https://unpkg.com/react-dom@16.8.6/umd/react-dom.production.min.js"></script>
<script crossorigin="anonymous" src="/test-harness.js"></script>
<script crossorigin="anonymous" src="/test-page-object.js"></script>
<script crossorigin="anonymous" src="/__dist__/webchat-es5.js"></script>
<script crossorigin="anonymous" src="/__dist__/botframework-webchat-fluent-theme.production.min.js"></script>
</head>
<body>
<main id="webchat"></main>
<!--
Test: Audio chunks outgoing flow
This test validates:
1. Mic on → state becomes "listening"
2. Audio chunks are sent via postActivity (fire-and-forget)
3. Verify chunk structure (type, name, value.content)
4. Verify ~100ms interval between chunk timestamps
Note: Since voice activities use postVoiceActivity (fire-and-forget),
we intercept postActivity to capture outgoing chunks for verification.
-->
<script type="module">
// Install deterministic test doubles BEFORE Web Chat boots (module scripts
// execute before the babel script below): a fake microphone/getUserMedia
// source and a fake audio sink, so the voice pipeline runs without real
// audio hardware. NOTE(review): call order mirrors the import order —
// presumably capture must be mocked before playback; confirm before reordering.
import { setupMockMediaDevices } from '/assets/esm/speechToSpeech/mockMediaDevices.js';
import { setupMockAudioPlayback } from '/assets/esm/speechToSpeech/mockAudioPlayback.js';
setupMockMediaDevices();
setupMockAudioPlayback();
</script>
<script type="text/babel">
run(async function () {
const {
React,
ReactDOM: { render },
WebChat: { FluentThemeProvider, ReactWebChat, testIds }
} = window;
const { directLine, store } = testHelpers.createDirectLineEmulator();
// Multi-modal experience: server announces audio, consumer opted into voice mode.
directLine.setCapability('getVoiceConfiguration', { sampleRate: 24000, chunkIntervalMs: 100 }, { emitEvent: false });
directLine.setCapability('getIsVoiceModeEnabled', true, { emitEvent: false });
// Intercept postActivity to capture outgoing voice chunks
const capturedChunks = [];
const originalPostActivity = directLine.postActivity.bind(directLine);
directLine.postActivity = (activity) => {
if (activity.name === 'media.chunk' && activity.type === 'event') {
capturedChunks.push({
...activity,
capturedAt: Date.now()
});
}
return originalPostActivity(activity);
};
render(
<FluentThemeProvider variant="fluent">
<ReactWebChat
directLine={directLine}
store={store}
/>
</FluentThemeProvider>,
document.getElementById('webchat')
);
await pageConditions.uiConnected();
const micButton = document.querySelector(`[data-testid="${testIds.sendBoxMicrophoneButton}"]`);
const textArea = document.querySelector(`[data-testid="${testIds.sendBoxTextBox}"]`);
expect(micButton).toBeTruthy();
expect(textArea).toBeTruthy();
// ===== STEP 1: Start recording =====
await host.click(micButton);
await pageConditions.became(
'Recording started',
() => micButton.getAttribute('aria-label')?.includes('Microphone on'),
1000
);
// VERIFY: State is "listening"
await pageConditions.became(
'State: listening → Placeholder: "Listening..."',
() => textArea.getAttribute('placeholder') === 'Listening...',
1000
);
// ===== STEP 2: Wait for multiple chunks =====
// Default chunk interval is 100ms, wait for at least 5 chunks for better interval calculation
await pageConditions.became(
'Multiple audio chunks sent via postActivity',
() => capturedChunks.length >= 5,
2000
);
// ===== STEP 3: Stop recording =====
await host.click(micButton);
await pageConditions.became(
'Recording stopped',
() => micButton.getAttribute('aria-label')?.includes('Microphone off'),
1000
);
// ===== STEP 4: Verify captured chunks =====
expect(capturedChunks.length).toBeGreaterThanOrEqual(5);
// ===== STEP 5: Verify chunk structure =====
const sampleChunk = capturedChunks[0];
expect(sampleChunk.type).toBe('event');
expect(sampleChunk.name).toBe('media.chunk');
expect(sampleChunk.value).toBeTruthy();
expect(sampleChunk.value.content).toBeTruthy();
expect(sampleChunk.value.contentType).toBe('audio/webm');
// ===== STEP 6: Verify interval using capturedAt timestamps =====
const timestamps = capturedChunks.map(c => c.capturedAt).sort((a, b) => a - b);
// Calculate intervals between consecutive chunks
const intervals = [];
for (let i = 1; i < timestamps.length; i++) {
intervals.push(timestamps[i] - timestamps[i - 1]);
}
// Calculate average interval
const avgInterval = intervals.reduce((sum, i) => sum + i, 0) / intervals.length;
// Verify average interval is approximately 100ms (allow 50-150ms range for test stability)
expect(avgInterval).toBeGreaterThanOrEqual(50);
expect(avgInterval).toBeLessThanOrEqual(150);
});
</script>
</body>
</html>