Skip to content

Commit 9e632da

Browse files
committed
Parallelize chunk moderation and restrict connection indicator
Updated ai_moderator.py to process content chunks in parallel using ThreadPoolExecutor for improved performance. Modified base.html to display the real-time connection indicator only for authenticated users.
1 parent da8df91 commit 9e632da

2 files changed

Lines changed: 28 additions & 15 deletions

File tree

app/services/ai/ai_moderator.py

Lines changed: 25 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
import json
22
import time
3+
from concurrent.futures import ThreadPoolExecutor, as_completed
34

45
import openai
56
import tiktoken
@@ -310,14 +311,19 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
310311
for i in range(0, content_chars, MAX_CHARS_PER_CHUNK):
311312
chunks.append(content[i:i + MAX_CHARS_PER_CHUNK])
312313

314+
# Process all chunks IN PARALLEL for maximum speed
313315
chunk_results = []
314-
for i, chunk in enumerate(chunks):
315-
result = self._analyze_with_custom_prompt(chunk, custom_prompt)
316-
chunk_results.append(result)
317-
318-
# Early exit if chunk is rejected
319-
if result['decision'] == 'rejected':
320-
break
316+
with ThreadPoolExecutor(max_workers=min(len(chunks), 10)) as executor:
317+
# Submit all chunks at once
318+
future_to_chunk = {
319+
executor.submit(self._analyze_with_custom_prompt, chunk, custom_prompt): i
320+
for i, chunk in enumerate(chunks)
321+
}
322+
323+
# Collect results as they complete
324+
for future in as_completed(future_to_chunk):
325+
result = future.result()
326+
chunk_results.append(result)
321327

322328
return self._combine_chunk_results(chunk_results, len(content))
323329
else:
@@ -340,14 +346,19 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
340346
# Split content and analyze each chunk
341347
chunks = self.split_text_into_chunks(content, max_content_tokens)
342348

349+
# Process all chunks IN PARALLEL for maximum speed
343350
chunk_results = []
344-
for i, chunk in enumerate(chunks):
345-
result = self._run_enhanced_default_moderation(chunk)
346-
chunk_results.append(result)
347-
348-
# Early exit if chunk is rejected (for efficiency)
349-
if result['decision'] == 'rejected':
350-
break
351+
with ThreadPoolExecutor(max_workers=min(len(chunks), 10)) as executor:
352+
# Submit all chunks at once
353+
future_to_chunk = {
354+
executor.submit(self._run_enhanced_default_moderation, chunk): i
355+
for i, chunk in enumerate(chunks)
356+
}
357+
358+
# Collect results as they complete
359+
for future in as_completed(future_to_chunk):
360+
result = future.result()
361+
chunk_results.append(result)
351362

352363
return self._combine_chunk_results(chunk_results, len(content))
353364

app/templates/base.html

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -40,12 +40,14 @@
4040
<link href="{{ url_for('static', filename='css/utilities/responsive.css') }}" rel="stylesheet">
4141
</head>
4242
<body>
43-
<!-- Real-time connection indicator -->
43+
<!-- Real-time connection indicator (only for authenticated users) -->
44+
{% if current_user.is_authenticated %}
4445
<div id="connection-status" class="real-time-indicator">
4546
<span class="badge bg-secondary" style="font-size: 0.75rem; padding: 0.25rem 0.5rem;">
4647
<i class="fas fa-circle"></i> <span class="connection-text">Connecting...</span>
4748
</span>
4849
</div>
50+
{% endif %}
4951

5052
<!-- Toast Notification Container -->
5153
<div aria-live="polite" aria-atomic="true" class="position-fixed top-0 end-0 p-3" style="z-index: 1080; min-width: 300px;">

0 commit comments

Comments (0)