
Commit 7e4ca4e

Reduce ThreadPoolExecutor max_workers to 100
Lowered the maximum number of workers in ThreadPoolExecutor from 200 to 100 in ai_moderator.py and rule_processor.py to optimize resource usage and prevent potential overload during parallel processing.
1 parent 1d82dae · commit 7e4ca4e
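For context, this change keeps the existing sizing pattern (the pool is the smaller of the task count and a hard cap) and only tightens the cap from 200 to 100. A minimal, self-contained sketch of that bounded fan-out pattern follows; fetch_result, MAX_WORKERS, and process_chunks are illustrative names, not code from this repository:

from concurrent.futures import ThreadPoolExecutor, as_completed

MAX_WORKERS = 100  # hard cap; matches the limit this commit settles on

def fetch_result(chunk: str) -> str:
    # Stand-in for an API-bound call such as an OpenAI moderation request.
    return chunk.upper()

def process_chunks(chunks: list[str]) -> list[str]:
    if not chunks:
        return []
    results: list[str] = [""] * len(chunks)
    # Never spawn more threads than there are chunks, and never more than the cap.
    with ThreadPoolExecutor(max_workers=min(len(chunks), MAX_WORKERS)) as executor:
        future_to_index = {
            executor.submit(fetch_result, chunk): i
            for i, chunk in enumerate(chunks)
        }
        # Collect as futures finish; the index map restores submission order.
        for future in as_completed(future_to_index):
            results[future_to_index[future]] = future.result()
    return results

Writing results by submission index makes completion order irrelevant, which is the same role the future_to_chunk mapping plays in the diffs below.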

2 files changed: 3 additions & 6 deletions

app/services/ai/ai_moderator.py

Lines changed: 2 additions & 4 deletions
@@ -325,9 +325,8 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
             chunks.append(content[i:i + MAX_CHARS_PER_CHUNK])
 
         # Process all chunks IN PARALLEL for maximum speed
-        # Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
         chunk_results = []
-        with ThreadPoolExecutor(max_workers=min(len(chunks), 200)) as executor:
+        with ThreadPoolExecutor(max_workers=min(len(chunks), 100)) as executor:
             # Submit all chunks at once with context wrapper
             future_to_chunk = {
                 executor.submit(self._context_wrapper, self._analyze_with_custom_prompt, chunk, custom_prompt): i

@@ -375,9 +374,8 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
         chunks = self.split_text_into_chunks(content, max_content_tokens)
 
         # Process all chunks IN PARALLEL for maximum speed
-        # Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
         chunk_results = []
-        with ThreadPoolExecutor(max_workers=min(len(chunks), 200)) as executor:
+        with ThreadPoolExecutor(max_workers=min(len(chunks), 100)) as executor:
             # Submit all chunks at once with context wrapper
             future_to_chunk = {
                 executor.submit(self._context_wrapper, self._run_enhanced_default_moderation, chunk): i

app/services/moderation/rule_processor.py

Lines changed: 1 addition & 2 deletions
@@ -103,8 +103,7 @@ def process_single_ai_rule(rule):
             return (rule.id, None)
 
         # Execute in parallel
-        # Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
-        with ThreadPoolExecutor(max_workers=min(len(ai_rules), 200)) as executor:
+        with ThreadPoolExecutor(max_workers=min(len(ai_rules), 100)) as executor:
            futures = {executor.submit(
                 process_single_ai_rule, rule): rule for rule in ai_rules}
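The rule_processor.py hunk keys its futures dict by the rule object itself rather than by a chunk index. A hedged sketch of that collection pattern end to end, assuming an illustrative Rule model and a stubbed process_single_ai_rule (only the executor lines above come from the actual diff):

from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from typing import Optional

@dataclass
class Rule:
    # Illustrative stand-in for the app's AI rule model.
    id: int
    prompt: str

def process_single_ai_rule(rule: Rule) -> tuple[int, Optional[str]]:
    # Stub: the real function evaluates the rule's prompt and returns a verdict.
    return (rule.id, None)

def run_ai_rules(ai_rules: list[Rule]) -> dict[int, Optional[str]]:
    results: dict[int, Optional[str]] = {}
    if not ai_rules:
        return results
    # Same bounded fan-out as ai_moderator.py: at most 100 concurrent workers.
    with ThreadPoolExecutor(max_workers=min(len(ai_rules), 100)) as executor:
        futures = {executor.submit(process_single_ai_rule, rule): rule
                   for rule in ai_rules}
        for future in as_completed(futures):
            rule_id, verdict = future.result()
            results[rule_id] = verdict
    return results

Mapping each future back to its rule lets the caller attribute a result (or an exception) to a specific rule even though completion order is nondeterministic.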
