Skip to content

Commit 1d82dae

Browse files
committed
Increase concurrency and DB pool sizes for scalability
Raised ThreadPoolExecutor worker limits from 50 to 200 in AI moderation and rule processing to improve OpenAI API concurrency. Increased SQLAlchemy pool_size to 50, pool_timeout to 60, max_overflow to 100, and DB_THREAD_POOL_WORKERS to 100 for better database performance under load.
1 parent 464fb0e commit 1d82dae

3 files changed

Lines changed: 10 additions & 10 deletions

File tree

app/services/ai/ai_moderator.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -325,9 +325,9 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
325325
chunks.append(content[i:i + MAX_CHARS_PER_CHUNK])
326326

327327
# Process all chunks IN PARALLEL for maximum speed
328-
# Increased from 10 to 50 workers for better concurrency under load
328+
# Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
329329
chunk_results = []
330-
with ThreadPoolExecutor(max_workers=min(len(chunks), 50)) as executor:
330+
with ThreadPoolExecutor(max_workers=min(len(chunks), 200)) as executor:
331331
# Submit all chunks at once with context wrapper
332332
future_to_chunk = {
333333
executor.submit(self._context_wrapper, self._analyze_with_custom_prompt, chunk, custom_prompt): i
@@ -375,9 +375,9 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
375375
chunks = self.split_text_into_chunks(content, max_content_tokens)
376376

377377
# Process all chunks IN PARALLEL for maximum speed
378-
# Increased from 10 to 50 workers for better concurrency under load
378+
# Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
379379
chunk_results = []
380-
with ThreadPoolExecutor(max_workers=min(len(chunks), 50)) as executor:
380+
with ThreadPoolExecutor(max_workers=min(len(chunks), 200)) as executor:
381381
# Submit all chunks at once with context wrapper
382382
future_to_chunk = {
383383
executor.submit(self._context_wrapper, self._run_enhanced_default_moderation, chunk): i

app/services/moderation/rule_processor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -103,8 +103,8 @@ def process_single_ai_rule(rule):
103103
return (rule.id, None)
104104

105105
# Execute in parallel
106-
# Increased from 10 to 50 workers for better concurrency under load
107-
with ThreadPoolExecutor(max_workers=min(len(ai_rules), 50)) as executor:
106+
# Increased from 10 -> 50 -> 200 workers for OpenAI API concurrency
107+
with ThreadPoolExecutor(max_workers=min(len(ai_rules), 200)) as executor:
108108
futures = {executor.submit(
109109
process_single_ai_rule, rule): rule for rule in ai_rules}
110110

config/config.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,16 +41,16 @@ class Config:
4141

4242
# SQLAlchemy connection pool configuration
4343
SQLALCHEMY_ENGINE_OPTIONS = {
44-
'pool_size': 5, # Number of connections to maintain in pool
45-
'pool_timeout': 30, # Seconds to wait for connection from pool
44+
'pool_size': 50, # Number of connections to maintain in pool (was 5)
45+
'pool_timeout': 60, # Seconds to wait for connection from pool (was 30)
4646
'pool_recycle': 1800, # Seconds before recreating connection (30 min)
4747
'pool_pre_ping': True, # Verify connections before use
48-
'max_overflow': 10, # Additional connections beyond pool_size
48+
'max_overflow': 100, # Additional connections beyond pool_size (was 10)
4949
'echo': bool(os.environ.get('SQL_DEBUG', False)) # SQL debugging via env var
5050
}
5151

5252
# ThreadPoolExecutor configuration for async database operations
53-
DB_THREAD_POOL_WORKERS = int(os.environ.get('DB_THREAD_POOL_WORKERS', '8'))
53+
DB_THREAD_POOL_WORKERS = int(os.environ.get('DB_THREAD_POOL_WORKERS', '100'))
5454

5555

5656
class DevelopmentConfig(Config):

0 commit comments

Comments (0)