@@ -16,6 +16,8 @@ class AIModerator:
1616 def __init__ (self ):
1717 self .client_manager = OpenAIClient ()
1818 self .cache = ResultCache ()
19+ # Capture Flask app instance for context propagation in parallel processing
20+ self .app = current_app ._get_current_object ()
1921 # Load model and token settings from config
2022 cfg = current_app .config
2123 self .model_name = cfg .get ('OPENAI_CHAT_MODEL' , 'gpt-5-2025-08-07' )
@@ -30,6 +32,14 @@ def __init__(self):
3032 except (KeyError , ValueError ):
3133 self .tokenizer = tiktoken .get_encoding ("cl100k_base" )
3234
def _context_wrapper(self, func, *args, **kwargs):
    """Execute *func* under the Flask app context captured in __init__.

    Worker threads spawned by ThreadPoolExecutor do not inherit the
    spawning request's application context, so any callable that reads
    current_app must be routed through this wrapper.
    """
    ctx = self.app.app_context()
    with ctx:
        result = func(*args, **kwargs)
    return result
3343 def count_tokens (self , text ):
3444 """Count the number of tokens in a text string"""
3545 try :
@@ -303,8 +313,8 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
303313
304314 # Force chunking if content is too large BY CHARACTER COUNT
305315 if content_chars > MAX_CHARS_PER_CHUNK :
306- current_app .logger .warning (
307- f"FORCING CHUNKING: Content too large ( { content_chars } chars > { MAX_CHARS_PER_CHUNK } ) " )
316+ current_app .logger .debug ( f"Chunking content: { content_chars } chars split into {
317+ ( content_chars // MAX_CHARS_PER_CHUNK ) + 1 } chunks " )
308318
309319 # Split by character count, not tokens
310320 chunks = []
@@ -314,9 +324,9 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
314324 # Process all chunks IN PARALLEL for maximum speed
315325 chunk_results = []
316326 with ThreadPoolExecutor (max_workers = min (len (chunks ), 10 )) as executor :
317- # Submit all chunks at once
327+ # Submit all chunks at once with context wrapper
318328 future_to_chunk = {
319- executor .submit (self ._analyze_with_custom_prompt , chunk , custom_prompt ): i
329+ executor .submit (self ._context_wrapper , self . _analyze_with_custom_prompt , chunk , custom_prompt ): i
320330 for i , chunk in enumerate (chunks )
321331 }
322332
@@ -349,9 +359,9 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
349359 # Process all chunks IN PARALLEL for maximum speed
350360 chunk_results = []
351361 with ThreadPoolExecutor (max_workers = min (len (chunks ), 10 )) as executor :
352- # Submit all chunks at once
362+ # Submit all chunks at once with context wrapper
353363 future_to_chunk = {
354- executor .submit (self ._run_enhanced_default_moderation , chunk ): i
364+ executor .submit (self ._context_wrapper , self . _run_enhanced_default_moderation , chunk ): i
355365 for i , chunk in enumerate (chunks )
356366 }
357367
0 commit comments