Skip to content

Commit da8df91

Browse files
committed
Remove excessive logging from AI moderator service
Eliminated several info- and error-level log statements covering content size, chunking, and API call details, reducing log verbosity and the potential exposure of sensitive content. This streamlines the logging output and keeps the focus on essential warnings and errors.
1 parent b72d7ee commit da8df91

1 file changed

Lines changed: 0 additions & 20 deletions

File tree

app/services/ai/ai_moderator.py

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -300,9 +300,6 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
300300
MAX_CHARS_PER_CHUNK = 50000 # ~50k tokens worst case, safe for any prompt
301301
content_chars = len(content)
302302

303-
current_app.logger.info(
304-
f"Content: {content_tokens} tokens (tiktoken), {content_chars} chars")
305-
306303
# Force chunking if content is too large BY CHARACTER COUNT
307304
if content_chars > MAX_CHARS_PER_CHUNK:
308305
current_app.logger.warning(
@@ -313,9 +310,6 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
313310
for i in range(0, content_chars, MAX_CHARS_PER_CHUNK):
314311
chunks.append(content[i:i + MAX_CHARS_PER_CHUNK])
315312

316-
current_app.logger.info(
317-
f"Split content into {len(chunks)} chunks by character count")
318-
319313
chunk_results = []
320314
for i, chunk in enumerate(chunks):
321315
result = self._analyze_with_custom_prompt(chunk, custom_prompt)
@@ -332,7 +326,6 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
332326

333327
# STEP 2: For default moderation, run baseline check first
334328
# Note: OpenAI moderation API has its own limits, but typically handles larger content
335-
current_app.logger.info(f"Content has {content_tokens} tokens")
336329
baseline_result = self._run_baseline_moderation(content)
337330
if baseline_result['decision'] == 'rejected':
338331
return baseline_result
@@ -346,8 +339,6 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
346339
else:
347340
# Split content and analyze each chunk
348341
chunks = self.split_text_into_chunks(content, max_content_tokens)
349-
current_app.logger.info(
350-
f"Split content into {len(chunks)} chunks for enhanced moderation")
351342

352343
chunk_results = []
353344
for i, chunk in enumerate(chunks):
@@ -444,17 +435,6 @@ def _analyze_with_custom_prompt(self, content, custom_prompt):
444435
445436
Does content violate this rule? JSON only:"""
446437

447-
# CRITICAL: Log what we're actually sending to identify cost issue
448-
current_app.logger.error(
449-
f"[COST INVESTIGATION] Sending to OpenAI - "
450-
f"Content length: {len(content)} chars, "
451-
f"Custom prompt length: {len(custom_prompt)} chars, "
452-
f"System message length: {len(system_message)} chars, "
453-
f"User message length: {len(user_message)} chars, "
454-
f"Content type: {type(content).__name__}, "
455-
f"Content first 200 chars: {content[:200] if len(content) > 0 else 'EMPTY'}"
456-
)
457-
458438
# Wrap API call with retry logic
459439
def make_api_call():
460440
client = self.client_manager.get_client()

0 commit comments

Comments (0)