
Commit c5ad918

Improve auth redirects, AI moderation, and cache handling
Redirect authenticated users from login/register to dashboard. Increase AI moderator chunk size and add early exit on rejection for faster moderation. Expand result cache size and cleanup interval, refine aggressive cleanup to only remove expired entries, and add a warning when the cache is full but no expired entries are found.
1 parent b26a25b commit c5ad918

3 files changed: 57 additions & 16 deletions


app/routes/auth.py

Lines changed: 8 additions & 0 deletions
@@ -13,6 +13,10 @@
 
 @auth_bp.route('/login', methods=['GET', 'POST'])
 async def login():
+    # Redirect to dashboard if already logged in
+    if current_user.is_authenticated:
+        return redirect(url_for('dashboard.index'))
+
     if request.method == 'POST':
         # Input validation and sanitization
         if request.is_json:

@@ -97,6 +101,10 @@ async def login():
 
 @auth_bp.route('/register', methods=['GET', 'POST'])
 async def register():
+    # Redirect to dashboard if already logged in
+    if current_user.is_authenticated:
+        return redirect(url_for('dashboard.index'))
+
     # Check if registration is enabled
     if not SystemSettings.is_registration_enabled():
         flash('Registration is currently disabled. Please contact an administrator.', 'error')
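The guard added to both routes leans on Flask-Login's current_user proxy plus Flask's redirect and url_for helpers, which the diff assumes are already imported in auth.py. A minimal sketch of the same pattern in isolation; the 'dashboard.index' endpoint and the synchronous view function are assumptions for illustration, since the real routes are async:

# Sketch of the already-authenticated guard, assuming Flask-Login is configured
# and a 'dashboard.index' endpoint exists (both are assumptions, not shown in the diff).
from flask import Blueprint, redirect, url_for
from flask_login import current_user

auth_bp = Blueprint('auth', __name__)

@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
    # Logged-in users skip the form and go straight to their dashboard.
    if current_user.is_authenticated:
        return redirect(url_for('dashboard.index'))
    return 'render login form here'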

app/services/ai/ai_moderator.py

Lines changed: 33 additions & 3 deletions
@@ -308,7 +308,8 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
             if custom_prompt:
                 # CRITICAL FIX: Hard limit on character count (tiktoken is broken for some content)
                 # Assume worst case: 1 char = 1 token for safety
-                MAX_CHARS_PER_CHUNK = 50000  # ~50k tokens worst case, safe for any prompt
+                # Increased from 50k to 100k for better performance (fewer chunks = faster)
+                MAX_CHARS_PER_CHUNK = 100000  # ~100k tokens worst case, safe for large context models
                 content_chars = len(content)
 
                 # Force chunking if content is too large BY CHARACTER COUNT
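The raised ceiling only matters because oversized content gets split into chunks before moderation. A rough sketch of chunking by a hard character cap; the helper below is hypothetical, since the commit itself only raises the constant from 50,000 to 100,000:

# Hypothetical helper illustrating chunking by a hard character cap.
MAX_CHARS_PER_CHUNK = 100000

def split_by_char_count(content, max_chars=MAX_CHARS_PER_CHUNK):
    """Yield successive slices of content, each at most max_chars long."""
    for start in range(0, len(content), max_chars):
        yield content[start:start + max_chars]

chunks = list(split_by_char_count('x' * 250000))
assert [len(c) for c in chunks] == [100000, 100000, 50000]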
@@ -331,11 +332,25 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
                             for i, chunk in enumerate(chunks)
                         }
 
-                        # Collect results as they complete
+                        # Collect results as they complete with early exit on rejection
                         for future in as_completed(future_to_chunk):
                             result = future.result()
                             chunk_results.append(result)
 
+                            # Early exit: if any chunk is rejected, cancel remaining and return immediately
+                            if result.get('decision') == 'rejected':
+                                current_app.logger.info(
+                                    f"Early exit: Chunk rejected, cancelling {
+                                        len(future_to_chunk) - len(chunk_results)} remaining chunks")
+
+                                # Cancel all pending futures
+                                for f in future_to_chunk:
+                                    if not f.done():
+                                        f.cancel()
+
+                                # Return immediately with rejection
+                                return self._combine_chunk_results(chunk_results, len(content))
+
                         return self._combine_chunk_results(chunk_results, len(content))
                 else:
                     # Content is small enough, process normally
@@ -366,11 +381,26 @@ def moderate_content(self, content, content_type='text', custom_prompt=None):
                             for i, chunk in enumerate(chunks)
                         }
 
-                        # Collect results as they complete
+                        # Collect results as they complete with early exit on rejection
                         for future in as_completed(future_to_chunk):
                             result = future.result()
                             chunk_results.append(result)
 
+                            # Early exit: if any chunk is rejected, cancel remaining and return immediately
+                            if result.get('decision') == 'rejected':
+                                current_app.logger.info(
+                                    f"Early exit: Chunk rejected, cancelling {
+                                        len(future_to_chunk) -
+                                        len(chunk_results)} remaining chunks")
+
+                                # Cancel all pending futures
+                                for f in future_to_chunk:
+                                    if not f.done():
+                                        f.cancel()
+
+                                # Return immediately with rejection
+                                return self._combine_chunk_results(chunk_results, len(content))
+
                         return self._combine_chunk_results(chunk_results, len(content))
 
         except (openai.APIConnectionError, openai.APITimeoutError, openai.InternalServerError) as e:
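Both early-exit blocks follow the same pattern: consume futures with as_completed and, on the first rejection, cancel whatever has not started yet. Note that Future.cancel() only stops futures that have not begun executing; chunks already in flight still run to completion, their results are simply never collected. A self-contained sketch of the pattern with the moderation call stubbed out; the names here are illustrative, not the module's actual API:

# Standalone sketch of early exit on the first rejected chunk.
from concurrent.futures import ThreadPoolExecutor, as_completed

def moderate_chunk(chunk):
    # Stand-in for the real per-chunk moderation request.
    return {'decision': 'rejected' if 'bad' in chunk else 'approved', 'chunk': chunk}

def moderate_chunks(chunks, max_workers=4):
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_chunk = {executor.submit(moderate_chunk, c): i for i, c in enumerate(chunks)}
        for future in as_completed(future_to_chunk):
            result = future.result()
            results.append(result)
            if result.get('decision') == 'rejected':
                # cancel() is a no-op for futures that are already running or done.
                for f in future_to_chunk:
                    if not f.done():
                        f.cancel()
                return results  # early exit with partial results
    return results

print(moderate_chunks(['clean text', 'bad text', 'more text']))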

app/services/ai/result_cache.py

Lines changed: 16 additions & 13 deletions
@@ -13,10 +13,10 @@ class ResultCache:
     _shared_cache_ttl = 3600  # 1 hour
     _current_request_stores = 0  # Track stores per request
     _cache_lock = RLock()  # Thread-safe operations
-    _max_cache_size = 500  # Reduced from 1000 for high-volume deployments
-    _cleanup_threshold = 400  # Start cleanup earlier
+    _max_cache_size = 10000  # Maximum cache entries (increased to handle high volume)
+    _cleanup_threshold = 9000  # Start cleanup when reaching 90% capacity
     _last_cleanup_time = 0
-    _cleanup_interval = 180  # Cleanup every 3 minutes (more frequent)
+    _cleanup_interval = 900  # Check for expired entries every 15 minutes
 
     def __init__(self, cache_ttl=3600):  # 1 hour default
         self._cache_ttl = cache_ttl
@@ -144,24 +144,27 @@ def _cleanup_expired_entries(self):
             current_app.logger.info(f"Cleaned up {len(expired_keys)} expired cache entries")
 
     def _aggressive_cleanup(self):
-        """Perform aggressive cleanup when cache is full"""
+        """Perform aggressive cleanup when cache is full - ONLY remove expired entries"""
        current_time = time.time()
         entries_to_remove = []
 
-        # Remove entries older than 50% of TTL
+        # ONLY remove entries that are actually expired (>= 1 hour old)
+        # This guarantees all entries last at least the full TTL
         for key, data in list(ResultCache._shared_cache.items()):
-            if current_time - data['timestamp'] >= ResultCache._shared_cache_ttl * 0.5:
+            if current_time - data['timestamp'] >= ResultCache._shared_cache_ttl:
                 entries_to_remove.append((key, data['timestamp']))
 
-        # If not enough old entries, remove oldest 25%
-        if len(entries_to_remove) < len(ResultCache._shared_cache) * 0.25:
-            all_entries = [(key, data['timestamp']) for key, data in ResultCache._shared_cache.items()]
-            all_entries.sort(key=lambda x: x[1])  # Sort by timestamp
-            entries_to_remove.extend(all_entries[:len(ResultCache._shared_cache) // 4])
+        # If no expired entries and cache is full, log warning but don't remove valid entries
+        if not entries_to_remove:
+            current_app.logger.warning(
+                f"Cache full at {len(ResultCache._shared_cache)} entries with no expired entries. "
+                "Consider increasing max_cache_size or reducing TTL."
+            )
+            return
 
-        # Remove the entries
+        # Remove the entries (increased limit from 500 to 1000 for larger cache)
         removed_count = 0
-        for key, _ in entries_to_remove[:500]:  # Limit to prevent blocking
+        for key, _ in entries_to_remove[:1000]:  # Limit to prevent blocking
             if ResultCache._shared_cache.pop(key, None) is not None:
                 removed_count += 1
