Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions src/praisonai/praisonai/agents_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,10 +232,8 @@ def __init__(self, agent_file, framework, config_list, log_level=None, agent_cal
self.tool_registry = ToolRegistry()
self.tool_registry.register_builtin_autogen_adapters()

# Get framework adapter and validate availability
# Get framework adapter (availability already validated at CLI entry)
self.framework_adapter = self._get_framework_adapter(framework)
if not self.framework_adapter.is_available():
raise ImportError(f"Framework '{framework}' is not available. Please install the required dependencies.")

def _get_framework_adapter(self, framework: str) -> FrameworkAdapter:
"""
Expand Down Expand Up @@ -597,10 +595,10 @@ def generate_crew_and_kickoff(self):
self.framework = framework
self.framework_adapter = self._get_framework_adapter(framework)

# Final availability check
if not self.framework_adapter.is_available():
raise ImportError(f"Framework '{framework}' is not available. Please install the required dependencies.")
# Validate framework availability for non-CLI callers
from .framework_adapters.validators import assert_framework_available
assert_framework_available(framework)

self.logger.info(f"Using framework: {framework}")
return self.framework_adapter.run(
Comment thread
greptile-apps[bot] marked this conversation as resolved.
config,
Expand Down Expand Up @@ -892,10 +890,13 @@ def _resolve(key, env_var=None, default=None):
api_type = _resolve("api_type", default="openai").lower()
model_name = _resolve("model", default="gpt-4o-mini")
api_key = _resolve("api_key", env_var="OPENAI_API_KEY")
# Use resolver for consistent env-var precedence as fallback
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()

base_url = (model_config.get("base_url")
or yaml_llm.get("base_url")
or os.environ.get("OPENAI_BASE_URL")
or os.environ.get("OPENAI_API_BASE"))
or ep.base_url)
Comment on lines 892 to +899
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major | ⚡ Quick win

Use the resolved endpoint as the single fallback tuple here.

This only takes ep.base_url, while model_name and api_key still use separate fallback chains. In the AG2 path that can mix config from different sources and break the precedence the new resolver is supposed to enforce.

Suggested change
         api_type = _resolve("api_type", default="openai").lower()
-        model_name = _resolve("model", default="gpt-4o-mini")
-        api_key = _resolve("api_key", env_var="OPENAI_API_KEY")
-        # Use resolver for consistent env-var precedence as fallback
         from praisonai.llm.env import resolve_llm_endpoint
         ep = resolve_llm_endpoint()
-        
-        base_url = (model_config.get("base_url")
-                    or yaml_llm.get("base_url")
-                    or ep.base_url)
+        model_name = _resolve("model", default=ep.model)
+        api_key = _resolve("api_key", env_var="OPENAI_API_KEY") or ep.api_key
+        base_url = (
+            yaml_llm.get("base_url")
+            or first_role_llm.get("base_url")
+            or model_config.get("base_url")
+            or ep.base_url
+        )
🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@src/praisonai/praisonai/agents_generator.py` around lines 889 - 896, The
current code mixes separate fallback chains (api_key from
_resolve/OPENAI_API_KEY and base_url from ep.base_url) which can mix sources;
instead use the resolved endpoint object from resolve_llm_endpoint() as the
single fallback source for all related values. Update the assignments that set
api_key, base_url and model_name (references: api_key, base_url, model_name,
model_config, yaml_llm, resolve_llm_endpoint, ep) so each checks model_config
and yaml_llm first and then falls back to the corresponding attribute on ep
(e.g., ep.api_key, ep.base_url, ep.model_name) rather than calling separate
resolvers or env-var lookups, ensuring a single resolver-enforced precedence
chain.


# Build LLMConfig — Bedrock needs no api_key
if api_type == "bedrock":
Expand Down
16 changes: 6 additions & 10 deletions src/praisonai/praisonai/auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,19 +467,15 @@ def __init__(self, config_list: Optional[List[Dict]] = None):
Args:
config_list: Optional LLM configuration list
"""
# Support multiple environment variable patterns for better compatibility
model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
base_url = (
os.environ.get("OPENAI_BASE_URL") or
os.environ.get("OPENAI_API_BASE") or
os.environ.get("OLLAMA_API_BASE", "https://api.openai.com/v1")
)
# Resolve LLM endpoint configuration from environment variables
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()

self.config_list = config_list or [
{
'model': model_name,
'base_url': base_url,
'api_key': os.environ.get("OPENAI_API_KEY")
'model': ep.model,
'base_url': ep.base_url,
'api_key': ep.api_key
}
]

Expand Down
9 changes: 6 additions & 3 deletions src/praisonai/praisonai/bots/_approval_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,12 +94,15 @@ async def classify_with_llm(
)

try:
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()

client = OpenAI(
api_key=_os.environ.get("OPENAI_API_KEY", ""),
base_url=_os.environ.get("OPENAI_BASE_URL"),
api_key=ep.api_key or "",
base_url=ep.base_url,
)
response = client.chat.completions.create(
model=_os.environ.get("APPROVAL_LLM_MODEL", "gpt-4o-mini"),
model=_os.environ.get("APPROVAL_LLM_MODEL", ep.model),
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
Expand Down
5 changes: 3 additions & 2 deletions src/praisonai/praisonai/browser/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -2707,9 +2707,10 @@ async def run_goal():
if debug:
console.print("[dim] [DEBUG] Page loaded, content script should be active[/dim]")
break
except:
except (asyncio.TimeoutError, Exception) as e:
if debug:
console.print("[dim] [DEBUG] Page load event timeout, continuing anyway[/dim]")
console.print(f"[dim] [DEBUG] Page load event timeout/error ({type(e).__name__}), continuing anyway[/dim]")
logging.debug("Page load event timeout/error: %s", e)

# Wait for content script to inject
await asyncio.sleep(2)
Expand Down
15 changes: 13 additions & 2 deletions src/praisonai/praisonai/capabilities/realtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,22 @@ def realtime_connect(
session_id = f"realtime-{uuid.uuid4().hex[:12]}"

# Build WebSocket URL
base = api_base or os.environ.get("OPENAI_API_BASE", "wss://api.openai.com")
if api_base:
base = api_base
else:
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint(default_base="wss://api.openai.com")
base = ep.base_url
Comment on lines +68 to +70
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 OLLAMA_API_BASE now silently reroutes realtime WebSocket connections

resolve_llm_endpoint checks OPENAI_BASE_URL → OPENAI_API_BASE → OLLAMA_API_BASE in that order. Before this PR the realtime module only consulted OPENAI_API_BASE; OLLAMA_API_BASE was never considered. Any user with OLLAMA_API_BASE=http://localhost:11434 set for regular Ollama requests will now see their realtime connection routed to ws://localhost:11434/v1/realtime?model=… instead of OpenAI, and the /v1-stripping guard does not help because the Ollama URL has no /v1 to strip.

The realtime endpoint needs to restrict its lookup to the OpenAI-specific vars (OPENAI_BASE_URL, OPENAI_API_BASE) only, or accept an explicit default_base without falling through to OLLAMA_API_BASE.


if base.startswith("http"):
base = base.replace("https://", "wss://").replace("http://", "ws://")

url = f"{base.rstrip('/')}/v1/realtime?model={model}"
# Strip any existing /v1 path to avoid double /v1/v1/realtime
base = base.rstrip('/')
if base.endswith('/v1'):
base = base[:-3]

url = f"{base}/v1/realtime?model={model}"

return RealtimeSession(
id=session_id,
Expand Down
4 changes: 2 additions & 2 deletions src/praisonai/praisonai/cli/features/agent_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -434,8 +434,8 @@ def _handle_describe(unknown_args, state_manager, daemon_manager) -> int:
hours = int(uptime_delta.total_seconds() // 3600)
minutes = int((uptime_delta.total_seconds() % 3600) // 60)
uptime = f"{hours}h {minutes}m"
except:
pass
except (ValueError, TypeError) as e:
logger.debug("Failed to parse started_at timestamp '%s': %s", started_at, e)

# Display detailed info
print(f"\n{'='*60}")
Expand Down
35 changes: 17 additions & 18 deletions src/praisonai/praisonai/cli/features/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,12 @@
Usage: praisonai "prompt" --metrics
"""

import logging
from typing import Any, Dict, Tuple
from .base import FlagHandler

logger = logging.getLogger(__name__)


class MetricsHandler(FlagHandler):
"""
Expand Down Expand Up @@ -133,33 +136,29 @@ def extract_metrics_from_agent(self, agent: Any) -> Dict[str, Any]:
ctx = litellm._thread_context
if hasattr(ctx, 'usage'):
usage = ctx.usage
if hasattr(usage, 'prompt_tokens'):
metrics['prompt_tokens'] = usage.prompt_tokens
if hasattr(usage, 'completion_tokens'):
metrics['completion_tokens'] = usage.completion_tokens
if hasattr(usage, 'total_tokens'):
metrics['total_tokens'] = usage.total_tokens
except:
pass
metrics['prompt_tokens'] = getattr(usage, 'prompt_tokens', None)
metrics['completion_tokens'] = getattr(usage, 'completion_tokens', None)
metrics['total_tokens'] = getattr(usage, 'total_tokens', None)
except ImportError:
logger.debug("litellm not installed; skipping token metrics")
except AttributeError as e:
logger.debug("litellm internals changed (%s); skipping token metrics", e)

# Try to get cost from litellm cost tracking
try:
import litellm
from litellm import completion_cost
# If we have token counts, estimate cost
if 'prompt_tokens' in metrics and 'completion_tokens' in metrics:
if metrics.get('prompt_tokens') and metrics.get('completion_tokens'):
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Changing 'prompt_tokens' in metrics to metrics.get('prompt_tokens') silently skips cost calculation when either token count is 0 (falsy). If the litellm block sets keys to None via getattr(..., None), the cost block should guard on is not None rather than truthiness.

Suggested change
if metrics.get('prompt_tokens') and metrics.get('completion_tokens'):
if metrics.get('prompt_tokens') is not None and metrics.get('completion_tokens') is not None:

model = metrics.get('model', 'gpt-4o-mini')
try:
cost = completion_cost(
metrics['cost'] = completion_cost(
model=model,
prompt_tokens=metrics['prompt_tokens'],
completion_tokens=metrics['completion_tokens']
completion_tokens=metrics['completion_tokens'],
)
metrics['cost'] = cost
except:
pass
except:
pass
except Exception as e:
logger.debug("cost calc failed for model=%s: %s", model, e)
except ImportError:
logger.debug("litellm.completion_cost unavailable; skipping cost metrics")

return metrics

Expand Down
32 changes: 17 additions & 15 deletions src/praisonai/praisonai/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -272,29 +272,26 @@ def __init__(self, agent_file="agents.yaml", framework="", auto=False, init=Fals
self.agent_yaml = agent_yaml
self._interactive_mode = False # Flag for interactive TUI mode
# Create config_list with AutoGen compatibility
# Support multiple environment variable patterns for better compatibility
# Priority order: MODEL_NAME > OPENAI_MODEL_NAME for model selection
model_name = os.environ.get("MODEL_NAME") or os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")

# Priority order for base_url: OPENAI_BASE_URL > OPENAI_API_BASE > OLLAMA_API_BASE
# OPENAI_BASE_URL is the standard OpenAI SDK environment variable
base_url = (
os.environ.get("OPENAI_BASE_URL") or
os.environ.get("OPENAI_API_BASE") or
os.environ.get("OLLAMA_API_BASE", "https://api.openai.com/v1")
)
# Resolve LLM endpoint configuration from environment variables
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()

api_key = os.environ.get("OPENAI_API_KEY")
self.config_list = [
{
'model': model_name,
'base_url': base_url,
'api_key': api_key,
'model': ep.model,
'base_url': ep.base_url,
'api_key': ep.api_key,
'api_type': 'openai' # AutoGen expects this field
}
]
self.agent_file = agent_file
self.framework = framework

# Validate framework availability early to fail fast
if self.framework:
from praisonai.framework_adapters.validators import assert_framework_available
assert_framework_available(self.framework)
Comment on lines 287 to +293
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 assert_framework_available is called in both __init__() (line 291–293) and again at the top of main() (line 384–386). When .main() is the normal call path, the framework is validated twice. Moving the check to only main() avoids the redundant validation and lets objects be constructed for introspection without triggering the framework check prematurely.

Suggested change
self.agent_file = agent_file
self.framework = framework
# Validate framework availability early to fail fast
if self.framework:
from praisonai.framework_adapters.validators import assert_framework_available
assert_framework_available(self.framework)
self.agent_file = agent_file
self.framework = framework


self.auto = auto
self.init = init
self.tools = tools or [] # Store tool class names as a list
Expand Down Expand Up @@ -383,6 +380,11 @@ def main(self):

self.framework = args.framework or self.framework

# Validate framework availability early to fail fast
if self.framework:
from praisonai.framework_adapters.validators import assert_framework_available
assert_framework_available(self.framework)

# Update config_list model if --model flag is provided
if getattr(args, 'model', None):
self.config_list[0]['model'] = args.model
Expand Down
17 changes: 11 additions & 6 deletions src/praisonai/praisonai/deploy.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,9 +92,12 @@ def create_api_file(self):

def set_environment_variables(self):
"""Sets environment variables with fallback to .env values or defaults."""
os.environ["OPENAI_MODEL_NAME"] = os.getenv("OPENAI_MODEL_NAME", "gpt-4o-mini")
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "Enter your API key")
os.environ["OPENAI_API_BASE"] = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()

os.environ["OPENAI_MODEL_NAME"] = ep.model
os.environ["OPENAI_API_KEY"] = ep.api_key or "Enter your API key"
os.environ["OPENAI_API_BASE"] = ep.base_url

def run_commands(self):
"""
Expand Down Expand Up @@ -129,9 +132,11 @@ def run_commands(self):
return

# Get environment variables
openai_model = os.environ.get('OPENAI_MODEL_NAME', 'gpt-5-nano')
openai_key = os.environ.get('OPENAI_API_KEY', 'Enter your API key')
openai_base = os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
from praisonai.llm.env import resolve_llm_endpoint
ep = resolve_llm_endpoint()
openai_model = ep.model
openai_key = ep.api_key or 'Enter your API key'
openai_base = ep.base_url

# Build commands with actual values
commands = [
Expand Down
13 changes: 5 additions & 8 deletions src/praisonai/praisonai/framework_adapters/autogen_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,8 @@ def run(self, config: Dict[str, Any], llm_config: List[Dict], topic: str) -> str
Returns:
Execution result as string
"""
if not self.is_available():
raise ImportError("AutoGen v0.2 is not available. Install with: pip install autogen")

# Availability already validated at CLI entry

# Import AutoGen only when needed
import autogen

Expand Down Expand Up @@ -123,9 +122,8 @@ def run(self, config: Dict[str, Any], llm_config: List[Dict], topic: str) -> str
Returns:
Execution result as string
"""
if not self.is_available():
raise ImportError("AutoGen v0.4 is not available. Install with: pip install autogen-agentchat autogen-ext")

# Availability already validated at CLI entry

logger.info("Starting AutoGen v0.4 execution...")
# For now, return a proper error message instead of delegating
# TODO: Implement full AutoGen v0.4 adapter logic
Expand Down Expand Up @@ -162,8 +160,7 @@ def run(self, config: Dict[str, Any], llm_config: List[Dict], topic: str) -> str
Returns:
Execution result as string
"""
if not self.is_available():
raise ImportError("AG2 is not available. Install with: pip install ag2")
# Availability already validated at CLI entry

logger.info("Starting AG2 execution...")
# For now, return a proper error message instead of delegating
Expand Down
5 changes: 1 addition & 4 deletions src/praisonai/praisonai/framework_adapters/crewai_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,7 @@ def run(
Returns:
Execution result as string
"""
if not self.is_available():
raise ImportError("CrewAI is not available. Install with: pip install crewai")

# Import CrewAI only when needed
# Import CrewAI only when needed (availability already validated at CLI entry)
from crewai import Agent, Task, Crew
from crewai.telemetry import Telemetry

Comment on lines +55 to 58
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor | ⚡ Quick win

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Find all sites that instantiate CrewAIAdapter / call its run() directly,
# to confirm whether non-CLI paths exist that bypass cli/main.py validation.
rg -nP -C3 '\bCrewAIAdapter\s*\('
rg -nP -C3 '\.run\s*\(' --type=py -g '*adapter*'
# Confirm assert_framework_available is only invoked from cli/main.py
rg -nP -C2 '\bassert_framework_available\s*\('

Repository: MervinPraison/PraisonAI

Length of output: 6224


🏁 Script executed:

# Read the full CrewAIAdapter implementation to see current state of run() method
cat -n src/praisonai/praisonai/framework_adapters/crewai_adapter.py

Repository: MervinPraison/PraisonAI

Length of output: 5032


🏁 Script executed:

# Search for any instantiation or direct usage of CrewAIAdapter outside CLI
rg -l 'CrewAIAdapter' --type=py | head -20

Repository: MervinPraison/PraisonAI

Length of output: 238


🏁 Script executed:

# Look for any tests or programmatic usage patterns
rg -n 'from.*crewai_adapter|import.*CrewAIAdapter' --type=py

Repository: MervinPraison/PraisonAI

Length of output: 278


🏁 Script executed:

# Search for calls to .run() method on adapters
rg -n '\.run\(' --type=py | grep -i adapter

Repository: MervinPraison/PraisonAI

Length of output: 1182


🏁 Script executed:

# Check test files for any direct adapter.run() calls
fd -e test.py -e tests.py | head -20
rg -n 'adapter.*\.run\|run.*adapter' --type=py -A2 -B2

Repository: MervinPraison/PraisonAI

Length of output: 49


🏁 Script executed:

# Check how the adapter is instantiated and used in the framework
rg -n 'get_adapter|get_framework_adapter' --type=py -A3

Repository: MervinPraison/PraisonAI

Length of output: 8419


🏁 Script executed:

# Check AgentsGenerator and how it validates framework
sed -n '230,310p' src/praisonai/praisonai/agents_generator.py

Repository: MervinPraison/PraisonAI

Length of output: 3526


🏁 Script executed:

# Check _get_framework_adapter implementation
sed -n '238,260p' src/praisonai/praisonai/agents_generator.py

Repository: MervinPraison/PraisonAI

Length of output: 959


🏁 Script executed:

# Check where AgentsGenerator is instantiated
rg -n 'AgentsGenerator\(' --type=py -B2 -A2

Repository: MervinPraison/PraisonAI

Length of output: 11135


🏁 Script executed:

# Check if AgentsGenerator validation really happens only at CLI
rg -n 'class AgentsGenerator' --type=py -A30

Repository: MervinPraison/PraisonAI

Length of output: 3452


Add framework availability guard to CrewAIAdapter.run() for consistent error messaging across all code paths.

Non-CLI callers exist that bypass cli/main.py validation: MCP server adapters (mcp_server/adapters/agents.py), feature modules (cli/features/eval.py), and tests directly instantiate AgentsGenerator without going through assert_framework_available. When these code paths call adapter.run() with an unavailable framework, they hit a bare ImportError instead of the actionable install hint.

Adding a lightweight assert_framework_available(self.name) call at the start of run() ensures the clear error message is preserved regardless of entry point while still failing fast at the CLI.

🤖 Prompt for AI Agents
Verify each finding against current code. Fix only still-valid issues, skip the
rest with a brief reason, keep changes minimal, and validate.

In `@src/praisonai/praisonai/framework_adapters/crewai_adapter.py` around lines 55
- 58, Add a framework-availability guard at the start of CrewAIAdapter.run():
call assert_framework_available(self.name) before importing or using CrewAI so
non-CLI callers (e.g., MCP server adapters, feature modules, tests) receive the
same actionable install hint instead of a bare ImportError; update the run()
method (in class CrewAIAdapter) to invoke assert_framework_available(self.name)
immediately on entry.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,8 @@ def run(
Returns:
Execution result as string
"""
if not self.is_available():
raise ImportError("PraisonAI agents is not available. Install with: pip install praisonaiagents")

# Availability already validated at CLI entry

# Import PraisonAI components only when needed
from praisonaiagents import Agent as PraisonAgent, Task as PraisonTask, AgentTeam

Expand Down
36 changes: 36 additions & 0 deletions src/praisonai/praisonai/framework_adapters/validators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
"""
Framework availability validators.

Provides early validation of framework availability to fail fast at CLI entry
rather than inside run() methods after expensive setup work.
"""

from .registry import FrameworkAdapterRegistry


# Install hints for common frameworks.
# Keys are framework names as passed to assert_framework_available;
# values are the shell commands surfaced to the user in the ImportError.
_INSTALL_HINTS = {
    "crewai": "pip install 'praisonai[crewai]' # or: pip install crewai",
    "autogen": "pip install 'praisonai[autogen]' # or: pip install pyautogen",
    "praisonai": "pip install praisonaiagents",
}


def assert_framework_available(name: str) -> None:
    """
    Fail fast with an ImportError when the requested framework is missing.

    Args:
        name: Framework name to validate

    Raises:
        ImportError: If framework is not available, with an actionable
            install hint for the user
    """
    # Guard clause: nothing to do when the framework is installed.
    if FrameworkAdapterRegistry.get_instance().is_available(name):
        return

    install_hint = _INSTALL_HINTS.get(name, f"pip install {name}")
    message = (
        f"Framework '{name}' was requested but is not installed.\n"
        f"Install it with:\n {install_hint}"
    )
    raise ImportError(message)
Loading