forked from microsoft/agent-framework
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathollama_agent_basic.py
More file actions
68 lines (48 loc) · 2 KB
/
ollama_agent_basic.py
File metadata and controls
68 lines (48 loc) · 2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
# Copyright (c) Microsoft. All rights reserved.
import asyncio
from datetime import datetime
from agent_framework.ollama import OllamaChatClient
"""
Ollama Agent Basic Example
This sample demonstrates implementing a Ollama agent with basic tool usage.
Ensure to install Ollama and have a model running locally before running the sample
Not all Models support function calling, to test function calling try llama3.2 or qwen3:4b
Set the model to use via the OLLAMA_CHAT_MODEL_ID environment variable or modify the code below.
https://ollama.com/
"""
def get_time(location: str) -> str:
    """Report the current time, labeled with the given location.

    Note: this reads the machine's local clock; *location* is only
    interpolated into the message, not used for a timezone lookup.
    """
    stamp = datetime.now().strftime("%I:%M %p")
    return f"The current time in {location} is {stamp}."
async def non_streaming_example() -> None:
    """Demonstrate a single blocking call: the complete answer arrives at once."""
    print("=== Non-streaming Response Example ===")
    # Build an agent backed by the locally running Ollama model, with the
    # get_time function exposed as a callable tool.
    time_agent = OllamaChatClient().create_agent(
        name="TimeAgent",
        instructions="You are a helpful time agent answer in one sentence.",
        tools=get_time,
    )
    question = "What time is it in Seattle? Use a tool call"
    print(f"User: {question}")
    # run() awaits the full model response (including any tool calls) before returning.
    answer = await time_agent.run(question)
    print(f"Result: {answer}\n")
async def streaming_example() -> None:
    """Demonstrate incremental output: print response text chunk by chunk as it arrives."""
    print("=== Streaming Response Example ===")
    # Same agent setup as the non-streaming example: a local Ollama model
    # with get_time registered as a tool.
    time_agent = OllamaChatClient().create_agent(
        name="TimeAgent",
        instructions="You are a helpful time agent answer in one sentence.",
        tools=get_time,
    )
    question = "What time is it in San Francisco? Use a tool call"
    print(f"User: {question}")
    print("Agent: ", end="", flush=True)
    # run_stream() yields partial updates; only some carry text (others may be
    # tool-call bookkeeping), so guard before printing.
    async for update in time_agent.run_stream(question):
        if update.text:
            print(update.text, end="", flush=True)
    print("\n")
async def main() -> None:
    """Run both demos in sequence: non-streaming first, then streaming."""
    print("=== Basic Ollama Chat Client Agent Example ===")
    for example in (non_streaming_example, streaming_example):
        await example()
if __name__ == "__main__":
asyncio.run(main())