OpenAI
```python
import os

from deepagents import create_deep_agent

os.environ["OPENAI_API_KEY"] = "sk-..."

agent = create_deep_agent(model="openai:gpt-5.2")
# this calls init_chat_model for the specified model with default parameters
# to use specific model parameters, use init_chat_model directly
```
Anthropic
```python
import os

from deepagents import create_deep_agent

os.environ["ANTHROPIC_API_KEY"] = "sk-..."

agent = create_deep_agent(model="claude-sonnet-4-5-20250929")
# this calls init_chat_model for the specified model with default parameters
# to use specific model parameters, use init_chat_model directly
```
Azure OpenAI
```python
import os

from deepagents import create_deep_agent

os.environ["AZURE_OPENAI_API_KEY"] = "..."
os.environ["AZURE_OPENAI_ENDPOINT"] = "..."
os.environ["OPENAI_API_VERSION"] = "2025-03-01-preview"

agent = create_deep_agent(model="azure_openai:gpt-4.1")
# this calls init_chat_model for the specified model with default parameters
# to use specific model parameters, use init_chat_model directly
```
Google Gemini
```python
import os

from deepagents import create_deep_agent

os.environ["GOOGLE_API_KEY"] = "..."

agent = create_deep_agent(model="google_genai:gemini-2.5-flash-lite")
# this calls init_chat_model for the specified model with default parameters
# to use specific model parameters, use init_chat_model directly
```
AWS Bedrock
```python
from deepagents import create_deep_agent

# Follow the steps here to configure your credentials:
# https://docs.aws.amazon.com/bedrock/latest/userguide/getting-started.html

agent = create_deep_agent(
    model="anthropic.claude-3-5-sonnet-20240620-v1:0",
    model_provider="bedrock_converse",
)
# this calls init_chat_model for the specified model with default parameters
# to use specific model parameters, use init_chat_model directly
```
Hugging Face
```python
import os

from deepagents import create_deep_agent

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."

agent = create_deep_agent(
    model="microsoft/Phi-3-mini-4k-instruct",
    model_provider="huggingface",
    temperature=0.7,
    max_tokens=1024,
)
# this calls init_chat_model for the specified model with the parameters shown above
# to use other model parameters, use init_chat_model directly
```
Deep agents come with a built-in system prompt. The default system prompt contains detailed instructions for using the built-in planning tool, file system tools, and subagents.
When middleware adds special tools, like the filesystem tools, it appends them to the system prompt.

Each deep agent should also include a custom system prompt specific to its use case:
```python
from deepagents import create_deep_agent

research_instructions = """\
You are an expert researcher. Your job is to conduct \
thorough research, and then write a polished report. \
"""

agent = create_deep_agent(
    system_prompt=research_instructions,
)
```
Deep agent tools can make use of virtual filesystems to store, access, and edit files. By default, deep agents use a StateBackend.

If you are using skills or memory, you must add the expected skill or memory files to the backend before creating the agent.
StateBackend
An ephemeral filesystem backend stored in LangGraph state. This filesystem only persists for a single thread.
```python
# By default we provide a StateBackend
agent = create_deep_agent()

# Under the hood, it looks like
from deepagents.backends import StateBackend

agent = create_deep_agent(
    # Note that the tools access state through runtime.state
    backend=(lambda rt: StateBackend(rt))
)
```
FilesystemBackend
The local machine’s filesystem. This backend grants agents direct filesystem read/write access. Use it with caution and only in appropriate environments. For more information, see FilesystemBackend.
```python
from deepagents.backends import FilesystemBackend

agent = create_deep_agent(
    backend=FilesystemBackend(root_dir=".", virtual_mode=True)
)
```
StoreBackend
A filesystem that provides long-term storage persisted across threads.
```python
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend

agent = create_deep_agent(
    backend=(lambda rt: StoreBackend(rt)),
    store=InMemoryStore(),  # Good for local dev; omit for LangSmith Deployment
)
```
When deploying to LangSmith Deployment, omit the store parameter. The platform automatically provisions a store for your agent.
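For instance, the StoreBackend wiring above would reduce to something like the following on the platform. This is a minimal sketch under that assumption: the provisioned store is expected to reach the backend through the runtime, so no store argument appears.

```python
from deepagents import create_deep_agent
from deepagents.backends import StoreBackend

# Hedged sketch for LangSmith Deployment: the platform-provisioned store is
# assumed to be available through the runtime, so store=... is omitted.
agent = create_deep_agent(
    backend=(lambda rt: StoreBackend(rt)),
)
```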
CompositeBackend
A flexible backend that routes different filesystem paths to different backends.
```python
from deepagents import create_deep_agent
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
from langgraph.store.memory import InMemoryStore

composite_backend = lambda rt: CompositeBackend(
    default=StateBackend(rt),
    routes={
        "/memories/": StoreBackend(rt),
    },
)

agent = create_deep_agent(
    backend=composite_backend,
    store=InMemoryStore(),  # Store passed to create_deep_agent, not backend
)
```
Sandboxes are specialized backends that run agent code in an isolated environment with its own filesystem and an execute tool for shell commands.

Use a sandbox backend when you want your deep agent to write files, install dependencies, and run commands without changing anything on your local machine. You configure sandboxes by passing a sandbox backend to the backend parameter when creating your deep agent:
Modal
```bash
pip install langchain-modal
```
```python
import modal
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_modal import ModalSandbox

app = modal.App.lookup("your-app")
modal_sandbox = modal.Sandbox.create(app=app)
backend = ModalSandbox(sandbox=modal_sandbox)

agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "Create a small Python package and run pytest",
            }
        ]
    }
)

modal_sandbox.terminate()
```
Runloop
```bash
pip install langchain-runloop
```
```python
import os

from runloop_api_client import RunloopSDK
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_runloop import RunloopSandbox

client = RunloopSDK(bearer_token=os.environ["RUNLOOP_API_KEY"])
devbox = client.devbox.create()
backend = RunloopSandbox(devbox=devbox)

agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)

try:
    result = agent.invoke(
        {
            "messages": [
                {
                    "role": "user",
                    "content": "Create a small Python package and run pytest",
                }
            ]
        }
    )
finally:
    devbox.shutdown()
```
Daytona
```bash
pip install langchain-daytona
```
```python
from daytona import Daytona
from langchain_anthropic import ChatAnthropic
from deepagents import create_deep_agent
from langchain_daytona import DaytonaSandbox

sandbox = Daytona().create()
backend = DaytonaSandbox(sandbox=sandbox)

agent = create_deep_agent(
    model=ChatAnthropic(model="claude-sonnet-4-20250514"),
    system_prompt="You are a Python coding assistant with sandbox access.",
    backend=backend,
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "Create a small Python package and run pytest",
            }
        ]
    }
)

sandbox.stop()
```
You can use skills to provide your deep agent with new capabilities and expertise. While tools tend to cover lower-level functionality like native filesystem actions or planning, skills can contain detailed instructions on how to complete tasks, reference information, and other assets, such as templates. The agent only loads a skill's files once it has determined that the skill is useful for the current prompt. This progressive disclosure reduces the number of tokens and the amount of context the agent has to consider at startup.

For example skills, see Deep Agent example skills.

To add skills to your deep agent, pass them as an argument to create_deep_agent.
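Below is a minimal sketch of what that can look like. The value format shown for skills (a list of paths into the agent's filesystem backend, one folder per skill, each containing a SKILL.md) is an assumption; check the deepagents API reference for the exact signature.

```python
from deepagents import create_deep_agent

# Hedged sketch: passing `skills` as a list of backend paths is an assumption,
# and "/skills/" is a hypothetical location for your skill folders, each of
# which is expected to contain a SKILL.md the agent can load on demand.
agent = create_deep_agent(
    skills=["/skills/"],
)
```

Remember that, as noted above, the skill files must already exist in the backend before the agent is created.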
Use AGENTS.md files to provide extra context to your deep agent.

You can pass one or more file paths to the memory parameter when creating your deep agent:
StateBackend
```python
from urllib.request import urlopen

from deepagents import create_deep_agent
from deepagents.backends.utils import create_file_data
from langgraph.checkpoint.memory import MemorySaver

with urlopen("https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/examples/text-to-sql-agent/AGENTS.md") as response:
    agents_md = response.read().decode("utf-8")

checkpointer = MemorySaver()

agent = create_deep_agent(
    memory=["/AGENTS.md"],
    checkpointer=checkpointer,
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "Please tell me what's in your memory files.",
            }
        ],
        # Seed the default StateBackend's in-state filesystem (virtual paths must start with "/").
        "files": {"/AGENTS.md": create_file_data(agents_md)},
    },
    config={"configurable": {"thread_id": "123456"}},
)
```
StoreBackend
```python
from urllib.request import urlopen

from deepagents import create_deep_agent
from deepagents.backends import StoreBackend
from deepagents.backends.utils import create_file_data
from langgraph.store.memory import InMemoryStore

with urlopen("https://raw.githubusercontent.com/langchain-ai/deepagents/refs/heads/main/examples/text-to-sql-agent/AGENTS.md") as response:
    agents_md = response.read().decode("utf-8")

# Create the store and add the file to it
store = InMemoryStore()
file_data = create_file_data(agents_md)
store.put(
    namespace=("filesystem",),
    key="/AGENTS.md",
    value=file_data,
)

agent = create_deep_agent(
    backend=(lambda rt: StoreBackend(rt)),
    store=store,
    memory=["/AGENTS.md"],
)

result = agent.invoke(
    {
        "messages": [
            {
                "role": "user",
                "content": "Please tell me what's in your memory files.",
            }
        ],
        "files": {"/AGENTS.md": create_file_data(agents_md)},
    },
    config={"configurable": {"thread_id": "12345"}},
)
```
Deep agents support structured output. You can set a desired structured output schema by passing it as the response_format argument to create_deep_agent(). When the model generates the structured data, it is captured, validated, and returned in the structured_response key of the deep agent's state.
```python
import os
from typing import Literal

from pydantic import BaseModel, Field
from tavily import TavilyClient

from deepagents import create_deep_agent

tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])

def internet_search(
    query: str,
    max_results: int = 5,
    topic: Literal["general", "news", "finance"] = "general",
    include_raw_content: bool = False,
):
    """Run a web search"""
    return tavily_client.search(
        query,
        max_results=max_results,
        include_raw_content=include_raw_content,
        topic=topic,
    )

class WeatherReport(BaseModel):
    """A structured weather report with current conditions and forecast."""

    location: str = Field(description="The location for this weather report")
    temperature: float = Field(description="Current temperature in Celsius")
    condition: str = Field(description="Current weather condition (e.g., sunny, cloudy, rainy)")
    humidity: int = Field(description="Humidity percentage")
    wind_speed: float = Field(description="Wind speed in km/h")
    forecast: str = Field(description="Brief forecast for the next 24 hours")

agent = create_deep_agent(
    response_format=WeatherReport,
    tools=[internet_search],
)

result = agent.invoke({
    "messages": [{
        "role": "user",
        "content": "What's the weather like in San Francisco?"
    }]
})

print(result["structured_response"])
# location='San Francisco, California' temperature=18.3 condition='Sunny' humidity=48 wind_speed=7.6 forecast='Pleasant sunny conditions expected to continue with temperatures around 64°F (18°C) during the day, dropping to around 52°F (11°C) at night. Clear skies with minimal precipitation expected.'
```