CrewAI Integration
Secure credential management for CrewAI multi-agent workflows. Each agent in your crew gets the minimum credentials it needs — nothing more.
Installation
pip install agentsecretstore crewai crewai-tools

Vault tool for CrewAI
Create a BaseTool subclass that wraps the vault SDK. Each tool instance holds a scoped token so the agent can only read the secrets within its allowed scope:
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
from typing import Optional
from agentsecretstore import AgentVault
import asyncio
class VaultToolInput(BaseModel):
    """Input schema for the vault secret-retrieval tool."""

    # Location of the secret inside the vault's path hierarchy.
    path: str = Field(description="The vault secret path (e.g. 'production/openai/api-key')")
    # Per-call override; when None, the tool falls back to the token the
    # orchestrator injected into the tool instance.
    token: Optional[str] = Field(
        default=None,
        description="Optional scoped token. If omitted, uses the agent's configured token."
    )
class AgentSecretStoreTool(BaseTool):
name: str = "vault_get_secret"
description: str = (
"Retrieve a secret or API key from Agent Secret Store secure vault. "
"Always use this before calling any external API that requires authentication. "
"Never ask users for credentials — fetch them from the vault."
)
args_schema: type[BaseModel] = VaultToolInput
scoped_token: str # Injected by orchestrator
def _run(self, path: str, token: Optional[str] = None) -> str:
"""Fetch secret from vault synchronously."""
use_token = token or self.scoped_token
async def fetch():
async with AgentVault() as vault:
secret = await vault.get_secret(path, token=use_token)
return secret.value
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(fetch())
finally:
loop.close()Full crew example
A research → write → publish crew where the orchestrator issues a separate minimum-privilege token for each agent. The researcher can't read the Slack token; the publisher can't read the search API keys.
import asyncio
from crewai import Agent, Task, Crew, Process
from agentsecretstore import AgentVault
# ── Orchestrator: issue scoped tokens per agent ──────────────
async def provision_tokens(vault: AgentVault) -> dict[str, str]:
    """Issue one minimum-privilege token per crew member.

    Returns a mapping of crew role -> token value. Each token is scoped to
    exactly the secret paths that role needs, so a compromised agent can
    only read its own slice of the vault.
    """
    # (role, scope, description) — one grant per crew member.
    grants = [
        # Research agent only needs to read API keys for search services.
        ("research", "secrets:read:production/search/*", "CrewAI research agent token"),
        # Writer agent only needs the LLM key.
        ("writer", "secrets:read:production/openai/*", "CrewAI writer agent token"),
        # Publisher agent needs Slack and email keys.
        (
            "publisher",
            "secrets:read:production/slack/*,secrets:read:production/sendgrid/*",
            "CrewAI publisher agent token",
        ),
    ]
    issued: dict[str, str] = {}
    for role, scope, description in grants:
        token = await vault.request_token(
            scope=scope,
            ttl_seconds=3600,
            description=description,
        )
        issued[role] = token.value
    return issued
# ── Vault-aware tools ─────────────────────────────────────────
def get_openai_key(scoped_token: str) -> str:
    """Return the OpenAI API key from the vault, using a scoped token.

    Synchronous wrapper intended for call sites that cannot await; it spins
    up a short-lived event loop via asyncio.run.
    """

    async def _read_key() -> str:
        async with AgentVault() as vault:
            result = await vault.get_secret(
                "production/openai/api-key",
                token=scoped_token,
            )
            return result.value

    return asyncio.run(_read_key())
# ── Build the crew ────────────────────────────────────────────
async def run_crew():
    """Provision scoped credentials, assemble the crew, and execute it."""
    async with AgentVault() as vault:
        role_tokens = await provision_tokens(vault)

        # All three agents share one LLM key; it is fetched here with the
        # writer's token — the only token scoped to production/openai/*.
        llm_secret = await vault.get_secret(
            "production/openai/api-key",
            token=role_tokens["writer"],
        )

        # One vault tool per role, each bound to that role's scoped token.
        secret_tools = {
            role: AgentSecretStoreTool(scoped_token=value)
            for role, value in role_tokens.items()
        }

        # ── Agents ───────────────────────────────────────────────
        researcher = Agent(
            role="Research Analyst",
            goal="Find comprehensive, accurate information on the given topic",
            backstory="An expert researcher who always uses proper data sources.",
            tools=[secret_tools["research"]],
            llm_api_key=llm_secret.value,
            verbose=True,
        )
        writer = Agent(
            role="Content Writer",
            goal="Transform research into compelling, well-structured content",
            backstory="A skilled technical writer who produces clear, accurate content.",
            tools=[secret_tools["writer"]],
            llm_api_key=llm_secret.value,
            verbose=True,
        )
        publisher = Agent(
            role="Content Publisher",
            goal="Distribute finished content to the appropriate channels",
            backstory="A distribution specialist who ensures content reaches the right audience.",
            tools=[secret_tools["publisher"]],
            llm_api_key=llm_secret.value,
            verbose=True,
        )

        # ── Tasks ─────────────────────────────────────────────────
        research_task = Task(
            description="Research the latest developments in AI agent security",
            expected_output="A detailed report with key findings and source citations",
            agent=researcher,
        )
        writing_task = Task(
            description="Write a technical blog post based on the research report",
            expected_output="A 1500-word blog post in Markdown format",
            agent=writer,
            context=[research_task],
        )
        publish_task = Task(
            description="Publish the blog post to Slack #engineering channel",
            expected_output="Confirmation that the post was published successfully",
            agent=publisher,
            context=[writing_task],
        )

        # ── Run the crew ─────────────────────────────────────────
        crew = Crew(
            agents=[researcher, writer, publisher],
            tasks=[research_task, writing_task, publish_task],
            process=Process.sequential,
            verbose=True,
        )
        # kickoff() is synchronous; it blocks this coroutine until the
        # whole pipeline finishes.
        return crew.kickoff()
if __name__ == "__main__":
asyncio.run(run_crew())Token scoping per agent
The key pattern is one scoped token per agent, issued at crew startup by the orchestrator. Each token is tightly scoped — if an agent is compromised, the blast radius is limited to exactly the secrets in its scope.
Parallel crews
For parallel task execution, issue tokens concurrently with asyncio.gather:
# Pattern: parallel crew with shared vault tool
# Each task runs concurrently — each agent has its own scoped token
from crewai import Agent, Task, Crew, Process
async def run_parallel_research_crew():
async with AgentVault() as vault:
# Issue separate tokens for parallel agents
tokens = await asyncio.gather(*[
vault.request_token(
scope=f"secrets:read:production/search/*",
ttl_seconds=1800,
description=f"Research worker {i}",
)
for i in range(3)
])
agents = [
Agent(
role=f"Research Worker {i+1}",
goal=f"Research topic segment {i+1}",
tools=[AgentSecretStoreTool(scoped_token=token.value)],
)
for i, token in enumerate(tokens)
]
# Tasks can run in parallel
tasks = [
Task(
description=f"Research topic: AI agent frameworks (segment {i+1})",
expected_output="Detailed findings for this segment",
agent=agent,
)
for i, agent in enumerate(agents)
]
crew = Crew(
agents=agents,
tasks=tasks,
process=Process.parallel, # Run research tasks concurrently
)
return crew.kickoff()LangChain Integration →
Use the vault with LangChain agents and tools.
Scoped Tokens →
Deep dive into the token scoping model.