import { TealTiger } from 'tealtiger';
import { Langfuse } from 'langfuse';
import { ChatOpenAI } from 'langchain/chat_models/openai';
// Initialize Langfuse — the observability client used to record traces,
// generations, and TealTiger policy decisions.
// Credentials come from the environment; both keys must be set or the
// client will fail to authenticate at flush time.
// NOTE(review): no baseUrl is given, so this targets Langfuse Cloud by
// default — confirm for self-hosted deployments.
const langfuse = new Langfuse({
publicKey: process.env.LANGFUSE_PUBLIC_KEY,
secretKey: process.env.LANGFUSE_SECRET_KEY
});
// Initialize TealTiger with Langfuse export.
// Policy engine configuration:
//   - tools: per-tool allow/deny list (web_search permitted, file_delete blocked)
//   - budget: hard cost ceiling per request, in dollars
// telemetry.langfuse wires decision and cost events into the Langfuse
// client created above, so policy outcomes appear alongside LLM spans.
const teal = new TealTiger({
policies: {
tools: {
web_search: { allowed: true },
file_delete: { allowed: false }
},
budget: {
// 50 cents max spend per evaluated request
maxCostPerRequest: 0.50
}
},
telemetry: {
langfuse: {
enabled: true,
client: langfuse,
// Export both policy decisions and cost accounting to Langfuse.
exportDecisions: true,
exportCosts: true
}
}
});
// Create a root trace in Langfuse for this agent run.
// Its id is passed to TealTiger below so policy decisions attach to the
// same trace as the LLM generation.
const trace = langfuse.trace({
name: "agent-execution",
userId: "user-123"
});
// Evaluate the pending tool call against TealTiger policy.
// Because telemetry is enabled above, the decision is also exported to
// Langfuse, linked via context.traceId.
// NOTE(review): the code below branches on decision.action === 'ALLOW';
// other possible action values (e.g. DENY) are assumed but not shown
// here — confirm against the TealTiger API.
const decision = await teal.evaluate({
action: 'tool.execute',
tool: 'web_search',
context: {
traceId: trace.id // Link to Langfuse trace
}
});
// Record the LLM call as a Langfuse generation under the trace.
// NOTE(review): "gpt-4" here is metadata only; ChatOpenAI below is
// constructed with no model option, so confirm its default matches.
const generation = trace.generation({
name: "llm-call",
model: "gpt-4",
input: "Search for AI news"
});
try {
// Execute only when the TealTiger policy allowed the tool call.
if (decision.action === 'ALLOW') {
const model = new ChatOpenAI();
const response = await model.call([
{ role: 'user', content: 'Search for AI news' }
]);
generation.end({
output: response.content
});
} else {
// Bug fix: a non-ALLOW decision previously left the generation
// open (never .end()ed), producing a dangling span in Langfuse.
// Close it with a warning so denied runs are visible in the UI.
generation.end({
output: null,
level: 'WARNING',
statusMessage: `Blocked by policy: ${decision.action}`
});
}
} finally {
// Bug fix: flush in a finally block so buffered trace/generation
// events are delivered even if the model call throws.
await langfuse.flushAsync();
}