TealEngine

TealEngine is the core policy evaluation engine that provides mode-based enforcement with the deterministic Decision contract.

Overview

TealEngine evaluates requests against configured policies and returns Decision objects with:
  • Mode-specific behavior (ENFORCE, MONITOR, REPORT_ONLY)
  • Risk scoring for security analysis
  • Reason codes for explainability
  • Correlation IDs for traceability

Class

from tealtiger import TealEngine, ModeConfig, PolicyMode

class TealEngine:
    def __init__(
        self,
        policies: Dict[str, Any],
        mode: Optional[ModeConfig] = None,
        cache_ttl: Optional[int] = None,
        cache_enabled: bool = True,
        cache_max_size: int = 1000
    ):
        ...
    
    def evaluate_with_mode(
        self,
        context: Dict[str, Any],
        execution_context: Optional[ExecutionContext] = None
    ) -> Decision:
        ...
    
    def get_mode_config(self) -> ModeConfig:
        ...

Configuration

from pydantic import BaseModel
from typing import Dict, Optional
from tealtiger import PolicyMode

class ModeConfig(BaseModel):
    default: PolicyMode
    environment: Dict[str, PolicyMode] = {}
    policy: Dict[str, PolicyMode] = {}

Creating an Engine

Basic Configuration

from tealtiger import TealEngine, ModeConfig, PolicyMode

engine = TealEngine(
    policies={
        'tools': {
            'file_delete': {'allowed': False},
            'database_query': {'allowed': True}
        }
    },
    mode=ModeConfig(default=PolicyMode.ENFORCE)
)

Environment-Specific Modes

engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(
        default=PolicyMode.ENFORCE,
        environment={
            'development': PolicyMode.REPORT_ONLY,
            'staging': PolicyMode.MONITOR,
            'production': PolicyMode.ENFORCE
        }
    )
)

Policy-Specific Modes

engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(
        default=PolicyMode.ENFORCE,
        policy={
            'tools.file_delete': PolicyMode.MONITOR,
            'cost-limit': PolicyMode.REPORT_ONLY
        }
    )
)

Evaluating Requests

Basic Evaluation

from tealtiger.core.context import create_execution_context

context = create_execution_context(user_id='user-123')

decision = engine.evaluate_with_mode(
    {
        'agentId': 'agent-001',
        'action': 'tool.execute',
        'tool': 'file_delete'
    },
    context
)

print(decision.action)  # DENY or ALLOW
print(decision.risk_score)  # 0-100
print(decision.reason_codes)  # [ReasonCode.TOOL_NOT_ALLOWED]

With Metadata

decision = engine.evaluate_with_mode(
    {
        'agentId': 'agent-001',
        'action': 'llm.call',
        'model': 'gpt-4',
        'metadata': {
            'provider': 'openai',
            'estimated_cost': 0.05
        }
    },
    context
)

Policy Modes

ENFORCE Mode

# Blocks violations, allows compliant requests
engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(default=PolicyMode.ENFORCE)
)

decision = engine.evaluate_with_mode(request, context)

if decision.action == DecisionAction.DENY:
    raise Exception(f"Request denied: {decision.reason}")

MONITOR Mode

# Always allows but logs violations
engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(default=PolicyMode.MONITOR)
)

decision = engine.evaluate_with_mode(request, context)

# Always ALLOW, but check for violations
if ReasonCode.MONITOR_MODE_VIOLATION in decision.reason_codes:
    print(f"Policy violation detected: {decision.reason}")

REPORT_ONLY Mode

# Skips evaluation, always allows
engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(default=PolicyMode.REPORT_ONLY)
)

decision = engine.evaluate_with_mode(request, context)

# Always ALLOW, no evaluation performed
print(decision.metadata['evaluation_performed'])  # False

Mode Resolution

Mode resolution follows this priority:
  1. Policy-specific override (highest priority)
  2. Environment-specific override
  3. Global default (lowest priority)

engine = TealEngine(
    policies=my_policies,
    mode=ModeConfig(
        default=PolicyMode.ENFORCE,
        environment={
            'staging': PolicyMode.MONITOR
        },
        policy={
            'tools.file_delete': PolicyMode.ENFORCE
        }
    )
)

# In staging environment:
# - tools.file_delete → ENFORCE (policy-specific wins)
# - other policies → MONITOR (environment-specific)

Caching

engine = TealEngine(
    policies=my_policies,
    cache_enabled=True,
    cache_ttl=60000,  # 60 seconds
    cache_max_size=1000
)

decision = engine.evaluate_with_mode(request, context)

print(decision.metadata['cache_hit'])  # True or False

Integration with TealAudit

from tealtiger import TealEngine, TealAudit
from tealtiger.core.audit import ConsoleOutput, AuditEventType

engine = TealEngine(config)
audit = TealAudit(outputs=[ConsoleOutput()])

decision = engine.evaluate_with_mode(request, context)

# Log decision to audit
audit.log({
    'schema_version': '1.0.0',
    'event_type': AuditEventType.POLICY_EVALUATION,
    'timestamp': datetime.utcnow().isoformat() + 'Z',
    'correlation_id': decision.correlation_id,
    'action': decision.action,
    'risk_score': decision.risk_score
}, context)

Error Handling

try:
    decision = engine.evaluate_with_mode(request, context)
    
    if decision.action == DecisionAction.DENY:
        # Handle denial
        print(f"Request denied: {decision.reason}")
        print(f"Risk score: {decision.risk_score}")
        print(f"Reason codes: {', '.join(str(rc) for rc in decision.reason_codes)}")
except Exception as error:
    print(f"Policy evaluation failed: {error}")

Async Support

import asyncio

async def evaluate_async():
    decision = engine.evaluate_with_mode(request, context)
    return decision

# Run async
decision = asyncio.run(evaluate_async())

Performance

TealEngine targets:
  • < 10ms per evaluation (p99, without external calls)
  • < 1ms per evaluation on a cache hit (p99)
  • Deterministic results (same inputs → same outputs)

Best Practices

Always Provide Context

# ❌ Bad: No context
decision = engine.evaluate_with_mode(request)

# ✅ Good: Always provide context
context = create_execution_context(user_id='user-123')
decision = engine.evaluate_with_mode(request, context)

Use Appropriate Modes

# ✅ Good: Gradual rollout
engine = TealEngine(
    policies=new_policies,
    mode=ModeConfig(
        default=PolicyMode.MONITOR,  # Start with monitoring
        environment={
            'production': PolicyMode.ENFORCE  # Enforce in prod after testing
        }
    )
)

Handle Denials Gracefully

decision = engine.evaluate_with_mode(request, context)

if decision.action == DecisionAction.DENY:
    # Log for investigation
    logger.warning('Request denied', extra={
        'correlation_id': decision.correlation_id,
        'reason': decision.reason,
        'risk_score': decision.risk_score
    })
    
    # Return user-friendly error
    raise PolicyViolationError(
        'This action is not allowed by policy'
    )

Type Hints

from typing import Dict, Any, Optional
from tealtiger import TealEngine, Decision, ExecutionContext

def evaluate_with_context(
    engine: TealEngine,
    request: Dict[str, Any],
    context: Optional[ExecutionContext] = None
) -> Decision:
    """Evaluate request with optional context."""
    if context is None:
        context = create_execution_context()
    
    return engine.evaluate_with_mode(request, context)