How to Add AI Agent Safety to Django Projects
SafeClaw by Authensor integrates into Django projects as a gate service, providing deny-by-default action gating for AI agent tool calls. Every subprocess execution, file operation, and outbound HTTP request your agent makes is checked against your YAML policy before it executes. Install with npx @authensor/safeclaw and call the gate from your Django views or middleware.
Why Django AI Agents Need Gating
Django applications increasingly incorporate AI agents for content generation, data analysis, and automation. These agents run with the same permissions as the Django process: typically read/write access to media directories, database credentials in environment variables, and the ability to execute management commands. SafeClaw ensures no agent action executes without explicit policy approval.
446 tests validate the gate engine. Every decision is recorded in a hash-chained audit trail.
Installation
npx @authensor/safeclaw
pip install requests # for HTTP communication with SafeClaw
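The gate service below defaults to http://localhost:9800. If you want the endpoint configurable per environment, one option is an ordinary Django setting; the name SAFECLAW_ENDPOINT here is our own convention, not something SafeClaw itself reads:

# settings.py — SAFECLAW_ENDPOINT is a hypothetical project convention;
# pass it to SafeClawGate(endpoint=...) wherever the gate is constructed.
import os

SAFECLAW_ENDPOINT = os.environ.get("SAFECLAW_ENDPOINT", "http://localhost:9800")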
Policy
version: 1
defaultAction: deny
rules:
  - action: file.read
    path:
      glob: "/app/media/uploads/**"
    decision: allow
  - action: file.write
    path:
      glob: "/app/media/output/**"
    decision: allow
  - action: process.exec
    command:
      startsWith: "python manage.py"
    decision: prompt  # human approval for management commands
  - action: network.request
    host:
      in: ["api.openai.com", "api.anthropic.com"]
    decision: allow
  - action: file.read
    path:
      glob: "**/.env"
    decision: deny  # never expose environment files
  - action: file.read
    path:
      glob: "**/settings.py"
    decision: deny  # protect Django settings
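To make the evaluation concrete, here is a sketch of how three requests would be decided under this policy. The field names mirror the gate service below; treat the exact matching semantics as an assumption to verify against SafeClaw's documentation.

# Illustrative request payloads and the decision each would receive.

# Matches the allow rule for /app/media/uploads/** -> allowed.
{"action": "file.read", "path": "/app/media/uploads/report.csv"}

# Matches the explicit deny rule for **/settings.py -> denied.
{"action": "file.read", "path": "/app/myproject/settings.py"}

# No rule matches, so defaultAction: deny applies -> denied.
{"action": "file.write", "path": "/etc/passwd"}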
Django Gate Service
# agents/safeclaw.py
import logging

import requests
from django.core.exceptions import PermissionDenied

logger = logging.getLogger(__name__)


class SafeClawGate:
    def __init__(self, endpoint="http://localhost:9800"):
        self.endpoint = endpoint
        self.session = requests.Session()

    def check(self, action_request: dict) -> dict:
        """Send an action request to SafeClaw and return its decision."""
        try:
            response = self.session.post(
                f"{self.endpoint}/check",
                json=action_request,
                timeout=5,
            )
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            logger.error(f"SafeClaw unreachable: {e}")
            # Fail closed: if the gate cannot be reached, deny the action.
            return {"allowed": False, "reason": "SafeClaw unreachable"}

    def require(self, action_request: dict) -> None:
        """Raise PermissionDenied unless the policy allows the action."""
        decision = self.check(action_request)
        if not decision.get("allowed"):
            reason = decision.get("reason", "no reason given")
            raise PermissionDenied(f"SafeClaw denied: {reason}")


gate = SafeClawGate()
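Any agent tool call can then be gated with a single line before the action runs:

# Example: check a read against the policy before touching the filesystem.
from agents.safeclaw import gate

gate.require({"action": "file.read", "path": "/app/media/uploads/data.csv"})
# If the policy denies the action, PermissionDenied is raised here; inside
# a view, Django turns that into HTTP 403 and the read below never runs.
content = open("/app/media/uploads/data.csv").read()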
Django View Integration
# agents/views.py
import json
import subprocess
from pathlib import Path
from urllib.parse import urlparse

import requests as http_requests
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST

from agents.safeclaw import gate


@csrf_exempt
@require_POST
def agent_read_file(request):
    body = json.loads(request.body)
    path = body["path"]
    # Gate the read before touching the filesystem.
    gate.require({"action": "file.read", "path": path})
    content = Path(path).read_text()
    return JsonResponse({"content": content})


@csrf_exempt
@require_POST
def agent_exec(request):
    body = json.loads(request.body)
    command = body["command"]
    # Gate the command before spawning a subprocess.
    gate.require({"action": "process.exec", "command": command})
    result = subprocess.run(
        command.split(),
        capture_output=True,
        text=True,
        timeout=30,
    )
    return JsonResponse({"stdout": result.stdout, "stderr": result.stderr})


@csrf_exempt
@require_POST
def agent_fetch(request):
    body = json.loads(request.body)
    url = body["url"]
    host = urlparse(url).hostname
    # Gate the outbound request by host before any bytes leave the process.
    gate.require({
        "action": "network.request",
        "host": host,
        "url": url,
        "method": "GET",
    })
    response = http_requests.get(url, timeout=10)
    return JsonResponse({"status": response.status_code, "body": response.text})
URL Configuration
# agents/urls.py
from django.urls import path

from agents.views import agent_read_file, agent_exec, agent_fetch

urlpatterns = [
    path("agent/read/", agent_read_file, name="agent_read"),
    path("agent/exec/", agent_exec, name="agent_exec"),
    path("agent/fetch/", agent_fetch, name="agent_fetch"),
]
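Mount the app's URLconf from the project's root URLconf (the myproject name assumes a standard startproject layout):

# myproject/urls.py — include the agent endpoints at the site root
from django.urls import include, path

urlpatterns = [
    path("", include("agents.urls")),
]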
Django Management Command for Audit
# agents/management/commands/safeclaw_audit.py
import requests
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Export SafeClaw audit trail"

    def handle(self, *args, **options):
        response = requests.get("http://localhost:9800/audit", timeout=10)
        for entry in response.json():
            # Each line shows the first 12 characters of the entry's chain hash.
            self.stdout.write(
                f"{entry['timestamp']} | {entry['action']} | "
                f"{entry['decision']} | {entry['hash'][:12]}"
            )
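Run the export like any other management command:

python manage.py safeclaw_audit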
SafeClaw is MIT licensed, works with Claude and OpenAI agents, and records every decision in a hash-chained audit trail.
Cross-References
- Python Integration
- FastAPI Integration
- Docker Compose Deployment
- Hash-Chained Audit Logs
- Deny-by-Default Explained
Try SafeClaw
Action-level gating for AI agents. Set it up in your browser in 60 seconds.
$ npx @authensor/safeclaw