feat: MACP Phase 1 — Core Protocol Implementation (#9)

This commit was merged in pull request #9.
Date: 2026-03-28 01:39:26 +00:00
parent 24496cea01
commit 28392914a7

19 changed files with 1724 additions and 53 deletions


@@ -14,6 +14,12 @@ import time
 import uuid
 from typing import Any
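+
+# Make the sibling "dispatcher" directory importable so the module-level
+# "import macp_dispatcher" below resolves regardless of the working directory.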
+DISPATCHER_DIR = pathlib.Path(__file__).resolve().parent.parent / "dispatcher"
+if str(DISPATCHER_DIR) not in sys.path:
+    sys.path.insert(0, str(DISPATCHER_DIR))
+
+import macp_dispatcher


 def now_iso() -> str:
     return dt.datetime.now(dt.timezone.utc).isoformat()
@@ -115,6 +121,29 @@ def is_completed_status(status: str) -> bool:
     return status in {"completed", "done"}


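+# MACP tasks are identified by the presence of a "dispatch" block; all other
+# tasks continue down the original command/command_template path.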
+def is_macp_task(task: dict[str, Any]) -> bool:
+    return "dispatch" in task
+
+
+def normalize_gate_result(gate: Any) -> dict[str, Any]:
+    if isinstance(gate, str):
+        return {"command": gate, "type": "mechanical"}
+    if isinstance(gate, dict):
+        return {
+            "command": str(gate.get("command") or ""),
+            "type": str(gate.get("type") or "mechanical"),
+        }
+    return {"command": "", "type": "mechanical"}
+
+
+def detect_worker_escalation(output: str) -> str | None:
+    marker = "MACP_ESCALATE:"
+    for line in output.splitlines():
+        if marker in line:
+            return line.split(marker, 1)[1].strip() or "Worker requested escalation"
+    return None
+
+
 def pick_next_task(tasks: list[dict[str, Any]]) -> dict[str, Any] | None:
     status_by_id = {str(t.get("id", "")): str(t.get("status", "")) for t in tasks}
     for task in tasks:
@@ -164,27 +193,58 @@ def run_single_task(repo_root: pathlib.Path, orch_dir: pathlib.Path, config: dic
     task_file = orch_dir / f"task-{task_id}.json"
     save_json(task_file, task)

cmd = str(task.get("command", "")).strip()
if not cmd:
template = str(config.get("worker", {}).get("command_template", "")).strip()
if template:
cmd = render_command_template(template, task, task_file)
if not cmd:
task["status"] = "failed"
task["failed_at"] = now_iso()
task["error"] = "No task command or worker command_template configured."
save_json(tasks_path, {"tasks": task_items})
emit_event(events_path, "task.failed", task_id, "failed", "controller", task["error"])
state["running_task_id"] = None
state["updated_at"] = now_iso()
save_json(state_path, state)
return True
     timeout_sec = int(task.get("timeout_seconds") or config.get("worker", {}).get("timeout_seconds") or 7200)
-    rc, _, timed_out = run_shell(cmd, repo_root, log_path, timeout_sec)
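+    # MACP tasks are delegated to macp_dispatcher.dispatch_task(); non-MACP
+    # tasks keep the original shell-command path in the else branch.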
+    timed_out = False
+    output = ""
+    rc = 0
+    if is_macp_task(task):
+        try:
+            rc, output = macp_dispatcher.dispatch_task(task, repo_root, orch_dir, config)
+            timed_out = bool(task.get("_timed_out"))
+        except Exception as exc:
+            rc = 1
+            task["error"] = str(exc)
+    else:
+        cmd = str(task.get("command", "")).strip()
+        if not cmd:
+            template = str(config.get("worker", {}).get("command_template", "")).strip()
+            if template:
+                cmd = render_command_template(template, task, task_file)
+        if not cmd:
+            task["status"] = "failed"
+            task["failed_at"] = now_iso()
+            task["error"] = "No task command or worker command_template configured."
+            save_json(tasks_path, {"tasks": task_items})
+            emit_event(events_path, "task.failed", task_id, "failed", "controller", task["error"])
+            state["running_task_id"] = None
+            state["updated_at"] = now_iso()
+            save_json(state_path, state)
+            return True
+        rc, output, timed_out = run_shell(cmd, repo_root, log_path, timeout_sec)

     if rc != 0:
-        task["error"] = f"Worker command timed out after {timeout_sec}s" if timed_out else f"Worker command failed with exit code {rc}"
+        if is_macp_task(task) and str(task.get("status") or "") == "escalated":
+            task["failed_at"] = str(task.get("failed_at") or now_iso())
+            emit_event(
+                events_path,
+                "task.escalated",
+                task_id,
+                "escalated",
+                "controller",
+                str(task.get("escalation_reason") or task.get("error") or "Task requires human intervention."),
+            )
+            save_json(tasks_path, {"tasks": task_items})
+            state["running_task_id"] = None
+            state["updated_at"] = now_iso()
+            save_json(state_path, state)
+            macp_dispatcher.collect_result(task, rc, [], orch_dir)
+            if bool(config.get("macp", {}).get("cleanup_worktrees", True)):
+                macp_dispatcher.cleanup_worktree(task, config)
+            return True
+        if not task.get("error"):
+            task["error"] = f"Worker command timed out after {timeout_sec}s" if timed_out else f"Worker command failed with exit code {rc}"
         if attempt < max_attempts:
             task["status"] = "pending"
             task["last_failed_at"] = now_iso()
@@ -204,34 +264,61 @@ def run_single_task(repo_root: pathlib.Path, orch_dir: pathlib.Path, config: dic
state["running_task_id"] = None
state["updated_at"] = now_iso()
save_json(state_path, state)
-        save_json(
-            results_dir / f"{task_id}.json",
-            {"task_id": task_id, "status": task["status"], "exit_code": rc, "attempt": attempt, "max_attempts": max_attempts},
-        )
+        if is_macp_task(task):
+            if task["status"] == "failed":
+                macp_dispatcher.collect_result(task, rc, [], orch_dir)
+                if bool(config.get("macp", {}).get("cleanup_worktrees", True)):
+                    macp_dispatcher.cleanup_worktree(task, config)
+        else:
+            save_json(
+                results_dir / f"{task_id}.json",
+                {"task_id": task_id, "status": task["status"], "exit_code": rc, "attempt": attempt, "max_attempts": max_attempts},
+            )
         return True

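+    # A zero exit code can still carry an MACP_ESCALATE marker in the worker
+    # output, so check for an explicit escalation before running quality gates.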
+    escalation_reason = detect_worker_escalation(output) if is_macp_task(task) else None
+    if escalation_reason:
+        task["status"] = "escalated"
+        task["failed_at"] = now_iso()
+        task["escalation_reason"] = escalation_reason
+        emit_event(events_path, "task.escalated", task_id, "escalated", "controller", escalation_reason)
+        save_json(tasks_path, {"tasks": task_items})
+        state["running_task_id"] = None
+        state["updated_at"] = now_iso()
+        save_json(state_path, state)
+        macp_dispatcher.collect_result(task, rc, [], orch_dir)
+        if bool(config.get("macp", {}).get("cleanup_worktrees", True)):
+            macp_dispatcher.cleanup_worktree(task, config)
+        return True

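+    # New intermediate lifecycle state: the worker has finished and the task
+    # sits in "gated" while quality gates run.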
task["status"] = "gated"
save_json(tasks_path, {"tasks": task_items})
emit_event(events_path, "task.gated", task_id, "gated", "controller", "Worker completed; quality gates starting")
gates = task.get("quality_gates") or config.get("quality_gates") or []
all_passed = True
gate_results: list[dict[str, Any]] = []
for gate in gates:
gate_cmd = str(gate).strip()
gate_entry = normalize_gate_result(gate)
gate_cmd = gate_entry["command"]
if not gate_cmd:
continue
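+        # Gates for MACP tasks run inside the task's git worktree; plain tasks
+        # keep running them from the repository root as before.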
emit_event(events_path, "rail.check.started", task_id, "running", "quality-gate", f"Running gate: {gate_cmd}")
gate_rc, _, gate_timed_out = run_shell(gate_cmd, repo_root, log_path, timeout_sec)
gate_cwd = pathlib.Path(os.path.expanduser(str(task.get("worktree") or repo_root))).resolve() if is_macp_task(task) else repo_root
emit_event(events_path, "rail.check.started", task_id, "gated", "quality-gate", f"Running gate: {gate_cmd}")
gate_rc, _, gate_timed_out = run_shell(gate_cmd, gate_cwd, log_path, timeout_sec)
if gate_rc == 0:
emit_event(events_path, "rail.check.passed", task_id, "running", "quality-gate", f"Gate passed: {gate_cmd}")
emit_event(events_path, "rail.check.passed", task_id, "gated", "quality-gate", f"Gate passed: {gate_cmd}")
else:
all_passed = False
emit_event(
events_path,
"rail.check.failed",
task_id,
"failed",
"gated",
"quality-gate",
f"Gate timed out after {timeout_sec}s: {gate_cmd}" if gate_timed_out else f"Gate failed ({gate_rc}): {gate_cmd}",
)
gate_results.append({"command": gate_cmd, "exit_code": gate_rc})
gate_results.append({"command": gate_cmd, "exit_code": gate_rc, "type": gate_entry["type"]})
     if all_passed:
         task["status"] = "completed"
@@ -251,24 +338,34 @@ def run_single_task(repo_root: pathlib.Path, orch_dir: pathlib.Path, config: dic
f"{task['error']}; retry {attempt + 1}/{max_attempts}",
)
else:
task["status"] = "failed"
task["status"] = "escalated" if is_macp_task(task) else "failed"
task["failed_at"] = now_iso()
emit_event(events_path, "task.failed", task_id, "failed", "controller", task["error"])
+            if is_macp_task(task):
+                task["escalation_reason"] = "Quality gates failed after max retries"
+                emit_event(events_path, "task.escalated", task_id, "escalated", "controller", task["escalation_reason"])
+            else:
+                emit_event(events_path, "task.failed", task_id, "failed", "controller", task["error"])
     save_json(tasks_path, {"tasks": task_items})
     state["running_task_id"] = None
     state["updated_at"] = now_iso()
     save_json(state_path, state)
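+    # Terminal MACP outcomes flow through the dispatcher's result collector,
+    # optionally cleaning up the worktree; plain tasks keep the per-task JSON
+    # result file.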
-    save_json(
-        results_dir / f"{task_id}.json",
-        {
-            "task_id": task_id,
-            "status": task["status"],
-            "completed_at": task.get("completed_at"),
-            "failed_at": task.get("failed_at"),
-            "gate_results": gate_results,
-        },
-    )
+    if is_macp_task(task):
+        if task["status"] in {"completed", "failed", "escalated"}:
+            macp_dispatcher.collect_result(task, rc, gate_results, orch_dir)
+            if bool(config.get("macp", {}).get("cleanup_worktrees", True)):
+                macp_dispatcher.cleanup_worktree(task, config)
+    else:
+        save_json(
+            results_dir / f"{task_id}.json",
+            {
+                "task_id": task_id,
+                "status": task["status"],
+                "completed_at": task.get("completed_at"),
+                "failed_at": task.get("failed_at"),
+                "gate_results": gate_results,
+            },
+        )
     return True
@@ -276,10 +373,14 @@ def queue_state(orch_dir: pathlib.Path) -> dict[str, int]:
     tasks = load_json(orch_dir / "tasks.json", {"tasks": []})
     task_items = tasks.get("tasks", [])
     if not isinstance(task_items, list):
-        return {"pending": 0, "running": 0, "runnable": 0}
+        return {"pending": 0, "running": 0, "gated": 0, "completed": 0, "failed": 0, "escalated": 0, "runnable": 0}
     pending = 0
     running = 0
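+    # Counters for the lifecycle states introduced in MACP Phase 1.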
+    gated = 0
+    completed = 0
+    failed = 0
+    escalated = 0
     runnable = 0
     status_by_id = {str(t.get("id", "")): str(t.get("status", "")) for t in task_items}
     for task in task_items:
@@ -291,7 +392,23 @@ def queue_state(orch_dir: pathlib.Path) -> dict[str, int]:
             runnable += 1
         if status == "running":
             running += 1
-    return {"pending": pending, "running": running, "runnable": runnable}
+        if status == "gated":
+            gated += 1
+        if status == "completed":
+            completed += 1
+        if status == "failed":
+            failed += 1
+        if status == "escalated":
+            escalated += 1
+    return {
+        "pending": pending,
+        "running": running,
+        "gated": gated,
+        "completed": completed,
+        "failed": failed,
+        "escalated": escalated,
+        "runnable": runnable,
+    }


 def main() -> int: