Mirror of https://github.com/instructkr/claw-code.git — synced 2026-04-27 01:04:58 +08:00
fix: #168 — exec-command / exec-tool / route / bootstrap now accept --output-format; CLI family JSON parity COMPLETE
Extends the #167 inspect-surface parity fix to the four remaining CLI outliers: the commands claws actually invoke to DO work, not just inspect state. After this commit, the entire claw-code CLI family speaks a unified JSON envelope contract. Concrete additions: - exec-command: --output-format {text,json} - exec-tool: --output-format {text,json} - route: --output-format {text,json} - bootstrap: --output-format {text,json} JSON envelope shapes: exec-command (handled): {name, prompt, source_hint, handled: true, message} exec-command (not-found): {name, prompt, handled: false, error: {kind:'command_not_found', message, retryable: false}} exec-tool (handled): {name, payload, source_hint, handled: true, message} exec-tool (not-found): {name, payload, handled: false, error: {kind:'tool_not_found', message, retryable: false}} route: {prompt, limit, match_count, matches: [{kind, name, score, source_hint}]} bootstrap: {prompt, limit, setup: {python_version, implementation, platform_name, test_command}, routed_matches: [{kind, name, score, source_hint}], command_execution_messages: [str], tool_execution_messages: [str], turn: {prompt, output, stop_reason}, persisted_session_path} Exit codes (unchanged from pre-#168): 0 = success 1 = exec not-found (exec-command, exec-tool only) Backward compatibility: - Default (no --output-format) is 'text' - exec-command/exec-tool text output byte-identical - route text output: unchanged tab-separated kind/name/score/source_hint - bootstrap text output: unchanged Markdown runtime session report Tests (13 new, test_exec_route_bootstrap_output_format.py): - TestExecCommandOutputFormat (3): handled + not-found JSON; text compat - TestExecToolOutputFormat (3): handled + not-found JSON; text compat - TestRouteOutputFormat (3): JSON envelope; zero-matches case; text compat - TestBootstrapOutputFormat (2): JSON envelope; text-mode Markdown compat - TestFamilyWideJsonParity (2): parametrised over ALL 6 family commands (show-command, show-tool, 
exec-command, exec-tool, route, bootstrap) — every one accepts --output-format json and emits parseable JSON; every one defaults to text mode without a leading {. One future regression on any family member breaks this test. Full suite: 124 → 137 passing, zero regression. Closes ROADMAP #168. This completes the CLI-wide JSON parity sweep: - Session-lifecycle family: #160 (list/delete), #165 (load), #166 (flush) - Inspect family: #167 (show-command, show-tool) - Work-verb family: #168 (exec-command, exec-tool, route, bootstrap) ENTIRE CLI SURFACE is now machine-readable via --output-format json with typed errors, deterministic exit codes, and consistent envelope shape. Claws no longer need to regex-parse any CLI output. Related clusters: - Clawability principle: 'machine-readable in state and failure modes' (ROADMAP top-level). 9 pinpoints in this cluster; all now landed. - Typed-error envelope consistency: command_not_found / tool_not_found / session_not_found / session_load_failed all share {kind, message, retryable} shape. - Work-verb semantics: exec-* surfaces expose 'handled' boolean (not 'found') because 'not handled' is the operational signal — claws dispatch on whether the work was performed, not whether the entry exists in the inventory.
This commit is contained in:

Changed files: src/main.py (114 lines changed)
@@ -55,10 +55,14 @@ def build_parser() -> argparse.ArgumentParser:
     route_parser = subparsers.add_parser('route', help='route a prompt across mirrored command/tool inventories')
     route_parser.add_argument('prompt')
     route_parser.add_argument('--limit', type=int, default=5)
+    # #168: parity with show-command/show-tool/session-lifecycle CLI family
+    route_parser.add_argument('--output-format', choices=['text', 'json'], default='text')

     bootstrap_parser = subparsers.add_parser('bootstrap', help='build a runtime-style session report from the mirrored inventories')
     bootstrap_parser.add_argument('prompt')
     bootstrap_parser.add_argument('--limit', type=int, default=5)
+    # #168: parity with CLI family
+    bootstrap_parser.add_argument('--output-format', choices=['text', 'json'], default='text')

     loop_parser = subparsers.add_parser('turn-loop', help='run a small stateful turn loop for the mirrored runtime')
     loop_parser.add_argument('prompt')
@@ -165,10 +169,14 @@ def build_parser() -> argparse.ArgumentParser:
     exec_command_parser = subparsers.add_parser('exec-command', help='execute a mirrored command shim by exact name')
     exec_command_parser.add_argument('name')
     exec_command_parser.add_argument('prompt')
+    # #168: parity with CLI family
+    exec_command_parser.add_argument('--output-format', choices=['text', 'json'], default='text')

     exec_tool_parser = subparsers.add_parser('exec-tool', help='execute a mirrored tool shim by exact name')
     exec_tool_parser.add_argument('name')
     exec_tool_parser.add_argument('payload')
+    # #168: parity with CLI family
+    exec_tool_parser.add_argument('--output-format', choices=['text', 'json'], default='text')
     return parser

@@ -222,6 +230,25 @@ def main(argv: list[str] | None = None) -> int:
         return 0
     if args.command == 'route':
         matches = PortRuntime().route_prompt(args.prompt, limit=args.limit)
+        # #168: JSON envelope for machine parsing
+        if args.output_format == 'json':
+            import json
+            envelope = {
+                'prompt': args.prompt,
+                'limit': args.limit,
+                'match_count': len(matches),
+                'matches': [
+                    {
+                        'kind': m.kind,
+                        'name': m.name,
+                        'score': m.score,
+                        'source_hint': m.source_hint,
+                    }
+                    for m in matches
+                ],
+            }
+            print(json.dumps(envelope))
+            return 0
         if not matches:
             print('No mirrored command/tool matches found.')
             return 0
@@ -229,7 +256,40 @@ def main(argv: list[str] | None = None) -> int:
         print(f'{match.kind}\t{match.name}\t{match.score}\t{match.source_hint}')
         return 0
     if args.command == 'bootstrap':
-        print(PortRuntime().bootstrap_session(args.prompt, limit=args.limit).as_markdown())
+        session = PortRuntime().bootstrap_session(args.prompt, limit=args.limit)
+        # #168: JSON envelope for machine parsing
+        if args.output_format == 'json':
+            import json
+            envelope = {
+                'prompt': session.prompt,
+                'limit': args.limit,
+                'setup': {
+                    'python_version': session.setup.python_version,
+                    'implementation': session.setup.implementation,
+                    'platform_name': session.setup.platform_name,
+                    'test_command': session.setup.test_command,
+                },
+                'routed_matches': [
+                    {
+                        'kind': m.kind,
+                        'name': m.name,
+                        'score': m.score,
+                        'source_hint': m.source_hint,
+                    }
+                    for m in session.routed_matches
+                ],
+                'command_execution_messages': list(session.command_execution_messages),
+                'tool_execution_messages': list(session.tool_execution_messages),
+                'turn': {
+                    'prompt': session.turn_result.prompt,
+                    'output': session.turn_result.output,
+                    'stop_reason': session.turn_result.stop_reason,
+                },
+                'persisted_session_path': session.persisted_session_path,
+            }
+            print(json.dumps(envelope))
+            return 0
+        print(session.as_markdown())
         return 0
     if args.command == 'turn-loop':
         results = PortRuntime().run_turn_loop(
@@ -455,11 +515,59 @@ def main(argv: list[str] | None = None) -> int:
         return 0
     if args.command == 'exec-command':
         result = execute_command(args.name, args.prompt)
-        print(result.message)
+        # #168: JSON envelope with typed not-found error
+        if args.output_format == 'json':
+            import json
+            if not result.handled:
+                envelope = {
+                    'name': args.name,
+                    'prompt': args.prompt,
+                    'handled': False,
+                    'error': {
+                        'kind': 'command_not_found',
+                        'message': result.message,
+                        'retryable': False,
+                    },
+                }
+            else:
+                envelope = {
+                    'name': result.name,
+                    'prompt': result.prompt,
+                    'source_hint': result.source_hint,
+                    'handled': True,
+                    'message': result.message,
+                }
+            print(json.dumps(envelope))
+        else:
+            print(result.message)
         return 0 if result.handled else 1
     if args.command == 'exec-tool':
         result = execute_tool(args.name, args.payload)
-        print(result.message)
+        # #168: JSON envelope with typed not-found error
+        if args.output_format == 'json':
+            import json
+            if not result.handled:
+                envelope = {
+                    'name': args.name,
+                    'payload': args.payload,
+                    'handled': False,
+                    'error': {
+                        'kind': 'tool_not_found',
+                        'message': result.message,
+                        'retryable': False,
+                    },
+                }
+            else:
+                envelope = {
+                    'name': result.name,
+                    'payload': result.payload,
+                    'source_hint': result.source_hint,
+                    'handled': True,
+                    'message': result.message,
+                }
+            print(json.dumps(envelope))
+        else:
+            print(result.message)
+        return 0 if result.handled else 1
     parser.error(f'unknown command: {args.command}')
     return 2
Reference in New Issue
Block a user