mirror of
https://github.com/tvytlx/ai-agent-deep-dive.git
synced 2026-04-05 00:24:50 +08:00
Add extracted source directory and README navigation
This commit is contained in:
9
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/index.js
generated
vendored
Normal file
9
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/index.js
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Public entry point for @anthropic-ai/sandbox-runtime: re-exports the
// sandbox manager, violation store, config schemas, and platform helpers.
// Library exports
export { SandboxManager } from './sandbox/sandbox-manager.js';
export { SandboxViolationStore } from './sandbox/sandbox-violation-store.js';
export { SandboxRuntimeConfigSchema, NetworkConfigSchema, FilesystemConfigSchema, IgnoreViolationsConfigSchema, RipgrepConfigSchema, } from './sandbox/sandbox-config.js';
// Utility functions
export { getDefaultWritePaths } from './sandbox/sandbox-utils.js';
// Platform utilities
export { getWslVersion } from './utils/platform.js';
//# sourceMappingURL=index.js.map
263
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/generate-seccomp-filter.js
generated
vendored
Normal file
263
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/generate-seccomp-filter.js
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
import { join, dirname } from 'node:path';
|
||||
import { fileURLToPath } from 'node:url';
|
||||
import * as fs from 'node:fs';
|
||||
import { execSync } from 'node:child_process';
|
||||
import { homedir } from 'node:os';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
// Cache for path lookups (key: explicit path or empty string, value: resolved path or null)
// Two separate memoization maps: one for the pre-generated BPF filter file,
// one for the apply-seccomp binary. A cached `null` means "searched and not found".
const bpfPathCache = new Map();
const applySeccompPathCache = new Map();
// Cache for global npm paths (computed once per process)
// null until getGlobalNpmPaths() first runs; afterwards an array of candidate
// install roots for @anthropic-ai/sandbox-runtime.
let cachedGlobalNpmPaths = null;
/**
 * Get paths to check for a globally installed @anthropic-ai/sandbox-runtime package.
 * Used as a fallback when the seccomp binaries aren't bundled (e.g., native builds).
 * The result is memoized in `cachedGlobalNpmPaths` for the lifetime of the process.
 */
function getGlobalNpmPaths() {
    if (cachedGlobalNpmPaths) {
        return cachedGlobalNpmPaths;
    }
    const candidates = [];
    // Prefer the real global root reported by npm itself.
    try {
        const globalRoot = execSync('npm root -g', {
            encoding: 'utf8',
            timeout: 5000,
            stdio: ['pipe', 'pipe', 'ignore'],
        }).trim();
        if (globalRoot) {
            candidates.push(join(globalRoot, '@anthropic-ai', 'sandbox-runtime'));
        }
    }
    catch {
        // npm not available or failed
    }
    // Well-known global install roots, appended as fallbacks in a fixed order.
    const userHome = homedir();
    const fallbackRoots = [
        join('/usr', 'lib', 'node_modules'), // npm global (Linux/macOS)
        join('/usr', 'local', 'lib', 'node_modules'),
        join('/opt', 'homebrew', 'lib', 'node_modules'), // homebrew npm prefix (macOS)
        join(userHome, '.npm', 'lib', 'node_modules'), // user-local npm global
        join(userHome, '.npm-global', 'lib', 'node_modules'),
    ];
    for (const root of fallbackRoots) {
        candidates.push(join(root, '@anthropic-ai', 'sandbox-runtime'));
    }
    cachedGlobalNpmPaths = candidates;
    return candidates;
}
/**
 * Map Node.js process.arch to the vendor directory architecture names.
 * Only x64 and arm64 have pre-built seccomp artifacts; anything else yields null.
 */
function getVendorArchitecture() {
    const arch = process.arch;
    if (arch === 'x64' || arch === 'x86_64') {
        return 'x64';
    }
    if (arch === 'arm64' || arch === 'aarch64') {
        return 'arm64';
    }
    if (arch === 'ia32' || arch === 'x86') {
        // TODO: Add support for 32-bit x86 (ia32).
        // Currently blocked because the seccomp filter does not block the socketcall()
        // syscall, which on 32-bit x86 multiplexes ALL socket operations (socket,
        // socketpair, bind, connect, ...) — the direct socket() syscall doesn't exist
        // there. Supporting it would require:
        //   1. Building a separate i386 BPF filter (BPF bytecode is architecture-specific)
        //   2. Extending vendor/seccomp-src/seccomp-unix-block.c to block
        //      socketcall(SYS_SOCKET, [AF_UNIX, ...]) and socketcall(SYS_SOCKETPAIR, [AF_UNIX, ...])
        //   3. Complex BPF logic to inspect socketcall's sub-function argument
        // Until then, 32-bit x86 is rejected to avoid a security bypass.
        logForDebugging(`[SeccompFilter] 32-bit x86 (ia32) is not currently supported due to missing socketcall() syscall blocking. ` +
            `The current seccomp filter only blocks socket(AF_UNIX, ...), but on 32-bit x86, socketcall() can be used to bypass this.`, { level: 'error' });
        return null;
    }
    logForDebugging(`[SeccompFilter] Unsupported architecture: ${arch}. Only x64 and arm64 are supported.`);
    return null;
}
/**
 * Get local candidate paths for a seccomp artifact (bundled or package installs).
 * Returns [] when the current architecture has no vendor directory.
 */
function getLocalSeccompPaths(filename) {
    const arch = getVendorArchitecture();
    if (!arch) {
        return [];
    }
    const hereDir = dirname(fileURLToPath(import.meta.url));
    const vendorRelative = join('vendor', 'seccomp', arch, filename);
    // Three layouts, tried in order:
    //   - bundled: artifact sits next to the bundle (e.g. bundled into claude-cli)
    //   - package root: <pkg>/vendor/seccomp/... (standard npm install)
    //   - dist: <pkg>/dist/vendor/seccomp/... (some bundlers)
    return [
        join(hereDir, vendorRelative),
        join(hereDir, '..', '..', vendorRelative),
        join(hereDir, '..', vendorRelative),
    ];
}
/**
 * Get the path to a pre-generated BPF filter file (vendor/seccomp/{x64,arm64}/unix-block.bpf).
 * Returns the path if it exists, null otherwise.
 *
 * Search order (see findBpfPath):
 *   0. Explicit path from `seccompBinaryPath` (checked first if provided)
 *   1. Bundled next to this module
 *   2. Package root (standard npm installs)
 *   3. dist/vendor (bundlers)
 *   4. Global npm install (native builds without bundled vendor dir)
 *
 * Results — including "not found" (null) — are memoized per explicit path.
 *
 * @param seccompBinaryPath - Optional explicit path to the BPF filter file.
 */
export function getPreGeneratedBpfPath(seccompBinaryPath) {
    const key = seccompBinaryPath ?? '';
    if (!bpfPathCache.has(key)) {
        bpfPathCache.set(key, findBpfPath(seccompBinaryPath));
    }
    return bpfPathCache.get(key);
}
// NOTE: This is a slow operation (synchronous fs lookups + execSync). Ensure calls
// are memoized at the top level (see getPreGeneratedBpfPath) rather than invoked repeatedly.
function findBpfPath(seccompBinaryPath) {
    // Highest priority: an explicitly provided path.
    if (seccompBinaryPath) {
        if (fs.existsSync(seccompBinaryPath)) {
            logForDebugging(`[SeccompFilter] Using BPF filter from explicit path: ${seccompBinaryPath}`);
            return seccompBinaryPath;
        }
        logForDebugging(`[SeccompFilter] Explicit path provided but file not found: ${seccompBinaryPath}`);
    }
    const arch = getVendorArchitecture();
    if (!arch) {
        logForDebugging(`[SeccompFilter] Cannot find pre-generated BPF filter: unsupported architecture ${process.arch}`);
        return null;
    }
    logForDebugging(`[SeccompFilter] Detected architecture: ${arch}`);
    // Local candidates: bundled location or package install layouts.
    for (const candidate of getLocalSeccompPaths('unix-block.bpf')) {
        if (!fs.existsSync(candidate)) {
            continue;
        }
        logForDebugging(`[SeccompFilter] Found pre-generated BPF filter: ${candidate} (${arch})`);
        return candidate;
    }
    // Last resort: global npm install (native builds without a bundled vendor dir).
    for (const globalBase of getGlobalNpmPaths()) {
        const candidate = join(globalBase, 'vendor', 'seccomp', arch, 'unix-block.bpf');
        if (!fs.existsSync(candidate)) {
            continue;
        }
        logForDebugging(`[SeccompFilter] Found pre-generated BPF filter in global install: ${candidate} (${arch})`);
        return candidate;
    }
    logForDebugging(`[SeccompFilter] Pre-generated BPF filter not found in any expected location (${arch})`);
    return null;
}
/**
 * Get the path to the apply-seccomp binary (vendor/seccomp/{x64,arm64}/apply-seccomp).
 * Returns the path if it exists, null otherwise.
 *
 * Search order (see findApplySeccompPath):
 *   0. Explicit path from `seccompBinaryPath` (checked first if provided)
 *   1. Bundled next to this module
 *   2. Package root (standard npm installs)
 *   3. dist/vendor (bundlers)
 *   4. Global npm install (native builds without bundled vendor dir)
 *
 * Results — including "not found" (null) — are memoized per explicit path.
 *
 * @param seccompBinaryPath - Optional explicit path to the apply-seccomp binary.
 */
export function getApplySeccompBinaryPath(seccompBinaryPath) {
    const key = seccompBinaryPath ?? '';
    if (!applySeccompPathCache.has(key)) {
        applySeccompPathCache.set(key, findApplySeccompPath(seccompBinaryPath));
    }
    return applySeccompPathCache.get(key);
}
// Uncached search for the apply-seccomp helper binary; see getApplySeccompBinaryPath
// for the memoized entry point and the documented search order.
function findApplySeccompPath(seccompBinaryPath) {
    // Highest priority: an explicitly provided path.
    if (seccompBinaryPath) {
        if (fs.existsSync(seccompBinaryPath)) {
            logForDebugging(`[SeccompFilter] Using apply-seccomp binary from explicit path: ${seccompBinaryPath}`);
            return seccompBinaryPath;
        }
        logForDebugging(`[SeccompFilter] Explicit path provided but file not found: ${seccompBinaryPath}`);
    }
    const arch = getVendorArchitecture();
    if (!arch) {
        logForDebugging(`[SeccompFilter] Cannot find apply-seccomp binary: unsupported architecture ${process.arch}`);
        return null;
    }
    logForDebugging(`[SeccompFilter] Looking for apply-seccomp binary for architecture: ${arch}`);
    // Local candidates: bundled location or package install layouts.
    for (const candidate of getLocalSeccompPaths('apply-seccomp')) {
        if (!fs.existsSync(candidate)) {
            continue;
        }
        logForDebugging(`[SeccompFilter] Found apply-seccomp binary: ${candidate} (${arch})`);
        return candidate;
    }
    // Last resort: global npm install (native builds without a bundled vendor dir).
    for (const globalBase of getGlobalNpmPaths()) {
        const candidate = join(globalBase, 'vendor', 'seccomp', arch, 'apply-seccomp');
        if (!fs.existsSync(candidate)) {
            continue;
        }
        logForDebugging(`[SeccompFilter] Found apply-seccomp binary in global install: ${candidate} (${arch})`);
        return candidate;
    }
    logForDebugging(`[SeccompFilter] apply-seccomp binary not found in any expected location (${arch})`);
    return null;
}
/**
 * Get the path to a pre-generated seccomp BPF filter that blocks Unix domain
 * socket creation, or null if not available for this architecture.
 *
 * The filter blocks socket(AF_UNIX, ...) syscalls while allowing all others,
 * preventing creation of new Unix domain socket file descriptors.
 *
 * Security scope:
 * - Blocks: socket(AF_UNIX, ...) (creating new Unix socket FDs)
 * - Does NOT block: operations on inherited Unix socket FDs (bind, connect, sendto, ...)
 * - Does NOT block: Unix socket FDs passed via SCM_RIGHTS
 * - For most sandboxing scenarios, blocking socket creation is sufficient
 *
 * Note: this blocks ALL Unix socket creation regardless of path. The
 * allowUnixSockets configuration is not supported on Linux because seccomp-bpf
 * cannot read user-space memory to inspect socket paths.
 *
 * Pre-generated BPF filters are included for x64 and arm64 only.
 *
 * @param seccompBinaryPath - Optional explicit path to the BPF filter file
 * @returns Path to the pre-generated BPF filter file, or null if not available
 */
export function generateSeccompFilter(seccompBinaryPath) {
    const bpfPath = getPreGeneratedBpfPath(seccompBinaryPath);
    if (!bpfPath) {
        logForDebugging('[SeccompFilter] Pre-generated BPF filter not available for this architecture. ' +
            'Only x64 and arm64 are supported.', { level: 'error' });
        return null;
    }
    logForDebugging('[SeccompFilter] Using pre-generated BPF filter');
    return bpfPath;
}
/**
 * Clean up a seccomp filter file.
 *
 * Deliberately a no-op: only pre-generated BPF files from vendor/ are used,
 * and those must never be deleted. The function is kept so existing callers
 * that invoke it continue to work unchanged.
 *
 * @param _filterPath - ignored
 */
export function cleanupSeccompFilter(_filterPath) {
    // Intentionally empty: pre-generated BPF files are never cleaned up.
}
//# sourceMappingURL=generate-seccomp-filter.js.map
|
||||
217
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/http-proxy.js
generated
vendored
Normal file
217
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/http-proxy.js
generated
vendored
Normal file
@@ -0,0 +1,217 @@
|
||||
import { Agent, createServer } from 'node:http';
|
||||
import { request as httpRequest } from 'node:http';
|
||||
import { request as httpsRequest } from 'node:https';
|
||||
import { connect } from 'node:net';
|
||||
import { URL } from 'node:url';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
/**
 * Create an HTTP proxy server that enforces a network allowlist.
 *
 * Two traffic paths are handled:
 *  - CONNECT (HTTPS tunnels): the target host/port is parsed from req.url,
 *    checked via options.filter, then either tunneled directly or relayed
 *    through a MITM proxy reachable over a Unix socket.
 *  - Plain HTTP requests: the absolute URL is parsed, checked via
 *    options.filter, then forwarded directly or via the MITM Unix socket.
 *
 * @param options.filter - async (port, hostname, socket) => boolean; false blocks the connection
 * @param options.getMitmSocketPath - optional (hostname) => Unix socket path for MITM routing
 * @returns the configured (but not yet listening) http.Server
 */
export function createHttpProxyServer(options) {
    const server = createServer();
    // Handle CONNECT requests for HTTPS traffic
    server.on('connect', async (req, socket) => {
        // Attach error handler immediately to prevent unhandled errors
        socket.on('error', err => {
            logForDebugging(`Client socket error: ${err.message}`, { level: 'error' });
        });
        try {
            // NOTE(review): split(':') assumes "host:port" — bracketed IPv6
            // literals in CONNECT targets would not parse here; confirm callers
            // never send them.
            const [hostname, portStr] = req.url.split(':');
            const port = portStr === undefined ? undefined : parseInt(portStr, 10);
            if (!hostname || !port) {
                logForDebugging(`Invalid CONNECT request: ${req.url}`, {
                    level: 'error',
                });
                socket.end('HTTP/1.1 400 Bad Request\r\n\r\n');
                return;
            }
            // Allowlist decision is delegated entirely to the caller-supplied filter.
            const allowed = await options.filter(port, hostname, socket);
            if (!allowed) {
                logForDebugging(`Connection blocked to ${hostname}:${port}`, {
                    level: 'error',
                });
                socket.end('HTTP/1.1 403 Forbidden\r\n' +
                    'Content-Type: text/plain\r\n' +
                    'X-Proxy-Error: blocked-by-allowlist\r\n' +
                    '\r\n' +
                    'Connection blocked by network allowlist');
                return;
            }
            // Check if this host should be routed through a MITM proxy
            const mitmSocketPath = options.getMitmSocketPath?.(hostname);
            if (mitmSocketPath) {
                // Route through MITM proxy via Unix socket
                logForDebugging(`Routing CONNECT ${hostname}:${port} through MITM proxy at ${mitmSocketPath}`);
                const mitmSocket = connect({ path: mitmSocketPath }, () => {
                    // Send CONNECT request to the MITM proxy
                    mitmSocket.write(`CONNECT ${hostname}:${port} HTTP/1.1\r\n` +
                        `Host: ${hostname}:${port}\r\n` +
                        '\r\n');
                });
                // Buffer to accumulate the MITM proxy's response
                let responseBuffer = '';
                const onMitmData = (chunk) => {
                    responseBuffer += chunk.toString();
                    // Check if we've received the full HTTP response headers
                    const headerEndIndex = responseBuffer.indexOf('\r\n\r\n');
                    if (headerEndIndex !== -1) {
                        // Remove data listener, we're done parsing the response
                        mitmSocket.removeListener('data', onMitmData);
                        // Check if MITM proxy accepted the connection
                        const statusLine = responseBuffer.substring(0, responseBuffer.indexOf('\r\n'));
                        if (statusLine.includes(' 200 ')) {
                            // Connection established, now pipe data between client and MITM
                            socket.write('HTTP/1.1 200 Connection Established\r\n\r\n');
                            // If there's any data after the headers, write it to the client
                            const remainingData = responseBuffer.substring(headerEndIndex + 4);
                            if (remainingData.length > 0) {
                                socket.write(remainingData);
                            }
                            mitmSocket.pipe(socket);
                            socket.pipe(mitmSocket);
                        }
                        else {
                            logForDebugging(`MITM proxy rejected CONNECT: ${statusLine}`, {
                                level: 'error',
                            });
                            socket.end('HTTP/1.1 502 Bad Gateway\r\n\r\n');
                            mitmSocket.destroy();
                        }
                    }
                };
                mitmSocket.on('data', onMitmData);
                mitmSocket.on('error', err => {
                    logForDebugging(`MITM proxy connection failed: ${err.message}`, {
                        level: 'error',
                    });
                    socket.end('HTTP/1.1 502 Bad Gateway\r\n\r\n');
                });
                socket.on('error', err => {
                    logForDebugging(`Client socket error: ${err.message}`, {
                        level: 'error',
                    });
                    mitmSocket.destroy();
                });
                // Mirror half-close in both directions so the tunnel shuts down cleanly.
                socket.on('end', () => mitmSocket.end());
                mitmSocket.on('end', () => socket.end());
            }
            else {
                // Direct connection (original behavior)
                const serverSocket = connect(port, hostname, () => {
                    socket.write('HTTP/1.1 200 Connection Established\r\n\r\n');
                    serverSocket.pipe(socket);
                    socket.pipe(serverSocket);
                });
                serverSocket.on('error', err => {
                    logForDebugging(`CONNECT tunnel failed: ${err.message}`, {
                        level: 'error',
                    });
                    socket.end('HTTP/1.1 502 Bad Gateway\r\n\r\n');
                });
                socket.on('error', err => {
                    logForDebugging(`Client socket error: ${err.message}`, {
                        level: 'error',
                    });
                    serverSocket.destroy();
                });
                // Mirror half-close in both directions so the tunnel shuts down cleanly.
                socket.on('end', () => serverSocket.end());
                serverSocket.on('end', () => socket.end());
            }
        }
        catch (err) {
            logForDebugging(`Error handling CONNECT: ${err}`, { level: 'error' });
            socket.end('HTTP/1.1 500 Internal Server Error\r\n\r\n');
        }
    });
    // Handle regular HTTP requests
    server.on('request', async (req, res) => {
        try {
            // Proxy-style requests carry the absolute URL in req.url.
            const url = new URL(req.url);
            const hostname = url.hostname;
            const port = url.port
                ? parseInt(url.port, 10)
                : url.protocol === 'https:'
                    ? 443
                    : 80;
            const allowed = await options.filter(port, hostname, req.socket);
            if (!allowed) {
                logForDebugging(`HTTP request blocked to ${hostname}:${port}`, {
                    level: 'error',
                });
                res.writeHead(403, {
                    'Content-Type': 'text/plain',
                    'X-Proxy-Error': 'blocked-by-allowlist',
                });
                res.end('Connection blocked by network allowlist');
                return;
            }
            // Check if this host should be routed through a MITM proxy
            const mitmSocketPath = options.getMitmSocketPath?.(hostname);
            if (mitmSocketPath) {
                // Route through MITM proxy via Unix socket
                // Use an agent that connects via the Unix socket
                logForDebugging(`Routing HTTP ${req.method} ${hostname}:${port} through MITM proxy at ${mitmSocketPath}`);
                const mitmAgent = new Agent({
                    // @ts-expect-error - socketPath is valid but not in types
                    socketPath: mitmSocketPath,
                });
                // Send request to MITM proxy with full URL (proxy-style request)
                const proxyReq = httpRequest({
                    agent: mitmAgent,
                    // For proxy requests, path should be the full URL
                    path: req.url,
                    method: req.method,
                    headers: {
                        ...req.headers,
                        host: url.host,
                    },
                }, proxyRes => {
                    res.writeHead(proxyRes.statusCode, proxyRes.headers);
                    proxyRes.pipe(res);
                });
                proxyReq.on('error', err => {
                    logForDebugging(`MITM proxy request failed: ${err.message}`, {
                        level: 'error',
                    });
                    if (!res.headersSent) {
                        res.writeHead(502, { 'Content-Type': 'text/plain' });
                        res.end('Bad Gateway');
                    }
                });
                req.pipe(proxyReq);
            }
            else {
                // Direct request (original behavior)
                // Choose http or https module
                const requestFn = url.protocol === 'https:' ? httpsRequest : httpRequest;
                const proxyReq = requestFn({
                    hostname,
                    port,
                    path: url.pathname + url.search,
                    method: req.method,
                    headers: {
                        ...req.headers,
                        host: url.host,
                    },
                }, proxyRes => {
                    res.writeHead(proxyRes.statusCode, proxyRes.headers);
                    proxyRes.pipe(res);
                });
                proxyReq.on('error', err => {
                    logForDebugging(`Proxy request failed: ${err.message}`, {
                        level: 'error',
                    });
                    if (!res.headersSent) {
                        res.writeHead(502, { 'Content-Type': 'text/plain' });
                        res.end('Bad Gateway');
                    }
                });
                req.pipe(proxyReq);
            }
        }
        catch (err) {
            logForDebugging(`Error handling HTTP request: ${err}`, { level: 'error' });
            res.writeHead(500, { 'Content-Type': 'text/plain' });
            res.end('Internal Server Error');
        }
    });
    return server;
}
//# sourceMappingURL=http-proxy.js.map
|
||||
875
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/linux-sandbox-utils.js
generated
vendored
Normal file
875
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/linux-sandbox-utils.js
generated
vendored
Normal file
@@ -0,0 +1,875 @@
|
||||
import shellquote from 'shell-quote';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
import { whichSync } from '../utils/which.js';
|
||||
import { randomBytes } from 'node:crypto';
|
||||
import * as fs from 'fs';
|
||||
import { spawn } from 'node:child_process';
|
||||
import { tmpdir } from 'node:os';
|
||||
import path, { join } from 'node:path';
|
||||
import { ripGrep } from '../utils/ripgrep.js';
|
||||
import { generateProxyEnvVars, normalizePathForSandbox, normalizeCaseForComparison, isSymlinkOutsideBoundary, DANGEROUS_FILES, getDangerousDirectories, } from './sandbox-utils.js';
|
||||
import { generateSeccompFilter, cleanupSeccompFilter, getPreGeneratedBpfPath, getApplySeccompBinaryPath, } from './generate-seccomp-filter.js';
|
||||
/** Default max depth for searching dangerous files (used by linuxGetMandatoryDenyPaths). */
const DEFAULT_MANDATORY_DENY_SEARCH_DEPTH = 3;
/**
 * Walk each component of `targetPath` and return the first component that is a
 * symlink lying inside one of `allowedWritePaths`, or null if there is none.
 *
 * Used to detect and block symlink-replacement attacks where an attacker could
 * delete a symlink and create a real directory with malicious content.
 */
function findSymlinkInPath(targetPath, allowedWritePaths) {
    let prefix = '';
    for (const component of targetPath.split(path.sep)) {
        if (!component) {
            continue; // skip the empty segment produced by a leading separator
        }
        const candidate = prefix + path.sep + component;
        try {
            if (fs.lstatSync(candidate).isSymbolicLink()) {
                // Only symlinks inside an allowed write path are reportable.
                const withinAllowed = allowedWritePaths.some(root => candidate === root || candidate.startsWith(root + '/'));
                if (withinAllowed) {
                    return candidate;
                }
            }
        }
        catch {
            // This component doesn't exist, so nothing deeper can be a symlink.
            break;
        }
        prefix = candidate;
    }
    return null;
}
/**
 * Return true if any existing component of `targetPath` is a regular file
 * rather than a directory — meaning the target can never be created, because
 * you can't mkdir underneath a file.
 *
 * This handles the git worktree case: .git is a file there, so .git/hooks can
 * never exist and there's nothing to deny.
 */
function hasFileAncestor(targetPath) {
    let prefix = '';
    for (const segment of targetPath.split(path.sep)) {
        if (!segment) {
            continue; // skip the empty segment produced by a leading separator
        }
        const candidate = prefix + path.sep + segment;
        let info;
        try {
            info = fs.statSync(candidate);
        }
        catch {
            // Component doesn't exist — no further checking possible.
            return false;
        }
        // NOTE(review): statSync follows symlinks, so isSymbolicLink() can never
        // be true here; lstatSync would be required for that branch to trigger.
        // Kept as-is to preserve behavior — confirm intent before changing.
        if (info.isFile() || info.isSymbolicLink()) {
            return true;
        }
        prefix = candidate;
    }
    return false;
}
/**
 * Return the first path component of `targetPath` that does not exist.
 * E.g. for "/existing/parent/nonexistent/child/file.txt" where /existing/parent
 * exists, returns "/existing/parent/nonexistent".
 *
 * Used to block creation of non-existent deny paths by mounting /dev/null at
 * the first missing component, preventing mkdir from creating the parents.
 * If every component exists, the full targetPath is returned (callers are not
 * expected to hit that case).
 */
function findFirstNonExistentComponent(targetPath) {
    let prefix = '';
    for (const segment of targetPath.split(path.sep)) {
        if (!segment) {
            continue; // skip the empty segment produced by a leading separator
        }
        const candidate = prefix + path.sep + segment;
        if (!fs.existsSync(candidate)) {
            return candidate;
        }
        prefix = candidate;
    }
    return targetPath; // shouldn't be reached if called correctly
}
/**
 * Get mandatory deny paths using ripgrep (Linux only).
 * Uses a SINGLE ripgrep call with multiple glob patterns for efficiency.
 * With --max-depth limiting, this is fast enough to run on each command without memoization.
 *
 * @param ripgrepConfig - passed through to ripGrep(); defaults to `rg` on PATH
 * @param maxDepth - maximum directory depth below cwd to scan
 * @param allowGitConfig - when true, .git/config paths are NOT added to the deny list
 * @param abortSignal - optional signal used to cancel the ripgrep scan
 * @returns de-duplicated list of absolute paths to deny write access to
 */
async function linuxGetMandatoryDenyPaths(ripgrepConfig = { command: 'rg' }, maxDepth = DEFAULT_MANDATORY_DENY_SEARCH_DEPTH, allowGitConfig = false, abortSignal) {
    const cwd = process.cwd();
    // Use provided signal or create a fallback controller
    const fallbackController = new AbortController();
    const signal = abortSignal ?? fallbackController.signal;
    const dangerousDirectories = getDangerousDirectories();
    // Note: Settings files are added at the callsite in sandbox-manager.ts
    const denyPaths = [
        // Dangerous files in CWD
        ...DANGEROUS_FILES.map(f => path.resolve(cwd, f)),
        // Dangerous directories in CWD
        ...dangerousDirectories.map(d => path.resolve(cwd, d)),
    ];
    // Git hooks and config are only denied when .git exists as a directory.
    // In git worktrees, .git is a file (e.g., "gitdir: /path/..."), so
    // .git/hooks can never exist — denying it would cause bwrap to fail.
    // When .git doesn't exist at all, mounting at .git would block its
    // creation and break git init.
    const dotGitPath = path.resolve(cwd, '.git');
    let dotGitIsDirectory = false;
    try {
        dotGitIsDirectory = fs.statSync(dotGitPath).isDirectory();
    }
    catch {
        // .git doesn't exist
    }
    if (dotGitIsDirectory) {
        // Git hooks always blocked for security
        denyPaths.push(path.resolve(cwd, '.git/hooks'));
        // Git config conditionally blocked based on allowGitConfig setting
        if (!allowGitConfig) {
            denyPaths.push(path.resolve(cwd, '.git/config'));
        }
    }
    // Build iglob args for all patterns in one ripgrep call
    const iglobArgs = [];
    for (const fileName of DANGEROUS_FILES) {
        iglobArgs.push('--iglob', fileName);
    }
    for (const dirName of dangerousDirectories) {
        iglobArgs.push('--iglob', `**/${dirName}/**`);
    }
    // Git hooks always blocked in nested repos
    iglobArgs.push('--iglob', '**/.git/hooks/**');
    // Git config conditionally blocked in nested repos
    if (!allowGitConfig) {
        iglobArgs.push('--iglob', '**/.git/config');
    }
    // Single ripgrep call to find all dangerous paths in subdirectories
    // Limit depth for performance - deeply nested dangerous files are rare
    // and the security benefit doesn't justify the traversal cost
    let matches = [];
    try {
        matches = await ripGrep([
            '--files',
            '--hidden',
            '--max-depth',
            String(maxDepth),
            ...iglobArgs,
            '-g',
            '!**/node_modules/**',
        ], cwd, signal, ripgrepConfig);
    }
    catch (error) {
        // Best-effort: a failed scan still leaves the CWD-level deny paths in place.
        logForDebugging(`[Sandbox] ripgrep scan failed: ${error}`);
    }
    // Process matches: map each matched file back to the deny path it implies.
    for (const match of matches) {
        const absolutePath = path.resolve(cwd, match);
        // File inside a dangerous directory -> add the directory path
        let foundDir = false;
        for (const dirName of [...dangerousDirectories, '.git']) {
            const normalizedDirName = normalizeCaseForComparison(dirName);
            const segments = absolutePath.split(path.sep);
            const dirIndex = segments.findIndex(s => normalizeCaseForComparison(s) === normalizedDirName);
            if (dirIndex !== -1) {
                // For .git, we want hooks/ or config, not the whole .git dir
                if (dirName === '.git') {
                    const gitDir = segments.slice(0, dirIndex + 1).join(path.sep);
                    if (match.includes('.git/hooks')) {
                        denyPaths.push(path.join(gitDir, 'hooks'));
                    }
                    else if (match.includes('.git/config')) {
                        denyPaths.push(path.join(gitDir, 'config'));
                    }
                }
                else {
                    denyPaths.push(segments.slice(0, dirIndex + 1).join(path.sep));
                }
                foundDir = true;
                break;
            }
        }
        // Dangerous file match
        if (!foundDir) {
            denyPaths.push(absolutePath);
        }
    }
    // De-duplicate while preserving insertion order.
    return [...new Set(denyPaths)];
}
// Track generated seccomp filters for cleanup on process exit
// (cleanupSeccompFilter is invoked on each entry by the exit handler).
const generatedSeccompFilters = new Set();
// Track mount points created by bwrap for non-existent deny paths.
// When bwrap does --ro-bind /dev/null /nonexistent/path, it creates an empty
// file on the host as a mount point. These persist after bwrap exits and must
// be cleaned up explicitly.
const bwrapMountPoints = new Set();
// Guard so registerExitCleanupHandler() installs its process 'exit' listener only once.
let exitHandlerRegistered = false;
/**
 * Register a process-exit cleanup handler for generated seccomp filters and
 * bwrap mount points. Installs the listener at most once per process.
 */
function registerExitCleanupHandler() {
    if (exitHandlerRegistered) {
        return;
    }
    const runExitCleanup = () => {
        for (const filterPath of generatedSeccompFilters) {
            try {
                cleanupSeccompFilter(filterPath);
            }
            catch {
                // Best-effort only: never let cleanup errors disturb process exit.
            }
        }
        cleanupBwrapMountPoints();
    };
    process.on('exit', runExitCleanup);
    exitHandlerRegistered = true;
}
/**
 * Clean up mount point files created by bwrap for non-existent deny paths.
 *
 * When protecting non-existent deny paths, bwrap creates empty files on the
 * host filesystem as mount points for --ro-bind. These files persist after
 * bwrap exits, so they must be removed explicitly.
 *
 * Call this after each sandboxed command completes to prevent ghost dotfiles
 * (e.g. .bashrc, .gitconfig) from appearing in the working directory. It is
 * also invoked automatically on process exit as a safety net.
 *
 * Safe to call at any time — only entries tracked during
 * generateFilesystemArgs() are touched, and entries that have already been
 * removed (or gained real content) are skipped.
 */
export function cleanupBwrapMountPoints() {
    for (const mountPoint of bwrapMountPoints) {
        try {
            // Only remove entries that still look like the placeholder bwrap
            // left behind; anything with real content is left alone.
            const info = fs.statSync(mountPoint);
            if (info.isFile() && info.size === 0) {
                fs.unlinkSync(mountPoint);
                logForDebugging(`[Sandbox Linux] Cleaned up bwrap mount point (file): ${mountPoint}`);
            }
            else if (info.isDirectory() && fs.readdirSync(mountPoint).length === 0) {
                // Empty directory mount points are created for intermediate
                // components (Fix 2). Only remove if still empty.
                fs.rmdirSync(mountPoint);
                logForDebugging(`[Sandbox Linux] Cleaned up bwrap mount point (dir): ${mountPoint}`);
            }
        }
        catch {
            // Ignore cleanup errors — the entry may have already been removed
        }
    }
    bwrapMountPoints.clear();
}
|
||||
/**
 * Get detailed status of Linux sandbox dependencies.
 * Reports availability of bwrap, socat, the pre-generated seccomp BPF filter,
 * and the apply-seccomp helper binary.
 */
export function getLinuxDependencyStatus(seccompConfig) {
    const hasBwrap = whichSync('bwrap') !== null;
    const hasSocat = whichSync('socat') !== null;
    const hasSeccompBpf = getPreGeneratedBpfPath(seccompConfig?.bpfPath) !== null;
    const hasSeccompApply = getApplySeccompBinaryPath(seccompConfig?.applyPath) !== null;
    return { hasBwrap, hasSocat, hasSeccompBpf, hasSeccompApply };
}
|
||||
/**
 * Check sandbox dependencies and return a structured result.
 * Missing bwrap/socat are hard errors; missing seccomp tooling only downgrades
 * to a warning (unix socket access will not be restricted).
 */
export function checkLinuxDependencies(seccompConfig) {
    const errors = [];
    const warnings = [];
    const requiredTools = [
        ['bwrap', 'bubblewrap (bwrap) not installed'],
        ['socat', 'socat not installed'],
    ];
    for (const [tool, message] of requiredTools) {
        if (whichSync(tool) === null) {
            errors.push(message);
        }
    }
    // Evaluate both lookups unconditionally so each path cache is populated.
    const hasBpf = getPreGeneratedBpfPath(seccompConfig?.bpfPath) !== null;
    const hasApply = getApplySeccompBinaryPath(seccompConfig?.applyPath) !== null;
    if (!(hasBpf && hasApply)) {
        warnings.push('seccomp not available - unix socket access not restricted');
    }
    return { warnings, errors };
}
|
||||
/**
 * Initialize the Linux network bridge for sandbox networking.
 *
 * ARCHITECTURE NOTE:
 * Linux network sandboxing uses bwrap --unshare-net which creates a completely
 * isolated network namespace with NO network access. To enable network access:
 *
 * 1. Host side: socat bridges listen on Unix sockets and forward to host proxies
 *    - HTTP bridge: Unix socket -> host HTTP proxy (for HTTP/HTTPS traffic)
 *    - SOCKS bridge: Unix socket -> host SOCKS5 proxy (for SSH/git traffic)
 *
 * 2. Sandbox side: the Unix sockets are bound into the isolated namespace and
 *    socat listeners run inside it
 *    - HTTP listener on port 3128 -> HTTP Unix socket -> host HTTP proxy
 *    - SOCKS listener on port 1080 -> SOCKS Unix socket -> host SOCKS5 proxy
 *
 * 3. Environment: HTTP_PROXY=http://localhost:3128 for HTTP/HTTPS tools, and
 *    GIT_SSH_COMMAND with socat for SSH through SOCKS5.
 *
 * LIMITATION: Unlike macOS which enforces domain allowlists at the kernel
 * level, --unshare-net is all-or-nothing; domain filtering happens at the host
 * proxy, not the sandbox boundary.
 *
 * DEPENDENCIES: Requires bwrap (bubblewrap) and socat.
 *
 * @param httpProxyPort host port of the HTTP proxy the bridge forwards to
 * @param socksProxyPort host port of the SOCKS5 proxy the bridge forwards to
 * @returns socket paths, bridge child processes, and the host proxy ports
 * @throws if either bridge fails to spawn or the sockets never appear
 */
export async function initializeLinuxNetworkBridge(httpProxyPort, socksProxyPort) {
    const socketId = randomBytes(8).toString('hex');
    const httpSocketPath = join(tmpdir(), `claude-http-${socketId}.sock`);
    const socksSocketPath = join(tmpdir(), `claude-socks-${socketId}.sock`);
    // Best-effort SIGTERM for a bridge process; used when startup fails partway.
    const killQuietly = (proc) => {
        if (proc.pid) {
            try {
                process.kill(proc.pid, 'SIGTERM');
            }
            catch {
                // Ignore errors
            }
        }
    };
    // Spawn a socat bridge (Unix socket -> local TCP port) and attach
    // health-monitoring handlers. Throws if the process fails to start.
    const startBridge = (label, socketPath, tcpPort) => {
        const socatArgs = [
            `UNIX-LISTEN:${socketPath},fork,reuseaddr`,
            `TCP:localhost:${tcpPort},keepalive,keepidle=10,keepintvl=5,keepcnt=3`,
        ];
        logForDebugging(`Starting ${label} bridge: socat ${socatArgs.join(' ')}`);
        const bridge = spawn('socat', socatArgs, {
            stdio: 'ignore',
        });
        if (!bridge.pid) {
            throw new Error(`Failed to start ${label} bridge process`);
        }
        // Error and exit handlers surface bridge health in debug logs.
        bridge.on('error', err => {
            logForDebugging(`${label} bridge process error: ${err}`, { level: 'error' });
        });
        bridge.on('exit', (code, signal) => {
            logForDebugging(`${label} bridge process exited with code ${code}, signal ${signal}`, { level: code === 0 ? 'info' : 'error' });
        });
        return bridge;
    };
    const httpBridgeProcess = startBridge('HTTP', httpSocketPath, httpProxyPort);
    let socksBridgeProcess;
    try {
        socksBridgeProcess = startBridge('SOCKS', socksSocketPath, socksProxyPort);
    }
    catch (err) {
        // Clean up the HTTP bridge before propagating the SOCKS failure.
        killQuietly(httpBridgeProcess);
        throw err;
    }
    // Wait for both sockets to be ready (backoff: 0, 100, 200, ... ms).
    const maxAttempts = 5;
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        const bridgeDied = !httpBridgeProcess.pid ||
            httpBridgeProcess.killed ||
            !socksBridgeProcess.pid ||
            socksBridgeProcess.killed;
        if (bridgeDied) {
            throw new Error('Linux bridge process died unexpectedly');
        }
        try {
            if (fs.existsSync(httpSocketPath) && fs.existsSync(socksSocketPath)) {
                logForDebugging(`Linux bridges ready after ${attempt + 1} attempts`);
                break;
            }
        }
        catch (err) {
            logForDebugging(`Error checking sockets (attempt ${attempt + 1}): ${err}`, {
                level: 'error',
            });
        }
        if (attempt === maxAttempts - 1) {
            // Sockets never appeared — tear down both bridges before failing.
            killQuietly(httpBridgeProcess);
            killQuietly(socksBridgeProcess);
            throw new Error(`Failed to create bridge sockets after ${maxAttempts} attempts`);
        }
        await new Promise(resolve => setTimeout(resolve, attempt * 100));
    }
    return {
        httpSocketPath,
        socksSocketPath,
        httpBridgeProcess,
        socksBridgeProcess,
        httpProxyPort,
        socksProxyPort,
    };
}
|
||||
/**
 * Build the command that runs inside the sandbox.
 * Starts socat listeners (HTTP proxy on port 3128, SOCKS proxy on port 1080)
 * that forward to the bound Unix sockets, then runs the user command — via
 * apply-seccomp when a filter path is supplied, or via plain eval otherwise.
 *
 * @throws if a seccomp filter is requested but the apply-seccomp binary is missing
 */
function buildSandboxCommand(httpSocketPath, socksSocketPath, userCommand, seccompFilterPath, shell, applySeccompPath) {
    // Default to bash for backward compatibility
    const shellPath = shell || 'bash';
    const socatCommands = [
        `socat TCP-LISTEN:3128,fork,reuseaddr UNIX-CONNECT:${httpSocketPath} >/dev/null 2>&1 &`,
        `socat TCP-LISTEN:1080,fork,reuseaddr UNIX-CONNECT:${socksSocketPath} >/dev/null 2>&1 &`,
        'trap "kill %1 %2 2>/dev/null; exit" EXIT',
    ];
    let finalStep;
    if (seccompFilterPath) {
        // apply-seccomp approach:
        // 1. Outer bwrap/bash: starts socat processes (can use Unix sockets)
        // 2. apply-seccomp: applies seccomp filter and execs user command
        // 3. User command runs with seccomp active (Unix sockets blocked)
        //
        // apply-seccomp is a simple C program that sets PR_SET_NO_NEW_PRIVS,
        // applies the seccomp BPF filter via prctl(PR_SET_SECCOMP), and execs
        // the user command. Simpler and more portable than nested bwrap, with
        // no FD redirects needed.
        const applySeccompBinary = getApplySeccompBinaryPath(applySeccompPath);
        if (!applySeccompBinary) {
            throw new Error('apply-seccomp binary not found. This should have been caught earlier. ' +
                'Ensure vendor/seccomp/{x64,arm64}/apply-seccomp binaries are included in the package.');
        }
        finalStep = shellquote.quote([
            applySeccompBinary,
            seccompFilterPath,
            shellPath,
            '-c',
            userCommand,
        ]);
    }
    else {
        // No seccomp filter - run user command directly
        finalStep = `eval ${shellquote.quote([userCommand])}`;
    }
    const innerScript = [...socatCommands, finalStep].join('\n');
    return `${shellPath} -c ${shellquote.quote([innerScript])}`;
}
|
||||
/**
 * Generate filesystem bind mount arguments for bwrap.
 *
 * Write handling: with a writeConfig, the root is mounted read-only and each
 * existing, non-symlinked allowOnly path is re-bound writable; deny paths
 * (user denyWithinAllow + mandatory denies) inside allowed regions are bound
 * read-only, and non-existent deny paths are blocked with /dev/null or an
 * empty-directory mount. Without a writeConfig the whole root is writable.
 * Read handling: denyOnly directories get a tmpfs overlay (with allowWithinDeny
 * paths re-bound on top); denyOnly files are masked with /dev/null.
 *
 * @param readConfig - optional { denyOnly, allowWithinDeny } read restrictions
 * @param writeConfig - optional { allowOnly, denyWithinAllow } write restrictions;
 *   undefined means no write restrictions at all
 * @param ripgrepConfig - ripgrep invocation config forwarded to the mandatory-deny scan
 * @param mandatoryDenySearchDepth - search depth forwarded to the mandatory-deny scan
 * @param allowGitConfig - forwarded to linuxGetMandatoryDenyPaths (git config denial)
 * @param abortSignal - optional signal forwarded to the mandatory-deny scan
 * @returns flat array of bwrap CLI arguments (e.g. ['--ro-bind', '/', '/', ...])
 */
async function generateFilesystemArgs(readConfig, writeConfig, ripgrepConfig = { command: 'rg' }, mandatoryDenySearchDepth = DEFAULT_MANDATORY_DENY_SEARCH_DEPTH, allowGitConfig = false, abortSignal) {
    const args = [];
    // fs already imported
    // Determine initial root mount based on write restrictions
    if (writeConfig) {
        // Write restrictions: Start with read-only root, then allow writes to specific paths
        args.push('--ro-bind', '/', '/');
        // Collect normalized allowed write paths for later checking
        const allowedWritePaths = [];
        // Allow writes to specific paths
        for (const pathPattern of writeConfig.allowOnly || []) {
            const normalizedPath = normalizePathForSandbox(pathPattern);
            logForDebugging(`[Sandbox Linux] Processing write path: ${pathPattern} -> ${normalizedPath}`);
            // Skip /dev/* paths since --dev /dev already handles them
            if (normalizedPath.startsWith('/dev/')) {
                logForDebugging(`[Sandbox Linux] Skipping /dev path: ${normalizedPath}`);
                continue;
            }
            // bwrap cannot bind a path that doesn't exist on the host.
            if (!fs.existsSync(normalizedPath)) {
                logForDebugging(`[Sandbox Linux] Skipping non-existent write path: ${normalizedPath}`);
                continue;
            }
            // Check if path is a symlink pointing outside expected boundaries.
            // bwrap follows symlinks, so --bind on a symlink makes the target writable.
            // This could unexpectedly expose paths the user didn't intend to allow.
            try {
                const resolvedPath = fs.realpathSync(normalizedPath);
                // Trim trailing slashes before comparing: realpathSync never returns
                // a trailing slash, but normalizedPath may have one, which would cause
                // a false mismatch and incorrectly treat the path as a symlink.
                const normalizedForComparison = normalizedPath.replace(/\/+$/, '');
                if (resolvedPath !== normalizedForComparison &&
                    isSymlinkOutsideBoundary(normalizedPath, resolvedPath)) {
                    logForDebugging(`[Sandbox Linux] Skipping symlink write path pointing outside expected location: ${pathPattern} -> ${resolvedPath}`);
                    continue;
                }
            }
            catch {
                // realpathSync failed - path might not exist or be accessible, skip it
                logForDebugging(`[Sandbox Linux] Skipping write path that could not be resolved: ${normalizedPath}`);
                continue;
            }
            args.push('--bind', normalizedPath, normalizedPath);
            allowedWritePaths.push(normalizedPath);
        }
        // Deny writes within allowed paths (user-specified + mandatory denies)
        const denyPaths = [
            ...(writeConfig.denyWithinAllow || []),
            ...(await linuxGetMandatoryDenyPaths(ripgrepConfig, mandatoryDenySearchDepth, allowGitConfig, abortSignal)),
        ];
        for (const pathPattern of denyPaths) {
            const normalizedPath = normalizePathForSandbox(pathPattern);
            // Skip /dev/* paths since --dev /dev already handles them
            if (normalizedPath.startsWith('/dev/')) {
                continue;
            }
            // Check for symlinks in the path - if any parent component is a symlink,
            // mount /dev/null there to prevent symlink replacement attacks.
            // Attack scenario: .claude is a symlink to ./decoy/, attacker deletes
            // symlink and creates real .claude/settings.json with malicious hooks.
            const symlinkInPath = findSymlinkInPath(normalizedPath, allowedWritePaths);
            if (symlinkInPath) {
                args.push('--ro-bind', '/dev/null', symlinkInPath);
                logForDebugging(`[Sandbox Linux] Mounted /dev/null at symlink ${symlinkInPath} to prevent symlink replacement attack`);
                continue;
            }
            // Handle non-existent paths by mounting /dev/null to block creation.
            // Without this, a sandboxed process could mkdir+write a denied path that
            // doesn't exist yet, bypassing the deny rule entirely.
            //
            // bwrap creates empty files on the host as mount points for these binds.
            // We track them in bwrapMountPoints so cleanupBwrapMountPoints() can
            // remove them after the command exits.
            if (!fs.existsSync(normalizedPath)) {
                // Fix 1 (worktree): If any existing component in the deny path is a
                // file (not a directory), skip the deny entirely. You can't mkdir
                // under a file, so the deny path can never be created. This handles
                // git worktrees where .git is a file.
                if (hasFileAncestor(normalizedPath)) {
                    logForDebugging(`[Sandbox Linux] Skipping deny path with file ancestor (cannot create paths under a file): ${normalizedPath}`);
                    continue;
                }
                // Find the deepest existing ancestor directory
                let ancestorPath = path.dirname(normalizedPath);
                while (ancestorPath !== '/' && !fs.existsSync(ancestorPath)) {
                    ancestorPath = path.dirname(ancestorPath);
                }
                // Only protect if the existing ancestor is within an allowed write path.
                // If not, the path is already read-only from --ro-bind / /.
                const ancestorIsWithinAllowedPath = allowedWritePaths.some(allowedPath => ancestorPath.startsWith(allowedPath + '/') ||
                    ancestorPath === allowedPath ||
                    normalizedPath.startsWith(allowedPath + '/'));
                if (ancestorIsWithinAllowedPath) {
                    const firstNonExistent = findFirstNonExistentComponent(normalizedPath);
                    // Fix 2: If firstNonExistent is an intermediate component (not the
                    // leaf deny path itself), mount a read-only empty directory instead
                    // of /dev/null. This prevents the component from appearing as a file
                    // which breaks tools that expect to traverse it as a directory.
                    if (firstNonExistent !== normalizedPath) {
                        const emptyDir = fs.mkdtempSync(path.join(tmpdir(), 'claude-empty-'));
                        args.push('--ro-bind', emptyDir, firstNonExistent);
                        bwrapMountPoints.add(firstNonExistent);
                        registerExitCleanupHandler();
                        logForDebugging(`[Sandbox Linux] Mounted empty dir at ${firstNonExistent} to block creation of ${normalizedPath}`);
                    }
                    else {
                        args.push('--ro-bind', '/dev/null', firstNonExistent);
                        bwrapMountPoints.add(firstNonExistent);
                        registerExitCleanupHandler();
                        logForDebugging(`[Sandbox Linux] Mounted /dev/null at ${firstNonExistent} to block creation of ${normalizedPath}`);
                    }
                }
                else {
                    logForDebugging(`[Sandbox Linux] Skipping non-existent deny path not within allowed paths: ${normalizedPath}`);
                }
                continue;
            }
            // Only add deny binding if this path is within an allowed write path.
            // Otherwise it's already read-only from the initial --ro-bind / /
            const isWithinAllowedPath = allowedWritePaths.some(allowedPath => normalizedPath.startsWith(allowedPath + '/') ||
                normalizedPath === allowedPath);
            if (isWithinAllowedPath) {
                args.push('--ro-bind', normalizedPath, normalizedPath);
            }
            else {
                logForDebugging(`[Sandbox Linux] Skipping deny path not within allowed paths: ${normalizedPath}`);
            }
        }
    }
    else {
        // No write restrictions: Allow all writes
        args.push('--bind', '/', '/');
    }
    // Handle read restrictions by mounting tmpfs over denied paths
    const readDenyPaths = [...(readConfig?.denyOnly || [])];
    const readAllowPaths = (readConfig?.allowWithinDeny || []).map(p => normalizePathForSandbox(p));
    // Always hide /etc/ssh/ssh_config.d to avoid permission issues with OrbStack.
    // SSH is very strict about config file permissions and ownership, and they can
    // appear wrong inside the sandbox causing "Bad owner or permissions" errors
    if (fs.existsSync('/etc/ssh/ssh_config.d')) {
        readDenyPaths.push('/etc/ssh/ssh_config.d');
    }
    for (const pathPattern of readDenyPaths) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (!fs.existsSync(normalizedPath)) {
            logForDebugging(`[Sandbox Linux] Skipping non-existent read deny path: ${normalizedPath}`);
            continue;
        }
        const readDenyStat = fs.statSync(normalizedPath);
        if (readDenyStat.isDirectory()) {
            args.push('--tmpfs', normalizedPath);
            // Re-allow specific paths within the denied directory (allowRead overrides denyRead).
            // After mounting tmpfs over the denied dir, bind back the allowed subdirectories
            // so they are readable again.
            for (const allowPath of readAllowPaths) {
                if (allowPath.startsWith(normalizedPath + '/') ||
                    allowPath === normalizedPath) {
                    if (!fs.existsSync(allowPath)) {
                        logForDebugging(`[Sandbox Linux] Skipping non-existent read allow path: ${allowPath}`);
                        continue;
                    }
                    // Bind the allowed path back over the tmpfs so it's readable
                    args.push('--ro-bind', allowPath, allowPath);
                    logForDebugging(`[Sandbox Linux] Re-allowed read access within denied region: ${allowPath}`);
                }
            }
        }
        else {
            // For files, check if this specific file is re-allowed
            const isReAllowed = readAllowPaths.some(allowPath => normalizedPath === allowPath ||
                normalizedPath.startsWith(allowPath + '/'));
            if (isReAllowed) {
                logForDebugging(`[Sandbox Linux] Skipping read deny for re-allowed path: ${normalizedPath}`);
                continue;
            }
            // For files, bind /dev/null instead of tmpfs
            args.push('--ro-bind', '/dev/null', normalizedPath);
        }
    }
    return args;
}
|
||||
/**
 * Wrap a command with sandbox restrictions on Linux
 *
 * UNIX SOCKET BLOCKING (APPLY-SECCOMP):
 * This implementation uses a custom apply-seccomp binary to block Unix domain socket
 * creation for user commands while allowing network infrastructure:
 *
 * Stage 1: Outer bwrap - Network and filesystem isolation (NO seccomp)
 * - Bubblewrap starts with isolated network namespace (--unshare-net)
 * - Bubblewrap applies PID namespace isolation (--unshare-pid and --proc)
 * - Filesystem restrictions are applied (read-only mounts, bind mounts, etc.)
 * - Socat processes start and connect to Unix socket bridges (can use socket(AF_UNIX, ...))
 *
 * Stage 2: apply-seccomp - Seccomp filter application (ONLY seccomp)
 * - apply-seccomp binary applies seccomp filter via prctl(PR_SET_SECCOMP)
 * - Sets PR_SET_NO_NEW_PRIVS to allow seccomp without root
 * - Execs user command with seccomp active (cannot create new Unix sockets)
 *
 * This solves the conflict between:
 * - Security: Blocking arbitrary Unix socket creation in user commands
 * - Functionality: Network sandboxing requires socat to call socket(AF_UNIX, ...) for bridge connections
 *
 * The seccomp-bpf filter blocks socket(AF_UNIX, ...) syscalls, preventing:
 * - Creating new Unix domain socket file descriptors
 *
 * Security limitations:
 * - Does NOT block operations (bind, connect, sendto, etc.) on inherited Unix socket FDs
 * - Does NOT prevent passing Unix socket FDs via SCM_RIGHTS
 * - For most sandboxing use cases, blocking socket creation is sufficient
 *
 * The filter allows:
 * - All TCP/UDP sockets (AF_INET, AF_INET6) for normal network operations
 * - All other syscalls
 *
 * PLATFORM NOTE:
 * The allowUnixSockets configuration is not path-based on Linux (unlike macOS)
 * because seccomp-bpf cannot inspect user-space memory to read socket paths.
 *
 * Requirements for seccomp filtering:
 * - Pre-built apply-seccomp binaries are included for x64 and ARM64
 * - Pre-generated BPF filters are included for x64 and ARM64
 * - Other architectures are not currently supported (no apply-seccomp binary available)
 * - To use sandboxing without Unix socket blocking on unsupported architectures,
 *   set allowAllUnixSockets: true in your configuration
 * Dependencies are checked by checkLinuxDependencies() before enabling the sandbox.
 *
 * @param params - single options object; fields used here: command,
 *   needsNetworkRestriction, httpSocketPath, socksSocketPath, httpProxyPort,
 *   socksProxyPort, readConfig, writeConfig, enableWeakerNestedSandbox,
 *   allowAllUnixSockets, binShell, ripgrepConfig, mandatoryDenySearchDepth,
 *   allowGitConfig, seccompConfig, abortSignal
 * @returns the shell-quoted `bwrap ...` command string, or the original
 *   command unchanged when no restrictions apply
 * @throws if the requested shell is not on PATH, if bridge sockets are
 *   missing, or if the apply-seccomp binary cannot be found when required
 */
export async function wrapCommandWithSandboxLinux(params) {
    const { command, needsNetworkRestriction, httpSocketPath, socksSocketPath, httpProxyPort, socksProxyPort, readConfig, writeConfig, enableWeakerNestedSandbox, allowAllUnixSockets, binShell, ripgrepConfig = { command: 'rg' }, mandatoryDenySearchDepth = DEFAULT_MANDATORY_DENY_SEARCH_DEPTH, allowGitConfig = false, seccompConfig, abortSignal, } = params;
    // Determine if we have restrictions to apply
    // Read: denyOnly pattern - empty array means no restrictions
    // Write: allowOnly pattern - undefined means no restrictions, any config means restrictions
    const hasReadRestrictions = readConfig && readConfig.denyOnly.length > 0;
    const hasWriteRestrictions = writeConfig !== undefined;
    // Check if we need any sandboxing; if not, return the command untouched.
    if (!needsNetworkRestriction &&
        !hasReadRestrictions &&
        !hasWriteRestrictions) {
        return command;
    }
    const bwrapArgs = ['--new-session', '--die-with-parent'];
    let seccompFilterPath = undefined;
    try {
        // ========== SECCOMP FILTER (Unix Socket Blocking) ==========
        // Use bwrap's --seccomp flag to apply BPF filter that blocks Unix socket creation
        //
        // NOTE: Seccomp filtering is only enabled when allowAllUnixSockets is false
        // (when true, Unix sockets are allowed)
        if (!allowAllUnixSockets) {
            seccompFilterPath =
                generateSeccompFilter(seccompConfig?.bpfPath) ?? undefined;
            const applySeccompBinary = getApplySeccompBinaryPath(seccompConfig?.applyPath);
            if (!seccompFilterPath || !applySeccompBinary) {
                // Seccomp binaries not found - warn but continue without unix socket blocking
                logForDebugging('[Sandbox Linux] Seccomp binaries not available - unix socket blocking disabled. ' +
                    'Install @anthropic-ai/sandbox-runtime globally for full protection.', { level: 'warn' });
                // Clear the filter path so we don't try to use it
                seccompFilterPath = undefined;
            }
            else {
                // Track filter for cleanup and register exit handler.
                // Only track runtime-generated filters (not pre-generated ones from vendor/)
                if (!seccompFilterPath.includes('/vendor/seccomp/')) {
                    generatedSeccompFilters.add(seccompFilterPath);
                    registerExitCleanupHandler();
                }
                logForDebugging('[Sandbox Linux] Generated seccomp BPF filter for Unix socket blocking');
            }
        }
        else {
            logForDebugging('[Sandbox Linux] Skipping seccomp filter - allowAllUnixSockets is enabled');
        }
        // ========== NETWORK RESTRICTIONS ==========
        if (needsNetworkRestriction) {
            // Always unshare network namespace to isolate network access.
            // This removes all network interfaces, effectively blocking all network
            bwrapArgs.push('--unshare-net');
            // If proxy sockets are provided, bind them into the sandbox to allow
            // filtered network access through the proxy. If not provided, network
            // is completely blocked (empty allowedDomains = block all)
            if (httpSocketPath && socksSocketPath) {
                // Verify socket files still exist before trying to bind them
                if (!fs.existsSync(httpSocketPath)) {
                    throw new Error(`Linux HTTP bridge socket does not exist: ${httpSocketPath}. ` +
                        'The bridge process may have died. Try reinitializing the sandbox.');
                }
                if (!fs.existsSync(socksSocketPath)) {
                    throw new Error(`Linux SOCKS bridge socket does not exist: ${socksSocketPath}. ` +
                        'The bridge process may have died. Try reinitializing the sandbox.');
                }
                // Bind both sockets into the sandbox
                bwrapArgs.push('--bind', httpSocketPath, httpSocketPath);
                bwrapArgs.push('--bind', socksSocketPath, socksSocketPath);
                // Add proxy environment variables.
                // HTTP_PROXY points to the socat listener inside the sandbox (port 3128)
                // which forwards to the Unix socket that bridges to the host's proxy server
                const proxyEnv = generateProxyEnvVars(3128, // Internal HTTP listener port
                1080);
                // Each env var is a KEY=VALUE string; split on the FIRST '=' only,
                // since the value itself may contain '=' characters.
                bwrapArgs.push(...proxyEnv.flatMap((env) => {
                    const firstEq = env.indexOf('=');
                    const key = env.slice(0, firstEq);
                    const value = env.slice(firstEq + 1);
                    return ['--setenv', key, value];
                }));
                // Add host proxy port environment variables for debugging/transparency.
                // These show which host ports the Unix socket bridges connect to
                if (httpProxyPort !== undefined) {
                    bwrapArgs.push('--setenv', 'CLAUDE_CODE_HOST_HTTP_PROXY_PORT', String(httpProxyPort));
                }
                if (socksProxyPort !== undefined) {
                    bwrapArgs.push('--setenv', 'CLAUDE_CODE_HOST_SOCKS_PROXY_PORT', String(socksProxyPort));
                }
            }
            // If no sockets provided, network is completely blocked (--unshare-net without proxy)
        }
        // ========== FILESYSTEM RESTRICTIONS ==========
        const fsArgs = await generateFilesystemArgs(readConfig, writeConfig, ripgrepConfig, mandatoryDenySearchDepth, allowGitConfig, abortSignal);
        bwrapArgs.push(...fsArgs);
        // Always bind /dev
        bwrapArgs.push('--dev', '/dev');
        // ========== PID NAMESPACE ISOLATION ==========
        // IMPORTANT: These must come AFTER filesystem binds for nested bwrap to work.
        // By default, always unshare PID namespace and mount fresh /proc.
        // If we don't have --unshare-pid, it is possible to escape the sandbox.
        // If we don't have --proc, it is possible to read host /proc and leak information about code running
        // outside the sandbox. But, --proc is not available when running in unprivileged docker containers
        // so we support running without it if explicitly requested.
        bwrapArgs.push('--unshare-pid');
        if (!enableWeakerNestedSandbox) {
            // Mount fresh /proc if PID namespace is isolated (secure mode)
            bwrapArgs.push('--proc', '/proc');
        }
        // ========== COMMAND ==========
        // Use the user's shell (zsh, bash, etc.) to ensure aliases/snapshots work.
        // Resolve the full path to the shell binary since bwrap doesn't use $PATH
        const shellName = binShell || 'bash';
        const shell = whichSync(shellName);
        if (!shell) {
            throw new Error(`Shell '${shellName}' not found in PATH`);
        }
        bwrapArgs.push('--', shell, '-c');
        // If we have network restrictions, use the network bridge setup with apply-seccomp for seccomp.
        // Otherwise, just run the command directly with apply-seccomp if needed
        if (needsNetworkRestriction && httpSocketPath && socksSocketPath) {
            // Pass seccomp filter to buildSandboxCommand for apply-seccomp application.
            // This allows socat to start before seccomp is applied
            const sandboxCommand = buildSandboxCommand(httpSocketPath, socksSocketPath, command, seccompFilterPath, shell, seccompConfig?.applyPath);
            bwrapArgs.push(sandboxCommand);
        }
        else if (seccompFilterPath) {
            // No network restrictions but we have seccomp - use apply-seccomp directly.
            // apply-seccomp is a simple C program that applies the seccomp filter and execs the command
            const applySeccompBinary = getApplySeccompBinaryPath(seccompConfig?.applyPath);
            if (!applySeccompBinary) {
                throw new Error('apply-seccomp binary not found. This should have been caught earlier. ' +
                    'Ensure vendor/seccomp/{x64,arm64}/apply-seccomp binaries are included in the package.');
            }
            const applySeccompCmd = shellquote.quote([
                applySeccompBinary,
                seccompFilterPath,
                shell,
                '-c',
                command,
            ]);
            bwrapArgs.push(applySeccompCmd);
        }
        else {
            bwrapArgs.push(command);
        }
        // Build the outer bwrap command
        const wrappedCommand = shellquote.quote(['bwrap', ...bwrapArgs]);
        const restrictions = [];
        if (needsNetworkRestriction)
            restrictions.push('network');
        if (hasReadRestrictions || hasWriteRestrictions)
            restrictions.push('filesystem');
        if (seccompFilterPath)
            restrictions.push('seccomp(unix-block)');
        logForDebugging(`[Sandbox Linux] Wrapped command with bwrap (${restrictions.join(', ')} restrictions)`);
        return wrappedCommand;
    }
    catch (error) {
        // Clean up seccomp filter on error (runtime-generated filters only;
        // pre-generated vendor/ filters are never deleted).
        if (seccompFilterPath && !seccompFilterPath.includes('/vendor/seccomp/')) {
            generatedSeccompFilters.delete(seccompFilterPath);
            try {
                cleanupSeccompFilter(seccompFilterPath);
            }
            catch (cleanupError) {
                logForDebugging(`[Sandbox Linux] Failed to clean up seccomp filter on error: ${cleanupError}`, { level: 'error' });
            }
        }
        // Re-throw the original error
        throw error;
    }
}
|
||||
//# sourceMappingURL=linux-sandbox-utils.js.map
|
||||
630
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/macos-sandbox-utils.js
generated
vendored
Normal file
630
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/macos-sandbox-utils.js
generated
vendored
Normal file
@@ -0,0 +1,630 @@
|
||||
import shellquote from 'shell-quote';
|
||||
import { spawn } from 'child_process';
|
||||
import * as path from 'path';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
import { whichSync } from '../utils/which.js';
|
||||
import { normalizePathForSandbox, generateProxyEnvVars, encodeSandboxedCommand, decodeSandboxedCommand, containsGlobChars, globToRegex, DANGEROUS_FILES, getDangerousDirectories, } from './sandbox-utils.js';
|
||||
/**
 * Get mandatory deny patterns as glob patterns (no filesystem scanning).
 * macOS sandbox profile supports regex/glob matching directly via globToRegex(),
 * so each entry is emitted both as an absolute path under CWD and as a
 * recursive `**` glob covering the whole tree.
 *
 * @param allowGitConfig - When true, `.git/config` is not added to the deny list.
 * @returns Deduplicated list of literal paths and glob patterns to deny writes to.
 */
export function macGetMandatoryDenyPatterns(allowGitConfig = false) {
    const cwd = process.cwd();
    const patterns = [
        // Dangerous files: static path in CWD + glob pattern for the subtree.
        ...DANGEROUS_FILES.flatMap((fileName) => [
            path.resolve(cwd, fileName),
            `**/${fileName}`,
        ]),
        // Dangerous directories, same dual form.
        ...getDangerousDirectories().flatMap((dirName) => [
            path.resolve(cwd, dirName),
            `**/${dirName}/**`,
        ]),
        // Git hooks are always blocked for security.
        path.resolve(cwd, '.git/hooks'),
        '**/.git/hooks/**',
    ];
    // Git config is conditionally blocked based on the allowGitConfig setting.
    if (!allowGitConfig) {
        patterns.push(path.resolve(cwd, '.git/config'), '**/.git/config');
    }
    return [...new Set(patterns)];
}
|
||||
// Per-process random suffix appended to every log tag so the macOS log
// monitor can filter events belonging to this session only.
const sessionSuffix = `_${Math.random().toString(36).substring(2, 11)}_SBX`;
|
||||
/**
 * Generate a unique log tag for sandbox monitoring.
 * The command is base64-encoded between the CMD64_/_END markers so the log
 * monitor can recover it, and the per-process session suffix is appended so
 * the `log stream` predicate can filter on it.
 *
 * @param command - The command being executed (will be base64 encoded)
 * @returns Tag of the form `CMD64_<encoded>_END_<session suffix>`
 */
function generateLogTag(command) {
    return `CMD64_${encodeSandboxedCommand(command)}_END_${sessionSuffix}`;
}
|
||||
/**
 * Get all ancestor directories for a path, up to (but not including) root.
 * Example: /private/tmp/test/file.txt -> ["/private/tmp/test", "/private/tmp", "/private"]
 *
 * @param pathStr - Path whose ancestors (excluding root) are wanted.
 * @returns Ancestors ordered from nearest to furthest.
 */
function getAncestorDirectories(pathStr) {
    const ancestors = [];
    // Walk up the directory tree; stop at root ('/') or a relative top ('.').
    for (let dir = path.dirname(pathStr); dir !== '/' && dir !== '.'; dir = path.dirname(dir)) {
        ancestors.push(dir);
        // path.dirname returns its input unchanged at the top — avoid looping forever.
        if (path.dirname(dir) === dir) {
            break;
        }
    }
    return ancestors;
}
|
||||
/**
 * Generate deny rules for file movement (file-write-unlink) to protect paths.
 * This prevents bypassing read or write restrictions by moving files/directories:
 * without these rules, `mv protected elsewhere` would sidestep a subpath deny.
 *
 * For each protected pattern we also deny unlinking its ancestor directories,
 * since moving an ancestor would relocate the protected path wholesale.
 *
 * @param pathPatterns - Array of path patterns to protect (can include globs)
 * @param logTag - Log tag for sandbox violations (embedded in the deny message)
 * @returns Array of sandbox profile rule lines
 */
function generateMoveBlockingRules(pathPatterns, logTag) {
    const rules = [];
    for (const pathPattern of pathPatterns) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (containsGlobChars(normalizedPath)) {
            // Use regex matching for glob patterns
            const regexPattern = globToRegex(normalizedPath);
            // Block moving/renaming files matching this pattern
            rules.push(`(deny file-write-unlink`, ` (regex ${escapePath(regexPattern)})`, ` (with message "${logTag}"))`);
            // For glob patterns, extract the static prefix and block ancestor moves.
            // Splitting on the first glob metacharacter yields the literal directory prefix.
            const staticPrefix = normalizedPath.split(/[*?[\]]/)[0];
            if (staticPrefix && staticPrefix !== '/') {
                // Get the directory containing the glob pattern
                const baseDir = staticPrefix.endsWith('/')
                    ? staticPrefix.slice(0, -1)
                    : path.dirname(staticPrefix);
                // Block moves of the base directory itself
                rules.push(`(deny file-write-unlink`, ` (literal ${escapePath(baseDir)})`, ` (with message "${logTag}"))`);
                // Block moves of ancestor directories
                for (const ancestorDir of getAncestorDirectories(baseDir)) {
                    rules.push(`(deny file-write-unlink`, ` (literal ${escapePath(ancestorDir)})`, ` (with message "${logTag}"))`);
                }
            }
        }
        else {
            // Use subpath matching for literal paths.
            // Block moving/renaming the denied path itself (and everything under it).
            rules.push(`(deny file-write-unlink`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
            // Block moves of ancestor directories (literal match only — siblings stay movable)
            for (const ancestorDir of getAncestorDirectories(normalizedPath)) {
                rules.push(`(deny file-write-unlink`, ` (literal ${escapePath(ancestorDir)})`, ` (with message "${logTag}"))`);
            }
        }
    }
    return rules;
}
|
||||
/**
 * Generate filesystem read rules for the sandbox profile.
 *
 * Supports two layers:
 * 1. denyOnly: deny reads from these paths (broad regions like /Users)
 * 2. allowWithinDeny: re-allow reads within denied regions (like CWD)
 * allowWithinDeny takes precedence over denyOnly.
 *
 * In Seatbelt profiles, later rules take precedence, so we emit:
 *   (allow file-read*)       <- default: allow everything
 *   (deny file-read* ...)    <- deny broad regions
 *   (allow file-read* ...)   <- re-allow specific paths within denied regions
 *
 * @param config - Read config with optional `denyOnly`/`allowWithinDeny` arrays,
 *                 or undefined for no read restrictions at all.
 * @param logTag - Tag embedded in deny messages for violation attribution.
 * @returns Array of sandbox profile rule lines.
 */
function generateReadRules(config, logTag) {
    if (!config) {
        return [`(allow file-read*)`];
    }
    const rules = [];
    // Start by allowing everything; later deny lines override.
    rules.push(`(allow file-read*)`);
    // Fix: treat denyOnly as optional everywhere. The loops below already
    // guarded with `|| []`, but the metadata check further down previously
    // accessed `config.denyOnly.length` unguarded and would throw on a
    // config object without that key.
    const denyPaths = config.denyOnly || [];
    // Then deny specific paths
    for (const pathPattern of denyPaths) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (containsGlobChars(normalizedPath)) {
            // Use regex matching for glob patterns
            const regexPattern = globToRegex(normalizedPath);
            rules.push(`(deny file-read*`, ` (regex ${escapePath(regexPattern)})`, ` (with message "${logTag}"))`);
        }
        else {
            // Use subpath matching for literal paths
            rules.push(`(deny file-read*`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
        }
    }
    // Re-allow specific paths within denied regions (allowWithinDeny takes precedence)
    for (const pathPattern of config.allowWithinDeny || []) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (containsGlobChars(normalizedPath)) {
            const regexPattern = globToRegex(normalizedPath);
            rules.push(`(allow file-read*`, ` (regex ${escapePath(regexPattern)})`, ` (with message "${logTag}"))`);
        }
        else {
            rules.push(`(allow file-read*`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
        }
    }
    // Allow stat/lstat on all directories so that realpath() can traverse
    // path components within denied regions. Without this, C realpath() fails
    // when resolving symlinks because it needs to lstat every intermediate
    // directory (e.g. /Users, /Users/chris) even if only a subdirectory like
    // ~/.local is in allowWithinDeny. This only allows metadata reads on
    // directories — not listing contents (readdir) or reading files.
    if (denyPaths.length > 0) {
        rules.push(`(allow file-read-metadata`, ` (vnode-type DIRECTORY))`);
    }
    // Block file movement to prevent bypass via mv/rename
    rules.push(...generateMoveBlockingRules(denyPaths, logTag));
    return rules;
}
|
||||
/**
 * Generate filesystem write rules for the sandbox profile.
 *
 * Write restrictions use an allow-list model: `config.allowOnly` is the set of
 * writable paths, `config.denyWithinAllow` carves holes back out of it, and the
 * mandatory deny patterns (dangerous files, .git/hooks, optionally .git/config)
 * are always appended. An undefined config means no write restrictions.
 *
 * @param config - Write config with optional `allowOnly`/`denyWithinAllow`, or undefined.
 * @param logTag - Tag embedded in rule messages for violation attribution.
 * @param allowGitConfig - Passed through to macGetMandatoryDenyPatterns().
 * @returns Array of sandbox profile rule lines.
 */
function generateWriteRules(config, logTag, allowGitConfig = false) {
    if (!config) {
        return [`(allow file-write*)`];
    }
    const rules = [];
    // Automatically allow TMPDIR parent on macOS when write restrictions are enabled,
    // in both its /var and /private/var spellings (see getTmpdirParentIfMacOSPattern).
    const tmpdirParents = getTmpdirParentIfMacOSPattern();
    for (const tmpdirParent of tmpdirParents) {
        const normalizedPath = normalizePathForSandbox(tmpdirParent);
        rules.push(`(allow file-write*`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
    }
    // Generate allow rules
    for (const pathPattern of config.allowOnly || []) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (containsGlobChars(normalizedPath)) {
            // Use regex matching for glob patterns
            const regexPattern = globToRegex(normalizedPath);
            rules.push(`(allow file-write*`, ` (regex ${escapePath(regexPattern)})`, ` (with message "${logTag}"))`);
        }
        else {
            // Use subpath matching for literal paths
            rules.push(`(allow file-write*`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
        }
    }
    // Combine user-specified and mandatory deny patterns (no ripgrep needed on macOS).
    // These deny rules come after the allow rules, so they win under Seatbelt's
    // later-rule-takes-precedence semantics.
    const denyPaths = [
        ...(config.denyWithinAllow || []),
        ...macGetMandatoryDenyPatterns(allowGitConfig),
    ];
    for (const pathPattern of denyPaths) {
        const normalizedPath = normalizePathForSandbox(pathPattern);
        if (containsGlobChars(normalizedPath)) {
            // Use regex matching for glob patterns
            const regexPattern = globToRegex(normalizedPath);
            rules.push(`(deny file-write*`, ` (regex ${escapePath(regexPattern)})`, ` (with message "${logTag}"))`);
        }
        else {
            // Use subpath matching for literal paths
            rules.push(`(deny file-write*`, ` (subpath ${escapePath(normalizedPath)})`, ` (with message "${logTag}"))`);
        }
    }
    // Block file movement to prevent bypass via mv/rename
    rules.push(...generateMoveBlockingRules(denyPaths, logTag));
    return rules;
}
|
||||
/**
 * Generate a complete macOS Seatbelt (sandbox-exec) profile.
 *
 * The profile starts from `(deny default)` and re-allows the minimum set of
 * process/IPC/IOKit/sysctl operations a typical developer tool needs (the
 * baseline is noted below as derived from Chrome's sandbox policy), then
 * appends network, file-read, file-write, and optional pty rules. Seatbelt
 * evaluates later rules with higher precedence, so rule order matters.
 *
 * @param readConfig - Read restriction config forwarded to generateReadRules().
 * @param writeConfig - Write restriction config forwarded to generateWriteRules().
 * @param httpProxyPort - Localhost HTTP proxy port to whitelist, if any.
 * @param socksProxyPort - Localhost SOCKS proxy port to whitelist, if any.
 * @param needsNetworkRestriction - When false, all network access is allowed.
 * @param allowUnixSockets - Specific Unix socket paths to allow.
 * @param allowAllUnixSockets - When true, allow every Unix socket path.
 * @param allowLocalBinding - When true, allow bind/inbound/outbound on local endpoints.
 * @param allowPty - When true, append pseudo-terminal device rules.
 * @param allowGitConfig - Forwarded to the write rules (mandatory deny list).
 * @param enableWeakerNetworkIsolation - Adds trustd.agent mach-lookup (weaker isolation).
 * @param logTag - Tag embedded in deny messages so violations can be attributed.
 * @returns The full profile as a single newline-joined string.
 */
function generateSandboxProfile({ readConfig, writeConfig, httpProxyPort, socksProxyPort, needsNetworkRestriction, allowUnixSockets, allowAllUnixSockets, allowLocalBinding, allowPty, allowGitConfig = false, enableWeakerNetworkIsolation = false, logTag, }) {
    // Static baseline: deny-by-default plus essential process/IPC permissions.
    const profile = [
        '(version 1)',
        `(deny default (with message "${logTag}"))`,
        '',
        `; LogTag: ${logTag}`,
        '',
        '; Essential permissions - based on Chrome sandbox policy',
        '; Process permissions',
        '(allow process-exec)',
        '(allow process-fork)',
        '(allow process-info* (target same-sandbox))',
        '(allow signal (target same-sandbox))',
        '(allow mach-priv-task-port (target same-sandbox))',
        '',
        '; User preferences',
        '(allow user-preference-read)',
        '',
        '; Mach IPC - specific services only (no wildcard)',
        '(allow mach-lookup',
        ' (global-name "com.apple.audio.systemsoundserver")',
        ' (global-name "com.apple.distributed_notifications@Uv3")',
        ' (global-name "com.apple.FontObjectsServer")',
        ' (global-name "com.apple.fonts")',
        ' (global-name "com.apple.logd")',
        ' (global-name "com.apple.lsd.mapdb")',
        ' (global-name "com.apple.PowerManagement.control")',
        ' (global-name "com.apple.system.logger")',
        ' (global-name "com.apple.system.notification_center")',
        ' (global-name "com.apple.system.opendirectoryd.libinfo")',
        ' (global-name "com.apple.system.opendirectoryd.membership")',
        ' (global-name "com.apple.bsd.dirhelper")',
        ' (global-name "com.apple.securityd.xpc")',
        ' (global-name "com.apple.coreservices.launchservicesd")',
        ')',
        '',
        ...(enableWeakerNetworkIsolation
            ? [
                '; trustd.agent - needed for Go TLS certificate verification (weaker network isolation)',
                '(allow mach-lookup (global-name "com.apple.trustd.agent"))',
            ]
            : []),
        '',
        '; POSIX IPC - shared memory',
        '(allow ipc-posix-shm)',
        '',
        '; POSIX IPC - semaphores for Python multiprocessing',
        '(allow ipc-posix-sem)',
        '',
        '; IOKit - specific operations only',
        '(allow iokit-open',
        ' (iokit-registry-entry-class "IOSurfaceRootUserClient")',
        ' (iokit-registry-entry-class "RootDomainUserClient")',
        ' (iokit-user-client-class "IOSurfaceSendRight")',
        ')',
        '',
        '; IOKit properties',
        '(allow iokit-get-properties)',
        '',
        "; Specific safe system-sockets, doesn't allow network access",
        '(allow system-socket (require-all (socket-domain AF_SYSTEM) (socket-protocol 2)))',
        '',
        '; sysctl - specific sysctls only',
        '(allow sysctl-read',
        ' (sysctl-name "hw.activecpu")',
        ' (sysctl-name "hw.busfrequency_compat")',
        ' (sysctl-name "hw.byteorder")',
        ' (sysctl-name "hw.cacheconfig")',
        ' (sysctl-name "hw.cachelinesize_compat")',
        ' (sysctl-name "hw.cpufamily")',
        ' (sysctl-name "hw.cpufrequency")',
        ' (sysctl-name "hw.cpufrequency_compat")',
        ' (sysctl-name "hw.cputype")',
        ' (sysctl-name "hw.l1dcachesize_compat")',
        ' (sysctl-name "hw.l1icachesize_compat")',
        ' (sysctl-name "hw.l2cachesize_compat")',
        ' (sysctl-name "hw.l3cachesize_compat")',
        ' (sysctl-name "hw.logicalcpu")',
        ' (sysctl-name "hw.logicalcpu_max")',
        ' (sysctl-name "hw.machine")',
        ' (sysctl-name "hw.memsize")',
        ' (sysctl-name "hw.ncpu")',
        ' (sysctl-name "hw.nperflevels")',
        ' (sysctl-name "hw.packages")',
        ' (sysctl-name "hw.pagesize_compat")',
        ' (sysctl-name "hw.pagesize")',
        ' (sysctl-name "hw.physicalcpu")',
        ' (sysctl-name "hw.physicalcpu_max")',
        ' (sysctl-name "hw.tbfrequency_compat")',
        ' (sysctl-name "hw.vectorunit")',
        ' (sysctl-name "kern.argmax")',
        ' (sysctl-name "kern.bootargs")',
        ' (sysctl-name "kern.hostname")',
        ' (sysctl-name "kern.maxfiles")',
        ' (sysctl-name "kern.maxfilesperproc")',
        ' (sysctl-name "kern.maxproc")',
        ' (sysctl-name "kern.ngroups")',
        ' (sysctl-name "kern.osproductversion")',
        ' (sysctl-name "kern.osrelease")',
        ' (sysctl-name "kern.ostype")',
        ' (sysctl-name "kern.osvariant_status")',
        ' (sysctl-name "kern.osversion")',
        ' (sysctl-name "kern.secure_kernel")',
        ' (sysctl-name "kern.tcsm_available")',
        ' (sysctl-name "kern.tcsm_enable")',
        ' (sysctl-name "kern.usrstack64")',
        ' (sysctl-name "kern.version")',
        ' (sysctl-name "kern.willshutdown")',
        ' (sysctl-name "machdep.cpu.brand_string")',
        ' (sysctl-name "machdep.ptrauth_enabled")',
        ' (sysctl-name "security.mac.lockdown_mode_state")',
        ' (sysctl-name "sysctl.proc_cputype")',
        ' (sysctl-name "vm.loadavg")',
        ' (sysctl-name-prefix "hw.optional.arm")',
        ' (sysctl-name-prefix "hw.optional.arm.")',
        ' (sysctl-name-prefix "hw.optional.armv8_")',
        ' (sysctl-name-prefix "hw.perflevel")',
        ' (sysctl-name-prefix "kern.proc.all")',
        ' (sysctl-name-prefix "kern.proc.pgrp.")',
        ' (sysctl-name-prefix "kern.proc.pid.")',
        ' (sysctl-name-prefix "machdep.cpu.")',
        ' (sysctl-name-prefix "net.routetable.")',
        ')',
        '',
        '; V8 thread calculations',
        '(allow sysctl-write',
        ' (sysctl-name "kern.tcsm_enable")',
        ')',
        '',
        '; Distributed notifications',
        '(allow distributed-notification-post)',
        '',
        '; Specific mach-lookup permissions for security operations',
        '(allow mach-lookup (global-name "com.apple.SecurityServer"))',
        '',
        '; File I/O on device files',
        '(allow file-ioctl (literal "/dev/null"))',
        '(allow file-ioctl (literal "/dev/zero"))',
        '(allow file-ioctl (literal "/dev/random"))',
        '(allow file-ioctl (literal "/dev/urandom"))',
        '(allow file-ioctl (literal "/dev/dtracehelper"))',
        '(allow file-ioctl (literal "/dev/tty"))',
        '',
        '(allow file-ioctl file-read-data file-write-data',
        ' (require-all',
        ' (literal "/dev/null")',
        ' (vnode-type CHARACTER-DEVICE)',
        ' )',
        ')',
        '',
    ];
    // Network rules
    profile.push('; Network');
    if (!needsNetworkRestriction) {
        profile.push('(allow network*)');
    }
    else {
        // Allow local binding if requested
        // Use "*:*" instead of "localhost:*" because modern runtimes (Java, etc.) create
        // IPv6 dual-stack sockets by default. When binding such a socket to 127.0.0.1,
        // the kernel represents it as ::ffff:127.0.0.1 (IPv4-mapped IPv6). Seatbelt's
        // "localhost" filter only matches 127.0.0.1 and ::1, NOT ::ffff:127.0.0.1.
        // Using (local ip "*:*") is safe because it only matches the LOCAL endpoint —
        // internet-bound connections originate from non-loopback interfaces, so they
        // remain blocked by (deny default).
        if (allowLocalBinding) {
            profile.push('(allow network-bind (local ip "*:*"))');
            profile.push('(allow network-inbound (local ip "*:*"))');
            profile.push('(allow network-outbound (local ip "*:*"))');
        }
        // Unix domain sockets for local IPC (SSH agent, Docker, Gradle, etc.)
        // Three separate operations must be allowed:
        // 1. system-socket: socket(AF_UNIX, ...) syscall — creates the socket fd (no path context)
        // 2. network-bind: bind() to a local Unix socket path
        // 3. network-outbound: connect() to a remote Unix socket path
        // Note: (subpath ...) and (path-regex ...) are path-based filters that can only match
        // bind/connect operations — socket() creation has no path, so it requires system-socket.
        if (allowAllUnixSockets) {
            // Allow creating AF_UNIX sockets and all Unix socket paths
            profile.push('(allow system-socket (socket-domain AF_UNIX))');
            profile.push('(allow network-bind (local unix-socket (path-regex #"^/")))');
            profile.push('(allow network-outbound (remote unix-socket (path-regex #"^/")))');
        }
        else if (allowUnixSockets && allowUnixSockets.length > 0) {
            // Allow creating AF_UNIX sockets (required for any Unix socket use)
            profile.push('(allow system-socket (socket-domain AF_UNIX))');
            // Allow specific Unix socket paths
            for (const socketPath of allowUnixSockets) {
                const normalizedPath = normalizePathForSandbox(socketPath);
                profile.push(`(allow network-bind (local unix-socket (subpath ${escapePath(normalizedPath)})))`);
                profile.push(`(allow network-outbound (remote unix-socket (subpath ${escapePath(normalizedPath)})))`);
            }
        }
        // If both allowAllUnixSockets and allowUnixSockets are false/undefined/empty, Unix sockets are blocked by default
        // Allow localhost TCP operations for the HTTP proxy
        if (httpProxyPort !== undefined) {
            profile.push(`(allow network-bind (local ip "localhost:${httpProxyPort}"))`);
            profile.push(`(allow network-inbound (local ip "localhost:${httpProxyPort}"))`);
            profile.push(`(allow network-outbound (remote ip "localhost:${httpProxyPort}"))`);
        }
        // Allow localhost TCP operations for the SOCKS proxy
        if (socksProxyPort !== undefined) {
            profile.push(`(allow network-bind (local ip "localhost:${socksProxyPort}"))`);
            profile.push(`(allow network-inbound (local ip "localhost:${socksProxyPort}"))`);
            profile.push(`(allow network-outbound (remote ip "localhost:${socksProxyPort}"))`);
        }
    }
    profile.push('');
    // Read rules
    profile.push('; File read');
    profile.push(...generateReadRules(readConfig, logTag));
    profile.push('');
    // Write rules
    profile.push('; File write');
    profile.push(...generateWriteRules(writeConfig, logTag, allowGitConfig));
    // Pseudo-terminal (pty) support: the master (/dev/ptmx) plus slave devices (/dev/ttysNNN)
    if (allowPty) {
        profile.push('');
        profile.push('; Pseudo-terminal (pty) support');
        profile.push('(allow pseudo-tty)');
        profile.push('(allow file-ioctl');
        profile.push(' (literal "/dev/ptmx")');
        profile.push(' (regex #"^/dev/ttys")');
        profile.push(')');
        profile.push('(allow file-read* file-write*');
        profile.push(' (literal "/dev/ptmx")');
        profile.push(' (regex #"^/dev/ttys")');
        profile.push(')');
    }
    return profile.join('\n');
}
|
||||
/**
 * Escape a path for embedding in a sandbox profile.
 * JSON.stringify produces a double-quoted string with backslash escapes,
 * which matches the Seatbelt string-literal syntax.
 */
function escapePath(pathStr) {
    const quoted = JSON.stringify(pathStr);
    return quoted;
}
|
||||
/**
 * Get the TMPDIR parent directory if it matches the macOS pattern
 * /var/folders/XX/YYY/T/ — returns both the /var/ and /private/var/
 * spellings since /var is a symlink to /private/var.
 *
 * @returns Zero, one, or two directory paths (empty when TMPDIR is unset
 *          or does not match the macOS per-user temp layout).
 */
function getTmpdirParentIfMacOSPattern() {
    const tmpdir = process.env.TMPDIR;
    const macosTmpPattern = /^\/(private\/)?var\/folders\/[^/]{2}\/[^/]+\/T\/?$/;
    if (!tmpdir || !macosTmpPattern.test(tmpdir)) {
        return [];
    }
    // Strip the trailing /T (with optional slash) to get the per-user folder.
    const parent = tmpdir.replace(/\/T\/?$/, '');
    if (parent.startsWith('/private/var/')) {
        return [parent, parent.replace('/private', '')];
    }
    if (parent.startsWith('/var/')) {
        return [parent, `/private${parent}`];
    }
    return [parent];
}
|
||||
/**
 * Wrap a command with the macOS sandbox (sandbox-exec).
 *
 * Returns the command unchanged when no restrictions apply; otherwise builds
 * a Seatbelt profile, resolves the user's shell, and returns an
 * `env ... sandbox-exec -p <profile> <shell> -c <command>` invocation with
 * proxy environment variables set.
 *
 * @param params - Sandbox parameters (command, network/read/write configs, shell, flags).
 * @returns The (possibly wrapped) shell command string.
 * @throws Error when the requested shell cannot be found in PATH.
 */
export function wrapCommandWithSandboxMacOS(params) {
    const { command, needsNetworkRestriction, httpProxyPort, socksProxyPort, allowUnixSockets, allowAllUnixSockets, allowLocalBinding, readConfig, writeConfig, allowPty, allowGitConfig = false, enableWeakerNetworkIsolation = false, binShell, } = params;
    // Determine if we have restrictions to apply
    // Read: denyOnly pattern - empty array means no restrictions
    // Write: allowOnly pattern - undefined means no restrictions, any config means restrictions
    const hasReadRestrictions = readConfig && readConfig.denyOnly.length > 0;
    const hasWriteRestrictions = writeConfig !== undefined;
    // No sandboxing needed
    if (!needsNetworkRestriction &&
        !hasReadRestrictions &&
        !hasWriteRestrictions) {
        return command;
    }
    const logTag = generateLogTag(command);
    const profile = generateSandboxProfile({
        readConfig,
        writeConfig,
        httpProxyPort,
        socksProxyPort,
        needsNetworkRestriction,
        allowUnixSockets,
        allowAllUnixSockets,
        allowLocalBinding,
        allowPty,
        allowGitConfig,
        enableWeakerNetworkIsolation,
        logTag,
    });
    // Generate proxy environment variables using shared utility
    const proxyEnvArgs = generateProxyEnvVars(httpProxyPort, socksProxyPort);
    // Use the user's shell (zsh, bash, etc.) to ensure aliases/snapshots work
    // Resolve the full path to the shell binary
    const shellName = binShell || 'bash';
    const shell = whichSync(shellName);
    if (!shell) {
        throw new Error(`Shell '${shellName}' not found in PATH`);
    }
    // Use `env` command to set environment variables - each VAR=value is a separate
    // argument that shellquote handles properly, avoiding shell quoting issues
    const wrappedCommand = shellquote.quote([
        'env',
        ...proxyEnvArgs,
        'sandbox-exec',
        '-p',
        profile,
        shell,
        '-c',
        command,
    ]);
    // Fix: the previous log checked `'allowAllExcept' in readConfig/writeConfig`,
    // but configs in this file only ever carry denyOnly/allowWithinDeny (read) and
    // allowOnly/denyWithinAllow (write), so that check was always false and the log
    // always printed 'denyAllExcept'. Report the actual modes: a read config with
    // denyOnly entries is allow-all-except, a defined write config is deny-all-except.
    logForDebugging(`[Sandbox macOS] Applied restrictions - network: ${!!(httpProxyPort || socksProxyPort)}, read: ${hasReadRestrictions ? 'allowAllExcept' : 'none'}, write: ${hasWriteRestrictions ? 'denyAllExcept' : 'none'}`);
    return wrappedCommand;
}
|
||||
/**
 * Start monitoring macOS system logs for sandbox violations.
 * Streams `log stream` output and looks for sandbox-related kernel deny events
 * whose message ends in the per-process {sessionSuffix}, recovering the
 * originating command from the CMD64_..._END tag when present.
 *
 * @param callback - Invoked with { line, command, encodedCommand, timestamp } per violation.
 * @param ignoreViolations - Optional map of command-substring -> path substrings to ignore;
 *                           the '*' key applies to all commands.
 * @returns A stop function that terminates the underlying `log` process.
 */
export function startMacOSSandboxLogMonitor(callback, ignoreViolations) {
    // Pre-compile regex patterns for better performance
    const cmdExtractRegex = /CMD64_(.+?)_END/;
    const sandboxExtractRegex = /Sandbox:\s+(.+)$/;
    // Pre-process ignore patterns for faster lookup
    const wildcardPaths = ignoreViolations?.['*'] || [];
    const commandPatterns = ignoreViolations
        ? Object.entries(ignoreViolations).filter(([pattern]) => pattern !== '*')
        : [];
    // Stream and filter kernel logs for all sandbox violations
    // We can't filter by specific logTag since it's dynamic per command
    const logProcess = spawn('log', [
        'stream',
        '--predicate',
        `(eventMessage ENDSWITH "${sessionSuffix}")`,
        '--style',
        'compact',
    ]);
    logProcess.stdout?.on('data', (data) => {
        // NOTE(review): each 'data' chunk is split into lines and the violation
        // line and its CMD64_ command line are matched within the SAME chunk —
        // presumably they arrive together; a violation split across chunk
        // boundaries would be reported without its command. Verify if this matters.
        const lines = data.toString().split('\n');
        // Get violation and command lines
        const violationLine = lines.find(line => line.includes('Sandbox:') && line.includes('deny'));
        const commandLine = lines.find(line => line.startsWith('CMD64_'));
        if (!violationLine)
            return;
        // Extract violation details
        const sandboxMatch = violationLine.match(sandboxExtractRegex);
        if (!sandboxMatch?.[1])
            return;
        const violationDetails = sandboxMatch[1];
        // Try to get command
        let command;
        let encodedCommand;
        if (commandLine) {
            const cmdMatch = commandLine.match(cmdExtractRegex);
            encodedCommand = cmdMatch?.[1];
            if (encodedCommand) {
                try {
                    command = decodeSandboxedCommand(encodedCommand);
                }
                catch {
                    // Failed to decode, continue without command
                }
            }
        }
        // Always filter out noisy violations (routine macOS daemon chatter)
        if (violationDetails.includes('mDNSResponder') ||
            violationDetails.includes('mach-lookup com.apple.diagnosticd') ||
            violationDetails.includes('mach-lookup com.apple.analyticsd')) {
            return;
        }
        // Check if we should ignore this violation (ignore patterns only apply
        // when the originating command could be decoded)
        if (ignoreViolations && command) {
            // Check wildcard patterns first
            if (wildcardPaths.length > 0) {
                const shouldIgnore = wildcardPaths.some(path => violationDetails.includes(path));
                if (shouldIgnore)
                    return;
            }
            // Check command-specific patterns
            for (const [pattern, paths] of commandPatterns) {
                if (command.includes(pattern)) {
                    const shouldIgnore = paths.some(path => violationDetails.includes(path));
                    if (shouldIgnore)
                        return;
                }
            }
        }
        // Not ignored - report the violation
        callback({
            line: violationDetails,
            command,
            encodedCommand,
            timestamp: new Date(), // We could parse the timestamp from the log but this feels more reliable
        });
    });
    logProcess.stderr?.on('data', (data) => {
        logForDebugging(`[Sandbox Monitor] Log stream stderr: ${data.toString()}`);
    });
    logProcess.on('error', (error) => {
        logForDebugging(`[Sandbox Monitor] Failed to start log stream: ${error.message}`);
    });
    logProcess.on('exit', (code) => {
        logForDebugging(`[Sandbox Monitor] Log stream exited with code: ${code}`);
    });
    return () => {
        logForDebugging('[Sandbox Monitor] Stopping log monitor');
        logProcess.kill('SIGTERM');
    };
}
|
||||
//# sourceMappingURL=macos-sandbox-utils.js.map
|
||||
180
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-config.js
generated
vendored
Normal file
180
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-config.js
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
/**
|
||||
* Configuration for Sandbox Runtime
|
||||
* This is the main configuration interface that consumers pass to SandboxManager.initialize()
|
||||
*/
|
||||
import { z } from 'zod';
|
||||
/**
 * Schema for domain patterns (e.g., "example.com", "*.npmjs.org").
 * Validates that domain patterns are safe and don't include overly broad
 * wildcards: bare "*", "*.", and single-label wildcards like "*.com" are rejected.
 */
const domainPatternSchema = z.string().refine((val) => {
    // Anything carrying a scheme, path, or port is not a bare domain.
    const hasForbiddenSyntax = val.includes('://') || val.includes('/') || val.includes(':');
    if (hasForbiddenSyntax) {
        return false;
    }
    // localhost is explicitly permitted despite lacking a dot.
    if (val === 'localhost') {
        return true;
    }
    if (val.startsWith('*.')) {
        // A wildcard must cover a concrete multi-label domain: *.example.com is
        // valid, *.com is not (too broad).
        const rest = val.slice(2);
        if (!rest.includes('.') || rest.startsWith('.') || rest.endsWith('.')) {
            return false;
        }
        // Require at least two non-empty labels after the wildcard.
        const labels = rest.split('.');
        return labels.length >= 2 && labels.every((label) => label.length > 0);
    }
    // Any other wildcard placement (e.g., "*", "foo*") is rejected.
    if (val.includes('*')) {
        return false;
    }
    // Regular domains must have at least one interior dot.
    return val.includes('.') && !val.startsWith('.') && !val.endsWith('.');
}, {
    message: 'Invalid domain pattern. Must be a valid domain (e.g., "example.com") or wildcard (e.g., "*.example.com"). Overly broad patterns like "*.com" or "*" are not allowed for security reasons.',
});
|
||||
/**
 * Schema for filesystem paths.
 * Accepts any non-empty string; no path-shape validation is performed here.
 */
const filesystemPathSchema = z.string().min(1, 'Path cannot be empty');
|
||||
/**
 * Schema for MITM proxy configuration.
 * Allows routing specific domains through an upstream MITM proxy via Unix socket.
 * Requires at least one domain; domain entries are validated by domainPatternSchema.
 */
const MitmProxyConfigSchema = z.object({
    socketPath: z.string().min(1).describe('Unix socket path to the MITM proxy'),
    domains: z
        .array(domainPatternSchema)
        .min(1)
        .describe('Domains to route through the MITM proxy (e.g., ["api.example.com", "*.internal.org"])'),
});
|
||||
/**
 * Network configuration schema for validation.
 * allowedDomains/deniedDomains are required arrays; the remaining fields are
 * optional platform- or proxy-specific toggles (see each .describe() below).
 */
export const NetworkConfigSchema = z.object({
    allowedDomains: z
        .array(domainPatternSchema)
        .describe('List of allowed domains (e.g., ["github.com", "*.npmjs.org"])'),
    deniedDomains: z
        .array(domainPatternSchema)
        .describe('List of denied domains'),
    allowUnixSockets: z
        .array(z.string())
        .optional()
        .describe('macOS only: Unix socket paths to allow. Ignored on Linux (seccomp cannot filter by path).'),
    allowAllUnixSockets: z
        .boolean()
        .optional()
        .describe('If true, allow all Unix sockets (disables blocking on both platforms).'),
    allowLocalBinding: z
        .boolean()
        .optional()
        .describe('Whether to allow binding to local ports (default: false)'),
    httpProxyPort: z
        .number()
        .int()
        .min(1)
        .max(65535)
        .optional()
        .describe('Port of an external HTTP proxy to use instead of starting a local one. When provided, the library will skip starting its own HTTP proxy and use this port. The external proxy must handle domain filtering.'),
    socksProxyPort: z
        .number()
        .int()
        .min(1)
        .max(65535)
        .optional()
        .describe('Port of an external SOCKS proxy to use instead of starting a local one. When provided, the library will skip starting its own SOCKS proxy and use this port. The external proxy must handle domain filtering.'),
    mitmProxy: MitmProxyConfigSchema.optional().describe('Optional MITM proxy configuration. Routes matching domains through an upstream proxy via Unix socket while SRT still handles allow/deny filtering.'),
});
|
||||
/**
 * Filesystem configuration schema for validation
 *
 * Precedence: denyWrite overrides allowWrite; allowRead re-allows
 * subpaths inside denyRead regions.
 */
export const FilesystemConfigSchema = z.object({
    denyRead: z.array(filesystemPathSchema).describe('Paths denied for reading'),
    allowRead: z
        .array(filesystemPathSchema)
        .optional()
        .describe('Paths to re-allow reading within denied regions (takes precedence over denyRead). ' +
        'Use with denyRead to deny a broad region then allow back specific subdirectories.'),
    allowWrite: z
        .array(filesystemPathSchema)
        .describe('Paths allowed for writing'),
    denyWrite: z
        .array(filesystemPathSchema)
        .describe('Paths denied for writing (takes precedence over allowWrite)'),
    allowGitConfig: z
        .boolean()
        .optional()
        .describe('Allow writes to .git/config files (default: false). Enables git remote URL updates while keeping .git/hooks protected.'),
});
|
||||
/**
 * Configuration schema for ignoring specific sandbox violations
 * Maps command patterns to filesystem paths to ignore violations for.
 *
 * Key: command pattern ("*" matches all commands); value: list of paths
 * whose violations should be suppressed for matching commands.
 */
export const IgnoreViolationsConfigSchema = z
    .record(z.string(), z.array(z.string()))
    .describe('Map of command patterns to filesystem paths to ignore violations for. Use "*" to match all commands');
|
||||
/**
 * Ripgrep configuration schema
 *
 * Lets callers point the sandbox at a non-default ripgrep binary
 * (e.g. a bundled or multicall build) instead of a plain `rg` on PATH.
 */
export const RipgrepConfigSchema = z.object({
    command: z.string().describe('The ripgrep command to execute'),
    args: z
        .array(z.string())
        .optional()
        .describe('Additional arguments to pass before ripgrep args'),
    argv0: z
        .string()
        .optional()
        .describe('Override argv[0] when spawning (for multicall binaries that dispatch on argv[0])'),
});
|
||||
/**
 * Seccomp configuration schema (Linux only)
 * Allows specifying custom paths to seccomp binaries
 *
 * Both fields are optional; when omitted the library falls back to its
 * bundled/auto-discovered binaries.
 */
export const SeccompConfigSchema = z.object({
    bpfPath: z
        .string()
        .optional()
        .describe('Path to the unix-block.bpf filter file'),
    applyPath: z.string().optional().describe('Path to the apply-seccomp binary'),
});
|
||||
/**
 * Main configuration schema for Sandbox Runtime validation
 *
 * Top-level shape passed to initialize(): mandatory network + filesystem
 * sections plus optional platform-specific tuning knobs.
 */
export const SandboxRuntimeConfigSchema = z.object({
    network: NetworkConfigSchema.describe('Network restrictions configuration'),
    filesystem: FilesystemConfigSchema.describe('Filesystem restrictions configuration'),
    ignoreViolations: IgnoreViolationsConfigSchema.optional().describe('Optional configuration for ignoring specific violations'),
    enableWeakerNestedSandbox: z
        .boolean()
        .optional()
        .describe('Enable weaker nested sandbox mode (for Docker environments)'),
    // Security trade-off knob — see describe() text for the exfiltration caveat.
    enableWeakerNetworkIsolation: z
        .boolean()
        .optional()
        .describe('Enable weaker network isolation to allow access to com.apple.trustd.agent (macOS only). ' +
        'This is needed for Go programs (gh, gcloud, terraform, kubectl, etc.) to verify TLS certificates ' +
        'when using httpProxyPort with a MITM proxy and custom CA. Enabling this opens a potential data ' +
        'exfiltration vector through the trustd service. Only enable if you need Go TLS verification.'),
    ripgrep: RipgrepConfigSchema.optional().describe('Custom ripgrep configuration (default: { command: "rg" })'),
    mandatoryDenySearchDepth: z
        .number()
        .int()
        .min(1)
        .max(10)
        .optional()
        .describe('Maximum directory depth to search for dangerous files on Linux (default: 3). ' +
        'Higher values provide more protection but slower performance.'),
    allowPty: z
        .boolean()
        .optional()
        .describe('Allow pseudo-terminal (pty) operations (macOS only)'),
    seccomp: SeccompConfigSchema.optional().describe('Custom seccomp binary paths (Linux only).'),
});
|
||||
//# sourceMappingURL=sandbox-config.js.map
|
||||
786
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-manager.js
generated
vendored
Normal file
786
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-manager.js
generated
vendored
Normal file
@@ -0,0 +1,786 @@
|
||||
import { createHttpProxyServer } from './http-proxy.js';
|
||||
import { createSocksProxyServer } from './socks-proxy.js';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
import { whichSync } from '../utils/which.js';
|
||||
import { cloneDeep } from 'lodash-es';
|
||||
import { getPlatform, getWslVersion } from '../utils/platform.js';
|
||||
import * as fs from 'fs';
|
||||
import { wrapCommandWithSandboxLinux, initializeLinuxNetworkBridge, checkLinuxDependencies, cleanupBwrapMountPoints, } from './linux-sandbox-utils.js';
|
||||
import { wrapCommandWithSandboxMacOS, startMacOSSandboxLogMonitor, } from './macos-sandbox-utils.js';
|
||||
import { getDefaultWritePaths, containsGlobChars, removeTrailingGlobSuffix, expandGlobPattern, } from './sandbox-utils.js';
|
||||
import { SandboxViolationStore } from './sandbox-violation-store.js';
|
||||
import { EOL } from 'node:os';
|
||||
// ============================================================================
// Private Module State
// ============================================================================
// Active runtime config; undefined until initialize()/updateConfig() runs.
let config;
// Locally-started HTTP proxy server (undefined when using an external proxy).
let httpProxyServer;
// Locally-started SOCKS proxy server (undefined when using an external proxy).
let socksProxyServer;
// Resolved network context: { httpProxyPort, socksProxyPort, linuxBridge }.
let managerContext;
// In-flight (or completed) network initialization; cleared on failure/reset.
let initializationPromise;
// Guards registerCleanup() so process handlers are installed only once.
let cleanupRegistered = false;
// Shutdown callback for the macOS sandbox log monitor, when running.
let logMonitorShutdown;
// Accumulates sandbox violations reported by the log monitor.
const sandboxViolationStore = new SandboxViolationStore();
// ============================================================================
// Private Helper Functions (not exported)
// ============================================================================
|
||||
/**
 * Install process-level handlers so sandbox resources (proxies, bridge
 * processes, bwrap mount points) are torn down when the host process ends.
 * Idempotent: only the first call registers handlers.
 *
 * NOTE(review): reset() is async, but Node's 'exit' event cannot wait for
 * asynchronous work, so only reset()'s synchronous portion runs there.
 * The SIGINT/SIGTERM handlers also do not re-raise the signal after cleanup,
 * which changes default termination behavior — confirm this is intended.
 */
function registerCleanup() {
    if (cleanupRegistered) {
        return;
    }
    // Cleanup failures are logged and swallowed: the process is already exiting.
    const cleanupHandler = () => reset().catch(e => {
        logForDebugging(`Cleanup failed in registerCleanup ${e}`, {
            level: 'error',
        });
    });
    process.once('exit', cleanupHandler);
    process.once('SIGINT', cleanupHandler);
    process.once('SIGTERM', cleanupHandler);
    cleanupRegistered = true;
}
|
||||
/**
 * Check whether a hostname matches a domain pattern.
 *
 * A "*.example.com" pattern matches any subdomain of example.com but NOT
 * the bare base domain itself; every other pattern must equal the hostname
 * exactly. Comparison is case-insensitive on both sides.
 *
 * @param {string} hostname - Hostname being checked.
 * @param {string} pattern - Exact domain or "*." wildcard pattern.
 * @returns {boolean} true when the hostname matches the pattern.
 */
function matchesDomainPattern(hostname, pattern) {
    const normalizedHost = hostname.toLowerCase();
    if (pattern.startsWith('*.')) {
        // Require the leading dot in the suffix so "example.com" itself
        // does not satisfy "*.example.com".
        const requiredSuffix = '.' + pattern.slice(2).toLowerCase();
        return normalizedHost.endsWith(requiredSuffix);
    }
    return normalizedHost === pattern.toLowerCase();
}
|
||||
/**
 * Decide whether a proxied network request to host:port may proceed.
 *
 * Evaluation order (first match wins):
 *   1. No config loaded            -> deny.
 *   2. Any deniedDomains match     -> deny (deny beats allow).
 *   3. Any allowedDomains match    -> allow.
 *   4. No rule matched             -> ask sandboxAskCallback if provided;
 *      deny when there is no callback, the user says no, or the callback throws.
 *
 * @param {number} port - Destination port (used for logging only).
 * @param {string} host - Destination hostname.
 * @param {(req: {host: string, port: number}) => Promise<boolean>} [sandboxAskCallback]
 *        Optional interactive permission prompt.
 * @returns {Promise<boolean>} true when the request is allowed.
 */
async function filterNetworkRequest(port, host, sandboxAskCallback) {
    if (!config) {
        logForDebugging('No config available, denying network request');
        return false;
    }
    // Check denied domains first
    for (const deniedDomain of config.network.deniedDomains) {
        if (matchesDomainPattern(host, deniedDomain)) {
            logForDebugging(`Denied by config rule: ${host}:${port}`);
            return false;
        }
    }
    // Check allowed domains
    for (const allowedDomain of config.network.allowedDomains) {
        if (matchesDomainPattern(host, allowedDomain)) {
            logForDebugging(`Allowed by config rule: ${host}:${port}`);
            return true;
        }
    }
    // No matching rules - ask user or deny
    if (!sandboxAskCallback) {
        logForDebugging(`No matching config rule, denying: ${host}:${port}`);
        return false;
    }
    logForDebugging(`No matching config rule, asking user: ${host}:${port}`);
    try {
        const userAllowed = await sandboxAskCallback({ host, port });
        if (userAllowed) {
            logForDebugging(`User allowed: ${host}:${port}`);
            return true;
        }
        else {
            logForDebugging(`User denied: ${host}:${port}`);
            return false;
        }
    }
    catch (error) {
        // Fail closed: a broken permission callback must not open the network.
        logForDebugging(`Error in permission callback: ${error}`, {
            level: 'error',
        });
        return false;
    }
}
|
||||
/**
 * Get the MITM proxy socket path for a given host, if configured.
 * Returns the socket path if the host matches any MITM domain pattern,
 * otherwise returns undefined.
 */
function getMitmSocketPath(host) {
    const mitmProxy = config?.network.mitmProxy;
    if (!mitmProxy) {
        return undefined;
    }
    const pattern = mitmProxy.domains.find(candidate => matchesDomainPattern(host, candidate));
    if (pattern === undefined) {
        return undefined;
    }
    logForDebugging(`Host ${host} matches MITM pattern ${pattern}`);
    return mitmProxy.socketPath;
}
|
||||
/**
 * Start the local filtering HTTP proxy on an ephemeral localhost port.
 *
 * The explicit `new Promise` is appropriate here: it adapts the server's
 * 'listening'/'error' event callbacks into a promise. The server is
 * unref()ed once listening so it does not keep the event loop alive.
 *
 * @param sandboxAskCallback Optional interactive prompt forwarded to
 *        filterNetworkRequest for unmatched hosts.
 * @returns {Promise<number>} the bound port.
 */
async function startHttpProxyServer(sandboxAskCallback) {
    httpProxyServer = createHttpProxyServer({
        filter: (port, host) => filterNetworkRequest(port, host, sandboxAskCallback),
        getMitmSocketPath,
    });
    return new Promise((resolve, reject) => {
        if (!httpProxyServer) {
            // Defensive; httpProxyServer was assigned synchronously above.
            reject(new Error('HTTP proxy server undefined before listen'));
            return;
        }
        // Capture a local reference so later reassignment of the module
        // variable cannot affect this in-flight startup.
        const server = httpProxyServer;
        server.once('error', reject);
        server.once('listening', () => {
            const address = server.address();
            if (address && typeof address === 'object') {
                server.unref();
                logForDebugging(`HTTP proxy listening on localhost:${address.port}`);
                resolve(address.port);
            }
            else {
                reject(new Error('Failed to get proxy server address'));
            }
        });
        // Port 0 = let the OS pick a free port; bind to loopback only.
        server.listen(0, '127.0.0.1');
    });
}
|
||||
/**
 * Start the local filtering SOCKS proxy on an ephemeral localhost port.
 *
 * Unlike the HTTP proxy, this server's listen() already returns a promise,
 * so it is awaited directly instead of being wrapped in a redundant
 * `new Promise(...)` (explicit-construction anti-pattern in the original).
 * A local reference is used so a concurrent reassignment of the module-level
 * `socksProxyServer` cannot break startup.
 *
 * @param sandboxAskCallback Optional interactive prompt forwarded to
 *        filterNetworkRequest for unmatched hosts.
 * @returns {Promise<number>} the bound port.
 */
async function startSocksProxyServer(sandboxAskCallback) {
    const server = createSocksProxyServer({
        filter: (port, host) => filterNetworkRequest(port, host, sandboxAskCallback),
    });
    socksProxyServer = server;
    // Port 0 = let the OS pick a free port; bind to loopback only.
    const port = await server.listen(0, '127.0.0.1');
    // Don't let the proxy keep the Node event loop alive on its own.
    server.unref();
    return port;
}
|
||||
// ============================================================================
|
||||
// Public Module Functions (will be exported via namespace)
|
||||
// ============================================================================
|
||||
/**
 * Initialize the sandbox manager: store the runtime config, verify platform
 * dependencies, optionally start the macOS log monitor, register process
 * cleanup handlers, and bring up the network infrastructure (local or
 * external HTTP/SOCKS proxies, plus the Unix-socket bridge on Linux).
 *
 * If an initialization is already in flight, this awaits it and returns
 * without re-running setup.
 *
 * NOTE(review): the synchronous prelude (config assignment, dependency
 * check) runs before initializationPromise is set, so two truly concurrent
 * first calls could both enter setup — confirm callers serialize this.
 *
 * @param runtimeConfig Validated SandboxRuntimeConfig.
 * @param sandboxAskCallback Optional interactive network-permission prompt.
 * @param enableLogMonitor Start the macOS sandbox log monitor (default false).
 * @throws Error when required platform dependencies are missing or network
 *         setup fails (state is cleared so initialization can be retried).
 */
async function initialize(runtimeConfig, sandboxAskCallback, enableLogMonitor = false) {
    // Return if already initializing
    if (initializationPromise) {
        await initializationPromise;
        return;
    }
    // Store config for use by other functions
    config = runtimeConfig;
    // Check dependencies
    const deps = checkDependencies();
    if (deps.errors.length > 0) {
        throw new Error(`Sandbox dependencies not available: ${deps.errors.join(', ')}`);
    }
    // Start log monitor for macOS if enabled
    if (enableLogMonitor && getPlatform() === 'macos') {
        logMonitorShutdown = startMacOSSandboxLogMonitor(sandboxViolationStore.addViolation.bind(sandboxViolationStore), config.ignoreViolations);
        logForDebugging('Started macOS sandbox log monitor');
    }
    // Register cleanup handlers first time
    registerCleanup();
    // Initialize network infrastructure
    initializationPromise = (async () => {
        try {
            // Conditionally start proxy servers based on config
            let httpProxyPort;
            if (config.network.httpProxyPort !== undefined) {
                // Use external HTTP proxy (don't start a server)
                httpProxyPort = config.network.httpProxyPort;
                logForDebugging(`Using external HTTP proxy on port ${httpProxyPort}`);
            }
            else {
                // Start local HTTP proxy
                httpProxyPort = await startHttpProxyServer(sandboxAskCallback);
            }
            let socksProxyPort;
            if (config.network.socksProxyPort !== undefined) {
                // Use external SOCKS proxy (don't start a server)
                socksProxyPort = config.network.socksProxyPort;
                logForDebugging(`Using external SOCKS proxy on port ${socksProxyPort}`);
            }
            else {
                // Start local SOCKS proxy
                socksProxyPort = await startSocksProxyServer(sandboxAskCallback);
            }
            // Initialize platform-specific infrastructure
            let linuxBridge;
            if (getPlatform() === 'linux') {
                linuxBridge = await initializeLinuxNetworkBridge(httpProxyPort, socksProxyPort);
            }
            const context = {
                httpProxyPort,
                socksProxyPort,
                linuxBridge,
            };
            managerContext = context;
            logForDebugging('Network infrastructure initialized');
            return context;
        }
        catch (error) {
            // Clear state on error so initialization can be retried
            initializationPromise = undefined;
            managerContext = undefined;
            // Best-effort teardown of anything partially started; errors are
            // logged rather than masking the original failure below.
            reset().catch(e => {
                logForDebugging(`Cleanup failed in initializationPromise ${e}`, {
                    level: 'error',
                });
            });
            throw error;
        }
    })();
    await initializationPromise;
}
|
||||
/**
 * Whether sandboxing is available on the current platform.
 * macOS is supported unconditionally; Linux is supported except under WSL1
 * (bubblewrap does not work there). Everything else is unsupported.
 */
function isSupportedPlatform() {
    switch (getPlatform()) {
        case 'macos':
            return true;
        case 'linux':
            // WSL1 doesn't support bubblewrap
            return getWslVersion() !== '1';
        default:
            return false;
    }
}
|
||||
/**
 * Whether sandboxing has been enabled in this process.
 * True once a runtime config has been stored by initialize()/updateConfig().
 */
function isSandboxingEnabled() {
    // Sandboxing is enabled if config has been set (via initialize())
    return config !== undefined;
}
|
||||
/**
 * Check sandbox dependencies for the current platform
 * @param ripgrepConfig - Ripgrep command to check. If not provided, uses config from initialization or defaults to 'rg'
 * @returns { warnings, errors } - errors mean sandbox cannot run, warnings mean degraded functionality
 */
function checkDependencies(ripgrepConfig) {
    if (!isSupportedPlatform()) {
        return { errors: ['Unsupported platform'], warnings: [] };
    }
    const result = { errors: [], warnings: [] };
    // Ripgrep resolution order: explicit argument, then the initialized
    // config, then the default 'rg' on PATH.
    const { command } = ripgrepConfig ?? config?.ripgrep ?? { command: 'rg' };
    if (whichSync(command) === null) {
        result.errors.push(`ripgrep (${command}) not found`);
    }
    // Linux additionally needs bubblewrap/seccomp tooling.
    if (getPlatform() === 'linux') {
        const linuxDeps = checkLinuxDependencies(config?.seccomp);
        result.errors.push(...linuxDeps.errors);
        result.warnings.push(...linuxDeps.warnings);
    }
    return result;
}
|
||||
/**
 * Build the filesystem READ restriction config from the active runtime config.
 *
 * Returns { denyOnly, allowWithinDeny } where allowWithinDeny re-allows
 * subpaths inside denied regions. When no config is loaded, nothing is
 * denied (empty lists).
 *
 * The denyRead and allowRead lists go through identical normalization, so
 * the previously duplicated loops are factored into one helper: strip a
 * trailing "/**" (subpath matching is already recursive), and on Linux
 * expand remaining glob patterns to concrete paths because bubblewrap
 * cannot mount globs.
 */
function getFsReadConfig() {
    if (!config) {
        return { denyOnly: [], allowWithinDeny: [] };
    }
    // Shared normalizer for both lists; `label` only affects the debug
    // message so the original per-list log text is preserved exactly.
    const normalizePaths = (paths, label) => {
        const out = [];
        for (const p of paths) {
            const stripped = removeTrailingGlobSuffix(p);
            if (getPlatform() === 'linux' && containsGlobChars(stripped)) {
                // Expand glob to concrete paths on Linux (bubblewrap doesn't support globs)
                const expanded = expandGlobPattern(p);
                logForDebugging(`[Sandbox] Expanded ${label} pattern "${p}" to ${expanded.length} paths on Linux`);
                out.push(...expanded);
            }
            else {
                out.push(stripped);
            }
        }
        return out;
    };
    return {
        denyOnly: normalizePaths(config.filesystem.denyRead, 'glob'),
        // allowRead is optional: re-allow within denied regions.
        allowWithinDeny: normalizePaths(config.filesystem.allowRead ?? [], 'allowRead glob'),
    };
}
|
||||
/**
 * Build the filesystem WRITE restriction config from the active runtime config.
 *
 * Returns { allowOnly, denyWithinAllow }: writes are permitted only under
 * allowOnly (which always includes the default system write paths such as
 * /dev/null), except inside denyWithinAllow. With no config loaded, only
 * the default write paths are allowed.
 *
 * The allowWrite and denyWrite lists go through identical sanitization, so
 * the previously duplicated map/filter chains are factored into one helper.
 */
function getFsWriteConfig() {
    if (!config) {
        return { allowOnly: getDefaultWritePaths(), denyWithinAllow: [] };
    }
    // Strip trailing "/**" and drop any remaining glob patterns on Linux/WSL
    // (bubblewrap doesn't support globs in mount paths).
    const sanitizePaths = (paths) => paths
        .map(path => removeTrailingGlobSuffix(path))
        .filter(path => {
            if (getPlatform() === 'linux' && containsGlobChars(path)) {
                logForDebugging(`Skipping glob pattern on Linux/WSL: ${path}`);
                return false;
            }
            return true;
        });
    return {
        // Default paths first, then the configured allow paths.
        allowOnly: [...getDefaultWritePaths(), ...sanitizePaths(config.filesystem.allowWrite)],
        denyWithinAllow: sanitizePaths(config.filesystem.denyWrite),
    };
}
|
||||
/**
 * Build the host allow/deny restriction object from the active config.
 * Empty lists are omitted from the result entirely; with no config loaded
 * the result is an empty object (no restrictions expressed).
 */
function getNetworkRestrictionConfig() {
    if (!config) {
        return {};
    }
    const { allowedDomains, deniedDomains } = config.network;
    const restriction = {};
    if (allowedDomains.length > 0) {
        restriction.allowedHosts = allowedDomains;
    }
    if (deniedDomains.length > 0) {
        restriction.deniedHosts = deniedDomains;
    }
    return restriction;
}
|
||||
// ---------------------------------------------------------------------------
// Read-only accessors over the active runtime config / manager context.
// All return undefined when the corresponding config/context is not set,
// except where a default is noted.
// ---------------------------------------------------------------------------
// macOS only: Unix socket paths explicitly allowed by config.
function getAllowUnixSockets() {
    return config?.network?.allowUnixSockets;
}
// Whether all Unix sockets are allowed (disables blocking on both platforms).
function getAllowAllUnixSockets() {
    return config?.network?.allowAllUnixSockets;
}
// Whether binding to local ports is allowed.
function getAllowLocalBinding() {
    return config?.network?.allowLocalBinding;
}
// Map of command patterns -> paths whose violations should be ignored.
function getIgnoreViolations() {
    return config?.ignoreViolations;
}
// Weaker nested sandbox mode (for Docker environments).
function getEnableWeakerNestedSandbox() {
    return config?.enableWeakerNestedSandbox;
}
// macOS: weaker network isolation (trustd access for Go TLS verification).
function getEnableWeakerNetworkIsolation() {
    return config?.enableWeakerNetworkIsolation;
}
// Ripgrep configuration; defaults to plain 'rg' on PATH.
function getRipgrepConfig() {
    return config?.ripgrep ?? { command: 'rg' };
}
// Linux dangerous-file search depth; defaults to 3.
function getMandatoryDenySearchDepth() {
    return config?.mandatoryDenySearchDepth ?? 3;
}
// Whether .git/config writes are allowed; defaults to false.
function getAllowGitConfig() {
    return config?.filesystem?.allowGitConfig ?? false;
}
// Linux: custom seccomp binary paths.
function getSeccompConfig() {
    return config?.seccomp;
}
// Port of the HTTP proxy in use (local or external), once initialized.
function getProxyPort() {
    return managerContext?.httpProxyPort;
}
// Port of the SOCKS proxy in use (local or external), once initialized.
function getSocksProxyPort() {
    return managerContext?.socksProxyPort;
}
// Linux bridge: Unix socket path fronting the HTTP proxy.
function getLinuxHttpSocketPath() {
    return managerContext?.linuxBridge?.httpSocketPath;
}
// Linux bridge: Unix socket path fronting the SOCKS proxy.
function getLinuxSocksSocketPath() {
    return managerContext?.linuxBridge?.socksSocketPath;
}
|
||||
/**
 * Wait for network initialization to complete if already in progress
 * Returns true if initialized successfully, false otherwise
 */
async function waitForNetworkInitialization() {
    // No config at all means sandboxing was never enabled.
    if (!config) {
        return false;
    }
    // No initialization in flight: report whether a context already exists.
    if (!initializationPromise) {
        return managerContext !== undefined;
    }
    try {
        await initializationPromise;
        return true;
    }
    catch {
        // Initialization failed; callers treat this as "network unavailable".
        return false;
    }
}
|
||||
/**
 * Wrap a shell command so it executes inside the platform sandbox.
 *
 * Filesystem rules come from customConfig when provided, otherwise the main
 * config, otherwise empty lists (most restrictive); default system write
 * paths are always included. Network restriction/proxying is engaged
 * whenever allowedDomains is defined anywhere (an empty list means "block
 * all", still routed through the proxy so updateConfig() can later open
 * access for running processes).
 *
 * @param command The shell command to wrap.
 * @param binShell Shell binary used to execute the command.
 * @param customConfig Optional per-call override of filesystem/network/pty settings.
 * @param abortSignal Optional AbortSignal forwarded to the Linux wrapper.
 * @returns The platform-specific wrapped command.
 * @throws Error on unsupported platforms.
 */
async function wrapWithSandbox(command, binShell, customConfig, abortSignal) {
    const platform = getPlatform();
    // Get configs - use custom if provided, otherwise fall back to main config
    // If neither exists, defaults to empty arrays (most restrictive)
    // Always include default system write paths (like /dev/null, /tmp/claude)
    //
    // Strip trailing /** and filter remaining globs on Linux (bwrap needs
    // real paths, not globs; macOS subpath matching is also recursive so
    // stripping is harmless there).
    const stripWriteGlobs = (paths) => paths
        .map(p => removeTrailingGlobSuffix(p))
        .filter(p => {
        if (getPlatform() === 'linux' && containsGlobChars(p)) {
            logForDebugging(`[Sandbox] Skipping glob write pattern on Linux: ${p}`);
            return false;
        }
        return true;
    });
    const userAllowWrite = stripWriteGlobs(customConfig?.filesystem?.allowWrite ?? config?.filesystem.allowWrite ?? []);
    const writeConfig = {
        allowOnly: [...getDefaultWritePaths(), ...userAllowWrite],
        denyWithinAllow: stripWriteGlobs(customConfig?.filesystem?.denyWrite ?? config?.filesystem.denyWrite ?? []),
    };
    // Read-deny paths: strip trailing /**; expand remaining globs to concrete
    // paths on Linux (bubblewrap cannot mount glob patterns).
    const rawDenyRead = customConfig?.filesystem?.denyRead ?? config?.filesystem.denyRead ?? [];
    const expandedDenyRead = [];
    for (const p of rawDenyRead) {
        const stripped = removeTrailingGlobSuffix(p);
        if (getPlatform() === 'linux' && containsGlobChars(stripped)) {
            expandedDenyRead.push(...expandGlobPattern(p));
        }
        else {
            expandedDenyRead.push(stripped);
        }
    }
    // Same treatment for allowRead (re-allows subpaths within denied regions).
    const rawAllowRead = customConfig?.filesystem?.allowRead ?? config?.filesystem.allowRead ?? [];
    const expandedAllowRead = [];
    for (const p of rawAllowRead) {
        const stripped = removeTrailingGlobSuffix(p);
        if (getPlatform() === 'linux' && containsGlobChars(stripped)) {
            expandedAllowRead.push(...expandGlobPattern(p));
        }
        else {
            expandedAllowRead.push(stripped);
        }
    }
    const readConfig = {
        denyOnly: expandedDenyRead,
        allowWithinDeny: expandedAllowRead,
    };
    // Check if network config is specified - this determines if we need network restrictions
    // Network restriction is needed when:
    // 1. customConfig has network.allowedDomains defined (even if empty array = block all)
    // 2. OR config has network.allowedDomains defined (even if empty array = block all)
    // An empty allowedDomains array means "no domains allowed" = block all network access
    const hasNetworkConfig = customConfig?.network?.allowedDomains !== undefined ||
        config?.network?.allowedDomains !== undefined;
    // Network RESTRICTION is needed whenever network config is specified
    // This includes empty allowedDomains which means "block all network"
    const needsNetworkRestriction = hasNetworkConfig;
    // Network PROXY is needed whenever network config is specified
    // Even with empty allowedDomains, we route through proxy so that:
    // 1. updateConfig() can enable network access for already-running processes
    // 2. The proxy blocks all requests when allowlist is empty
    const needsNetworkProxy = hasNetworkConfig;
    // Wait for network initialization only if proxy is actually needed
    if (needsNetworkProxy) {
        await waitForNetworkInitialization();
    }
    // Check custom config to allow pseudo-terminal (can be applied dynamically)
    const allowPty = customConfig?.allowPty ?? config?.allowPty;
    switch (platform) {
        case 'macos':
            // macOS sandbox profile supports glob patterns directly, no ripgrep needed
            return wrapCommandWithSandboxMacOS({
                command,
                needsNetworkRestriction,
                // Only pass proxy ports if proxy is running (when there are domains to filter)
                httpProxyPort: needsNetworkProxy ? getProxyPort() : undefined,
                socksProxyPort: needsNetworkProxy ? getSocksProxyPort() : undefined,
                readConfig,
                writeConfig,
                allowUnixSockets: getAllowUnixSockets(),
                allowAllUnixSockets: getAllowAllUnixSockets(),
                allowLocalBinding: getAllowLocalBinding(),
                ignoreViolations: getIgnoreViolations(),
                allowPty,
                allowGitConfig: getAllowGitConfig(),
                enableWeakerNetworkIsolation: getEnableWeakerNetworkIsolation(),
                binShell,
            });
        case 'linux':
            return wrapCommandWithSandboxLinux({
                command,
                needsNetworkRestriction,
                // Only pass socket paths if proxy is running (when there are domains to filter)
                httpSocketPath: needsNetworkProxy
                    ? getLinuxHttpSocketPath()
                    : undefined,
                socksSocketPath: needsNetworkProxy
                    ? getLinuxSocksSocketPath()
                    : undefined,
                httpProxyPort: needsNetworkProxy
                    ? managerContext?.httpProxyPort
                    : undefined,
                socksProxyPort: needsNetworkProxy
                    ? managerContext?.socksProxyPort
                    : undefined,
                readConfig,
                writeConfig,
                enableWeakerNestedSandbox: getEnableWeakerNestedSandbox(),
                allowAllUnixSockets: getAllowAllUnixSockets(),
                binShell,
                ripgrepConfig: getRipgrepConfig(),
                mandatoryDenySearchDepth: getMandatoryDenySearchDepth(),
                allowGitConfig: getAllowGitConfig(),
                seccompConfig: getSeccompConfig(),
                abortSignal,
            });
        default:
            // Unsupported platform - this should not happen since isSandboxingEnabled() checks platform support
            throw new Error(`Sandbox configuration is not supported on platform: ${platform}`);
    }
}
|
||||
/**
 * Get the current sandbox configuration
 * @returns The current configuration, or undefined if not initialized
 *
 * Note: returns the live config object, not a copy — callers must not
 * mutate it (use updateConfig() instead, which deep-clones its input).
 */
function getConfig() {
    return config;
}
/**
 * Update the sandbox configuration
 * @param newConfig - The new configuration to use
 *
 * Takes effect for subsequent filter decisions and command wrapping;
 * already-running proxies consult the updated config on each request.
 */
function updateConfig(newConfig) {
    // Deep clone the config to avoid mutations
    config = cloneDeep(newConfig);
    logForDebugging('Sandbox configuration updated');
}
|
||||
/**
 * Lightweight cleanup to call after each sandboxed command completes.
 *
 * On Linux, bwrap creates empty files on the host filesystem as mount points
 * when protecting non-existent deny paths (e.g. ~/.bashrc, ~/.gitconfig).
 * These persist after bwrap exits. This function removes them.
 *
 * Safe to call on any platform — it's a no-op on macOS.
 * Also called automatically by reset() and on process exit as safety nets.
 */
function cleanupAfterCommand() {
    cleanupBwrapMountPoints();
}
|
||||
/**
 * Tear down all sandbox infrastructure: bwrap mount points, the macOS log
 * monitor, the Linux bridge processes and their Unix sockets, and any
 * locally started HTTP/SOCKS proxy servers. Clears all module state so a
 * fresh initialize() can run afterwards.
 *
 * Fixes vs. the previous version:
 *  - The 5s SIGKILL-fallback timer is now cleared when a bridge process
 *    exits promptly; before, the live timer could keep the event loop
 *    alive for up to 5 seconds per bridge after a clean shutdown.
 *  - The verbatim-duplicated HTTP/SOCKS termination and socket-removal
 *    code is factored into helpers (all log messages unchanged).
 */
async function reset() {
    // Clean up any leftover bwrap mount points
    cleanupAfterCommand();
    // Stop log monitor
    if (logMonitorShutdown) {
        logMonitorShutdown();
        logMonitorShutdown = undefined;
    }
    if (managerContext?.linuxBridge) {
        const { httpSocketPath, socksSocketPath, httpBridgeProcess, socksBridgeProcess, } = managerContext.linuxBridge;
        // Send SIGTERM to a bridge process and return a promise that resolves
        // once it exits, escalating to SIGKILL after 5 seconds. Returns
        // undefined when there is nothing to wait for (no pid, already
        // killed, or the process was already gone).
        const terminateBridge = (bridgeProcess, name) => {
            if (!bridgeProcess.pid || bridgeProcess.killed) {
                return undefined;
            }
            try {
                process.kill(bridgeProcess.pid, 'SIGTERM');
                logForDebugging(`Sent SIGTERM to ${name} bridge process`);
                return new Promise(resolve => {
                    const killTimer = setTimeout(() => {
                        if (!bridgeProcess.killed) {
                            logForDebugging(`${name} bridge did not exit, forcing SIGKILL`, {
                                level: 'warn',
                            });
                            try {
                                if (bridgeProcess.pid) {
                                    process.kill(bridgeProcess.pid, 'SIGKILL');
                                }
                            }
                            catch {
                                // Process may have already exited
                            }
                        }
                        resolve();
                    }, 5000);
                    bridgeProcess.once('exit', () => {
                        // Cancel the SIGKILL fallback so the pending timer
                        // cannot keep the event loop alive after a clean exit.
                        clearTimeout(killTimer);
                        logForDebugging(`${name} bridge process exited`);
                        resolve();
                    });
                });
            }
            catch (err) {
                if (err.code !== 'ESRCH') {
                    logForDebugging(`Error killing ${name} bridge: ${err}`, {
                        level: 'error',
                    });
                }
                return undefined;
            }
        };
        // Terminate both bridges in parallel and wait for their exits.
        const exitPromises = [
            terminateBridge(httpBridgeProcess, 'HTTP'),
            terminateBridge(socksBridgeProcess, 'SOCKS'),
        ].filter(p => p !== undefined);
        await Promise.all(exitPromises);
        // Remove a bridge Unix socket file, logging (not throwing) on failure.
        const removeSocket = (socketPath, name) => {
            if (!socketPath) {
                return;
            }
            try {
                fs.rmSync(socketPath, { force: true });
                logForDebugging(`Cleaned up ${name} socket`);
            }
            catch (err) {
                logForDebugging(`${name} socket cleanup error: ${err}`, {
                    level: 'error',
                });
            }
        };
        removeSocket(httpSocketPath, 'HTTP');
        removeSocket(socksSocketPath, 'SOCKS');
    }
    // Close servers in parallel (only if they exist, i.e., were started by us)
    const closePromises = [];
    if (httpProxyServer) {
        const server = httpProxyServer; // Capture reference to avoid TypeScript error
        closePromises.push(new Promise(resolve => {
            server.close(error => {
                if (error && error.message !== 'Server is not running.') {
                    logForDebugging(`Error closing HTTP proxy server: ${error.message}`, {
                        level: 'error',
                    });
                }
                resolve();
            });
        }));
    }
    if (socksProxyServer) {
        closePromises.push(socksProxyServer.close().catch((error) => {
            logForDebugging(`Error closing SOCKS proxy server: ${error.message}`, {
                level: 'error',
            });
        }));
    }
    // Wait for all servers to close
    await Promise.all(closePromises);
    // Clear references
    httpProxyServer = undefined;
    socksProxyServer = undefined;
    managerContext = undefined;
    initializationPromise = undefined;
}
|
||||
/**
 * Accessor for the module-level violation store shared by this session.
 */
function getSandboxViolationStore() {
    return sandboxViolationStore;
}
|
||||
/**
 * Append any sandbox violations recorded for `command` to its stderr output,
 * wrapped in a <sandbox_violations> tag block. Returns stderr unchanged when
 * sandboxing is not configured or no violations match the command.
 */
function annotateStderrWithSandboxFailures(command, stderr) {
    if (!config) {
        return stderr;
    }
    const matched = sandboxViolationStore.getViolationsForCommand(command);
    if (matched.length === 0) {
        return stderr;
    }
    // Each violation line is terminated with EOL inside the tag block.
    const violationLines = matched.map(v => v.line + EOL).join('');
    return (stderr +
        EOL +
        '<sandbox_violations>' +
        EOL +
        violationLines +
        '</sandbox_violations>');
}
|
||||
/**
 * Returns glob patterns from Edit/Read permission rules that are not
 * fully supported on Linux. Returns empty array on macOS or when
 * sandboxing is disabled.
 *
 * Patterns whose only glob syntax is a trailing "/**" are excluded,
 * since those behave as plain subpaths.
 */
function getLinuxGlobPatternWarnings() {
    // bubblewrap (Linux/WSL) cannot interpret globs; macOS converts them to regex.
    if (getPlatform() !== 'linux' || !config) {
        return [];
    }
    // denyRead is intentionally excluded: its globs are expanded to
    // concrete paths on Linux before being applied.
    const candidatePaths = [
        ...config.filesystem.allowWrite,
        ...config.filesystem.denyWrite,
    ];
    // Warn only when glob characters remain after dropping a trailing /**.
    return candidatePaths.filter(p => containsGlobChars(removeTrailingGlobSuffix(p)));
}
|
||||
// ============================================================================
|
||||
// Export as Namespace with Interface
|
||||
// ============================================================================
|
||||
/**
 * Global sandbox manager that handles both network and filesystem restrictions
 * for this session. This runs outside of the sandbox, on the host machine.
 */
export const SandboxManager = {
    // Lifecycle
    initialize,
    reset,
    // Platform / capability checks
    isSupportedPlatform,
    isSandboxingEnabled,
    checkDependencies,
    // Configuration accessors
    getFsReadConfig,
    getFsWriteConfig,
    getNetworkRestrictionConfig,
    getAllowUnixSockets,
    getAllowLocalBinding,
    getIgnoreViolations,
    getEnableWeakerNestedSandbox,
    // Proxy / socket endpoints
    getProxyPort,
    getSocksProxyPort,
    getLinuxHttpSocketPath,
    getLinuxSocksSocketPath,
    waitForNetworkInitialization,
    // Command execution
    wrapWithSandbox,
    cleanupAfterCommand,
    // Violation reporting
    getSandboxViolationStore,
    annotateStderrWithSandboxFailures,
    getLinuxGlobPatternWarnings,
    // Config management
    getConfig,
    updateConfig,
};
|
||||
//# sourceMappingURL=sandbox-manager.js.map
|
||||
435
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-utils.js
generated
vendored
Normal file
435
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-utils.js
generated
vendored
Normal file
@@ -0,0 +1,435 @@
|
||||
import { homedir } from 'os';
|
||||
import * as path from 'path';
|
||||
import * as fs from 'fs';
|
||||
import { getPlatform } from '../utils/platform.js';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
/**
 * Dangerous files that should be protected from writes.
 * These files can be used for code execution or data exfiltration.
 */
export const DANGEROUS_FILES = [
    '.gitconfig',
    '.gitmodules',
    '.bashrc',
    '.bash_profile',
    '.zshrc',
    '.zprofile',
    '.profile',
    '.ripgreprc',
    '.mcp.json',
];
/**
 * Dangerous directories that should be protected from writes.
 * These directories contain sensitive configuration or executable files.
 */
export const DANGEROUS_DIRECTORIES = ['.git', '.vscode', '.idea'];
/**
 * Get the list of dangerous directories to deny writes to.
 * Excludes .git since we need it writable for git operations -
 * instead we block specific paths within .git (hooks and config).
 */
export function getDangerousDirectories() {
    const withoutGit = DANGEROUS_DIRECTORIES.filter(dir => dir !== '.git');
    return [...withoutGit, '.claude/commands', '.claude/agents'];
}
|
||||
/**
 * Normalizes a path for case-insensitive comparison.
 *
 * Prevents bypassing security checks with mixed-case paths on
 * case-insensitive filesystems (macOS/Windows), e.g.
 * `.cLauDe/Settings.locaL.json`. Lowercasing is applied on every
 * platform so comparisons behave consistently.
 *
 * @param pathStr The path to normalize
 * @returns The lowercase path for safe comparison
 */
export function normalizeCaseForComparison(pathStr) {
    const lowered = pathStr.toLowerCase();
    return lowered;
}
|
||||
/**
 * Check if a path pattern contains glob characters (* ? [ ]).
 */
export function containsGlobChars(pathPattern) {
    // Single character class covering all four glob metacharacters.
    return /[*?[\]]/.test(pathPattern);
}
|
||||
/**
 * Remove a trailing /** glob suffix from a path pattern.
 * Trailing /** just means "the directory and everything under it",
 * so the pattern is equivalent to the bare directory path.
 * A pattern that reduces to the empty string becomes "/".
 */
export function removeTrailingGlobSuffix(pathPattern) {
    const trimmed = pathPattern.replace(/\/\*\*$/, '');
    return trimmed === '' ? '/' : trimmed;
}
|
||||
/**
 * Check if a symlink resolution crosses expected path boundaries.
 *
 * When resolving symlinks for sandbox path normalization, we need to ensure
 * the resolved path doesn't unexpectedly broaden the scope. This function
 * returns true if the resolved path is an ancestor of the original path
 * or resolves to a system root, which would indicate the symlink points
 * outside expected boundaries.
 *
 * NOTE: the ordering of the guard clauses below is significant — the
 * "allow" checks must run before the "deny" checks.
 *
 * @param originalPath - The original path before symlink resolution
 * @param resolvedPath - The path after fs.realpathSync() resolution
 * @returns true if the resolved path is outside expected boundaries
 */
export function isSymlinkOutsideBoundary(originalPath, resolvedPath) {
    const normalizedOriginal = path.normalize(originalPath);
    const normalizedResolved = path.normalize(resolvedPath);
    // Same path after normalization - OK
    if (normalizedResolved === normalizedOriginal) {
        return false;
    }
    // Handle macOS /tmp -> /private/tmp canonical resolution.
    // This is a legitimate system symlink that should be allowed:
    // /tmp/claude -> /private/tmp/claude is OK
    // /var/folders/... -> /private/var/folders/... is OK
    if (normalizedOriginal.startsWith('/tmp/') &&
        normalizedResolved === '/private' + normalizedOriginal) {
        return false;
    }
    if (normalizedOriginal.startsWith('/var/') &&
        normalizedResolved === '/private' + normalizedOriginal) {
        return false;
    }
    // Also handle the reverse: /private/tmp/... resolving to itself.
    // (These branches are shadowed by the equality check above, but are
    // kept for explicitness.)
    if (normalizedOriginal.startsWith('/private/tmp/') &&
        normalizedResolved === normalizedOriginal) {
        return false;
    }
    if (normalizedOriginal.startsWith('/private/var/') &&
        normalizedResolved === normalizedOriginal) {
        return false;
    }
    // If resolved path is "/" it's outside expected boundaries
    if (normalizedResolved === '/') {
        return true;
    }
    // If resolved path is very short (single component like /tmp, /usr, /var),
    // it's likely outside expected boundaries
    const resolvedParts = normalizedResolved.split('/').filter(Boolean);
    if (resolvedParts.length <= 1) {
        return true;
    }
    // If original path starts with resolved path, the resolved path is an ancestor
    // e.g., /tmp/claude -> /tmp means the symlink points to a broader scope
    if (normalizedOriginal.startsWith(normalizedResolved + '/')) {
        return true;
    }
    // Also check the canonical form of the original path for macOS
    // e.g., /tmp/claude should also be checked as /private/tmp/claude
    let canonicalOriginal = normalizedOriginal;
    if (normalizedOriginal.startsWith('/tmp/')) {
        canonicalOriginal = '/private' + normalizedOriginal;
    }
    else if (normalizedOriginal.startsWith('/var/')) {
        canonicalOriginal = '/private' + normalizedOriginal;
    }
    if (canonicalOriginal !== normalizedOriginal &&
        canonicalOriginal.startsWith(normalizedResolved + '/')) {
        return true;
    }
    // STRICT CHECK: Only allow resolutions that stay within the expected path tree.
    // The resolved path must either:
    // 1. Start with the original path (deeper/same) - already covered by returning false below
    // 2. Start with the canonical original (deeper/same under canonical form)
    // 3. BE the canonical form of the original (e.g., /tmp/x -> /private/tmp/x)
    // Any other resolution (e.g., /tmp/claude -> /Users/dworken) is outside expected bounds.
    const resolvedStartsWithOriginal = normalizedResolved.startsWith(normalizedOriginal + '/');
    const resolvedStartsWithCanonical = canonicalOriginal !== normalizedOriginal &&
        normalizedResolved.startsWith(canonicalOriginal + '/');
    const resolvedIsCanonical = canonicalOriginal !== normalizedOriginal &&
        normalizedResolved === canonicalOriginal;
    const resolvedIsSame = normalizedResolved === normalizedOriginal;
    // If resolved path is not within expected tree, it's outside boundary
    if (!resolvedIsSame &&
        !resolvedIsCanonical &&
        !resolvedStartsWithOriginal &&
        !resolvedStartsWithCanonical) {
        return true;
    }
    // Allow resolution to same directory level or deeper within expected tree
    return false;
}
|
||||
/**
 * Normalize a path for use in sandbox configurations
 * Handles:
 * - Tilde (~) expansion for home directory
 * - Relative paths (./foo, ../foo, etc.) converted to absolute
 * - Absolute paths remain unchanged
 * - Symlinks are resolved to their real paths for non-glob patterns
 * - Glob patterns preserve wildcards after path normalization
 *
 * Returns the absolute path with symlinks resolved (or normalized glob pattern)
 */
export function normalizePathForSandbox(pathPattern) {
    const cwd = process.cwd();
    let normalizedPath = pathPattern;
    // Expand ~ to home directory
    if (pathPattern === '~') {
        normalizedPath = homedir();
    }
    else if (pathPattern.startsWith('~/')) {
        // Keep the remainder after "~" and prepend the home directory.
        normalizedPath = homedir() + pathPattern.slice(1);
    }
    else if (pathPattern.startsWith('./') || pathPattern.startsWith('../')) {
        // Convert relative to absolute based on current working directory
        normalizedPath = path.resolve(cwd, pathPattern);
    }
    else if (!path.isAbsolute(pathPattern)) {
        // Handle other relative paths (e.g., ".", "..", "foo/bar")
        normalizedPath = path.resolve(cwd, pathPattern);
    }
    // For glob patterns, resolve symlinks for the directory portion only
    if (containsGlobChars(normalizedPath)) {
        // Extract the static directory prefix before glob characters
        const staticPrefix = normalizedPath.split(/[*?[\]]/)[0];
        if (staticPrefix && staticPrefix !== '/') {
            // Get the directory containing the glob pattern.
            // If staticPrefix ends with /, remove it to get the directory.
            const baseDir = staticPrefix.endsWith('/')
                ? staticPrefix.slice(0, -1)
                : path.dirname(staticPrefix);
            // Try to resolve symlinks for the base directory
            try {
                const resolvedBaseDir = fs.realpathSync(baseDir);
                // Validate that resolution stays within expected boundaries
                if (!isSymlinkOutsideBoundary(baseDir, resolvedBaseDir)) {
                    // Reconstruct the pattern with the resolved directory
                    const patternSuffix = normalizedPath.slice(baseDir.length);
                    return resolvedBaseDir + patternSuffix;
                }
                // If resolution would broaden scope, keep original pattern
            }
            catch {
                // If directory doesn't exist or can't be resolved, keep the original pattern
            }
        }
        return normalizedPath;
    }
    // Resolve symlinks to real paths to avoid bwrap issues.
    // Validate that the resolution stays within expected boundaries.
    try {
        const resolvedPath = fs.realpathSync(normalizedPath);
        // Only use resolved path if it doesn't cross boundary (e.g., symlink to parent dir)
        if (isSymlinkOutsideBoundary(normalizedPath, resolvedPath)) {
            // Symlink points outside expected boundaries - keep original path
        }
        else {
            normalizedPath = resolvedPath;
        }
    }
    catch {
        // If path doesn't exist or can't be resolved, keep the normalized path
    }
    return normalizedPath;
}
|
||||
/**
 * Get recommended system paths that should be writable for commands to work properly.
 *
 * WARNING: These default paths are intentionally broad for compatibility but may
 * allow access to files from other processes. In highly security-sensitive
 * environments, you should configure more restrictive write paths.
 *
 * @returns Array of absolute paths that should be write-allowed by default
 */
export function getDefaultWritePaths() {
    const home = homedir();
    return [
        // Standard output devices
        '/dev/stdout',
        '/dev/stderr',
        '/dev/null',
        '/dev/tty',
        '/dev/dtracehelper',
        '/dev/autofs_nowait',
        // Scratch space (both the /tmp path and its macOS canonical form)
        '/tmp/claude',
        '/private/tmp/claude',
        // Tool log directories under the user's home
        path.join(home, '.npm/_logs'),
        path.join(home, '.claude/debug'),
    ];
}
|
||||
/**
 * Generate proxy environment variables for sandboxed processes.
 *
 * @param httpProxyPort Port of the local HTTP CONNECT proxy, if any
 * @param socksProxyPort Port of the local SOCKS5 proxy, if any
 * @returns Array of "KEY=value" strings to inject into the sandboxed environment
 */
export function generateProxyEnvVars(httpProxyPort, socksProxyPort) {
    // Respect CLAUDE_TMPDIR if set, otherwise default to /tmp/claude
    const tmpdir = process.env.CLAUDE_TMPDIR || '/tmp/claude';
    const envVars = [`SANDBOX_RUNTIME=1`, `TMPDIR=${tmpdir}`];
    // Without any proxy ports there is nothing else to configure.
    if (!httpProxyPort && !socksProxyPort) {
        return envVars;
    }
    // Always set NO_PROXY to exclude localhost and private networks from proxying
    const noProxyAddresses = [
        'localhost',
        '127.0.0.1',
        '::1',
        '*.local',
        '.local',
        '169.254.0.0/16', // Link-local
        '10.0.0.0/8', // Private network
        '172.16.0.0/12', // Private network
        '192.168.0.0/16', // Private network
    ].join(',');
    envVars.push(`NO_PROXY=${noProxyAddresses}`);
    envVars.push(`no_proxy=${noProxyAddresses}`);
    if (httpProxyPort) {
        const httpUrl = `http://localhost:${httpProxyPort}`;
        // Upper- and lowercase variants for maximum tool compatibility.
        envVars.push(`HTTP_PROXY=${httpUrl}`);
        envVars.push(`HTTPS_PROXY=${httpUrl}`);
        envVars.push(`http_proxy=${httpUrl}`);
        envVars.push(`https_proxy=${httpUrl}`);
    }
    if (socksProxyPort) {
        // socks5h:// routes DNS resolution through the proxy as well.
        const socksUrl = `socks5h://localhost:${socksProxyPort}`;
        envVars.push(`ALL_PROXY=${socksUrl}`);
        envVars.push(`all_proxy=${socksUrl}`);
        // Configure Git's SSH transport to tunnel through the proxy so DNS
        // resolution happens outside the sandbox.
        const platform = getPlatform();
        if (platform === 'macos') {
            // macOS: BSD nc has built-in SOCKS5 proxy support (-X 5 -x).
            envVars.push(`GIT_SSH_COMMAND=ssh -o ProxyCommand='nc -X 5 -x localhost:${socksProxyPort} %h %p'`);
        }
        else if (platform === 'linux' && httpProxyPort) {
            // Linux: socat HTTP CONNECT via the HTTP proxy bridge. socat is
            // already a required Linux sandbox dependency, and PROXY: is
            // portable across all socat versions (unlike SOCKS5-CONNECT
            // which needs >= 1.8.0).
            envVars.push(`GIT_SSH_COMMAND=ssh -o ProxyCommand='socat - PROXY:localhost:%h:%p,proxyport=${httpProxyPort}'`);
        }
        // FTP proxy support (socks5h so DNS also resolves through the proxy)
        envVars.push(`FTP_PROXY=${socksUrl}`);
        envVars.push(`ftp_proxy=${socksUrl}`);
        // rsync proxy support
        envVars.push(`RSYNC_PROXY=localhost:${socksProxyPort}`);
        // Docker CLI speaks HTTP to registries; prefer the HTTP proxy,
        // falling back to the SOCKS port if no HTTP proxy exists.
        // (Most database clients have no proxy support at all and need
        // SSH tunneling or a SOCKS wrapper like tsocks/proxychains.)
        const dockerProxyUrl = `http://localhost:${httpProxyPort || socksProxyPort}`;
        envVars.push(`DOCKER_HTTP_PROXY=${dockerProxyUrl}`);
        envVars.push(`DOCKER_HTTPS_PROXY=${dockerProxyUrl}`);
        // kubectl, AWS CLI v2, Azure CLI, and Terraform all honor the
        // standard HTTP_PROXY/HTTPS_PROXY variables already set above.
        // Google Cloud SDK needs its own proxy settings:
        if (httpProxyPort) {
            envVars.push(`CLOUDSDK_PROXY_TYPE=https`);
            envVars.push(`CLOUDSDK_PROXY_ADDRESS=localhost`);
            envVars.push(`CLOUDSDK_PROXY_PORT=${httpProxyPort}`);
        }
        // gRPC-based tools
        envVars.push(`GRPC_PROXY=${socksUrl}`);
        envVars.push(`grpc_proxy=${socksUrl}`);
    }
    // WARNING: Do not set HTTP_PROXY/HTTPS_PROXY to SOCKS URLs when only a SOCKS
    // proxy is available. Most HTTP clients do not support SOCKS URLs in these
    // variables and would fail; ALL_PROXY already points at the SOCKS endpoint.
    return envVars;
}
|
||||
/**
 * Encode a command for sandbox monitoring.
 * The command is capped at 100 characters and base64-encoded so the value
 * survives downstream parsing without escaping issues.
 */
export function encodeSandboxedCommand(command) {
    const capped = command.slice(0, 100);
    return Buffer.from(capped).toString('base64');
}
|
||||
/**
 * Decode a base64-encoded command from sandbox monitoring.
 */
export function decodeSandboxedCommand(encodedCommand) {
    const raw = Buffer.from(encodedCommand, 'base64');
    return raw.toString('utf8');
}
|
||||
/**
 * Convert a glob pattern to a regular expression source string.
 *
 * Implements gitignore-style matching to mirror the `ignore` library used by
 * the permission system:
 * - *  matches any characters except / (e.g., *.ts matches foo.ts, not foo/bar.ts)
 * - ** matches any characters including /
 * - ?  matches any single character except /
 * - [abc] matches any character in the set
 *
 * Exported for testing and shared between macOS sandbox profiles and Linux
 * glob expansion.
 */
export function globToRegex(globPattern) {
    // Step 1: escape regex metacharacters that are not glob syntax,
    // and neutralize an unclosed '[' (no matching ']').
    let pattern = globPattern
        .replace(/[.^$+{}()|\\]/g, '\\$&')
        .replace(/\[([^\]]*?)$/g, '\\[$1');
    // Step 2: translate glob syntax. Globstars are parked in placeholders
    // first so the plain '*' rewrite cannot clobber them.
    pattern = pattern
        .replace(/\*\*\//g, '__GLOBSTAR_SLASH__')
        .replace(/\*\*/g, '__GLOBSTAR__')
        .replace(/\*/g, '[^/]*')
        .replace(/\?/g, '[^/]')
        .replace(/__GLOBSTAR_SLASH__/g, '(.*/)?') // **/ = zero or more directories
        .replace(/__GLOBSTAR__/g, '.*'); // ** = anything, including /
    return `^${pattern}$`;
}
|
||||
/**
 * Expand a glob pattern into concrete file paths.
 *
 * Used on Linux where bubblewrap doesn't support glob patterns natively:
 * the static directory prefix is resolved, the tree under it is listed
 * recursively, and entries are filtered with globToRegex().
 *
 * @param globPath - A path pattern containing glob characters (e.g., ~/test/*.env)
 * @returns Array of absolute paths matching the glob pattern
 */
export function expandGlobPattern(globPath) {
    const normalizedPattern = normalizePathForSandbox(globPath);
    // Static prefix = everything before the first glob metacharacter.
    const staticPrefix = normalizedPattern.split(/[*?[\]]/)[0];
    if (!staticPrefix || staticPrefix === '/') {
        logForDebugging(`[Sandbox] Glob pattern too broad, skipping: ${globPath}`);
        return [];
    }
    // Directory anchoring the recursive listing.
    const baseDir = staticPrefix.endsWith('/')
        ? staticPrefix.slice(0, -1)
        : path.dirname(staticPrefix);
    if (!fs.existsSync(baseDir)) {
        logForDebugging(`[Sandbox] Base directory for glob does not exist: ${baseDir}`);
        return [];
    }
    const matcher = new RegExp(globToRegex(normalizedPattern));
    const matches = [];
    try {
        const entries = fs.readdirSync(baseDir, {
            recursive: true,
            withFileTypes: true,
        });
        for (const entry of entries) {
            // entry.parentPath is the containing directory (Node 20+/Bun);
            // fall back to entry.path, then baseDir, for older runtimes.
            const parentDir = entry.parentPath ??
                entry.path ??
                baseDir;
            const candidate = path.join(parentDir, entry.name);
            if (matcher.test(candidate)) {
                matches.push(candidate);
            }
        }
    }
    catch (err) {
        logForDebugging(`[Sandbox] Error expanding glob pattern ${globPath}: ${err}`);
    }
    return matches;
}
|
||||
//# sourceMappingURL=sandbox-utils.js.map
|
||||
54
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-violation-store.js
generated
vendored
Normal file
54
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/sandbox-violation-store.js
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
import { encodeSandboxedCommand } from './sandbox-utils.js';
|
||||
/**
 * In-memory tail for sandbox violations.
 *
 * Keeps at most `maxSize` recent violations while `totalCount` tracks every
 * violation ever recorded. Listeners are notified with the current tail on
 * every mutation.
 */
export class SandboxViolationStore {
    constructor() {
        this.violations = [];
        this.totalCount = 0;
        this.maxSize = 100;
        this.listeners = new Set();
    }
    /** Record a violation, trimming the buffer to the most recent maxSize entries. */
    addViolation(violation) {
        this.violations.push(violation);
        this.totalCount += 1;
        const overflow = this.violations.length - this.maxSize;
        if (overflow > 0) {
            this.violations = this.violations.slice(overflow);
        }
        this.notifyListeners();
    }
    /** Copy of the stored tail; with `limit`, only the most recent `limit` entries. */
    getViolations(limit) {
        return limit === undefined
            ? [...this.violations]
            : this.violations.slice(-limit);
    }
    /** Number of violations currently buffered (capped at maxSize). */
    getCount() {
        return this.violations.length;
    }
    /** Number of violations ever recorded (not reset by clear()). */
    getTotalCount() {
        return this.totalCount;
    }
    /** Violations whose encoded command matches the given raw command. */
    getViolationsForCommand(command) {
        const target = encodeSandboxedCommand(command);
        return this.violations.filter(entry => entry.encodedCommand === target);
    }
    /** Empty the buffer (totalCount is preserved) and notify listeners. */
    clear() {
        this.violations = [];
        this.notifyListeners();
    }
    /** Register a listener; it is called immediately and on every change. Returns an unsubscribe function. */
    subscribe(listener) {
        this.listeners.add(listener);
        listener(this.getViolations());
        return () => {
            this.listeners.delete(listener);
        };
    }
    /** Notify every listener with the full current tail. */
    notifyListeners() {
        const snapshot = this.getViolations();
        for (const listener of this.listeners) {
            listener(snapshot);
        }
    }
}
|
||||
//# sourceMappingURL=sandbox-violation-store.js.map
|
||||
95
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/socks-proxy.js
generated
vendored
Normal file
95
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/sandbox/socks-proxy.js
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
import { createServer } from '@pondwader/socks5-server';
|
||||
import { logForDebugging } from '../utils/debug.js';
|
||||
/**
 * Create a SOCKS5 proxy server whose connections are gated by an async
 * allow/deny filter.
 *
 * @param options.filter - async (port, hostname) => boolean; a falsy result
 *        (or a thrown error) rejects the connection.
 * @returns A wrapper exposing the raw server plus getPort/listen/close/unref
 *          helpers. The wrapper reaches into the library's private `server`
 *          property for port lookup and unref.
 */
export function createSocksProxyServer(options) {
    const socksServer = createServer();
    socksServer.setRulesetValidator(async (conn) => {
        try {
            const hostname = conn.destAddress;
            const port = conn.destPort;
            logForDebugging(`Connection request to ${hostname}:${port}`);
            const allowed = await options.filter(port, hostname);
            if (!allowed) {
                logForDebugging(`Connection blocked to ${hostname}:${port}`, {
                    level: 'error',
                });
                return false;
            }
            logForDebugging(`Connection allowed to ${hostname}:${port}`);
            return true;
        }
        catch (error) {
            // Fail closed: any validator error blocks the connection.
            logForDebugging(`Error validating connection: ${error}`, {
                level: 'error',
            });
            return false;
        }
    });
    return {
        server: socksServer,
        // Returns the bound port, or undefined when the server is not listening.
        getPort() {
            // Access the internal server to get the port.
            // The `server` property is private in the library's typings,
            // so the lookup is defensive.
            try {
                const serverInternal = socksServer?.server;
                if (serverInternal && typeof serverInternal?.address === 'function') {
                    const address = serverInternal.address();
                    if (address && typeof address === 'object' && 'port' in address) {
                        return address.port;
                    }
                }
            }
            catch (error) {
                // Server might not be listening yet or property access failed
                logForDebugging(`Error getting port: ${error}`, { level: 'error' });
            }
            return undefined;
        },
        // Resolves with the actual bound port once listening (port 0 picks a free one).
        listen(port, hostname) {
            return new Promise((resolve, reject) => {
                const listeningCallback = () => {
                    const actualPort = this.getPort();
                    if (actualPort) {
                        logForDebugging(`SOCKS proxy listening on ${hostname}:${actualPort}`);
                        resolve(actualPort);
                    }
                    else {
                        reject(new Error('Failed to get SOCKS proxy server port'));
                    }
                };
                socksServer.listen(port, hostname, listeningCallback);
            });
        },
        // Close the server; "already closed" states resolve instead of rejecting.
        async close() {
            return new Promise((resolve, reject) => {
                socksServer.close(error => {
                    if (error) {
                        // Only reject for actual errors, not for "already closed" states.
                        // Check for common "already closed" error patterns.
                        const errorMessage = error.message?.toLowerCase() || '';
                        const isAlreadyClosed = errorMessage.includes('not running') ||
                            errorMessage.includes('already closed') ||
                            errorMessage.includes('not listening');
                        if (!isAlreadyClosed) {
                            reject(error);
                            return;
                        }
                    }
                    resolve();
                });
            });
        },
        // Allow the process to exit even while the server is still open.
        unref() {
            // Access the internal server to call unref
            try {
                const serverInternal = socksServer?.server;
                if (serverInternal && typeof serverInternal?.unref === 'function') {
                    serverInternal.unref();
                }
            }
            catch (error) {
                logForDebugging(`Error calling unref: ${error}`, { level: 'error' });
            }
        },
    };
}
|
||||
//# sourceMappingURL=socks-proxy.js.map
|
||||
25
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/debug.js
generated
vendored
Normal file
25
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/debug.js
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
/**
 * Simple debug logging for the standalone sandbox.
 *
 * Emits nothing unless the SRT_DEBUG environment variable is set
 * (SRT_DEBUG rather than DEBUG, to avoid conflicts with Node.js debug
 * libraries and VS Code). All output goes to stderr so stdout JSON
 * streams are never corrupted.
 *
 * @param message Text to log
 * @param options Optional { level: 'info' | 'warn' | 'error' }
 */
export function logForDebugging(message, options) {
    if (!process.env.SRT_DEBUG) {
        return;
    }
    const line = `[SandboxDebug] ${message}`;
    const level = options?.level || 'info';
    if (level === 'warn') {
        console.warn(line);
    }
    else {
        // 'error' and 'info' both go through console.error (stderr).
        console.error(line);
    }
}
|
||||
//# sourceMappingURL=debug.js.map
|
||||
49
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/platform.js
generated
vendored
Normal file
49
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/platform.js
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
/**
|
||||
* Platform detection utilities
|
||||
*/
|
||||
import * as fs from 'fs';
|
||||
/**
 * Get the WSL version ('1', '2', ...) if running in WSL.
 * Returns undefined when not running in WSL (or not on Linux at all).
 */
export function getWslVersion() {
    if (process.platform !== 'linux') {
        return undefined;
    }
    try {
        const procVersion = fs.readFileSync('/proc/version', { encoding: 'utf8' });
        // Explicit marker like "WSL2" (or a future "WSL3") wins.
        const explicitVersion = /WSL(\d+)/i.exec(procVersion);
        if (explicitVersion && explicitVersion[1]) {
            return explicitVersion[1];
        }
        // Legacy WSL1 kernels only mention "Microsoft"
        // (e.g. "4.4.0-19041-Microsoft").
        if (procVersion.toLowerCase().includes('microsoft')) {
            return '1';
        }
        return undefined;
    }
    catch {
        // /proc/version unreadable -> treat as plain Linux.
        return undefined;
    }
}
|
||||
/**
 * Detect the current platform.
 * Note: All Linux including WSL returns 'linux'. Use getWslVersion() to
 * detect WSL1 (unsupported); WSL2+ gets the same sandboxing as Linux.
 */
export function getPlatform() {
    const platformNames = {
        darwin: 'macos',
        linux: 'linux',
        win32: 'windows',
    };
    return platformNames[process.platform] ?? 'unknown';
}
|
||||
//# sourceMappingURL=platform.js.map
|
||||
45
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/ripgrep.js
generated
vendored
Normal file
45
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/ripgrep.js
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
import { spawn } from 'child_process';
|
||||
import { text } from 'node:stream/consumers';
|
||||
import { whichSync } from './which.js';
|
||||
/**
 * Synchronously determine whether ripgrep (`rg`) is available on the PATH.
 *
 * @returns {boolean} True when an `rg` executable can be resolved.
 */
export function hasRipgrepSync() {
    const rgPath = whichSync('rg');
    return rgPath !== null;
}
|
||||
/**
 * Execute ripgrep with the given arguments.
 * @param args Command-line arguments to pass to rg
 * @param target Target directory or file to search
 * @param abortSignal AbortSignal to cancel the operation
 * @param config Ripgrep configuration (command and optional args)
 * @returns Array of matching lines (one per line of output)
 * @throws Error if ripgrep exits with a non-zero status other than 1
 *         (exit code 1 simply means "no matches"), or if it is terminated
 *         by a signal (e.g. the 10s timeout or the abort signal firing).
 */
export async function ripGrep(args, target, abortSignal, config = { command: 'rg' }) {
    const { command, args: commandArgs = [], argv0 } = config;
    const child = spawn(command, [...commandArgs, ...args, target], {
        argv0,
        signal: abortSignal,
        timeout: 10000, // hard cap so a runaway search cannot hang the caller
        windowsHide: true,
    });
    // Drain both pipes while waiting for exit; 'close' fires only after the
    // stdio streams have ended, so stdout/stderr are complete at that point.
    const [stdout, stderr, [code, signal]] = await Promise.all([
        text(child.stdout),
        text(child.stderr),
        new Promise((resolve, reject) => {
            child.on('close', (exitCode, exitSignal) => resolve([exitCode, exitSignal]));
            child.on('error', reject);
        }),
    ]);
    if (code === 0) {
        return stdout.trim().split('\n').filter(Boolean);
    }
    if (code === 1) {
        // Exit code 1 means "no matches found" - this is normal
        return [];
    }
    if (signal !== null && signal !== undefined) {
        // Killed by the timeout or an external signal: code is null here, so
        // report the signal instead of a misleading "exit code null".
        throw new Error(`ripgrep terminated by signal ${signal}: ${stderr}`);
    }
    throw new Error(`ripgrep failed with exit code ${code}: ${stderr}`);
}
|
||||
//# sourceMappingURL=ripgrep.js.map
|
||||
25
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/which.js
generated
vendored
Normal file
25
extracted-source/node_modules/@anthropic-ai/sandbox-runtime/dist/utils/which.js
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
import { spawnSync } from 'node:child_process';
|
||||
/**
 * Resolve the full path of an executable, like the shell `which` command.
 * Uses Bun.which when running under Bun; otherwise shells out to `which`.
 *
 * @param bin - Name of the executable to locate
 * @returns {string | null} Full path to the executable, or null when it
 *          cannot be found (or the lookup itself fails).
 */
export function whichSync(bin) {
    // Bun ships a native resolver - prefer it when available.
    if (typeof globalThis.Bun !== 'undefined') {
        return globalThis.Bun.which(bin);
    }
    // Node.js fallback: ask the system `which`, capturing only stdout.
    const lookup = spawnSync('which', [bin], {
        encoding: 'utf8',
        stdio: ['ignore', 'pipe', 'ignore'],
        timeout: 1000, // never block the caller on a wedged lookup
    });
    const found = lookup.status === 0 && lookup.stdout;
    return found ? lookup.stdout.trim() : null;
}
|
||||
//# sourceMappingURL=which.js.map
|
||||
Reference in New Issue
Block a user