Files
openclaude/src/utils/codex-fetch-adapter.ts
YoVinchen 7dd3095974 privacy: remove external data transmissions & add GitHub release workflow
Remove three active external data transmission paths:

1. WebFetch domain blocklist (api.anthropic.com/api/web/domain_info)
   - src/tools/WebFetchTool/utils.ts
   - Was sending every domain a user tried to fetch to Anthropic
   - Replaced with always-allowed stub; tool permission dialog is
     the primary security boundary

2. Codex API router (chatgpt.com/backend-api/codex/responses)
   - src/services/api/codex-fetch-adapter.ts
   - Would have forwarded full conversation content to OpenAI
   - createCodexFetch now returns HTTP 403 stub

3. OpenAI API adapter (api.openai.com/v1/chat/completions)
   - src/utils/codex-fetch-adapter.ts
   - Would have forwarded messages to OpenAI
   - fetchCodexResponse now throws immediately

Already-disabled paths (no changes needed):
- Analytics logEvent/logEventAsync: empty stubs in services/analytics/index.ts
- GrowthBook/Statsig: local cache only, no outbound requests
- Auto-updater GCS: already guarded by CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC
- MCP registry: already guarded by CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC
- Release notes GitHub: already guarded by isEssentialTrafficOnly()

Add .github/workflows/release.yml:
- Builds self-contained binaries for macOS (x64+arm64), Linux (x64+arm64),
  Windows (x64) using bun compile on each native runner
- Triggers on version tags (v*.*.*) or manual workflow_dispatch
- Publishes binaries + SHA256SUMS.txt as a GitHub Release with
  per-platform install instructions

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
2026-04-14 15:46:47 +08:00

126 lines
2.8 KiB
TypeScript
Executable File

/**
* OpenAI Codex API adapter for Claude Code
* Provides compatibility layer between Claude's API expectations and OpenAI's Codex API
*/
import type { Message } from '../types/message.js'
import { logError } from './log.js'
/**
 * OpenAI message format for API requests.
 * Mirrors the chat-completions `messages[]` element: either plain text or
 * a list of multi-modal parts (text and image_url entries).
 */
interface OpenAIMessage {
  role: 'system' | 'user' | 'assistant'
  // Plain string for text-only messages, or an ordered part list for
  // multi-modal content.
  content: string | Array<{
    type: 'text' | 'image_url'
    // Present when type === 'text'.
    text?: string
    // Present when type === 'image_url'.
    image_url?: {
      // Either an https URL or a `data:<mime>;base64,<bytes>` URI.
      url: string
    }
  }>
}
/**
 * OpenAI API response format.
 * Subset of the chat-completions response body that this adapter reads.
 */
interface OpenAIResponse {
  id: string
  object: string
  // Unix timestamp (seconds) of response creation.
  created: number
  model: string
  // Candidate completions; this adapter only consumes choices[0].
  choices: Array<{
    index: number
    message: {
      role: string
      content: string
    }
    // e.g. 'stop', 'length' — passed through untouched here.
    finish_reason: string
  }>
  // Token accounting, mapped onto Claude's input/output token fields.
  usage: {
    prompt_tokens: number
    completion_tokens: number
    total_tokens: number
  }
}
/**
 * Convert a Claude Code message to the OpenAI chat-completions format.
 *
 * String content passes through unchanged. Multi-modal content is mapped
 * part-by-part; content items other than 'text' and 'image' (e.g. tool
 * results) are silently dropped because OpenAI's schema has no equivalent
 * slot in a message part list.
 *
 * @param message - Claude Code message to translate.
 * @returns The equivalent OpenAI-shaped message.
 */
function convertToOpenAIMessage(message: Message): OpenAIMessage {
  // Claude's 'human' role corresponds to OpenAI's 'user'; 'system' and
  // 'assistant' share the same names in both APIs. Computed once so the
  // two return paths below cannot drift apart.
  const role =
    message.role === 'human' ? 'user' : (message.role as 'system' | 'assistant')

  if (typeof message.content === 'string') {
    return { role, content: message.content }
  }

  // Handle multi-modal content with a precisely-typed accumulator
  // (the original used Array<any>).
  const content: Exclude<OpenAIMessage['content'], string> = []
  for (const item of message.content) {
    if (item.type === 'text') {
      content.push({ type: 'text', text: item.text })
    } else if (item.type === 'image') {
      // Anthropic base64 images become data: URIs.
      // NOTE(review): the non-base64 branch forwards `source.data` as the
      // URL — assumes non-base64 sources store a URL there; confirm
      // against the Message type (Anthropic URL sources normally use
      // `source.url`).
      content.push({
        type: 'image_url',
        image_url: {
          url:
            item.source.type === 'base64'
              ? `data:${item.source.media_type};base64,${item.source.data}`
              : item.source.data,
        },
      })
    }
  }
  return { role, content }
}
/**
 * Privacy stub: the original implementation forwarded conversation
 * content to api.openai.com. It now rejects unconditionally so no user
 * data ever leaves the machine; the signature is preserved so existing
 * call sites keep compiling.
 *
 * @throws Error always — Codex API calls are permanently disabled.
 */
export async function fetchCodexResponse(
  _messages: Message[],
  _model: string,
  _options: {
    apiKey?: string
    baseUrl?: string
    stream?: boolean
  } = {}
): Promise<OpenAIResponse> {
  const reason =
    'OpenAI Codex API calls are disabled for privacy. External data forwarding has been removed.'
  throw new Error(reason)
}
/**
 * Map an OpenAI chat-completions response onto the shape Claude Code
 * expects: the first choice's text plus input/output token counters.
 *
 * @param response - Parsed OpenAI response body.
 * @returns Content string and usage in Claude Code's field names.
 * @throws Error when the response carries no choices.
 */
export function convertFromOpenAIResponse(response: OpenAIResponse): {
  content: string
  usage: {
    input_tokens: number
    output_tokens: number
  }
} {
  const [firstChoice] = response.choices
  if (firstChoice === undefined) {
    throw new Error('No choices in OpenAI response')
  }
  const { prompt_tokens, completion_tokens } = response.usage
  return {
    content: firstChoice.message.content,
    usage: {
      input_tokens: prompt_tokens,
      output_tokens: completion_tokens,
    },
  }
}