feat(M3-003): OpenAI provider adapter for Codex gpt-5.4 (#310)
Some checks failed
ci/woodpecker/push/ci Pipeline failed
Co-authored-by: Jason Woltje <jason@diversecanvas.com>
Co-committed-by: Jason Woltje <jason@diversecanvas.com>
This commit was merged in pull request #310.
This commit is contained in:
@@ -47,6 +47,7 @@
|
|||||||
"dotenv": "^17.3.1",
|
"dotenv": "^17.3.1",
|
||||||
"fastify": "^5.0.0",
|
"fastify": "^5.0.0",
|
||||||
"node-cron": "^4.2.1",
|
"node-cron": "^4.2.1",
|
||||||
|
"openai": "^6.32.0",
|
||||||
"reflect-metadata": "^0.2.0",
|
"reflect-metadata": "^0.2.0",
|
||||||
"rxjs": "^7.8.0",
|
"rxjs": "^7.8.0",
|
||||||
"socket.io": "^4.8.0",
|
"socket.io": "^4.8.0",
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
export { OllamaAdapter } from './ollama.adapter.js';
|
export { OllamaAdapter } from './ollama.adapter.js';
|
||||||
export { AnthropicAdapter } from './anthropic.adapter.js';
|
export { AnthropicAdapter } from './anthropic.adapter.js';
|
||||||
|
export { OpenAIAdapter } from './openai.adapter.js';
|
||||||
|
|||||||
201
apps/gateway/src/agent/adapters/openai.adapter.ts
Normal file
201
apps/gateway/src/agent/adapters/openai.adapter.ts
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
import { Logger } from '@nestjs/common';
|
||||||
|
import OpenAI from 'openai';
|
||||||
|
import type { ModelRegistry } from '@mariozechner/pi-coding-agent';
|
||||||
|
import type {
|
||||||
|
CompletionEvent,
|
||||||
|
CompletionParams,
|
||||||
|
IProviderAdapter,
|
||||||
|
ModelInfo,
|
||||||
|
ProviderHealth,
|
||||||
|
} from '@mosaic/types';
|
||||||
|
|
||||||
|
/**
|
||||||
|
* OpenAI provider adapter.
|
||||||
|
*
|
||||||
|
* Registers OpenAI models (including Codex gpt-5.4) with the Pi ModelRegistry.
|
||||||
|
* Configuration is driven by environment variables:
|
||||||
|
* OPENAI_API_KEY — OpenAI API key (required; adapter skips registration when absent)
|
||||||
|
*/
|
||||||
|
export class OpenAIAdapter implements IProviderAdapter {
|
||||||
|
readonly name = 'openai';
|
||||||
|
|
||||||
|
private readonly logger = new Logger(OpenAIAdapter.name);
|
||||||
|
private registeredModels: ModelInfo[] = [];
|
||||||
|
private client: OpenAI | null = null;
|
||||||
|
|
||||||
|
/** Model ID used for Codex gpt-5.4 in the Pi registry. */
|
||||||
|
static readonly CODEX_MODEL_ID = 'codex-gpt-5-4';
|
||||||
|
|
||||||
|
constructor(private readonly registry: ModelRegistry) {}
|
||||||
|
|
||||||
|
async register(): Promise<void> {
|
||||||
|
const apiKey = process.env['OPENAI_API_KEY'];
|
||||||
|
if (!apiKey) {
|
||||||
|
this.logger.debug('Skipping OpenAI provider registration: OPENAI_API_KEY not set');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.client = new OpenAI({ apiKey });
|
||||||
|
|
||||||
|
const codexModel = {
|
||||||
|
id: OpenAIAdapter.CODEX_MODEL_ID,
|
||||||
|
name: 'Codex gpt-5.4',
|
||||||
|
/** OpenAI-compatible completions API */
|
||||||
|
api: 'openai-completions' as never,
|
||||||
|
reasoning: false,
|
||||||
|
input: ['text', 'image'] as ('text' | 'image')[],
|
||||||
|
cost: { input: 0.003, output: 0.012, cacheRead: 0.0015, cacheWrite: 0 },
|
||||||
|
contextWindow: 128_000,
|
||||||
|
maxTokens: 16_384,
|
||||||
|
};
|
||||||
|
|
||||||
|
this.registry.registerProvider('openai', {
|
||||||
|
apiKey,
|
||||||
|
baseUrl: 'https://api.openai.com/v1',
|
||||||
|
models: [codexModel],
|
||||||
|
});
|
||||||
|
|
||||||
|
this.registeredModels = [
|
||||||
|
{
|
||||||
|
id: OpenAIAdapter.CODEX_MODEL_ID,
|
||||||
|
provider: 'openai',
|
||||||
|
name: 'Codex gpt-5.4',
|
||||||
|
reasoning: false,
|
||||||
|
contextWindow: 128_000,
|
||||||
|
maxTokens: 16_384,
|
||||||
|
inputTypes: ['text', 'image'] as ('text' | 'image')[],
|
||||||
|
cost: { input: 0.003, output: 0.012, cacheRead: 0.0015, cacheWrite: 0 },
|
||||||
|
},
|
||||||
|
];
|
||||||
|
|
||||||
|
this.logger.log(`OpenAI provider registered with model: ${OpenAIAdapter.CODEX_MODEL_ID}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
listModels(): ModelInfo[] {
|
||||||
|
return this.registeredModels;
|
||||||
|
}
|
||||||
|
|
||||||
|
async healthCheck(): Promise<ProviderHealth> {
|
||||||
|
const apiKey = process.env['OPENAI_API_KEY'];
|
||||||
|
if (!apiKey) {
|
||||||
|
return {
|
||||||
|
status: 'down',
|
||||||
|
lastChecked: new Date().toISOString(),
|
||||||
|
error: 'OPENAI_API_KEY not configured',
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const start = Date.now();
|
||||||
|
try {
|
||||||
|
// Lightweight call — list models to verify key validity
|
||||||
|
const res = await fetch('https://api.openai.com/v1/models', {
|
||||||
|
method: 'GET',
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${apiKey}`,
|
||||||
|
'Content-Type': 'application/json',
|
||||||
|
},
|
||||||
|
signal: AbortSignal.timeout(5000),
|
||||||
|
});
|
||||||
|
const latencyMs = Date.now() - start;
|
||||||
|
|
||||||
|
if (!res.ok) {
|
||||||
|
return {
|
||||||
|
status: 'degraded',
|
||||||
|
latencyMs,
|
||||||
|
lastChecked: new Date().toISOString(),
|
||||||
|
error: `HTTP ${res.status}`,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return { status: 'healthy', latencyMs, lastChecked: new Date().toISOString() };
|
||||||
|
} catch (err) {
|
||||||
|
const latencyMs = Date.now() - start;
|
||||||
|
const error = err instanceof Error ? err.message : String(err);
|
||||||
|
return { status: 'down', latencyMs, lastChecked: new Date().toISOString(), error };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Stream a completion from OpenAI using the chat completions API.
|
||||||
|
*
|
||||||
|
* Maps OpenAI streaming chunks to the Mosaic CompletionEvent format.
|
||||||
|
*/
|
||||||
|
async *createCompletion(params: CompletionParams): AsyncIterable<CompletionEvent> {
|
||||||
|
if (!this.client) {
|
||||||
|
throw new Error(
|
||||||
|
'OpenAIAdapter: client not initialized. ' +
|
||||||
|
'Ensure OPENAI_API_KEY is set and register() was called.',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const stream = await this.client.chat.completions.create({
|
||||||
|
model: params.model,
|
||||||
|
messages: params.messages.map((m) => ({
|
||||||
|
role: m.role,
|
||||||
|
content: m.content,
|
||||||
|
})),
|
||||||
|
...(params.temperature !== undefined && { temperature: params.temperature }),
|
||||||
|
...(params.maxTokens !== undefined && { max_tokens: params.maxTokens }),
|
||||||
|
...(params.tools &&
|
||||||
|
params.tools.length > 0 && {
|
||||||
|
tools: params.tools.map((t) => ({
|
||||||
|
type: 'function' as const,
|
||||||
|
function: {
|
||||||
|
name: t.name,
|
||||||
|
description: t.description,
|
||||||
|
parameters: t.parameters,
|
||||||
|
},
|
||||||
|
})),
|
||||||
|
}),
|
||||||
|
stream: true,
|
||||||
|
stream_options: { include_usage: true },
|
||||||
|
});
|
||||||
|
|
||||||
|
let inputTokens = 0;
|
||||||
|
let outputTokens = 0;
|
||||||
|
|
||||||
|
for await (const chunk of stream) {
|
||||||
|
const choice = chunk.choices[0];
|
||||||
|
|
||||||
|
// Accumulate usage when present (final chunk with stream_options.include_usage)
|
||||||
|
if (chunk.usage) {
|
||||||
|
inputTokens = chunk.usage.prompt_tokens;
|
||||||
|
outputTokens = chunk.usage.completion_tokens;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!choice) continue;
|
||||||
|
|
||||||
|
const delta = choice.delta;
|
||||||
|
|
||||||
|
// Text content delta
|
||||||
|
if (delta.content) {
|
||||||
|
yield { type: 'text_delta', content: delta.content };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tool call delta — emit when arguments are complete
|
||||||
|
if (delta.tool_calls) {
|
||||||
|
for (const toolCallDelta of delta.tool_calls) {
|
||||||
|
if (toolCallDelta.function?.name && toolCallDelta.function.arguments !== undefined) {
|
||||||
|
yield {
|
||||||
|
type: 'tool_call',
|
||||||
|
name: toolCallDelta.function.name,
|
||||||
|
arguments: toolCallDelta.function.arguments,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stream finished
|
||||||
|
if (choice.finish_reason === 'stop' || choice.finish_reason === 'tool_calls') {
|
||||||
|
yield {
|
||||||
|
type: 'done',
|
||||||
|
usage: { inputTokens, outputTokens },
|
||||||
|
};
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback done event when stream ends without explicit finish_reason
|
||||||
|
yield { type: 'done', usage: { inputTokens, outputTokens } };
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -8,7 +8,7 @@ import type {
|
|||||||
ProviderHealth,
|
ProviderHealth,
|
||||||
ProviderInfo,
|
ProviderInfo,
|
||||||
} from '@mosaic/types';
|
} from '@mosaic/types';
|
||||||
import { AnthropicAdapter, OllamaAdapter } from './adapters/index.js';
|
import { AnthropicAdapter, OllamaAdapter, OpenAIAdapter } from './adapters/index.js';
|
||||||
import type { TestConnectionResultDto } from './provider.dto.js';
|
import type { TestConnectionResultDto } from './provider.dto.js';
|
||||||
|
|
||||||
/** Default health check interval in seconds */
|
/** Default health check interval in seconds */
|
||||||
@@ -42,7 +42,11 @@ export class ProviderService implements OnModuleInit, OnModuleDestroy {
|
|||||||
this.registry = new ModelRegistry(authStorage);
|
this.registry = new ModelRegistry(authStorage);
|
||||||
|
|
||||||
// Build the default set of adapters that rely on the registry
|
// Build the default set of adapters that rely on the registry
|
||||||
this.adapters = [new OllamaAdapter(this.registry), new AnthropicAdapter(this.registry)];
|
this.adapters = [
|
||||||
|
new OllamaAdapter(this.registry),
|
||||||
|
new AnthropicAdapter(this.registry),
|
||||||
|
new OpenAIAdapter(this.registry),
|
||||||
|
];
|
||||||
|
|
||||||
// Run all adapter registrations first (Ollama, Anthropic, and any future adapters)
|
// Run all adapter registrations first (Ollama, Anthropic, and any future adapters)
|
||||||
await this.registerAll();
|
await this.registerAll();
|
||||||
|
|||||||
20
pnpm-lock.yaml
generated
20
pnpm-lock.yaml
generated
@@ -146,6 +146,9 @@ importers:
|
|||||||
node-cron:
|
node-cron:
|
||||||
specifier: ^4.2.1
|
specifier: ^4.2.1
|
||||||
version: 4.2.1
|
version: 4.2.1
|
||||||
|
openai:
|
||||||
|
specifier: ^6.32.0
|
||||||
|
version: 6.32.0(ws@8.19.0)(zod@4.3.6)
|
||||||
reflect-metadata:
|
reflect-metadata:
|
||||||
specifier: ^0.2.0
|
specifier: ^0.2.0
|
||||||
version: 0.2.2
|
version: 0.2.2
|
||||||
@@ -4891,6 +4894,18 @@ packages:
|
|||||||
zod:
|
zod:
|
||||||
optional: true
|
optional: true
|
||||||
|
|
||||||
|
openai@6.32.0:
|
||||||
|
resolution: {integrity: sha512-j3k+BjydAf8yQlcOI7WUQMQTbbF5GEIMAE2iZYCOzwwB3S2pCheaWYp+XZRNAch4jWVc52PMDGRRjutao3lLCg==}
|
||||||
|
hasBin: true
|
||||||
|
peerDependencies:
|
||||||
|
ws: ^8.18.0
|
||||||
|
zod: ^3.25 || ^4.0
|
||||||
|
peerDependenciesMeta:
|
||||||
|
ws:
|
||||||
|
optional: true
|
||||||
|
zod:
|
||||||
|
optional: true
|
||||||
|
|
||||||
optionator@0.9.4:
|
optionator@0.9.4:
|
||||||
resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==}
|
resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==}
|
||||||
engines: {node: '>= 0.8.0'}
|
engines: {node: '>= 0.8.0'}
|
||||||
@@ -10639,6 +10654,11 @@ snapshots:
|
|||||||
ws: 8.19.0
|
ws: 8.19.0
|
||||||
zod: 4.3.6
|
zod: 4.3.6
|
||||||
|
|
||||||
|
openai@6.32.0(ws@8.19.0)(zod@4.3.6):
|
||||||
|
optionalDependencies:
|
||||||
|
ws: 8.19.0
|
||||||
|
zod: 4.3.6
|
||||||
|
|
||||||
optionator@0.9.4:
|
optionator@0.9.4:
|
||||||
dependencies:
|
dependencies:
|
||||||
deep-is: 0.1.4
|
deep-is: 0.1.4
|
||||||
|
|||||||
Reference in New Issue
Block a user