When calling OpenAI LLM proxies, the only models I can get to work so far are the GPT-4 series, i.e. gpt-4.1, gpt-4.1-mini, etc. Is the GPT-5 series supported? How am I supposed to know which models are supported?
import {
SupportedFoundryClients,
type OpenAIService,
} from '@codestrap/developer-foundations-types';
import OpenAI from 'openai';
import { foundryClientFactory } from '../factory/foundryClientFactory';
import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat';
import type { RequestOptions } from 'openai/core';
import type { ResponseCreateParamsStreaming } from 'openai/resources/responses/responses';
// Add type definitions for the OpenAI response here, or in a separate file and import them, to ensure type safety when working with the API response data.
export function makeOpenAIService(): OpenAIService {
const { getToken, url, ontologyRid } = foundryClientFactory(
process.env.FOUNDRY_CLIENT_TYPE || SupportedFoundryClients.PRIVATE,
undefined,
);
return {
// TODO code out all methods using OSDK API calls
completions: async (
body: ChatCompletionCreateParamsStreaming,
options?: RequestOptions,
) => {
const token = await getToken();
const client = new OpenAI({
baseURL: `${url}/api/v2/llm/proxy/openai/v1`,
apiKey: process.env.FOUNDRY_TOKEN,
});
const stream = await client.chat.completions.create(body, options);
let text = '';
for await (const chatCompletionChunk of stream) {
text += chatCompletionChunk.choices[0]?.delta?.content || '';
}
return text;
},
responses: async (
body: ResponseCreateParamsStreaming,
options?: RequestOptions,
) => {
const token = await getToken();
const client = new OpenAI({
baseURL: `${url}/api/v2/llm/proxy/openai/v1`,
apiKey: process.env.FOUNDRY_TOKEN,
});
// Responses API streaming emits semantic events (delta, completed, error, etc.)
const stream = await client.responses.create(
{ ...body, stream: true },
options,
);
let text = '';
for await (const event of stream) {
if (event.type === 'error') {
throw new Error(`OpenAI API error: ${event.code} - ${event.message}`);
}
if (event.type === 'response.output_text.delta') {
text += event.delta ?? '';
}
}
return text;
},
};
}