If anyone out there is looking to understand how to use structured outputs with Functions v1, here is a working example:
import { Function, OntologyEditFunction, Integer } from "@foundry/functions-api";
// Uncomment the import statement below to start importing object types
// import { Objects, ExampleDataAircraft } from "@foundry/ontology-api";
import { GPT_5_mini } from "@foundry/models-api/language-models"
import { GPT_5_nano } from "@foundry/models-api/language-models"
import { GPT_5_codex } from "@foundry/models-api/language-models"
import { GPT_5_1 } from "@foundry/models-api/language-models"
export class MyFunctions {
    /**
     * Routes a chat request to one of the GPT-5 family models, optionally
     * enforcing structured output via a caller-supplied JSON schema.
     *
     * @param model      Model selector: 'gpt5.1' | 'gpt5-codex' | 'gpt5-mini' | 'gpt5-nano'.
     * @param user       The user message text.
     * @param system     The system prompt text.
     * @param jsonSchema Optional JSON-schema document as a string. Its top-level
     *                   `name` field is used as the schema name. Ignored by the
     *                   'gpt5-codex' path, which uses the Responses-style API.
     * @returns The model's text output, or the literal string 'undefined' when
     *          the model is unrecognized or produced no content (preserves the
     *          original contract for existing callers).
     */
    @Function()
    public async chatCompletions(model: string, user: string, system: string, jsonSchema?: string): Promise<string> {
        // Shared structured-output params; empty object when no schema is given.
        const params = this.buildResponseFormat(jsonSchema);
        let response: string | undefined;

        switch (model) {
            case 'gpt5.1': {
                const completion = await GPT_5_1.createChatCompletion({
                    params,
                    messages: [
                        { role: "SYSTEM", contents: [{ text: system }] },
                        { role: "USER", contents: [{ text: user }] },
                    ],
                });
                response = completion.choices[0].message.content;
                break;
            }
            case 'gpt5-codex': {
                // NOTE(review): this path uses the Responses-style input shape and
                // does not pass jsonSchema — confirm whether structured output is
                // supported for this model before relying on it.
                const completion = await GPT_5_codex.createChatCompletion({
                    input: [
                        { inputMessage: { role: "system", content: { text: system } } },
                        { inputMessage: { role: "user", content: { text: user } } },
                    ],
                });
                response = completion.output[0].outputMessage?.content[0].text?.text;
                break;
            }
            case 'gpt5-mini': {
                const completion = await GPT_5_mini.createChatCompletion({
                    params,
                    messages: [
                        { role: "SYSTEM", contents: [{ text: system }] },
                        { role: "USER", contents: [{ text: user }] },
                    ],
                });
                response = completion.choices[0].message.content;
                break;
            }
            case 'gpt5-nano': {
                const completion = await GPT_5_nano.createChatCompletion({
                    params,
                    messages: [
                        { role: "SYSTEM", contents: [{ text: system }] },
                        { role: "USER", contents: [{ text: user }] },
                    ],
                });
                response = completion.choices[0].message.content;
                break;
            }
        }

        // `??` (not `||`) so a legitimately-empty completion is returned as-is.
        return response ?? 'undefined';
    }

    /**
     * Builds the `responseFormat` params block shared by the chat-completion
     * models, or an empty object when no schema was supplied.
     *
     * The Foundry API expects `jsonSchema` to be a JSON *string* wrapping the
     * schema under a `{ name, schema }` envelope.
     *
     * @throws SyntaxError if `jsonSchema` is present but not valid JSON.
     */
    private buildResponseFormat(jsonSchema?: string): { responseFormat?: { jsonSchema: string; type: string } } {
        if (!jsonSchema) {
            return {};
        }
        // NOTE(review): parsed is untyped JSON; `parsed.name` is assumed to exist
        // on the supplied schema document — confirm with callers.
        const parsed = JSON.parse(jsonSchema);
        return {
            responseFormat: {
                jsonSchema: JSON.stringify({
                    name: parsed.name,
                    schema: parsed,
                }),
                type: 'json_schema',
            },
        };
    }
}
If you would like to use the models directly via an LLM proxy, below is also a working example for OpenAI models using the OpenAI SDK. This is ultimately the solution I was looking for, but due to the limited model support I am resorting to proxies in Functions v1 for any models not supported by proxies. Please note that I have only been able to get this to work with some of the GPT-4 series models; I cannot find documentation on which models are supported behind these proxies. One other note: this requires personal access tokens, as the OSDK does not support auth for LLM proxies.
import {
SupportedFoundryClients,
type OpenAIService,
} from '@codestrap/developer-foundations-types';
import OpenAI from 'openai';
import { foundryClientFactory } from '../factory/foundryClientFactory';
import type { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat';
import type { RequestOptions } from 'openai/core';
import type { ResponseCreateParamsStreaming } from 'openai/resources/responses/responses';
// Add type definitions for the OpenAI response here, or in a separate file and import them, to ensure type safety when working with the API response data.
export function makeOpenAIService(): OpenAIService {
const { getToken, url, ontologyRid } = foundryClientFactory(
process.env.FOUNDRY_CLIENT_TYPE || SupportedFoundryClients.PRIVATE,
undefined,
);
return {
// TODO code out all methods using OSDK API calls
completions: async (
body: ChatCompletionCreateParamsStreaming,
options?: RequestOptions,
) => {
const token = await getToken();
const client = new OpenAI({
baseURL: `${url}/api/v2/llm/proxy/openai/v1`,
apiKey: process.env.FOUNDRY_TOKEN,
});
const stream = await client.chat.completions.create(body, options);
let text = '';
for await (const chatCompletionChunk of stream) {
text += chatCompletionChunk.choices[0]?.delta?.content || '';
}
return text;
},
responses: async (
body: ResponseCreateParamsStreaming,
options?: RequestOptions,
) => {
const token = await getToken();
const client = new OpenAI({
baseURL: `${url}/api/v2/llm/proxy/openai/v1`,
apiKey: process.env.FOUNDRY_TOKEN,
});
// Responses API streaming emits semantic events (delta, completed, error, etc.)
const stream = await client.responses.create(
{ ...body, stream: true },
options,
);
let text = '';
for await (const event of stream) {
if (event.type === 'error') {
throw new Error(`OpenAI API error: ${event.code} - ${event.message}`);
}
if (event.type === 'response.output_text.delta') {
text += event.delta ?? '';
}
}
return text;
},
};
}